// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include <linux/bitfield.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

#ifdef CONFIG_X86_32
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);
#endif

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

/*
 * Following exit from Conventional Reset, devices must be ready within 1 sec
 * (PCIe r6.0 sec 6.6.1). A D3cold to D0 transition implies a Conventional
 * Reset (PCIe r6.0 sec 5.8).
 */
#define PCI_RESET_WAIT 1000 /* msec */

/*
 * Devices may extend the 1 sec period through Request Retry Status
 * completions (PCIe r6.0 sec 2.3.1). The spec does not provide an upper
 * limit, but 60 sec ought to be enough for any device to become
 * responsive.
 */
#define PCIE_RESET_READY_POLL_MS 60000 /* msec */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
	unsigned int upper;

	if (delay_ms) {
		/* Use a 20% upper bound, 1ms minimum */
		upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
		usleep_range(delay_ms * USEC_PER_MSEC,
			     (delay_ms + upper) * USEC_PER_MSEC);
	}
}

bool pci_reset_supported(struct pci_dev *dev)
{
	return dev->reset_methods[0] != 0;
}
#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;


/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value. Arch can override either
 * the dfl or actual value as it sees fit. Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
	u16 status;
	int ret;

	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

	status &= PCI_STATUS_ERROR_BITS;
	if (status)
		pci_write_config_word(pdev, PCI_STATUS, status);

	return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
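
/*
 * Illustrative sketch (not part of this file's logic; @pdev and the caller
 * context are assumed): a driver that wants to log and clear stale error
 * bits after a fault might do:
 *
 *	int err = pci_status_get_and_clear_errors(pdev);
 *
 *	if (err > 0 && (err & PCI_STATUS_REC_MASTER_ABORT))
 *		pci_warn(pdev, "master abort recorded\n");
 */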

#ifdef CONFIG_HAS_IOMEM
static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
					    bool write_combine)
{
	struct resource *res = &pdev->resource[bar];
	resource_size_t start = res->start;
	resource_size_t size = resource_size(res);

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}

	if (write_combine)
		return ioremap_wc(start, size);

	return ioremap(start, size);
}

void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, false);
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, true);
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
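
/*
 * Illustrative sketch (assumes a hypothetical probe() with a valid @pdev):
 * map BAR 0 for register access; the helper fails if the BAR is unset or
 * not a memory resource:
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *	if (!regs)
 *		return -ENOMEM;
 */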

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'. Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	unsigned int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address, which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or if kernel parameters change. If the domain is left unspecified, it
 * is taken to be 0. To be robust against bus renumbering, a path of PCI
 * device/function numbers may be used to address the specific device
 * instead. The path for a device can be determined through the use of
 * 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices. (Note: this differs from
 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 * legacy reasons and convenience so users don't have to specify
 * FFFFFFFFs on the command line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
		     subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
		     subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}
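
/*
 * Example strings accepted here, for illustration only (the addresses and
 * IDs below are made up):
 *
 *	0000:01:00.0	- domain 0000, bus 01, device 00, function 0
 *	00:1c.4/00.0	- path form: function 0 of the device below the
 *			  bridge at 00:1c.4 (robust against bus renumbering)
 *	pci:8086:1533	- any device with vendor ID 0x8086, device ID 0x1533
 */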

static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				  u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			      u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
				   unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it. Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM	Power Management
 *  %PCI_CAP_ID_AGP	Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD	Vital Product Data
 *  %PCI_CAP_ID_SLOTID	Slot Identification
 *  %PCI_CAP_ID_MSI	Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP	CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX	PCI-X
 *  %PCI_CAP_ID_EXP	PCI Express
 */
u8 pci_find_capability(struct pci_dev *dev, int cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
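
/*
 * Illustrative sketch (hypothetical caller context, @dev assumed valid):
 * locate the Power Management capability and read its PMC register:
 *
 *	u16 pmc;
 *	u8 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
 *
 *	if (pm)
 *		pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
 */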

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	u8 hdr_type, pos;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it. Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
	u32 header;
	int ttl;
	u16 pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
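
/*
 * Illustrative sketch: because some extended capabilities occur multiple
 * times, a caller can walk every Vendor-Specific Extended Capability by
 * feeding each result back in as @start (hypothetical context):
 *
 *	u16 pos = 0;
 *
 *	while ((pos = pci_find_next_ext_capability(dev, pos,
 *						   PCI_EXT_CAP_ID_VNDR))) {
 *		... inspect the VSEC header at pos ...
 *	}
 */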

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it. Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR	Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC	Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN	Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR	Power Budgeting
 */
u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */
u64 pci_get_dsn(struct pci_dev *dev)
{
	u32 dword;
	u64 dsn;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return 0;

	/*
	 * The Device Serial Number is two dwords offset 4 bytes from the
	 * capability position. The specification says that the first dword is
	 * the lower half, and the second dword is the upper half.
	 */
	pos += 4;
	pci_read_config_dword(dev, pos, &dword);
	dsn = (u64)dword;
	pci_read_config_dword(dev, pos + 4, &dword);
	dsn |= ((u64)dword) << 32;

	return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);
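
/*
 * Illustrative sketch: a driver might use the DSN as a stable identifier
 * that survives bus renumbering (hypothetical caller context):
 *
 *	u64 dsn = pci_get_dsn(pdev);
 *
 *	if (dsn)
 *		pci_info(pdev, "serial number %016llx\n", dsn);
 */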

static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
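
/*
 * Illustrative sketch of the bounded search loop the kernel-doc above
 * advises (the iteration cap of 48 is an arbitrary example value):
 *
 *	int ttl = 48;
 *	u8 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
 *
 *	while (pos && ttl--) {
 *		... handle the capability at pos ...
 *		pos = pci_find_next_ht_capability(dev, pos,
 *						  HT_CAPTYPE_MSI_MAPPING);
 *	}
 */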

/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */
u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_vsec_capability - Find a vendor-specific extended capability
 * @dev: PCI device to query
 * @vendor: Vendor ID for which capability is defined
 * @cap: Vendor-specific capability ID
 *
 * If @dev has Vendor ID @vendor, search for a VSEC capability with
 * VSEC ID @cap. If found, return the capability offset in
 * config space; otherwise return 0.
 */
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
{
	u16 vsec = 0;
	u32 header;
	int ret;

	if (vendor != dev->vendor)
		return 0;

	while ((vsec = pci_find_next_ext_capability(dev, vsec,
						    PCI_EXT_CAP_ID_VNDR))) {
		ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
		if (ret != PCIBIOS_SUCCESSFUL)
			continue;

		if (PCI_VNDR_HEADER_ID(header) == cap)
			return vsec;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_vsec_capability);

/**
 * pci_find_dvsec_capability - Find DVSEC for vendor
 * @dev: PCI device to query
 * @vendor: Vendor ID to match for the DVSEC
 * @dvsec: Designated Vendor-specific capability ID
 *
 * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability
 * offset in config space; otherwise return 0.
 */
u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
{
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
	if (!pos)
		return 0;

	while (pos) {
		u16 v, id;

		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
		if (vendor == v && dvsec == id)
			return pos;

		pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);
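
/*
 * Illustrative sketch (the DVSEC ID 0x5 below is a made-up example value;
 * real callers use the ID defined by the vendor's specification):
 *
 *	u16 pos = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_INTEL, 0x5);
 *
 *	if (pos)
 *		... read implementation-specific registers at pos ...
 */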

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;

	pci_bus_for_each_resource(bus, r) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}
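
/*
 * Illustrative sketch: this helper backs waits such as draining PCIe
 * non-posted requests before a function reset, e.g. (assuming @dev is a
 * PCIe device):
 *
 *	if (!pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
 *				  PCI_EXP_DEVSTA_TRPND))
 *		pci_err(dev, "transactions still pending\n");
 */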

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;

/**
 * pci_disable_acs_redir - disable ACS redirect capabilities
 * @dev: the PCI device
 *
 * For only devices specified in the disable_acs_redir parameter.
 */
static void pci_disable_acs_redir(struct pci_dev *dev)
{
	int ret = 0;
	const char *p;
	int pos;
	u16 ctrl;

	if (!disable_acs_redir_param)
		return;

	p = disable_acs_redir_param;
	while (*p) {
		ret = pci_dev_str_match(dev, p, &p);
		if (ret < 0) {
			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
				     disable_acs_redir_param);

			break;
		} else if (ret == 1) {
			/* Found a match */
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}

	if (ret != 1)
		return;

	if (!pci_dev_specific_disable_acs_redir(dev))
		return;

	pos = dev->acs_cap;
	if (!pos) {
		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
		return;
	}

	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* P2P Request & Completion Redirect */
	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);

	pci_info(dev, "disabled ACS redirect\n");
}

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */
static void pci_std_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	pos = dev->acs_cap;
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	/* Enable Translation Blocking for external devices and noats */
	if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
		ctrl |= (cap & PCI_ACS_TB);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_enable_acs - enable ACS if the hardware supports it
 * @dev: the PCI device
 */
static void pci_enable_acs(struct pci_dev *dev)
{
	if (!pci_acs_enable)
		goto disable_acs_redir;

	if (!pci_dev_specific_enable_acs(dev))
		goto disable_acs_redir;

	pci_std_enable_acs(dev);

disable_acs_redir:
	/*
	 * Note: pci_disable_acs_redir() must be called even if ACS was not
	 * enabled by the kernel because it may have been enabled by
	 * platform firmware. So if we are told to disable it, we should
	 * always disable it after setting the kernel's default
	 * preferences.
	 */
	pci_disable_acs_redir(dev);
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return true;

	return acpi_pci_power_manageable(dev);
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	if (pci_use_mid_pm())
		return mid_pci_set_power_state(dev, t);

	return acpi_pci_set_power_state(dev, t);
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return mid_pci_get_power_state(dev);

	return acpi_pci_get_power_state(dev);
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (!pci_use_mid_pm())
		acpi_pci_refresh_power_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return PCI_POWER_ERROR;

	return acpi_pci_choose_state(dev);
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	if (pci_use_mid_pm())
		return PCI_POWER_ERROR;

	return acpi_pci_wakeup(dev, enable);
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return false;

	return acpi_pci_need_resume(dev);
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return false;

	return acpi_pci_bridge_d3(dev);
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold. The platform firmware is therefore queried first
 * to detect accessibility of the register. In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		if (PCI_POSSIBLE_ERROR(pmcsr)) {
			dev->current_state = PCI_D3cold;
			return;
		}
		dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	platform_pci_refresh_power_state(dev);
	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	error = platform_pci_set_power_state(dev, state);
	if (!error)
		pci_update_current_state(dev, state);
	else if (!dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
{
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_resume_bus - Walk given bus and runtime resume devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_resume_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_resume_one, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	bool retrain = false;
	struct pci_dev *bridge;

	if (pci_is_pcie(dev)) {
		bridge = pci_upstream_bridge(dev);
		if (bridge)
			retrain = true;
	}

	/*
	 * After reset, the device should not silently discard config
	 * requests, but it may still indicate that it needs more time by
	 * responding to them with CRS completions. The Root Port will
	 * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete
	 * the read (except when CRS SV is enabled and the read was for the
	 * Vendor ID; in that case it synthesizes 0x0001 data).
	 *
	 * Wait for the device to return a non-CRS completion. Read the
	 * Command register instead of Vendor ID so we don't have to
	 * contend with the CRS SV value.
	 */
	for (;;) {
		u32 id;

		pci_read_config_dword(dev, PCI_COMMAND, &id);
		if (!PCI_POSSIBLE_ERROR(id))
			break;

		if (delay > timeout) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		if (delay > PCI_RESET_WAIT) {
			if (retrain) {
				retrain = false;
				if (pcie_failed_link_retrain(bridge)) {
					delay = 1;
					continue;
				}
			}
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);
		}

		msleep(delay);
		delay *= 2;
	}

	if (delay > PCI_RESET_WAIT)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);

	return 0;
}

/**
 * pci_power_up - Put the given device into D0
 * @dev: PCI device to power up
 *
 * On success, return 0 or 1, depending on whether or not it is necessary to
 * restore the device's BARs subsequently (1 is returned in that case).
 *
 * On failure, return a negative error code. Always return failure if @dev
 * lacks a Power Management Capability, even if the platform was able to
 * put the device in D0 via non-PCI means.
 */
int pci_power_up(struct pci_dev *dev)
{
	bool need_restore;
	pci_power_t state;
	u16 pmcsr;

	platform_pci_set_power_state(dev, PCI_D0);

	if (!dev->pm_cap) {
		state = platform_pci_get_power_state(dev);
		if (state == PCI_UNKNOWN)
			dev->current_state = PCI_D0;
		else
			dev->current_state = state;

		return -EIO;
	}

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (PCI_POSSIBLE_ERROR(pmcsr)) {
		pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
			pci_power_name(dev->current_state));
		dev->current_state = PCI_D3cold;
		return -EIO;
	}

	state = pmcsr & PCI_PM_CTRL_STATE_MASK;

	need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) &&
			!(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);

	if (state == PCI_D0)
		goto end;

	/*
	 * Force the entire word to 0. This doesn't affect PME_Status, disables
	 * PME_En, and sets PowerState to 0.
	 */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0);

	/* Mandatory transition delays; see PCI PM 1.2. */
	if (state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

end:
	dev->current_state = PCI_D0;
	if (need_restore)
		return 1;

	return 0;
}

/**
 * pci_set_full_power_state - Put a PCI device into D0 and update its state
 * @dev: PCI device to power up
 * @locked: whether pci_bus_sem is held
 *
 * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
 * to confirm the state change, restore its BARs if they might be lost and
 * reconfigure ASPM in accordance with the new power state.
 *
 * If pci_restore_state() is going to be called right after a power state change
 * to D0, it is more efficient to use pci_power_up() directly instead of this
 * function.
 */
static int pci_set_full_power_state(struct pci_dev *dev, bool locked)
{
	u16 pmcsr;
	int ret;

	ret = pci_power_up(dev);
	if (ret < 0) {
		if (dev->current_state == PCI_D0)
			return 0;

		return ret;
	}

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	if (dev->current_state != PCI_D0) {
		pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n",
				     pci_power_name(dev->current_state));
	} else if (ret > 0) {
		/*
		 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
		 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
		 * from D3hot to D0 _may_ perform an internal reset, thereby
		 * going to "D0 Uninitialized" rather than "D0 Initialized".
		 * For example, at least some versions of the 3c905B and the
		 * 3c556B exhibit this behaviour.
		 *
		 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
		 * devices in a D3hot state at boot. Consequently, we need to
		 * restore at least the BARs so that the device will be
		 * accessible to its driver.
		 */
		pci_restore_bars(dev);
	}

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self, locked);

	return 0;
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked)
{
	if (!bus)
		return;

	if (locked)
		pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state);
	else
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * pci_set_low_power_state - Put a PCI device into a low-power state.
 * @dev: PCI device to handle.
 * @state: PCI power state (D1, D2, D3hot) to put the device into.
 * @locked: whether pci_bus_sem is held
 *
 * Use the device's PCI_PM_CTRL register to put it into a low-power state.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return -EIO;

	/*
	 * Validate transition: We can enter D0 from any state, but if
	 * we're already in a low-power state, we can only go deeper. E.g.,
	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
	 * we'd have to go from D3 to D0, then to D1.
	 */
	if (dev->current_state <= PCI_D3cold && dev->current_state > state) {
		pci_dbg(dev, "Invalid power transition (from %s to %s)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	    || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (PCI_POSSIBLE_ERROR(pmcsr)) {
		pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		dev->current_state = PCI_D3cold;
		return -EIO;
	}

	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pmcsr |= state;

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays; see PCI PM 1.2. */
	if (state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n",
				     pci_power_name(dev->current_state),
				     pci_power_name(state));

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self, locked);

	return 0;
}

static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (state == PCI_D0)
		return pci_set_full_power_state(dev, locked);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	if (state == PCI_D3cold) {
		/*
		 * To put the device in D3cold, put it into D3hot in the native
		 * way, then put it into D3cold using platform ops.
		 */
		error = pci_set_low_power_state(dev, PCI_D3hot, locked);

		if (pci_platform_power_transition(dev, PCI_D3cold))
			return error;

		/* Powering off a bridge may power off the whole hierarchy */
		if (dev->current_state == PCI_D3cold)
			__pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked);
	} else {
		error = pci_set_low_power_state(dev, state, locked);

		if (pci_platform_power_transition(dev, state))
			return error;
	}

	return 0;
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	return __pci_set_power_state(dev, state, false);
}
EXPORT_SYMBOL(pci_set_power_state);
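
/*
 * Illustrative sketch: a driver putting an idle device into D3hot, noting
 * that several "state not supported" cases documented above return 0 and
 * so count as success (hypothetical caller context):
 *
 *	int ret = pci_set_power_state(pdev, PCI_D3hot);
 *
 *	if (ret)
 *		pci_warn(pdev, "cannot enter D3hot: %d\n", ret);
 */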

int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
{
	lockdep_assert_held(&pci_bus_sem);

	return __pci_set_power_state(dev, state, true);
}
EXPORT_SYMBOL(pci_set_power_state_locked);

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
							u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

void pci_bridge_reconfigure_ltr(struct pci_dev *dev)
{
#ifdef CONFIG_PCIEASPM
	struct pci_dev *bridge;
	u32 ctl;

	bridge = pci_upstream_bridge(dev);
	if (bridge && bridge->ltr_path) {
		pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
		if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
			pci_dbg(bridge, "re-enabling LTR\n");
			pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
						 PCI_EXP_DEVCTL2_LTR_EN);
		}
	}
#endif
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	/*
	 * Downstream ports reset the LTR enable bit when link goes down.
	 * Check and re-configure the bit here before restoring device.
	 * PCIe r5.0, sec 7.5.3.16.
	 */
	pci_bridge_reconfigure_ltr(dev);

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

static void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	/* Some broken devices only support dword access to LTR */
	cap = &save_state->cap.data[0];
	pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
}

static void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u32 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	/* Some broken devices only support dword access to LTR */
	cap = &save_state->cap.data[0];
	pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
}
1712
1713 /**
1714 * pci_save_state - save the PCI configuration space of a device before
1715 * suspending
1716 * @dev: PCI device that we're dealing with
1717 */
pci_save_state(struct pci_dev * dev)1718 int pci_save_state(struct pci_dev *dev)
1719 {
1720 int i;
1721 /* XXX: 100% dword access ok here? */
1722 for (i = 0; i < 16; i++) {
1723 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1724 pci_dbg(dev, "save config %#04x: %#010x\n",
1725 i * 4, dev->saved_config_space[i]);
1726 }
1727 dev->state_saved = true;
1728
1729 i = pci_save_pcie_state(dev);
1730 if (i != 0)
1731 return i;
1732
1733 i = pci_save_pcix_state(dev);
1734 if (i != 0)
1735 return i;
1736
1737 pci_save_ltr_state(dev);
1738 pci_save_dpc_state(dev);
1739 pci_save_aer_state(dev);
1740 pci_save_ptm_state(dev);
1741 return pci_save_vc_state(dev);
1742 }
1743 EXPORT_SYMBOL(pci_save_state);
1744
pci_restore_config_dword(struct pci_dev * pdev,int offset,u32 saved_val,int retry,bool force)1745 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1746 u32 saved_val, int retry, bool force)
1747 {
1748 u32 val;
1749
1750 pci_read_config_dword(pdev, offset, &val);
1751 if (!force && val == saved_val)
1752 return;
1753
1754 for (;;) {
1755 pci_dbg(pdev, "restore config %#04x: %#010x -> %#010x\n",
1756 offset, val, saved_val);
1757 pci_write_config_dword(pdev, offset, saved_val);
1758 if (retry-- <= 0)
1759 return;
1760
1761 pci_read_config_dword(pdev, offset, &val);
1762 if (val == saved_val)
1763 return;
1764
1765 mdelay(1);
1766 }
1767 }
1768
pci_restore_config_space_range(struct pci_dev * pdev,int start,int end,int retry,bool force)1769 static void pci_restore_config_space_range(struct pci_dev *pdev,
1770 int start, int end, int retry,
1771 bool force)
1772 {
1773 int index;
1774
1775 for (index = end; index >= start; index--)
1776 pci_restore_config_dword(pdev, 4 * index,
1777 pdev->saved_config_space[index],
1778 retry, force);
1779 }
1780
pci_restore_config_space(struct pci_dev * pdev)1781 static void pci_restore_config_space(struct pci_dev *pdev)
1782 {
1783 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1784 pci_restore_config_space_range(pdev, 10, 15, 0, false);
1785 /* Restore BARs before the command register. */
1786 pci_restore_config_space_range(pdev, 4, 9, 10, false);
1787 pci_restore_config_space_range(pdev, 0, 3, 0, false);
1788 } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1789 pci_restore_config_space_range(pdev, 12, 15, 0, false);
1790
1791 /*
1792 * Force rewriting of prefetch registers to avoid S3 resume
1793 * issues on Intel PCI bridges that occur when these
1794 * registers are not explicitly written.
1795 */
1796 pci_restore_config_space_range(pdev, 9, 11, 0, true);
1797 pci_restore_config_space_range(pdev, 0, 8, 0, false);
1798 } else {
1799 pci_restore_config_space_range(pdev, 0, 15, 0, false);
1800 }
1801 }
1802
pci_restore_rebar_state(struct pci_dev * pdev)1803 static void pci_restore_rebar_state(struct pci_dev *pdev)
1804 {
1805 unsigned int pos, nbars, i;
1806 u32 ctrl;
1807
1808 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
1809 if (!pos)
1810 return;
1811
1812 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1813 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
1814 PCI_REBAR_CTRL_NBAR_SHIFT;
1815
1816 for (i = 0; i < nbars; i++, pos += 8) {
1817 struct resource *res;
1818 int bar_idx, size;
1819
1820 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1821 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1822 res = pdev->resource + bar_idx;
1823 size = pci_rebar_bytes_to_size(resource_size(res));
1824 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1825 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
1826 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1827 }
1828 }
1829
1830 /**
1831 * pci_restore_state - Restore the saved state of a PCI device
1832 * @dev: PCI device that we're dealing with
1833 */
pci_restore_state(struct pci_dev * dev)1834 void pci_restore_state(struct pci_dev *dev)
1835 {
1836 if (!dev->state_saved)
1837 return;
1838
1839 /*
1840 * Restore max latencies (in the LTR capability) before enabling
1841 * LTR itself (in the PCIe capability).
1842 */
1843 pci_restore_ltr_state(dev);
1844
1845 pci_restore_pcie_state(dev);
1846 pci_restore_pasid_state(dev);
1847 pci_restore_pri_state(dev);
1848 pci_restore_ats_state(dev);
1849 pci_restore_vc_state(dev);
1850 pci_restore_rebar_state(dev);
1851 pci_restore_dpc_state(dev);
1852 pci_restore_ptm_state(dev);
1853
1854 pci_aer_clear_status(dev);
1855 pci_restore_aer_state(dev);
1856
1857 pci_restore_config_space(dev);
1858
1859 pci_restore_pcix_state(dev);
1860 pci_restore_msi_state(dev);
1861
1862 /* Restore ACS and IOV configuration state */
1863 pci_enable_acs(dev);
1864 pci_restore_iov_state(dev);
1865
1866 dev->state_saved = false;
1867 }
1868 EXPORT_SYMBOL(pci_restore_state);
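
/*
 * Example (illustrative sketch, not part of this file): a typical driver
 * suspend/resume pair built on pci_save_state()/pci_restore_state(). The
 * foo_* names are hypothetical; modern drivers often let the PCI core's
 * dev_pm_ops do this for them.
 *
 *	static int __maybe_unused foo_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		pci_save_state(pdev);
 *		return pci_set_power_state(pdev, PCI_D3hot);
 *	}
 *
 *	static int __maybe_unused foo_resume(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *		int ret = pci_set_power_state(pdev, PCI_D0);
 *
 *		if (ret)
 *			return ret;
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */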

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
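
/*
 * Example (illustrative sketch): preserving device state across an episode
 * in which the saved state may be clobbered (callers such as VFIO use this
 * pair around guest ownership of a device). pdev is a hypothetical
 * struct pci_dev pointer.
 *
 *	struct pci_saved_state *saved;
 *
 *	pci_save_state(pdev);
 *	saved = pci_store_saved_state(pdev);
 *	// ... device is used, reset, etc. ...
 *	pci_load_and_free_saved_state(pdev, &saved);
 *	pci_restore_state(pdev);
 */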

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * NOTE: This function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code; write a proper resume handler and use that
 * instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

static void pci_enable_bridge(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	int retval;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	if (pci_is_enabled(dev)) {
		if (!dev->is_busmaster)
			pci_set_master(dev);
		return;
	}

	retval = pci_enable_device(dev);
	if (retval)
		pci_err(dev, "Error enabling bridge (%d), continuing\n",
			retval);
	pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	struct pci_dev *bridge;
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call. So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	pci_update_current_state(dev, dev->current_state);

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	/* only skip SR-IOV related resources */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);
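
/*
 * Example (illustrative sketch): the classic unmanaged enable/teardown
 * pattern in a probe routine. foo_probe() and the "foo" region name are
 * hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc;
 *
 *		rc = pci_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		rc = pci_request_regions(pdev, "foo");
 *		if (rc)
 *			goto err_disable;
 *
 *		pci_set_master(pdev);
 *		return 0;
 *
 *	err_disable:
 *		pci_disable_device(pdev);
 *		return rc;
 *	}
 */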

/*
 * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
 * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
 * there's no need to track it separately. pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	unsigned int mwi:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pci_devres *this = res;
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->mwi)
		pci_clear_mwi(dev);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);
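
/*
 * Example (illustrative sketch): with the managed variant the explicit
 * error unwinding and remove()-time teardown above go away, since
 * pcim_release() undoes everything when the driver detaches. foo_probe()
 * is hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */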

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev. Pinned device won't be disabled on
 * driver detach. @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);

/**
 * pcibios_device_add - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_device_add(struct pci_dev *dev)
{
	return 0;
}

/**
 * pcibios_release_device - provide arch specific hooks when releasing
 *			    device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_release_device(struct pci_dev *dev) {}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

/**
 * pcibios_penalize_isa_irq - penalize an ISA IRQ
 * @irq: ISA IRQ to penalize
 * @active: IRQ active or not
 *
 * Permits the platform to provide architecture-specific functionality when
 * penalizing ISA IRQs. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_penalize_isa_irq(int irq, int active) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore. This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Set the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
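
/*
 * Example (illustrative sketch): an error-recovery path pulsing a warm
 * reset through the arch hook. This only does something on platforms that
 * implement pcibios_set_pcie_reset_state() (powerpc EEH, for instance);
 * elsewhere it returns -EINVAL. The 100 ms settle time is made up.
 *
 *	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
 *	msleep(100);
 *	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
 */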

#ifdef CONFIG_PCIEAER
void pcie_clear_device_status(struct pci_dev *dev)
{
	u16 sta;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
}
#endif

/**
 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
 * @dev: PCIe root port or event collector.
 */
void pcie_clear_root_pme_status(struct pci_dev *dev)
{
	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set). Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
EXPORT_SYMBOL(pci_pme_capable);
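
/*
 * Example (illustrative sketch): per the requirement documented on
 * pci_pme_active() below, a caller checks capability for the target state
 * before turning PME# generation on:
 *
 *	if (pci_pme_capable(pdev, PCI_D3hot))
 *		pci_pme_active(pdev, true);
 */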

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
		struct pci_dev *pdev = pme_dev->dev;

		if (pdev->pme_poll) {
			struct pci_dev *bridge = pdev->bus->self;
			struct device *dev = &pdev->dev;
			struct device *bdev = bridge ? &bridge->dev : NULL;
			int bref = 0;

			/*
			 * If we have a bridge, it should be in an active/D0
			 * state or the configuration space of subordinate
			 * devices may not be accessible or stable over the
			 * course of the call.
			 */
			if (bdev) {
				bref = pm_runtime_get_if_active(bdev, true);
				if (!bref)
					continue;

				if (bridge->current_state != PCI_D0)
					goto put_bridge;
			}

			/*
			 * The device itself should be suspended but config
			 * space must be accessible, therefore it cannot be in
			 * D3cold.
			 */
			if (pm_runtime_suspended(dev) &&
			    pdev->current_state != PCI_D3cold)
				pci_pme_wakeup(pdev, NULL);

put_bridge:
			if (bref > 0)
				pm_runtime_put(bdev);
		} else {
			list_del(&pme_dev->list);
			kfree(pme_dev);
		}
	}
	if (!list_empty(&pci_pme_list))
		queue_delayed_work(system_freezable_wq, &pci_pme_work,
				   msecs_to_jiffies(PME_TIMEOUT));
	mutex_unlock(&pci_pme_list_mutex);
}

static void __pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_restore - Restore PME configuration after config space restore.
 * @dev: PCI device to update.
 */
void pci_pme_restore(struct pci_dev *dev)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (dev->wakeup_prepared) {
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
	} else {
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		pmcsr |= PCI_PM_CTRL_PME_STATUS;
	}
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	__pci_pme_active(dev, enable);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep. The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set. The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 *
	 * Although PCIe uses in-band PME messages instead of a PME# line
	 * to report PME, PME does not work for some PCIe devices in
	 * reality. For example, there are devices that set their PME
	 * status bits, but don't really bother to send a PME message;
	 * there are PCI Express Root Ports that don't bother to
	 * trigger interrupts when they receive PME messages from the
	 * devices below. So PME polling is used for PCIe devices too.
	 */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;

		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev) {
				pci_warn(dev, "can't enable PME#\n");
				return;
			}
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				queue_delayed_work(system_freezable_wq,
						   &pci_pme_work,
						   msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
EXPORT_SYMBOL(pci_pme_active);

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success.
 * -EINVAL is returned if the device is not supposed to wake up the system.
 * A platform-dependent error code is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events.
 */
static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
	int ret = 0;

	/*
	 * Bridges that are not power-manageable directly only signal
	 * wakeup on behalf of subordinate devices which is set up
	 * elsewhere, so skip them. However, bridges that are
	 * power-manageable may signal wakeup for themselves (for example,
	 * on a hotplug event) and they need to be covered here.
	 */
	if (!pci_power_manageable(dev))
		return 0;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable. To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		/*
		 * Enable PME signaling if the device can signal PME from
		 * D3cold regardless of whether or not it can signal PME from
		 * the current target state, because that will allow it to
		 * signal PME when the hierarchy above it goes into D3cold and
		 * the device itself ends up in D3cold as a result of that.
		 */
		if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = platform_pci_set_wakeup(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		platform_pci_set_wakeup(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}

/**
 * pci_enable_wake - change wakeup settings for a PCI device
 * @pci_dev: Target device
 * @state: PCI state from which device will issue wakeup events
 * @enable: Whether or not to enable event generation
 *
 * If @enable is set, check device_may_wakeup() for the device before calling
 * __pci_enable_wake() for it.
 */
int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
{
	if (enable && !device_may_wakeup(&pci_dev->dev))
		return -EINVAL;

	return __pci_enable_wake(pci_dev, state, enable);
}
EXPORT_SYMBOL(pci_enable_wake);

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns an error code if the device is not allowed to
 * wake up the system from sleep, or it is not capable of generating PME# from
 * both D3_hot and D3_cold and the platform is unable to enable wake-up power
 * for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
EXPORT_SYMBOL(pci_wake_from_d3);
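
/*
 * Example (illustrative sketch): a network driver arming Wake-on-LAN in
 * its suspend path. foo_suspend() and wol_enabled are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		device_set_wakeup_enable(dev, wol_enabled);
 *		pci_wake_from_d3(pdev, wol_enabled);
 *		return pci_set_power_state(pdev, PCI_D3hot);
 *	}
 */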

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
{
	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to find the target state for the device.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			return PCI_D3hot;

		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				return PCI_D3hot;
		}

		return state;
	}

	/*
	 * If the device is in D3cold even though it's not power-manageable by
	 * the platform, it may have been powered down by non-standard means.
	 * Best to let it slumber.
	 */
	if (dev->current_state == PCI_D3cold)
		return PCI_D3cold;
	else if (!dev->pm_cap)
		return PCI_D0;

	if (wakeup && dev->pme_support) {
		pci_power_t state = PCI_D3hot;

		/*
		 * Find the deepest state from which the device can generate
		 * PME#.
		 */
		while (state && !(dev->pme_support & (1 << state)))
			state--;

		if (state)
			return state;
		else if (dev->pme_support & 1)
			return PCI_D0;
	}

	return PCI_D3hot;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
 *			  into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	bool wakeup = device_may_wakeup(&dev->dev);
	pci_power_t target_state = pci_target_state(dev, wakeup);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, wakeup);

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}
EXPORT_SYMBOL(pci_prepare_to_sleep);

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition
 *			 into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	int ret = pci_set_power_state(dev, PCI_D0);

	if (ret)
		return ret;

	pci_enable_wake(dev, PCI_D0, false);
	return 0;
}
EXPORT_SYMBOL(pci_back_from_sleep);

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state;
	int error;

	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (!dev->pme_support)
		return false;

	/* PME-capable in principle, but not from the target power state */
	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
		return false;

	if (device_can_wakeup(&dev->dev))
		return true;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_can_wakeup(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_can_wakeup(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);

/**
 * pci_dev_need_resume - Check if it is necessary to resume the device.
 * @pci_dev: Device to check.
 *
 * Return 'true' if the device is not runtime-suspended, if it has to be
 * reconfigured due to a wakeup settings difference between system and runtime
 * suspend, or if its current power state is not suitable for the upcoming
 * (system-wide) transition.
 */
bool pci_dev_need_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;
	pci_power_t target_state;

	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
		return true;

	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));

	/*
	 * If the earlier platform check has not triggered, D3cold is just power
	 * removal on top of D3hot, so no need to resume the device in that
	 * case.
	 */
	return target_state != pci_dev->current_state &&
	       target_state != PCI_D3cold &&
	       pci_dev->current_state != PCI_D3hot;
}

/**
 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
 * @pci_dev: Device to check.
 *
 * If the device is suspended and it is not configured for system wakeup,
 * disable PME for it to prevent it from waking up the system unnecessarily.
 *
 * Note that if the device's power state is D3cold and the platform check in
 * pci_dev_need_resume() has not triggered, the device's configuration need not
 * be changed.
 */
void pci_dev_adjust_pme(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
	    pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, false);

	spin_unlock_irq(&dev->power.lock);
}

/**
 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
 * @pci_dev: Device to handle.
 *
 * If the device is runtime suspended and wakeup-capable, enable PME for it as
 * it might have been disabled during the prepare phase of system suspend if
 * the device was not configured for system wakeup.
 */
void pci_dev_complete_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	if (!pci_dev_run_wake(pci_dev))
		return;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, true);

	spin_unlock_irq(&dev->power.lock);
}

/**
 * pci_choose_state - Choose the power state of a PCI device.
 * @dev: Target PCI device.
 * @state: Target state for the whole system.
 *
 * Returns PCI power state suitable for @dev and @state.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	if (state.event == PM_EVENT_ON)
		return PCI_D0;

	return pci_target_state(dev, false);
}
EXPORT_SYMBOL(pci_choose_state);
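
/*
 * Example (illustrative sketch): a legacy .suspend() hook that still takes
 * a pm_message_t typically pairs this with pci_set_power_state().
 * foo_suspend() is hypothetical.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		return pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *	}
 */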

void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);
	/*
	 * pdev->current_state is set to PCI_D3cold during suspending,
	 * so wait until suspending completes
	 */
	pm_runtime_barrier(dev);
	/*
	 * Only need to resume devices in D3cold, because config
	 * registers are still accessible for devices suspended but
	 * not in D3cold.
	 */
	if (pdev->current_state == PCI_D3cold)
		pm_runtime_resume(dev);
}

void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	pm_runtime_put(dev);
	if (parent)
		pm_runtime_put_sync(parent);
}

static const struct dmi_system_id bridge_d3_blacklist[] = {
#ifdef CONFIG_X86
	{
		/*
		 * Gigabyte X299 root port is not marked as hotplug capable
		 * which allows Linux to power manage it. However, this
		 * confuses the BIOS SMI handler so don't power manage root
		 * ports on that system.
		 */
		.ident = "X299 DESIGNARE EX-CF",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
		},
	},
	{
		/*
		 * Downstream device is not accessible after putting a root port
		 * into D3cold and back into D0 on Elo Continental Z2 board
		 */
		.ident = "Elo Continental Z2",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"),
			DMI_MATCH(DMI_BOARD_NAME, "Geminilake"),
			DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
		},
	},
#endif
	{ }
};

/**
 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
 * @bridge: Bridge to check
 *
 * This function checks if it is possible to move the bridge to D3.
 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
 */
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
	if (!pci_is_pcie(bridge))
		return false;

	switch (pci_pcie_type(bridge)) {
	case PCI_EXP_TYPE_ROOT_PORT:
	case PCI_EXP_TYPE_UPSTREAM:
	case PCI_EXP_TYPE_DOWNSTREAM:
		if (pci_bridge_d3_disable)
			return false;

		/*
		 * Hotplug ports handled by firmware in System Management Mode
		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
		 */
		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
			return false;

		if (pci_bridge_d3_force)
			return true;

		/* Even the oldest 2010 Thunderbolt controller supports D3. */
		if (bridge->is_thunderbolt)
			return true;

		/* Platform might know better if the bridge supports D3 */
		if (platform_pci_bridge_d3(bridge))
			return true;

		/*
		 * Hotplug ports handled natively by the OS were not validated
		 * by vendors for runtime D3 at least until 2018 because there
		 * was no OS support.
		 */
		if (bridge->is_hotplug_bridge)
			return false;

		if (dmi_check_system(bridge_d3_blacklist))
			return false;

		/*
		 * It should be safe to put PCIe ports from 2015 or newer
		 * to D3.
		 */
		if (dmi_get_bios_year() >= 2015)
			return true;
		break;
	}

	return false;
}

static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
{
	bool *d3cold_ok = data;

	if (/* The device needs to be allowed to go D3cold ... */
	    dev->no_d3cold || !dev->d3cold_allowed ||

	    /* ... and if it is wakeup capable to do so from D3cold. */
	    (device_may_wakeup(&dev->dev) &&
	     !pci_pme_capable(dev, PCI_D3cold)) ||

	    /* If it is a bridge it must be allowed to go to D3. */
	    !pci_power_manageable(dev))

		*d3cold_ok = false;

	return !*d3cold_ok;
}

/**
 * pci_bridge_d3_update - Update bridge D3 capabilities
 * @dev: PCI device which is changed
 *
 * Update upstream bridge PM capabilities accordingly depending on if the
 * device PM configuration was changed or the device is being removed. The
 * change is also propagated upstream.
 */
void pci_bridge_d3_update(struct pci_dev *dev)
{
	bool remove = !device_is_registered(&dev->dev);
	struct pci_dev *bridge;
	bool d3cold_ok = true;

	bridge = pci_upstream_bridge(dev);
	if (!bridge || !pci_bridge_d3_possible(bridge))
		return;

	/*
	 * If D3 is currently allowed for the bridge, removing one of its
	 * children won't change that.
	 */
	if (remove && bridge->bridge_d3)
		return;

	/*
	 * If D3 is currently allowed for the bridge and a child is added or
	 * changed, disallowance of D3 can only be caused by that child, so
	 * we only need to check that single device, not any of its siblings.
	 *
	 * If D3 is currently not allowed for the bridge, checking the device
	 * first may allow us to skip checking its siblings.
	 */
	if (!remove)
		pci_dev_check_d3cold(dev, &d3cold_ok);

	/*
	 * If D3 is currently not allowed for the bridge, this may be caused
	 * either by the device being changed/removed or any of its siblings,
	 * so we need to go through all children to find out if one of them
	 * continues to block D3.
	 */
	if (d3cold_ok && !bridge->bridge_d3)
		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
			     &d3cold_ok);

	if (bridge->bridge_d3 != d3cold_ok) {
		bridge->bridge_d3 = d3cold_ok;
		/* Propagate change to upstream bridges */
		pci_bridge_d3_update(bridge);
	}
}

/**
 * pci_d3cold_enable - Enable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to enable D3cold from the device
 * they handle. It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_enable(struct pci_dev *dev)
{
	if (dev->no_d3cold) {
		dev->no_d3cold = false;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_enable);

/**
 * pci_d3cold_disable - Disable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to disable D3cold from the device
 * they handle. It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_disable(struct pci_dev *dev)
{
	if (!dev->no_d3cold) {
		dev->no_d3cold = true;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);
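
/*
 * Example (illustrative sketch): a driver that knows its device cannot
 * survive D3cold can veto it as early as probe; the upstream bridge
 * hierarchy is re-evaluated automatically:
 *
 *	pci_d3cold_disable(pdev);
 */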

/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 status;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	pm_runtime_set_active(&dev->dev);
	pm_runtime_enable(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;
	dev->pme_support = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		pci_err(dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
	dev->bridge_d3 = pci_bridge_d3_possible(dev);
	dev->d3cold_allowed = true;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			pci_info(dev, "supports%s%s\n",
				 dev->d1_support ? " D1" : "",
				 dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	}

	pci_read_config_word(dev, PCI_STATUS, &status);
	if (status & PCI_STATUS_IMM_READY)
		dev->imm_ready = 1;
}

static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
{
	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;

	switch (prop) {
	case PCI_EA_P_MEM:
	case PCI_EA_P_VF_MEM:
		flags |= IORESOURCE_MEM;
		break;
	case PCI_EA_P_MEM_PREFETCH:
	case PCI_EA_P_VF_MEM_PREFETCH:
		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
		break;
	case PCI_EA_P_IO:
		flags |= IORESOURCE_IO;
		break;
	default:
		return 0;
	}

	return flags;
}

static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
					    u8 prop)
{
	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
		return &dev->resource[bei];
#ifdef CONFIG_PCI_IOV
	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
		return &dev->resource[PCI_IOV_RESOURCES +
				      bei - PCI_EA_BEI_VF_BAR0];
#endif
	else if (bei == PCI_EA_BEI_ROM)
		return &dev->resource[PCI_ROM_RESOURCE];
	else
		return NULL;
}

/* Read an Enhanced Allocation (EA) entry */
static int pci_ea_read(struct pci_dev *dev, int offset)
{
	struct resource *res;
	int ent_size, ent_offset = offset;
	resource_size_t start, end;
	unsigned long flags;
	u32 dw0, bei, base, max_offset;
	u8 prop;
	bool support_64 = (sizeof(resource_size_t) >= 8);

	pci_read_config_dword(dev, ent_offset, &dw0);
	ent_offset += 4;

	/* Entry size field indicates DWORDs after 1st */
	ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;

	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
		goto out;

	bei = (dw0 & PCI_EA_BEI) >> 4;
	prop = (dw0 & PCI_EA_PP) >> 8;

	/*
	 * If the Property is in the reserved range, try the Secondary
	 * Property instead.
	 */
	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
		prop = (dw0 & PCI_EA_SP) >> 16;
	if (prop > PCI_EA_P_BRIDGE_IO)
		goto out;

	res = pci_ea_get_resource(dev, bei, prop);
	if (!res) {
		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
		goto out;
	}

	flags = pci_ea_flags(dev, prop);
	if (!flags) {
		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
		goto out;
	}

	/* Read Base */
	pci_read_config_dword(dev, ent_offset, &base);
	start = (base & PCI_EA_FIELD_MASK);
	ent_offset += 4;

	/* Read MaxOffset */
	pci_read_config_dword(dev, ent_offset, &max_offset);
	ent_offset += 4;

	/* Read Base MSBs (if 64-bit entry) */
	if (base & PCI_EA_IS_64) {
		u32 base_upper;

		pci_read_config_dword(dev, ent_offset, &base_upper);
		ent_offset += 4;

		flags |= IORESOURCE_MEM_64;

		/* entry starts above 32-bit boundary, can't use */
		if (!support_64 && base_upper)
			goto out;

		if (support_64)
			start |= ((u64)base_upper << 32);
	}

	end = start + (max_offset | 0x03);

	/* Read MaxOffset MSBs (if 64-bit entry) */
	if (max_offset & PCI_EA_IS_64) {
		u32 max_offset_upper;

		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
		ent_offset += 4;

		flags |= IORESOURCE_MEM_64;

		/* entry too big, can't use */
		if (!support_64 && max_offset_upper)
			goto out;

		if (support_64)
			end += ((u64)max_offset_upper << 32);
	}

	if (end < start) {
		pci_err(dev, "EA Entry crosses address boundary\n");
		goto out;
	}

	if (ent_size != ent_offset - offset) {
		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
			ent_size, ent_offset - offset);
		goto out;
	}

	res->name = pci_name(dev);
	res->start = start;
	res->end = end;
	res->flags = flags;

	if (bei <= PCI_EA_BEI_BAR5)
		pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
			 bei, res, prop);
	else if (bei == PCI_EA_BEI_ROM)
		pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
			 res, prop);
	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
		pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
			 bei - PCI_EA_BEI_VF_BAR0, res, prop);
	else
		pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
			 bei, res, prop);

out:
	return offset + ent_size;
}

/* Enhanced Allocation Initialization */
void pci_ea_init(struct pci_dev *dev)
{
	int ea;
	u8 num_ent;
	int offset;
	int i;

	/* find PCI EA capability in list */
	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
	if (!ea)
		return;

	/* determine the number of entries */
	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
				 &num_ent);
	num_ent &= PCI_EA_NUM_ENT_MASK;

	offset = ea + PCI_EA_FIRST_ENT;

	/* Skip DWORD 2 for type 1 functions */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		offset += 4;

	/* parse each EA entry */
	for (i = 0; i < num_ent; ++i)
		offset = pci_ea_read(dev, offset);
}

static void pci_add_saved_cap(struct pci_dev *pci_dev,
			      struct pci_cap_saved_state *new_cap)
{
	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
}

/**
 * _pci_add_cap_save_buffer - allocate buffer for saving given
 *			      capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @extended: Standard or Extended capability ID
 * @size: requested size of the buffer
 */
static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
				    bool extended, unsigned int size)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	if (extended)
		pos = pci_find_ext_capability(dev, cap);
	else
		pos = pci_find_capability(dev, cap);

	if (!pos)
		return 0;

	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
	if (!save_state)
		return -ENOMEM;

	save_state->cap.cap_nr = cap;
	save_state->cap.cap_extended = extended;
	save_state->cap.size = size;
	pci_add_saved_cap(dev, save_state);

	return 0;
}

int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
{
	return _pci_add_cap_save_buffer(dev, cap, false, size);
}

int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
{
	return _pci_add_cap_save_buffer(dev, cap, true, size);
}
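
/*
 * Example (illustrative sketch): a quirk or driver that needs extra
 * registers preserved across suspend could preallocate a buffer for them,
 * say for a vendor-specific capability (the size here is made up):
 *
 *	pci_add_cap_save_buffer(dev, PCI_CAP_ID_VNDR, 2 * sizeof(u32));
 */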
3529
3530 /**
3531 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3532 * @dev: the PCI device
3533 */
pci_allocate_cap_save_buffers(struct pci_dev * dev)3534 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3535 {
3536 int error;
3537
3538 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3539 PCI_EXP_SAVE_REGS * sizeof(u16));
3540 if (error)
3541 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3542
3543 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3544 if (error)
3545 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3546
3547 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3548 2 * sizeof(u16));
3549 if (error)
3550 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3551
3552 pci_allocate_vc_save_buffers(dev);
3553 }
3554
pci_free_cap_save_buffers(struct pci_dev * dev)3555 void pci_free_cap_save_buffers(struct pci_dev *dev)
3556 {
3557 struct pci_cap_saved_state *tmp;
3558 struct hlist_node *n;
3559
3560 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3561 kfree(tmp);
3562 }
3563
3564 /**
3565 * pci_configure_ari - enable or disable ARI forwarding
3566 * @dev: the PCI device
3567 *
3568 * If @dev and its upstream bridge both support ARI, enable ARI in the
3569 * bridge. Otherwise, disable ARI in the bridge.
3570 */
3571 void pci_configure_ari(struct pci_dev *dev)
3572 {
3573 u32 cap;
3574 struct pci_dev *bridge;
3575
3576 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3577 return;
3578
3579 bridge = dev->bus->self;
3580 if (!bridge)
3581 return;
3582
3583 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3584 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3585 return;
3586
3587 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3588 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3589 PCI_EXP_DEVCTL2_ARI);
3590 bridge->ari_enabled = 1;
3591 } else {
3592 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3593 PCI_EXP_DEVCTL2_ARI);
3594 bridge->ari_enabled = 0;
3595 }
3596 }
3597
3598 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3599 {
3600 int pos;
3601 u16 cap, ctrl;
3602
3603 pos = pdev->acs_cap;
3604 if (!pos)
3605 return false;
3606
3607 /*
3608 * Except for egress control, capabilities are either required
3609 * or only required if controllable. Features missing from the
3610 * capability field can therefore be assumed to be hard-wired enabled.
3611 */
3612 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3613 acs_flags &= (cap | PCI_ACS_EC);
3614
3615 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3616 return (ctrl & acs_flags) == acs_flags;
3617 }
3618
3619 /**
3620 * pci_acs_enabled - test ACS against required flags for a given device
3621 * @pdev: device to test
3622 * @acs_flags: required PCI ACS flags
3623 *
3624 * Return true if the device supports the provided flags. Automatically
3625 * filters out flags that are not implemented on multifunction devices.
3626 *
3627 * Note that this interface checks the effective ACS capabilities of the
3628 * device rather than the actual capabilities. For instance, most single
3629 * function endpoints are not required to support ACS because they have no
3630 * opportunity for peer-to-peer access. We therefore return 'true'
3631 * regardless of whether the device exposes an ACS capability. This makes
3632 * it much easier for callers of this function to ignore the actual type
3633 * or topology of the device when testing ACS support.
3634 */
3635 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3636 {
3637 int ret;
3638
3639 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3640 if (ret >= 0)
3641 return ret > 0;
3642
3643 /*
3644 * Conventional PCI and PCI-X devices never support ACS, either
3645 * effectively or actually. The shared bus topology implies that
3646 * any device on the bus can receive or snoop DMA.
3647 */
3648 if (!pci_is_pcie(pdev))
3649 return false;
3650
3651 switch (pci_pcie_type(pdev)) {
3652 /*
3653 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3654 * but since their primary interface is PCI/X, we conservatively
3655 * handle them as we would a non-PCIe device.
3656 */
3657 case PCI_EXP_TYPE_PCIE_BRIDGE:
3658 /*
3659 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
3660 * applicable... must never implement an ACS Extended Capability...".
3661 * This seems arbitrary, but we take a conservative interpretation
3662 * of this statement.
3663 */
3664 case PCI_EXP_TYPE_PCI_BRIDGE:
3665 case PCI_EXP_TYPE_RC_EC:
3666 return false;
3667 /*
3668 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3669 * implement ACS in order to indicate their peer-to-peer capabilities,
3670 * regardless of whether they are single- or multi-function devices.
3671 */
3672 case PCI_EXP_TYPE_DOWNSTREAM:
3673 case PCI_EXP_TYPE_ROOT_PORT:
3674 return pci_acs_flags_enabled(pdev, acs_flags);
3675 /*
3676 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3677 * implemented by the remaining PCIe types to indicate peer-to-peer
3678 * capabilities, but only when they are part of a multifunction
3679 * device. The footnote for section 6.12 indicates the specific
3680 * PCIe types included here.
3681 */
3682 case PCI_EXP_TYPE_ENDPOINT:
3683 case PCI_EXP_TYPE_UPSTREAM:
3684 case PCI_EXP_TYPE_LEG_END:
3685 case PCI_EXP_TYPE_RC_END:
3686 if (!pdev->multifunction)
3687 break;
3688
3689 return pci_acs_flags_enabled(pdev, acs_flags);
3690 }
3691
3692 /*
3693 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3694 * to single function devices with the exception of downstream ports.
3695 */
3696 return true;
3697 }
3698
3699 /**
3700 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3701 * @start: starting downstream device
3702 * @end: ending upstream device or NULL to search to the root bus
3703 * @acs_flags: required flags
3704 *
3705 * Walk up a device tree from start to end testing PCI ACS support. If
3706 * any step along the way does not support the required flags, return false.
3707 */
3708 bool pci_acs_path_enabled(struct pci_dev *start,
3709 struct pci_dev *end, u16 acs_flags)
3710 {
3711 struct pci_dev *pdev, *parent = start;
3712
3713 do {
3714 pdev = parent;
3715
3716 if (!pci_acs_enabled(pdev, acs_flags))
3717 return false;
3718
3719 if (pci_is_root_bus(pdev->bus))
3720 return (end == NULL);
3721
3722 parent = pdev->bus->self;
3723 } while (pdev != end);
3724
3725 return true;
3726 }
3727
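/*
 * Usage sketch for pci_acs_path_enabled() (hypothetical caller, not
 * taken from this file): verify that peer-to-peer TLPs from @pdev are
 * always routed upstream and validated, as IOMMU grouping code might:
 *
 *	u16 flags = PCI_ACS_RR | PCI_ACS_CF | PCI_ACS_UF;
 *
 *	if (!pci_acs_path_enabled(pdev, NULL, flags))
 *		dev_info(&pdev->dev, "ACS not enabled up to the root bus\n");
 */
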
3728 /**
3729 * pci_acs_init - Initialize ACS if hardware supports it
3730 * @dev: the PCI device
3731 */
3732 void pci_acs_init(struct pci_dev *dev)
3733 {
3734 dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3735
3736 /*
3737 * Attempt to enable ACS regardless of capability because some Root
3738 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3739 * the standard ACS capability but still support ACS via those
3740 * quirks.
3741 */
3742 pci_enable_acs(dev);
3743 }
3744
3745 /**
3746 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3747 * @pdev: PCI device
3748 * @bar: BAR to find
3749 *
3750 * Helper to find the position of the ctrl register for a BAR.
3751 * Returns -ENOTSUPP if resizable BARs are not supported at all.
3752 * Returns -ENOENT if no ctrl register for the BAR could be found.
3753 */
3754 static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3755 {
3756 unsigned int pos, nbars, i;
3757 u32 ctrl;
3758
3759 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3760 if (!pos)
3761 return -ENOTSUPP;
3762
3763 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3764 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3765 PCI_REBAR_CTRL_NBAR_SHIFT;
3766
3767 for (i = 0; i < nbars; i++, pos += 8) {
3768 int bar_idx;
3769
3770 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3771 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3772 if (bar_idx == bar)
3773 return pos;
3774 }
3775
3776 return -ENOENT;
3777 }
3778
3779 /**
3780 * pci_rebar_get_possible_sizes - get possible sizes for BAR
3781 * @pdev: PCI device
3782 * @bar: BAR to query
3783 *
3784 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3785 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3786 */
3787 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3788 {
3789 int pos;
3790 u32 cap;
3791
3792 pos = pci_rebar_find_pos(pdev, bar);
3793 if (pos < 0)
3794 return 0;
3795
3796 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3797 cap = FIELD_GET(PCI_REBAR_CAP_SIZES, cap);
3798
3799 /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3800 if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3801 bar == 0 && cap == 0x700)
3802 return 0x3f00;
3803
3804 return cap;
3805 }
3806 EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
3807
3808 /**
3809 * pci_rebar_get_current_size - get the current size of a BAR
3810 * @pdev: PCI device
3811 * @bar: BAR to query
3812 *
3813 * Read the size of a BAR from the resizable BAR config.
3814 * Returns size if found or negative error code.
3815 */
3816 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3817 {
3818 int pos;
3819 u32 ctrl;
3820
3821 pos = pci_rebar_find_pos(pdev, bar);
3822 if (pos < 0)
3823 return pos;
3824
3825 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3826 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3827 }
3828
3829 /**
3830 * pci_rebar_set_size - set a new size for a BAR
3831 * @pdev: PCI device
3832 * @bar: BAR to set size to
3833 * @size: new size as defined in the spec (0=1MB, 19=512GB)
3834 *
3835 * Set the new size of a BAR as defined in the spec.
3836 * Returns zero if resizing was successful, error code otherwise.
3837 */
3838 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3839 {
3840 int pos;
3841 u32 ctrl;
3842
3843 pos = pci_rebar_find_pos(pdev, bar);
3844 if (pos < 0)
3845 return pos;
3846
3847 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3848 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3849 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3850 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3851 return 0;
3852 }
3853
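/*
 * Usage sketch for the ReBAR helpers above (hypothetical; a real
 * caller must also release and reassign the BAR resource around the
 * resize, which is omitted here): program the largest advertised size:
 *
 *	u32 sizes = pci_rebar_get_possible_sizes(pdev, bar);
 *
 *	if (sizes)
 *		pci_rebar_set_size(pdev, bar, __fls(sizes));
 */
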
3854 /**
3855 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3856 * @dev: the PCI device
3857 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3858 * PCI_EXP_DEVCAP2_ATOMIC_COMP32
3859 * PCI_EXP_DEVCAP2_ATOMIC_COMP64
3860 * PCI_EXP_DEVCAP2_ATOMIC_COMP128
3861 *
3862 * Return 0 if all upstream bridges support AtomicOp routing, egress
3863 * blocking is disabled on all upstream ports, and the root port supports
3864 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3865 * AtomicOp completion), or negative otherwise.
3866 */
3867 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3868 {
3869 struct pci_bus *bus = dev->bus;
3870 struct pci_dev *bridge;
3871 u32 cap, ctl2;
3872
3873 /*
3874 * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit
3875 * in Device Control 2 is reserved in VFs and the PF value applies
3876 * to all associated VFs.
3877 */
3878 if (dev->is_virtfn)
3879 return -EINVAL;
3880
3881 if (!pci_is_pcie(dev))
3882 return -EINVAL;
3883
3884 /*
3885 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3886 * AtomicOp requesters. For now, we only support endpoints as
3887 * requesters and root ports as completers. No endpoints as
3888 * completers, and no peer-to-peer.
3889 */
3890
3891 switch (pci_pcie_type(dev)) {
3892 case PCI_EXP_TYPE_ENDPOINT:
3893 case PCI_EXP_TYPE_LEG_END:
3894 case PCI_EXP_TYPE_RC_END:
3895 break;
3896 default:
3897 return -EINVAL;
3898 }
3899
3900 while (bus->parent) {
3901 bridge = bus->self;
3902
3903 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3904
3905 switch (pci_pcie_type(bridge)) {
3906 /* Ensure switch ports support AtomicOp routing */
3907 case PCI_EXP_TYPE_UPSTREAM:
3908 case PCI_EXP_TYPE_DOWNSTREAM:
3909 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3910 return -EINVAL;
3911 break;
3912
3913 /* Ensure root port supports all the sizes we care about */
3914 case PCI_EXP_TYPE_ROOT_PORT:
3915 if ((cap & cap_mask) != cap_mask)
3916 return -EINVAL;
3917 break;
3918 }
3919
3920 /* Ensure upstream ports don't block AtomicOps on egress */
3921 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3922 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3923 &ctl2);
3924 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3925 return -EINVAL;
3926 }
3927
3928 bus = bus->parent;
3929 }
3930
3931 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3932 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3933 return 0;
3934 }
3935 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
3936
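/*
 * Usage sketch (hypothetical driver probe code): request 64-bit
 * AtomicOp completion at the root port before issuing AtomicOp
 * requests, treating failure as a soft fallback:
 *
 *	if (pci_enable_atomic_ops_to_root(pdev,
 *					  PCI_EXP_DEVCAP2_ATOMIC_COMP64))
 *		dev_warn(&pdev->dev, "AtomicOps to root not available\n");
 */
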
3937 /**
3938 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3939 * @dev: the PCI device
3940 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3941 *
3942 * Perform INTx swizzling for a device behind one level of bridge. This is
3943 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
3944 * behind bridges on add-in cards. For devices with ARI enabled, the slot
3945 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
3946 * the PCI Express Base Specification, Revision 2.1)
3947 */
3948 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3949 {
3950 int slot;
3951
3952 if (pci_ari_enabled(dev->bus))
3953 slot = 0;
3954 else
3955 slot = PCI_SLOT(dev->devfn);
3956
3957 return (((pin - 1) + slot) % 4) + 1;
3958 }
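
/*
 * Worked example: a device in slot 2 asserting INTB (pin = 2) behind
 * one bridge yields (((2 - 1) + 2) % 4) + 1 = 4, i.e. INTD on the
 * bridge's primary side.
 */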
3959
3960 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3961 {
3962 u8 pin;
3963
3964 pin = dev->pin;
3965 if (!pin)
3966 return -1;
3967
3968 while (!pci_is_root_bus(dev->bus)) {
3969 pin = pci_swizzle_interrupt_pin(dev, pin);
3970 dev = dev->bus->self;
3971 }
3972 *bridge = dev;
3973 return pin;
3974 }
3975
3976 /**
3977 * pci_common_swizzle - swizzle INTx all the way to root bridge
3978 * @dev: the PCI device
3979 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3980 *
3981 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
3982 * bridges all the way up to a PCI root bus.
3983 */
3984 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3985 {
3986 u8 pin = *pinp;
3987
3988 while (!pci_is_root_bus(dev->bus)) {
3989 pin = pci_swizzle_interrupt_pin(dev, pin);
3990 dev = dev->bus->self;
3991 }
3992 *pinp = pin;
3993 return PCI_SLOT(dev->devfn);
3994 }
3995 EXPORT_SYMBOL_GPL(pci_common_swizzle);
3996
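/*
 * Usage sketch (hypothetical arch interrupt-routing code): resolve the
 * pin as seen at the root bus and the slot of the root-bus device:
 *
 *	u8 pin = dev->pin;
 *	u8 slot = pci_common_swizzle(dev, &pin);
 *
 * After the call @pin holds the swizzled INTx value and @slot the slot
 * number of the device sitting on the root bus.
 */
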
3997 /**
3998 * pci_release_region - Release a PCI bar
3999 * @pdev: PCI device whose resources were previously reserved by
4000 * pci_request_region()
4001 * @bar: BAR to release
4002 *
4003 * Releases the PCI I/O and memory resources previously reserved by a
4004 * successful call to pci_request_region(). Call this function only
4005 * after all use of the PCI regions has ceased.
4006 */
4007 void pci_release_region(struct pci_dev *pdev, int bar)
4008 {
4009 struct pci_devres *dr;
4010
4011 if (pci_resource_len(pdev, bar) == 0)
4012 return;
4013 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
4014 release_region(pci_resource_start(pdev, bar),
4015 pci_resource_len(pdev, bar));
4016 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
4017 release_mem_region(pci_resource_start(pdev, bar),
4018 pci_resource_len(pdev, bar));
4019
4020 dr = find_pci_dr(pdev);
4021 if (dr)
4022 dr->region_mask &= ~(1 << bar);
4023 }
4024 EXPORT_SYMBOL(pci_release_region);
4025
4026 /**
4027 * __pci_request_region - Reserve PCI I/O and memory resource
4028 * @pdev: PCI device whose resources are to be reserved
4029 * @bar: BAR to be reserved
4030 * @res_name: Name to be associated with resource.
4031 * @exclusive: whether the region access is exclusive or not
4032 *
4033 * Mark the PCI region associated with PCI device @pdev BAR @bar as
4034 * being reserved by owner @res_name. Do not access any
4035 * address inside the PCI regions unless this call returns
4036 * successfully.
4037 *
4038 * If @exclusive is set, then the region is marked so that userspace
4039 * is explicitly not allowed to map the resource via /dev/mem or
4040 * sysfs MMIO access.
4041 *
4042 * Returns 0 on success, or %EBUSY on error. A warning
4043 * message is also printed on failure.
4044 */
4045 static int __pci_request_region(struct pci_dev *pdev, int bar,
4046 const char *res_name, int exclusive)
4047 {
4048 struct pci_devres *dr;
4049
4050 if (pci_resource_len(pdev, bar) == 0)
4051 return 0;
4052
4053 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
4054 if (!request_region(pci_resource_start(pdev, bar),
4055 pci_resource_len(pdev, bar), res_name))
4056 goto err_out;
4057 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
4058 if (!__request_mem_region(pci_resource_start(pdev, bar),
4059 pci_resource_len(pdev, bar), res_name,
4060 exclusive))
4061 goto err_out;
4062 }
4063
4064 dr = find_pci_dr(pdev);
4065 if (dr)
4066 dr->region_mask |= 1 << bar;
4067
4068 return 0;
4069
4070 err_out:
4071 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
4072 &pdev->resource[bar]);
4073 return -EBUSY;
4074 }
4075
4076 /**
4077 * pci_request_region - Reserve PCI I/O and memory resource
4078 * @pdev: PCI device whose resources are to be reserved
4079 * @bar: BAR to be reserved
4080 * @res_name: Name to be associated with resource
4081 *
4082 * Mark the PCI region associated with PCI device @pdev BAR @bar as
4083 * being reserved by owner @res_name. Do not access any
4084 * address inside the PCI regions unless this call returns
4085 * successfully.
4086 *
4087 * Returns 0 on success, or %EBUSY on error. A warning
4088 * message is also printed on failure.
4089 */
4090 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
4091 {
4092 return __pci_request_region(pdev, bar, res_name, 0);
4093 }
4094 EXPORT_SYMBOL(pci_request_region);
4095
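/*
 * Usage sketch (hypothetical driver; the "my_driver" name is an
 * assumption): claim BAR 0 before mapping it:
 *
 *	void __iomem *base;
 *
 *	if (pci_request_region(pdev, 0, "my_driver"))
 *		return -EBUSY;
 *	base = pci_iomap(pdev, 0, 0);
 */
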
4096 /**
4097 * pci_release_selected_regions - Release selected PCI I/O and memory resources
4098 * @pdev: PCI device whose resources were previously reserved
4099 * @bars: Bitmask of BARs to be released
4100 *
4101 * Release selected PCI I/O and memory resources previously reserved.
4102 * Call this function only after all use of the PCI regions has ceased.
4103 */
4104 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
4105 {
4106 int i;
4107
4108 for (i = 0; i < PCI_STD_NUM_BARS; i++)
4109 if (bars & (1 << i))
4110 pci_release_region(pdev, i);
4111 }
4112 EXPORT_SYMBOL(pci_release_selected_regions);
4113
4114 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
4115 const char *res_name, int excl)
4116 {
4117 int i;
4118
4119 for (i = 0; i < PCI_STD_NUM_BARS; i++)
4120 if (bars & (1 << i))
4121 if (__pci_request_region(pdev, i, res_name, excl))
4122 goto err_out;
4123 return 0;
4124
4125 err_out:
4126 while (--i >= 0)
4127 if (bars & (1 << i))
4128 pci_release_region(pdev, i);
4129
4130 return -EBUSY;
4131 }
4132
4133
4134 /**
4135 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
4136 * @pdev: PCI device whose resources are to be reserved
4137 * @bars: Bitmask of BARs to be requested
4138 * @res_name: Name to be associated with resource
4139 */
4140 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
4141 const char *res_name)
4142 {
4143 return __pci_request_selected_regions(pdev, bars, res_name, 0);
4144 }
4145 EXPORT_SYMBOL(pci_request_selected_regions);
4146
4147 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
4148 const char *res_name)
4149 {
4150 return __pci_request_selected_regions(pdev, bars, res_name,
4151 IORESOURCE_EXCLUSIVE);
4152 }
4153 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
4154
4155 /**
4156 * pci_release_regions - Release reserved PCI I/O and memory resources
4157 * @pdev: PCI device whose resources were previously reserved by
4158 * pci_request_regions()
4159 *
4160 * Releases all PCI I/O and memory resources previously reserved by a
4161 * successful call to pci_request_regions(). Call this function only
4162 * after all use of the PCI regions has ceased.
4163 */
4164
4165 void pci_release_regions(struct pci_dev *pdev)
4166 {
4167 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
4168 }
4169 EXPORT_SYMBOL(pci_release_regions);
4170
4171 /**
4172 * pci_request_regions - Reserve PCI I/O and memory resources
4173 * @pdev: PCI device whose resources are to be reserved
4174 * @res_name: Name to be associated with resource.
4175 *
4176 * Mark all PCI regions associated with PCI device @pdev as
4177 * being reserved by owner @res_name. Do not access any
4178 * address inside the PCI regions unless this call returns
4179 * successfully.
4180 *
4181 * Returns 0 on success, or %EBUSY on error. A warning
4182 * message is also printed on failure.
4183 */
4184 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
4185 {
4186 return pci_request_selected_regions(pdev,
4187 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
4188 }
4189 EXPORT_SYMBOL(pci_request_regions);
4190
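/*
 * Usage sketch (hypothetical probe path): the common pairing with
 * pci_enable_device(), with pci_release_regions() as the counterpart
 * on teardown:
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	err = pci_request_regions(pdev, "my_driver");
 *	if (err)
 *		goto err_disable_device;
 */
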
4191 /**
4192 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
4193 * @pdev: PCI device whose resources are to be reserved
4194 * @res_name: Name to be associated with resource.
4195 *
4196 * Mark all PCI regions associated with PCI device @pdev as being reserved
4197 * by owner @res_name. Do not access any address inside the PCI regions
4198 * unless this call returns successfully.
4199 *
4200 * pci_request_regions_exclusive() will mark the region so that /dev/mem
4201 * and the sysfs MMIO access will not be allowed.
4202 *
4203 * Returns 0 on success, or %EBUSY on error. A warning message is also
4204 * printed on failure.
4205 */
4206 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
4207 {
4208 return pci_request_selected_regions_exclusive(pdev,
4209 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
4210 }
4211 EXPORT_SYMBOL(pci_request_regions_exclusive);
4212
4213 /*
4214 * Record the PCI IO range (expressed as CPU physical address + size).
4215 * Return a negative value if an error has occurred, zero otherwise.
4216 */
4217 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
4218 resource_size_t size)
4219 {
4220 int ret = 0;
4221 #ifdef PCI_IOBASE
4222 struct logic_pio_hwaddr *range;
4223
4224 if (!size || addr + size < addr)
4225 return -EINVAL;
4226
4227 range = kzalloc(sizeof(*range), GFP_ATOMIC);
4228 if (!range)
4229 return -ENOMEM;
4230
4231 range->fwnode = fwnode;
4232 range->size = size;
4233 range->hw_start = addr;
4234 range->flags = LOGIC_PIO_CPU_MMIO;
4235
4236 ret = logic_pio_register_range(range);
4237 if (ret)
4238 kfree(range);
4239
4240 /* Ignore duplicates due to deferred probing */
4241 if (ret == -EEXIST)
4242 ret = 0;
4243 #endif
4244
4245 return ret;
4246 }
4247
4248 phys_addr_t pci_pio_to_address(unsigned long pio)
4249 {
4250 #ifdef PCI_IOBASE
4251 if (pio < MMIO_UPPER_LIMIT)
4252 return logic_pio_to_hwaddr(pio);
4253 #endif
4254
4255 return (phys_addr_t) OF_BAD_ADDR;
4256 }
4257 EXPORT_SYMBOL_GPL(pci_pio_to_address);
4258
4259 unsigned long __weak pci_address_to_pio(phys_addr_t address)
4260 {
4261 #ifdef PCI_IOBASE
4262 return logic_pio_trans_cpuaddr(address);
4263 #else
4264 if (address > IO_SPACE_LIMIT)
4265 return (unsigned long)-1;
4266
4267 return (unsigned long) address;
4268 #endif
4269 }
4270
4271 /**
4272 * pci_remap_iospace - Remap the memory mapped I/O space
4273 * @res: Resource describing the I/O space
4274 * @phys_addr: physical address of range to be mapped
4275 *
4276 * Remap the memory mapped I/O space described by the @res and the CPU
4277 * physical address @phys_addr into virtual address space. Only
4278 * architectures that have memory mapped IO functions defined (and the
4279 * PCI_IOBASE value defined) should call this function.
4280 */
4281 #ifndef pci_remap_iospace
4282 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4283 {
4284 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4285 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4286
4287 if (!(res->flags & IORESOURCE_IO))
4288 return -EINVAL;
4289
4290 if (res->end > IO_SPACE_LIMIT)
4291 return -EINVAL;
4292
4293 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4294 pgprot_device(PAGE_KERNEL));
4295 #else
4296 /*
4297 * This architecture does not have memory mapped I/O space,
4298 * so this function should never be called
4299 */
4300 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4301 return -ENODEV;
4302 #endif
4303 }
4304 EXPORT_SYMBOL(pci_remap_iospace);
4305 #endif
4306
4307 /**
4308 * pci_unmap_iospace - Unmap the memory mapped I/O space
4309 * @res: resource to be unmapped
4310 *
4311 * Unmap the CPU virtual address @res from virtual address space. Only
4312 * architectures that have memory mapped IO functions defined (and the
4313 * PCI_IOBASE value defined) should call this function.
4314 */
4315 void pci_unmap_iospace(struct resource *res)
4316 {
4317 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4318 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4319
4320 vunmap_range(vaddr, vaddr + resource_size(res));
4321 #endif
4322 }
4323 EXPORT_SYMBOL(pci_unmap_iospace);
4324
4325 static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4326 {
4327 struct resource **res = ptr;
4328
4329 pci_unmap_iospace(*res);
4330 }
4331
4332 /**
4333 * devm_pci_remap_iospace - Managed pci_remap_iospace()
4334 * @dev: Generic device to remap IO address for
4335 * @res: Resource describing the I/O space
4336 * @phys_addr: physical address of range to be mapped
4337 *
4338 * Managed pci_remap_iospace(). Map is automatically unmapped on driver
4339 * detach.
4340 */
4341 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4342 phys_addr_t phys_addr)
4343 {
4344 const struct resource **ptr;
4345 int error;
4346
4347 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4348 if (!ptr)
4349 return -ENOMEM;
4350
4351 error = pci_remap_iospace(res, phys_addr);
4352 if (error) {
4353 devres_free(ptr);
4354 } else {
4355 *ptr = res;
4356 devres_add(dev, ptr);
4357 }
4358
4359 return error;
4360 }
4361 EXPORT_SYMBOL(devm_pci_remap_iospace);
4362
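/*
 * Usage sketch (hypothetical host bridge driver; "io_res" and
 * "io_phys_base" stand in for values parsed from firmware): no
 * explicit unmap is needed since the mapping is devres-managed:
 *
 *	err = devm_pci_remap_iospace(dev, &io_res, io_phys_base);
 *	if (err)
 *		return err;
 */
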
4363 /**
4364 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
4365 * @dev: Generic device to remap IO address for
4366 * @offset: Resource address to map
4367 * @size: Size of map
4368 *
4369 * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
4370 * detach.
4371 */
4372 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4373 resource_size_t offset,
4374 resource_size_t size)
4375 {
4376 void __iomem **ptr, *addr;
4377
4378 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4379 if (!ptr)
4380 return NULL;
4381
4382 addr = pci_remap_cfgspace(offset, size);
4383 if (addr) {
4384 *ptr = addr;
4385 devres_add(dev, ptr);
4386 } else
4387 devres_free(ptr);
4388
4389 return addr;
4390 }
4391 EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4392
4393 /**
4394 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
4395 * @dev: generic device to handle the resource for
4396 * @res: configuration space resource to be handled
4397 *
4398 * Checks that a resource is a valid memory region, requests the memory
4399 * region and ioremaps it with the pci_remap_cfgspace() API, which
4400 * ensures the proper PCI configuration space memory attributes are used.
4401 *
4402 * All operations are managed and will be undone on driver detach.
4403 *
4404 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
4405 * on failure. Usage example::
4406 *
4407 * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4408 * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
4409 * if (IS_ERR(base))
4410 * return PTR_ERR(base);
4411 */
4412 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4413 struct resource *res)
4414 {
4415 resource_size_t size;
4416 const char *name;
4417 void __iomem *dest_ptr;
4418
4419 BUG_ON(!dev);
4420
4421 if (!res || resource_type(res) != IORESOURCE_MEM) {
4422 dev_err(dev, "invalid resource\n");
4423 return IOMEM_ERR_PTR(-EINVAL);
4424 }
4425
4426 size = resource_size(res);
4427
4428 if (res->name)
4429 name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
4430 res->name);
4431 else
4432 name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
4433 if (!name)
4434 return IOMEM_ERR_PTR(-ENOMEM);
4435
4436 if (!devm_request_mem_region(dev, res->start, size, name)) {
4437 dev_err(dev, "can't request region for resource %pR\n", res);
4438 return IOMEM_ERR_PTR(-EBUSY);
4439 }
4440
4441 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4442 if (!dest_ptr) {
4443 dev_err(dev, "ioremap failed for resource %pR\n", res);
4444 devm_release_mem_region(dev, res->start, size);
4445 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4446 }
4447
4448 return dest_ptr;
4449 }
4450 EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4451
4452 static void __pci_set_master(struct pci_dev *dev, bool enable)
4453 {
4454 u16 old_cmd, cmd;
4455
4456 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4457 if (enable)
4458 cmd = old_cmd | PCI_COMMAND_MASTER;
4459 else
4460 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4461 if (cmd != old_cmd) {
4462 pci_dbg(dev, "%s bus mastering\n",
4463 enable ? "enabling" : "disabling");
4464 pci_write_config_word(dev, PCI_COMMAND, cmd);
4465 }
4466 dev->is_busmaster = enable;
4467 }
4468
4469 /**
4470 * pcibios_setup - process "pci=" kernel boot arguments
4471 * @str: string used to pass in "pci=" kernel boot arguments
4472 *
4473 * Process kernel boot arguments. This is the default implementation.
4474 * Architecture specific implementations can override this as necessary.
4475 */
4476 char * __weak __init pcibios_setup(char *str)
4477 {
4478 return str;
4479 }
4480
4481 /**
4482 * pcibios_set_master - enable PCI bus-mastering for device dev
4483 * @dev: the PCI device to enable
4484 *
4485 * Enables PCI bus-mastering for the device. This is the default
4486 * implementation. Architecture specific implementations can override
4487 * this if necessary.
4488 */
4489 void __weak pcibios_set_master(struct pci_dev *dev)
4490 {
4491 u8 lat;
4492
4493 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4494 if (pci_is_pcie(dev))
4495 return;
4496
4497 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4498 if (lat < 16)
4499 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4500 else if (lat > pcibios_max_latency)
4501 lat = pcibios_max_latency;
4502 else
4503 return;
4504
4505 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4506 }
4507
4508 /**
4509 * pci_set_master - enables bus-mastering for device dev
4510 * @dev: the PCI device to enable
4511 *
4512 * Enables bus-mastering on the device and calls pcibios_set_master()
4513 * to do the needed arch specific settings.
4514 */
4515 void pci_set_master(struct pci_dev *dev)
4516 {
4517 __pci_set_master(dev, true);
4518 pcibios_set_master(dev);
4519 }
4520 EXPORT_SYMBOL(pci_set_master);
4521
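/*
 * Usage sketch: bus mastering is typically enabled in probe after the
 * device and its regions are set up and before any DMA mapping or
 * MSI/MSI-X configuration (the 64-bit mask is an assumption):
 *
 *	pci_set_master(pdev);
 *	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 */
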
4522 /**
4523 * pci_clear_master - disables bus-mastering for device dev
4524 * @dev: the PCI device to disable
4525 */
4526 void pci_clear_master(struct pci_dev *dev)
4527 {
4528 __pci_set_master(dev, false);
4529 }
4530 EXPORT_SYMBOL(pci_clear_master);
4531
4532 /**
4533 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4534 * @dev: the PCI device for which MWI is to be enabled
4535 *
4536 * Helper function for pci_set_mwi.
4537 * Originally copied from drivers/net/acenic.c.
4538 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4539 *
4540 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4541 */
4542 int pci_set_cacheline_size(struct pci_dev *dev)
4543 {
4544 u8 cacheline_size;
4545
4546 if (!pci_cache_line_size)
4547 return -EINVAL;
4548
4549 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4550 * equal to or a multiple of the right value. */
4551 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4552 if (cacheline_size >= pci_cache_line_size &&
4553 (cacheline_size % pci_cache_line_size) == 0)
4554 return 0;
4555
4556 /* Write the correct value. */
4557 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4558 /* Read it back. */
4559 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4560 if (cacheline_size == pci_cache_line_size)
4561 return 0;
4562
4563 pci_dbg(dev, "cache line size of %d is not supported\n",
4564 pci_cache_line_size << 2);
4565
4566 return -EINVAL;
4567 }
4568 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4569
4570 /**
4571 * pci_set_mwi - enables memory-write-invalidate PCI transaction
4572 * @dev: the PCI device for which MWI is enabled
4573 *
4574 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4575 *
4576 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4577 */
4578 int pci_set_mwi(struct pci_dev *dev)
4579 {
4580 #ifdef PCI_DISABLE_MWI
4581 return 0;
4582 #else
4583 int rc;
4584 u16 cmd;
4585
4586 rc = pci_set_cacheline_size(dev);
4587 if (rc)
4588 return rc;
4589
4590 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4591 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4592 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4593 cmd |= PCI_COMMAND_INVALIDATE;
4594 pci_write_config_word(dev, PCI_COMMAND, cmd);
4595 }
4596 return 0;
4597 #endif
4598 }
4599 EXPORT_SYMBOL(pci_set_mwi);
4600
4601 /**
4602 * pcim_set_mwi - a device-managed pci_set_mwi()
4603 * @dev: the PCI device for which MWI is enabled
4604 *
4605 * Managed pci_set_mwi().
4606 *
4607 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4608 */
4609 int pcim_set_mwi(struct pci_dev *dev)
4610 {
4611 struct pci_devres *dr;
4612
4613 dr = find_pci_dr(dev);
4614 if (!dr)
4615 return -ENOMEM;
4616
4617 dr->mwi = 1;
4618 return pci_set_mwi(dev);
4619 }
4620 EXPORT_SYMBOL(pcim_set_mwi);
4621
4622 /**
4623 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4624 * @dev: the PCI device for which MWI is enabled
4625 *
4626 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4627 * Callers are not required to check the return value.
4628 *
4629 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4630 */
4631 int pci_try_set_mwi(struct pci_dev *dev)
4632 {
4633 #ifdef PCI_DISABLE_MWI
4634 return 0;
4635 #else
4636 return pci_set_mwi(dev);
4637 #endif
4638 }
4639 EXPORT_SYMBOL(pci_try_set_mwi);
4640
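/*
 * Usage sketch: since the return value need not be checked, a driver
 * can opportunistically enable MWI during setup:
 *
 *	pci_try_set_mwi(pdev);
 */
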
4641 /**
4642 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4643 * @dev: the PCI device to disable
4644 *
4645 * Disables PCI Memory-Write-Invalidate transaction on the device
4646 */
4647 void pci_clear_mwi(struct pci_dev *dev)
4648 {
4649 #ifndef PCI_DISABLE_MWI
4650 u16 cmd;
4651
4652 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4653 if (cmd & PCI_COMMAND_INVALIDATE) {
4654 cmd &= ~PCI_COMMAND_INVALIDATE;
4655 pci_write_config_word(dev, PCI_COMMAND, cmd);
4656 }
4657 #endif
4658 }
4659 EXPORT_SYMBOL(pci_clear_mwi);
4660
4661 /**
4662 * pci_disable_parity - disable parity checking for device
4663 * @dev: the PCI device to operate on
4664 *
4665 * Disable parity checking for device @dev
4666 */
4667 void pci_disable_parity(struct pci_dev *dev)
4668 {
4669 u16 cmd;
4670
4671 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4672 if (cmd & PCI_COMMAND_PARITY) {
4673 cmd &= ~PCI_COMMAND_PARITY;
4674 pci_write_config_word(dev, PCI_COMMAND, cmd);
4675 }
4676 }
4677
4678 /**
4679 * pci_intx - enables/disables PCI INTx for device dev
4680 * @pdev: the PCI device to operate on
4681 * @enable: boolean: whether to enable or disable PCI INTx
4682 *
4683 * Enables/disables PCI INTx for device @pdev
4684 */
4685 void pci_intx(struct pci_dev *pdev, int enable)
4686 {
4687 u16 pci_command, new;
4688
4689 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4690
4691 if (enable)
4692 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4693 else
4694 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4695
4696 if (new != pci_command) {
4697 struct pci_devres *dr;
4698
4699 pci_write_config_word(pdev, PCI_COMMAND, new);
4700
4701 dr = find_pci_dr(pdev);
4702 if (dr && !dr->restore_intx) {
4703 dr->restore_intx = 1;
4704 dr->orig_intx = !enable;
4705 }
4706 }
4707 }
4708 EXPORT_SYMBOL_GPL(pci_intx);
4709
4710 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4711 {
4712 struct pci_bus *bus = dev->bus;
4713 bool mask_updated = true;
4714 u32 cmd_status_dword;
4715 u16 origcmd, newcmd;
4716 unsigned long flags;
4717 bool irq_pending;
4718
4719 /*
4720 * We do a single dword read to retrieve both command and status.
4721 * Document assumptions that make this possible.
4722 */
4723 BUILD_BUG_ON(PCI_COMMAND % 4);
4724 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4725
4726 raw_spin_lock_irqsave(&pci_lock, flags);
4727
4728 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4729
4730 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4731
4732 /*
4733 * Check interrupt status register to see whether our device
4734 * triggered the interrupt (when masking) or the next IRQ is
4735 * already pending (when unmasking).
4736 */
4737 if (mask != irq_pending) {
4738 mask_updated = false;
4739 goto done;
4740 }
4741
4742 origcmd = cmd_status_dword;
4743 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4744 if (mask)
4745 newcmd |= PCI_COMMAND_INTX_DISABLE;
4746 if (newcmd != origcmd)
4747 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4748
4749 done:
4750 raw_spin_unlock_irqrestore(&pci_lock, flags);
4751
4752 return mask_updated;
4753 }
4754
4755 /**
4756 * pci_check_and_mask_intx - mask INTx on pending interrupt
4757 * @dev: the PCI device to operate on
4758 *
4759 * Check if the device dev has its INTx line asserted, mask it and return
4760 * true in that case. False is returned if no interrupt was pending.
4761 */
4762 bool pci_check_and_mask_intx(struct pci_dev *dev)
4763 {
4764 return pci_check_and_set_intx_mask(dev, true);
4765 }
4766 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4767
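/*
 * Usage sketch (hypothetical UIO-style handler for a shared INTx
 * line): claim the interrupt only if this device asserted it:
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct pci_dev *pdev = data;
 *
 *		if (!pci_check_and_mask_intx(pdev))
 *			return IRQ_NONE;
 *		return IRQ_HANDLED;
 *	}
 */
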
4768 /**
4769 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
4770 * @dev: the PCI device to operate on
4771 *
4772 * Check if the device dev has its INTx line asserted, unmask it if not and
4773 * return true. False is returned and the mask remains active if there was
4774 * still an interrupt pending.
4775 */
4776 bool pci_check_and_unmask_intx(struct pci_dev *dev)
4777 {
4778 return pci_check_and_set_intx_mask(dev, false);
4779 }
4780 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
4781
4782 /**
4783 * pci_wait_for_pending_transaction - wait for pending transaction
4784 * @dev: the PCI device to operate on
4785 *
4786 * Return 0 if a transaction is pending, 1 otherwise.
4787 */
4788 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4789 {
4790 if (!pci_is_pcie(dev))
4791 return 1;
4792
4793 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4794 PCI_EXP_DEVSTA_TRPND);
4795 }
4796 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4797
4798 /**
4799 * pcie_flr - initiate a PCIe function level reset
4800 * @dev: device to reset
4801 *
4802 * Initiate a function level reset unconditionally on @dev without
4803 * checking any flags or DEVCAP.
4804 */
4805 int pcie_flr(struct pci_dev *dev)
4806 {
4807 if (!pci_wait_for_pending_transaction(dev))
4808 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4809
4810 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4811
4812 if (dev->imm_ready)
4813 return 0;
4814
4815 /*
4816 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4817 * 100ms, but may silently discard requests while the FLR is in
4818 * progress. Wait 100ms before trying to access the device.
4819 */
4820 msleep(100);
4821
4822 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4823 }
4824 EXPORT_SYMBOL_GPL(pcie_flr);
4825
4826 /**
4827 * pcie_reset_flr - initiate a PCIe function level reset
4828 * @dev: device to reset
4829 * @probe: if true, return 0 if device can be reset this way
4830 *
4831 * Initiate a function level reset on @dev.
4832 */
4833 int pcie_reset_flr(struct pci_dev *dev, bool probe)
4834 {
4835 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4836 return -ENOTTY;
4837
4838 if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
4839 return -ENOTTY;
4840
4841 if (probe)
4842 return 0;
4843
4844 return pcie_flr(dev);
4845 }
4846 EXPORT_SYMBOL_GPL(pcie_reset_flr);
4847
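/*
 * Usage sketch for pcie_reset_flr() (hypothetical caller, assuming the
 * PCI_RESET_PROBE/PCI_RESET_DO_RESET constants from <linux/pci.h>):
 * probe first, then perform the reset only if it is supported:
 *
 *	if (pcie_reset_flr(pdev, PCI_RESET_PROBE) == 0)
 *		pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
 */
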
4848 static int pci_af_flr(struct pci_dev *dev, bool probe)
4849 {
4850 int pos;
4851 u8 cap;
4852
4853 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4854 if (!pos)
4855 return -ENOTTY;
4856
4857 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4858 return -ENOTTY;
4859
4860 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4861 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4862 return -ENOTTY;
4863
4864 if (probe)
4865 return 0;
4866
4867 /*
4868 * Wait for Transaction Pending bit to clear. A word-aligned test
4869 * is used, so we use the control offset rather than status and shift
4870 * the test bit to match.
4871 */
4872 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4873 PCI_AF_STATUS_TP << 8))
4874 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4875
4876 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4877
4878 if (dev->imm_ready)
4879 return 0;
4880
4881 /*
4882 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4883 * updated 27 July 2006; a device must complete an FLR within
4884 * 100ms, but may silently discard requests while the FLR is in
4885 * progress. Wait 100ms before trying to access the device.
4886 */
4887 msleep(100);
4888
4889 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4890 }
4891
4892 /**
4893 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4894 * @dev: Device to reset.
4895 * @probe: if true, return 0 if the device can be reset this way.
4896 *
4897 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4898 * unset, it will be reinitialized internally when going from PCI_D3hot to
4899 * PCI_D0. If that's the case and the device is not in a low-power state
4900 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4901 *
4902 * NOTE: This causes the caller to sleep for twice the device power transition
4903 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4904 * by default (i.e. unless @dev's d3hot_delay field has a different value).
4905 * Moreover, only devices in D0 can be reset by this function.
4906 */
4907 static int pci_pm_reset(struct pci_dev *dev, bool probe)
4908 {
4909 u16 csr;
4910
4911 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4912 return -ENOTTY;
4913
4914 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4915 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4916 return -ENOTTY;
4917
4918 if (probe)
4919 return 0;
4920
4921 if (dev->current_state != PCI_D0)
4922 return -EINVAL;
4923
4924 csr &= ~PCI_PM_CTRL_STATE_MASK;
4925 csr |= PCI_D3hot;
4926 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4927 pci_dev_d3_sleep(dev);
4928
4929 csr &= ~PCI_PM_CTRL_STATE_MASK;
4930 csr |= PCI_D0;
4931 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4932 pci_dev_d3_sleep(dev);
4933
4934 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4935 }
4936
4937 /**
4938 * pcie_wait_for_link_status - Wait for link status change
4939 * @pdev: Device whose link to wait for.
4940 * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE.
4941 * @active: Waiting for active or inactive?
4942 *
4943 * Return 0 if successful, or -ETIMEDOUT if status has not changed within
4944 * PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4945 */
4946 static int pcie_wait_for_link_status(struct pci_dev *pdev,
4947 bool use_lt, bool active)
4948 {
4949 u16 lnksta_mask, lnksta_match;
4950 unsigned long end_jiffies;
4951 u16 lnksta;
4952
4953 lnksta_mask = use_lt ? PCI_EXP_LNKSTA_LT : PCI_EXP_LNKSTA_DLLLA;
4954 lnksta_match = active ? lnksta_mask : 0;
4955
4956 end_jiffies = jiffies + msecs_to_jiffies(PCIE_LINK_RETRAIN_TIMEOUT_MS);
4957 do {
4958 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
4959 if ((lnksta & lnksta_mask) == lnksta_match)
4960 return 0;
4961 msleep(1);
4962 } while (time_before(jiffies, end_jiffies));
4963
4964 return -ETIMEDOUT;
4965 }
4966
4967 /**
4968 * pcie_retrain_link - Request a link retrain and wait for it to complete
4969 * @pdev: Device whose link to retrain.
4970 * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status.
4971 *
4972 * Retrain completion status is retrieved from the Link Status Register
4973 * according to @use_lt. It is not verified whether the use of the DLLLA
4974 * bit is valid.
4975 *
4976 * Return 0 if successful, or -ETIMEDOUT if training has not completed
4977 * within PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4978 */
4979 int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
4980 {
4981 int rc;
4982
4983 /*
4984 * Ensure the updated LNKCTL parameters are used during link
4985 * training by checking that there is no ongoing link training to
4986 * avoid LTSSM race as recommended in Implementation Note at the
4987 * end of PCIe r6.0.1 sec 7.5.3.7.
4988 */
4989 rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
4990 if (rc)
4991 return rc;
4992
4993 pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4994 if (pdev->clear_retrain_link) {
4995 /*
4996 * Due to an erratum in some devices the Retrain Link bit
4997 * needs to be cleared again manually to allow the link
4998 * training to succeed.
4999 */
5000 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
5001 }
5002
5003 return pcie_wait_for_link_status(pdev, use_lt, !use_lt);
5004 }
5005
5006 /**
5007 * pcie_wait_for_link_delay - Wait until link is active or inactive
5008 * @pdev: Bridge device
5009 * @active: waiting for active or inactive?
5010 * @delay: Delay to wait after link has become active (in ms)
5011 *
5012 * Use this to wait until the link becomes active or inactive.
5013 */
5014 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
5015 int delay)
5016 {
5017 int rc;
5018
5019 /*
5020 * Some controllers might not implement link active reporting. In this
5021 * case, we wait for 1000 ms + any delay requested by the caller.
5022 */
5023 if (!pdev->link_active_reporting) {
5024 msleep(PCIE_LINK_RETRAIN_TIMEOUT_MS + delay);
5025 return true;
5026 }
5027
5028 /*
5029 * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
5030 * after which we should expect the link to become active if the reset was
5031 * successful. If so, software must wait a minimum of 100ms before sending
5032 * configuration requests to devices downstream of this port.
5033 *
5034 * If the link fails to activate, either the device was physically
5035 * removed or the link is permanently failed.
5036 */
5037 if (active)
5038 msleep(20);
5039 rc = pcie_wait_for_link_status(pdev, false, active);
5040 if (active) {
5041 if (rc)
5042 rc = pcie_failed_link_retrain(pdev);
5043 if (rc)
5044 return false;
5045
5046 msleep(delay);
5047 return true;
5048 }
5049
5050 if (rc)
5051 return false;
5052
5053 return true;
5054 }
5055
5056 /**
5057 * pcie_wait_for_link - Wait until link is active or inactive
5058 * @pdev: Bridge device
5059 * @active: waiting for active or inactive?
5060 *
5061 * Use this to wait until the link becomes active or inactive.
5062 */
5063 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
5064 {
5065 return pcie_wait_for_link_delay(pdev, active, 100);
5066 }
5067
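/*
 * Usage sketch (hypothetical hotplug path): wait for the link to come
 * up after enabling slot power, before issuing config requests:
 *
 *	if (!pcie_wait_for_link(bridge, true))
 *		return -ENODEV;
 */
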
5068 /*
5069 * Find maximum D3cold delay required by all the devices on the bus. The
5070 * spec says 100 ms, but firmware can lower it and we allow drivers to
5071 * increase it as well.
5072 *
5073 * Called with @pci_bus_sem locked for reading.
5074 */
5075 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
5076 {
5077 const struct pci_dev *pdev;
5078 int min_delay = 100;
5079 int max_delay = 0;
5080
5081 list_for_each_entry(pdev, &bus->devices, bus_list) {
5082 if (pdev->d3cold_delay < min_delay)
5083 min_delay = pdev->d3cold_delay;
5084 if (pdev->d3cold_delay > max_delay)
5085 max_delay = pdev->d3cold_delay;
5086 }
5087
5088 return max(min_delay, max_delay);
5089 }
5090
5091 /**
5092 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
5093 * @dev: PCI bridge
5094 * @reset_type: reset type in human-readable form
5095 *
5096 * Handle necessary delays before access to the devices on the secondary
5097 * side of the bridge are permitted after D3cold to D0 transition
5098 * or Conventional Reset.
5099 *
5100 * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
5101 * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
5102 * 4.3.2.
5103 *
5104 * Return 0 on success or -ENOTTY if the first device on the secondary bus
5105 * failed to become accessible.
5106 */
5107 int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
5108 {
5109 struct pci_dev *child;
5110 int delay;
5111
5112 if (pci_dev_is_disconnected(dev))
5113 return 0;
5114
5115 if (!pci_is_bridge(dev))
5116 return 0;
5117
5118 down_read(&pci_bus_sem);
5119
5120 /*
5121 * We only deal with devices that are currently present on the bus.
5122 * For any hot-added devices the access delay is handled in pciehp
5123 * board_added(). In case of ACPI hotplug the firmware is expected
5124 * to configure the devices before OS is notified.
5125 */
5126 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
5127 up_read(&pci_bus_sem);
5128 return 0;
5129 }
5130
5131 /* Take d3cold_delay requirements into account */
5132 delay = pci_bus_max_d3cold_delay(dev->subordinate);
5133 if (!delay) {
5134 up_read(&pci_bus_sem);
5135 return 0;
5136 }
5137
5138 child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
5139 bus_list);
5140 up_read(&pci_bus_sem);
5141
5142 /*
5143 * For conventional PCI and PCI-X, we need to wait Tpvrh + Trhfa before
5144 * accessing the device after reset (that is, 1000 ms + 100 ms).
5145 */
5146 if (!pci_is_pcie(dev)) {
5147 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
5148 msleep(1000 + delay);
5149 return 0;
5150 }
5151
5152 /*
5153 * PCIe downstream and root ports that do not support speeds
5154 * greater than 5 GT/s need to wait a minimum of 100 ms. For higher
5155 * speeds (gen3 and above) we first need to wait for the data link
5156 * layer to become active.
5157 *
5158 * However, 100 ms is only the minimum; the PCIe spec says
5159 * software must allow at least 1 s before it can determine that a
5160 * device that did not respond is broken. A device can also
5161 * take longer than that to respond if it indicates so through Request
5162 * Retry Status completions.
5163 *
5164 * Therefore we wait for 100 ms and check for the device presence
5165 * until the timeout expires.
5166 */
5167 if (!pcie_downstream_port(dev))
5168 return 0;
5169
5170 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
5171 u16 status;
5172
5173 pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
5174 msleep(delay);
5175
5176 if (!pci_dev_wait(child, reset_type, PCI_RESET_WAIT - delay))
5177 return 0;
5178
5179 /*
5180 * If the port supports active link reporting we now check
5181 * whether the link is active and if not bail out early with
5182 * the assumption that the device is not present anymore.
5183 */
5184 if (!dev->link_active_reporting)
5185 return -ENOTTY;
5186
5187 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &status);
5188 if (!(status & PCI_EXP_LNKSTA_DLLLA))
5189 return -ENOTTY;
5190
5191 return pci_dev_wait(child, reset_type,
5192 PCIE_RESET_READY_POLL_MS - PCI_RESET_WAIT);
5193 }
5194
5195 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
5196 delay);
5197 if (!pcie_wait_for_link_delay(dev, true, delay)) {
5198 /* Did not train, no need to wait any further */
5199 pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
5200 return -ENOTTY;
5201 }
5202
5203 return pci_dev_wait(child, reset_type,
5204 PCIE_RESET_READY_POLL_MS - delay);
5205 }
5206
5207 void pci_reset_secondary_bus(struct pci_dev *dev)
5208 {
5209 u16 ctrl;
5210
5211 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
5212 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
5213 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
5214
5215 /*
5216 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
5217 * this to 2ms to ensure that we meet the minimum requirement.
5218 */
5219 msleep(2);
5220
5221 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
5222 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
5223 }
5224
5225 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
5226 {
5227 pci_reset_secondary_bus(dev);
5228 }
5229
5230 /**
5231 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
5232 * @dev: Bridge device
5233 *
5234 * Use the bridge control register to assert reset on the secondary bus.
5235 * Devices on the secondary bus are left in power-on state.
5236 */
5237 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
5238 {
5239 pcibios_reset_secondary_bus(dev);
5240
5241 return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
5242 }
5243 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
5244
5245 static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
5246 {
5247 struct pci_dev *pdev;
5248
5249 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
5250 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5251 return -ENOTTY;
5252
5253 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
5254 if (pdev != dev)
5255 return -ENOTTY;
5256
5257 if (probe)
5258 return 0;
5259
5260 return pci_bridge_secondary_bus_reset(dev->bus->self);
5261 }
5262
5263 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
5264 {
5265 int rc = -ENOTTY;
5266
5267 if (!hotplug || !try_module_get(hotplug->owner))
5268 return rc;
5269
5270 if (hotplug->ops->reset_slot)
5271 rc = hotplug->ops->reset_slot(hotplug, probe);
5272
5273 module_put(hotplug->owner);
5274
5275 return rc;
5276 }
5277
5278 static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
5279 {
5280 if (dev->multifunction || dev->subordinate || !dev->slot ||
5281 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5282 return -ENOTTY;
5283
5284 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
5285 }
5286
5287 static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
5288 {
5289 int rc;
5290
5291 rc = pci_dev_reset_slot_function(dev, probe);
5292 if (rc != -ENOTTY)
5293 return rc;
5294 return pci_parent_bus_reset(dev, probe);
5295 }
5296
5297 void pci_dev_lock(struct pci_dev *dev)
5298 {
5299 /* block PM suspend, driver probe, etc. */
5300 device_lock(&dev->dev);
5301 pci_cfg_access_lock(dev);
5302 }
5303 EXPORT_SYMBOL_GPL(pci_dev_lock);
5304
5305 /* Return 1 on successful lock, 0 on contention */
5306 int pci_dev_trylock(struct pci_dev *dev)
5307 {
5308 if (device_trylock(&dev->dev)) {
5309 if (pci_cfg_access_trylock(dev))
5310 return 1;
5311 device_unlock(&dev->dev);
5312 }
5313
5314 return 0;
5315 }
5316 EXPORT_SYMBOL_GPL(pci_dev_trylock);
5317
5318 void pci_dev_unlock(struct pci_dev *dev)
5319 {
5320 pci_cfg_access_unlock(dev);
5321 device_unlock(&dev->dev);
5322 }
5323 EXPORT_SYMBOL_GPL(pci_dev_unlock);
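
/*
 * Usage sketch (illustrative, not part of this file; "pdev" is a
 * hypothetical struct pci_dev pointer): callers bracket a reset or a
 * run of config-space manipulation with the device lock, or use the
 * trylock variant where blocking on contention is not acceptable:
 *
 *	pci_dev_lock(pdev);
 *	... reset, touch config space, etc. ...
 *	pci_dev_unlock(pdev);
 *
 *	if (!pci_dev_trylock(pdev))
 *		return -EAGAIN;
 *	...
 *	pci_dev_unlock(pdev);
 */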
5324
5325 static void pci_dev_save_and_disable(struct pci_dev *dev)
5326 {
5327 const struct pci_error_handlers *err_handler =
5328 dev->driver ? dev->driver->err_handler : NULL;
5329
5330 /*
5331 * dev->driver->err_handler->reset_prepare() is protected against
5332 * races with ->remove() by the device lock, which must be held by
5333 * the caller.
5334 */
5335 if (err_handler && err_handler->reset_prepare)
5336 err_handler->reset_prepare(dev);
5337
5338 /*
5339 * Wake the device up prior to the save. PM registers default to D0 after
5340 * reset and a simple register restore doesn't reliably return
5341 * to a non-D0 state anyway.
5342 */
5343 pci_set_power_state(dev, PCI_D0);
5344
5345 pci_save_state(dev);
5346 /*
5347 * Disable the device by clearing the Command register, except for
5348 * INTx-disable which is set. This not only disables MMIO and I/O port
5349 * BARs, but also prevents the device from being Bus Master, preventing
5350 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
5351 * compliant devices, INTx-disable prevents legacy interrupts.
5352 */
5353 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5354 }
5355
5356 static void pci_dev_restore(struct pci_dev *dev)
5357 {
5358 const struct pci_error_handlers *err_handler =
5359 dev->driver ? dev->driver->err_handler : NULL;
5360
5361 pci_restore_state(dev);
5362
5363 /*
5364 * dev->driver->err_handler->reset_done() is protected against
5365 * races with ->remove() by the device lock, which must be held by
5366 * the caller.
5367 */
5368 if (err_handler && err_handler->reset_done)
5369 err_handler->reset_done(dev);
5370 }
5371
5372 /* dev->reset_methods[] is a 0-terminated list of indices into this array */
5373 static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
5374 { },
5375 { pci_dev_specific_reset, .name = "device_specific" },
5376 { pci_dev_acpi_reset, .name = "acpi" },
5377 { pcie_reset_flr, .name = "flr" },
5378 { pci_af_flr, .name = "af_flr" },
5379 { pci_pm_reset, .name = "pm" },
5380 { pci_reset_bus_function, .name = "bus" },
5381 };
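
/*
 * Worked example (hypothetical device): reset_methods = { 3, 5, 0, ... }
 * encodes "try FLR first, then PM reset", since index 3 in the table
 * above is pcie_reset_flr and index 5 is pci_pm_reset; the 0 terminates
 * the list.
 */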
5382
5383 static ssize_t reset_method_show(struct device *dev,
5384 struct device_attribute *attr, char *buf)
5385 {
5386 struct pci_dev *pdev = to_pci_dev(dev);
5387 ssize_t len = 0;
5388 int i, m;
5389
5390 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5391 m = pdev->reset_methods[i];
5392 if (!m)
5393 break;
5394
5395 len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
5396 pci_reset_fn_methods[m].name);
5397 }
5398
5399 if (len)
5400 len += sysfs_emit_at(buf, len, "\n");
5401
5402 return len;
5403 }
5404
5405 static int reset_method_lookup(const char *name)
5406 {
5407 int m;
5408
5409 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5410 if (sysfs_streq(name, pci_reset_fn_methods[m].name))
5411 return m;
5412 }
5413
5414 return 0; /* not found */
5415 }
5416
5417 static ssize_t reset_method_store(struct device *dev,
5418 struct device_attribute *attr,
5419 const char *buf, size_t count)
5420 {
5421 struct pci_dev *pdev = to_pci_dev(dev);
5422 char *options, *name;
5423 int m, n;
5424 u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
5425
5426 if (sysfs_streq(buf, "")) {
5427 pdev->reset_methods[0] = 0;
5428 pci_warn(pdev, "All device reset methods disabled by user\n");
5429 return count;
5430 }
5431
5432 if (sysfs_streq(buf, "default")) {
5433 pci_init_reset_methods(pdev);
5434 return count;
5435 }
5436
5437 options = kstrndup(buf, count, GFP_KERNEL);
5438 if (!options)
5439 return -ENOMEM;
5440
5441 n = 0;
5442 while ((name = strsep(&options, " ")) != NULL) {
5443 if (sysfs_streq(name, ""))
5444 continue;
5445
5446 name = strim(name);
5447
5448 m = reset_method_lookup(name);
5449 if (!m) {
5450 pci_err(pdev, "Invalid reset method '%s'\n", name);
5451 goto error;
5452 }
5453
5454 if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
5455 pci_err(pdev, "Unsupported reset method '%s'\n", name);
5456 goto error;
5457 }
5458
5459 if (n == PCI_NUM_RESET_METHODS - 1) {
5460 pci_err(pdev, "Too many reset methods\n");
5461 goto error;
5462 }
5463
5464 reset_methods[n++] = m;
5465 }
5466
5467 reset_methods[n] = 0;
5468
5469 /* Warn if dev-specific supported but not highest priority */
5470 if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
5471 reset_methods[0] != 1)
5472 pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user\n");
5473 memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
5474 kfree(options);
5475 return count;
5476
5477 error:
5478 /* Leave previous methods unchanged */
5479 kfree(options);
5480 return -EINVAL;
5481 }
5482 static DEVICE_ATTR_RW(reset_method);
5483
5484 static struct attribute *pci_dev_reset_method_attrs[] = {
5485 &dev_attr_reset_method.attr,
5486 NULL,
5487 };
5488
5489 static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
5490 struct attribute *a, int n)
5491 {
5492 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
5493
5494 if (!pci_reset_supported(pdev))
5495 return 0;
5496
5497 return a->mode;
5498 }
5499
5500 const struct attribute_group pci_dev_reset_method_attr_group = {
5501 .attrs = pci_dev_reset_method_attrs,
5502 .is_visible = pci_dev_reset_method_attr_is_visible,
5503 };
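
/*
 * Example interaction with the sysfs attribute defined above, for a
 * hypothetical device at 0000:01:00.0 that supports FLR and bus reset:
 *
 *	# cat /sys/bus/pci/devices/0000:01:00.0/reset_method
 *	flr bus
 *	# echo "bus flr" > /sys/bus/pci/devices/0000:01:00.0/reset_method
 *	# echo default > /sys/bus/pci/devices/0000:01:00.0/reset_method
 */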
5504
5505 /**
5506 * __pci_reset_function_locked - reset a PCI device function while holding
5507 * the @dev mutex lock.
5508 * @dev: PCI device to reset
5509 *
5510 * Some devices allow an individual function to be reset without affecting
5511 * other functions in the same device. The PCI device must be responsive
5512 * to PCI config space in order to use this function.
5513 *
5514 * The device function is presumed to be unused and the caller is holding
5515 * the device mutex lock when this function is called.
5516 *
5517 * Resetting the device will make the contents of PCI configuration space
5518 * random, so any caller of this must be prepared to reinitialise the
5519 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5520 * etc.
5521 *
5522 * Returns 0 if the device function was successfully reset or negative if the
5523 * device doesn't support resetting a single function.
5524 */
5525 int __pci_reset_function_locked(struct pci_dev *dev)
5526 {
5527 int i, m, rc;
5528
5529 might_sleep();
5530
5531 /*
5532 * A reset method returns -ENOTTY if it doesn't support this device and
5533 * we should try the next method.
5534 *
5535 * If it returns 0 (success), we're finished. If it returns any other
5536 * error, we're also finished: this indicates that further reset
5537 * mechanisms might be broken on the device.
5538 */
5539 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5540 m = dev->reset_methods[i];
5541 if (!m)
5542 return -ENOTTY;
5543
5544 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
5545 if (!rc)
5546 return 0;
5547 if (rc != -ENOTTY)
5548 return rc;
5549 }
5550
5551 return -ENOTTY;
5552 }
5553 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
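
/*
 * Usage sketch (illustrative; "pdev" and the error handling are
 * hypothetical): a caller that already holds the device lock saves
 * whatever state it cares about before resetting:
 *
 *	pci_save_state(pdev);
 *	err = __pci_reset_function_locked(pdev);
 *	if (!err)
 *		pci_restore_state(pdev);
 */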
5554
5555 /**
5556 * pci_init_reset_methods - check whether device can be safely reset
5557 * and store supported reset mechanisms.
5558 * @dev: PCI device to check for reset mechanisms
5559 *
5560 * Some devices allow an individual function to be reset without affecting
5561 * other functions in the same device. The PCI device must be in D0-D3hot
5562 * state.
5563 *
5564 * Stores reset mechanisms supported by device in reset_methods byte array
5565 * which is a member of struct pci_dev.
5566 */
5567 void pci_init_reset_methods(struct pci_dev *dev)
5568 {
5569 int m, i, rc;
5570
5571 BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
5572
5573 might_sleep();
5574
5575 i = 0;
5576 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5577 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
5578 if (!rc)
5579 dev->reset_methods[i++] = m;
5580 else if (rc != -ENOTTY)
5581 break;
5582 }
5583
5584 dev->reset_methods[i] = 0;
5585 }
5586
5587 /**
5588 * pci_reset_function - quiesce and reset a PCI device function
5589 * @dev: PCI device to reset
5590 *
5591 * Some devices allow an individual function to be reset without affecting
5592 * other functions in the same device. The PCI device must be responsive
5593 * to PCI config space in order to use this function.
5594 *
5595 * This function does not just reset the PCI portion of a device, but
5596 * clears all the state associated with the device. This function differs
5597 * from __pci_reset_function_locked() in that it saves and restores device state
5598 * over the reset and takes the PCI device lock.
5599 *
5600 * Returns 0 if the device function was successfully reset or negative if the
5601 * device doesn't support resetting a single function.
5602 */
5603 int pci_reset_function(struct pci_dev *dev)
5604 {
5605 int rc;
5606
5607 if (!pci_reset_supported(dev))
5608 return -ENOTTY;
5609
5610 pci_dev_lock(dev);
5611 pci_dev_save_and_disable(dev);
5612
5613 rc = __pci_reset_function_locked(dev);
5614
5615 pci_dev_restore(dev);
5616 pci_dev_unlock(dev);
5617
5618 return rc;
5619 }
5620 EXPORT_SYMBOL_GPL(pci_reset_function);
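
/*
 * Usage sketch (illustrative; "pdev" is hypothetical): most callers need
 * only the single call, since locking and state save/restore are handled
 * internally:
 *
 *	if (pci_reset_supported(pdev) && pci_reset_function(pdev))
 *		pci_warn(pdev, "function reset failed\n");
 */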
5621
5622 /**
5623 * pci_reset_function_locked - quiesce and reset a PCI device function
5624 * @dev: PCI device to reset
5625 *
5626 * Some devices allow an individual function to be reset without affecting
5627 * other functions in the same device. The PCI device must be responsive
5628 * to PCI config space in order to use this function.
5629 *
5630 * This function does not just reset the PCI portion of a device, but
5631 * clears all the state associated with the device. This function differs
5632 * from __pci_reset_function_locked() in that it saves and restores device state
5633 * over the reset. It also differs from pci_reset_function() in that it
5634 * requires the PCI device lock to be held.
5635 *
5636 * Returns 0 if the device function was successfully reset or negative if the
5637 * device doesn't support resetting a single function.
5638 */
5639 int pci_reset_function_locked(struct pci_dev *dev)
5640 {
5641 int rc;
5642
5643 if (!pci_reset_supported(dev))
5644 return -ENOTTY;
5645
5646 pci_dev_save_and_disable(dev);
5647
5648 rc = __pci_reset_function_locked(dev);
5649
5650 pci_dev_restore(dev);
5651
5652 return rc;
5653 }
5654 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5655
5656 /**
5657 * pci_try_reset_function - quiesce and reset a PCI device function
5658 * @dev: PCI device to reset
5659 *
5660 * Same as above, except return -EAGAIN if unable to lock device.
5661 */
5662 int pci_try_reset_function(struct pci_dev *dev)
5663 {
5664 int rc;
5665
5666 if (!pci_reset_supported(dev))
5667 return -ENOTTY;
5668
5669 if (!pci_dev_trylock(dev))
5670 return -EAGAIN;
5671
5672 pci_dev_save_and_disable(dev);
5673 rc = __pci_reset_function_locked(dev);
5674 pci_dev_restore(dev);
5675 pci_dev_unlock(dev);
5676
5677 return rc;
5678 }
5679 EXPORT_SYMBOL_GPL(pci_try_reset_function);
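
/*
 * Usage sketch (illustrative; "pdev" is hypothetical): callers that must
 * not block on the device lock can back off and retry on -EAGAIN:
 *
 *	err = pci_try_reset_function(pdev);
 *	if (err == -EAGAIN)
 *		... defer and retry later ...
 */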
5680
5681 /* Do any devices on or below this bus prevent a bus reset? */
5682 static bool pci_bus_resettable(struct pci_bus *bus)
5683 {
5684 struct pci_dev *dev;
5685
5687 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5688 return false;
5689
5690 list_for_each_entry(dev, &bus->devices, bus_list) {
5691 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5692 (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5693 return false;
5694 }
5695
5696 return true;
5697 }
5698
5699 /* Lock devices from the top of the tree down */
5700 static void pci_bus_lock(struct pci_bus *bus)
5701 {
5702 struct pci_dev *dev;
5703
5704 list_for_each_entry(dev, &bus->devices, bus_list) {
5705 pci_dev_lock(dev);
5706 if (dev->subordinate)
5707 pci_bus_lock(dev->subordinate);
5708 }
5709 }
5710
5711 /* Unlock devices from the bottom of the tree up */
5712 static void pci_bus_unlock(struct pci_bus *bus)
5713 {
5714 struct pci_dev *dev;
5715
5716 list_for_each_entry(dev, &bus->devices, bus_list) {
5717 if (dev->subordinate)
5718 pci_bus_unlock(dev->subordinate);
5719 pci_dev_unlock(dev);
5720 }
5721 }
5722
5723 /* Return 1 on successful lock, 0 on contention */
5724 static int pci_bus_trylock(struct pci_bus *bus)
5725 {
5726 struct pci_dev *dev;
5727
5728 list_for_each_entry(dev, &bus->devices, bus_list) {
5729 if (!pci_dev_trylock(dev))
5730 goto unlock;
5731 if (dev->subordinate) {
5732 if (!pci_bus_trylock(dev->subordinate)) {
5733 pci_dev_unlock(dev);
5734 goto unlock;
5735 }
5736 }
5737 }
5738 return 1;
5739
5740 unlock:
5741 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5742 if (dev->subordinate)
5743 pci_bus_unlock(dev->subordinate);
5744 pci_dev_unlock(dev);
5745 }
5746 return 0;
5747 }
5748
5749 /* Do any devices on or below this slot prevent a bus reset? */
5750 static bool pci_slot_resettable(struct pci_slot *slot)
5751 {
5752 struct pci_dev *dev;
5753
5754 if (slot->bus->self &&
5755 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5756 return false;
5757
5758 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5759 if (!dev->slot || dev->slot != slot)
5760 continue;
5761 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5762 (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5763 return false;
5764 }
5765
5766 return true;
5767 }
5768
5769 /* Lock devices from the top of the tree down */
5770 static void pci_slot_lock(struct pci_slot *slot)
5771 {
5772 struct pci_dev *dev;
5773
5774 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5775 if (!dev->slot || dev->slot != slot)
5776 continue;
5777 pci_dev_lock(dev);
5778 if (dev->subordinate)
5779 pci_bus_lock(dev->subordinate);
5780 }
5781 }
5782
5783 /* Unlock devices from the bottom of the tree up */
5784 static void pci_slot_unlock(struct pci_slot *slot)
5785 {
5786 struct pci_dev *dev;
5787
5788 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5789 if (!dev->slot || dev->slot != slot)
5790 continue;
5791 if (dev->subordinate)
5792 pci_bus_unlock(dev->subordinate);
5793 pci_dev_unlock(dev);
5794 }
5795 }
5796
5797 /* Return 1 on successful lock, 0 on contention */
5798 static int pci_slot_trylock(struct pci_slot *slot)
5799 {
5800 struct pci_dev *dev;
5801
5802 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5803 if (!dev->slot || dev->slot != slot)
5804 continue;
5805 if (!pci_dev_trylock(dev))
5806 goto unlock;
5807 if (dev->subordinate) {
5808 if (!pci_bus_trylock(dev->subordinate)) {
5809 pci_dev_unlock(dev);
5810 goto unlock;
5811 }
5812 }
5813 }
5814 return 1;
5815
5816 unlock:
5817 list_for_each_entry_continue_reverse(dev,
5818 &slot->bus->devices, bus_list) {
5819 if (!dev->slot || dev->slot != slot)
5820 continue;
5821 if (dev->subordinate)
5822 pci_bus_unlock(dev->subordinate);
5823 pci_dev_unlock(dev);
5824 }
5825 return 0;
5826 }
5827
5828 /*
5829 * Save and disable devices from the top of the tree down while holding
5830 * the @dev mutex lock for the entire tree.
5831 */
5832 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5833 {
5834 struct pci_dev *dev;
5835
5836 list_for_each_entry(dev, &bus->devices, bus_list) {
5837 pci_dev_save_and_disable(dev);
5838 if (dev->subordinate)
5839 pci_bus_save_and_disable_locked(dev->subordinate);
5840 }
5841 }
5842
5843 /*
5844 * Restore devices from top of the tree down while holding @dev mutex lock
5845 * for the entire tree. Parent bridges need to be restored before we can
5846 * get to subordinate devices.
5847 */
5848 static void pci_bus_restore_locked(struct pci_bus *bus)
5849 {
5850 struct pci_dev *dev;
5851
5852 list_for_each_entry(dev, &bus->devices, bus_list) {
5853 pci_dev_restore(dev);
5854 if (dev->subordinate)
5855 pci_bus_restore_locked(dev->subordinate);
5856 }
5857 }
5858
5859 /*
5860 * Save and disable devices from the top of the tree down while holding
5861 * the @dev mutex lock for the entire tree.
5862 */
5863 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5864 {
5865 struct pci_dev *dev;
5866
5867 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5868 if (!dev->slot || dev->slot != slot)
5869 continue;
5870 pci_dev_save_and_disable(dev);
5871 if (dev->subordinate)
5872 pci_bus_save_and_disable_locked(dev->subordinate);
5873 }
5874 }
5875
5876 /*
5877 * Restore devices from top of the tree down while holding @dev mutex lock
5878 * for the entire tree. Parent bridges need to be restored before we can
5879 * get to subordinate devices.
5880 */
5881 static void pci_slot_restore_locked(struct pci_slot *slot)
5882 {
5883 struct pci_dev *dev;
5884
5885 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5886 if (!dev->slot || dev->slot != slot)
5887 continue;
5888 pci_dev_restore(dev);
5889 if (dev->subordinate)
5890 pci_bus_restore_locked(dev->subordinate);
5891 }
5892 }
5893
5894 static int pci_slot_reset(struct pci_slot *slot, bool probe)
5895 {
5896 int rc;
5897
5898 if (!slot || !pci_slot_resettable(slot))
5899 return -ENOTTY;
5900
5901 if (!probe)
5902 pci_slot_lock(slot);
5903
5904 might_sleep();
5905
5906 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5907
5908 if (!probe)
5909 pci_slot_unlock(slot);
5910
5911 return rc;
5912 }
5913
5914 /**
5915 * pci_probe_reset_slot - probe whether a PCI slot can be reset
5916 * @slot: PCI slot to probe
5917 *
5918 * Return 0 if slot can be reset, negative if a slot reset is not supported.
5919 */
5920 int pci_probe_reset_slot(struct pci_slot *slot)
5921 {
5922 return pci_slot_reset(slot, PCI_RESET_PROBE);
5923 }
5924 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5925
5926 /**
5927 * __pci_reset_slot - Try to reset a PCI slot
5928 * @slot: PCI slot to reset
5929 *
5930 * A PCI bus may host multiple slots, each slot may support a reset mechanism
5931 * independent of other slots. For instance, some slots may support slot power
5932 * control. In the case of a 1:1 bus to slot architecture, this function may
5933 * wrap the bus reset to avoid spurious slot related events such as hotplug.
5934 * Generally a slot reset should be attempted before a bus reset. All of the
5935 * functions of the slot and any subordinate buses behind the slot are reset
5936 * through this function. PCI config space of all devices in the slot and
5937 * behind the slot is saved before and restored after reset.
5938 *
5939 * Same as above except return -EAGAIN if the slot cannot be locked
5940 */
5941 static int __pci_reset_slot(struct pci_slot *slot)
5942 {
5943 int rc;
5944
5945 rc = pci_slot_reset(slot, PCI_RESET_PROBE);
5946 if (rc)
5947 return rc;
5948
5949 if (pci_slot_trylock(slot)) {
5950 pci_slot_save_and_disable_locked(slot);
5951 might_sleep();
5952 rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
5953 pci_slot_restore_locked(slot);
5954 pci_slot_unlock(slot);
5955 } else
5956 rc = -EAGAIN;
5957
5958 return rc;
5959 }
5960
5961 static int pci_bus_reset(struct pci_bus *bus, bool probe)
5962 {
5963 int ret;
5964
5965 if (!bus->self || !pci_bus_resettable(bus))
5966 return -ENOTTY;
5967
5968 if (probe)
5969 return 0;
5970
5971 pci_bus_lock(bus);
5972
5973 might_sleep();
5974
5975 ret = pci_bridge_secondary_bus_reset(bus->self);
5976
5977 pci_bus_unlock(bus);
5978
5979 return ret;
5980 }
5981
5982 /**
5983 * pci_bus_error_reset - reset the bridge's subordinate bus
5984 * @bridge: The parent device that connects to the bus to reset
5985 *
5986 * This function will first try to reset the slots on this bus if the method is
5987 * available. If slot reset fails or is not available, this will fall back to a
5988 * secondary bus reset.
5989 */
5990 int pci_bus_error_reset(struct pci_dev *bridge)
5991 {
5992 struct pci_bus *bus = bridge->subordinate;
5993 struct pci_slot *slot;
5994
5995 if (!bus)
5996 return -ENOTTY;
5997
5998 mutex_lock(&pci_slot_mutex);
5999 if (list_empty(&bus->slots))
6000 goto bus_reset;
6001
6002 list_for_each_entry(slot, &bus->slots, list)
6003 if (pci_probe_reset_slot(slot))
6004 goto bus_reset;
6005
6006 list_for_each_entry(slot, &bus->slots, list)
6007 if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
6008 goto bus_reset;
6009
6010 mutex_unlock(&pci_slot_mutex);
6011 return 0;
6012 bus_reset:
6013 mutex_unlock(&pci_slot_mutex);
6014 return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
6015 }
6016
6017 /**
6018 * pci_probe_reset_bus - probe whether a PCI bus can be reset
6019 * @bus: PCI bus to probe
6020 *
6021 * Return 0 if bus can be reset, negative if a bus reset is not supported.
6022 */
6023 int pci_probe_reset_bus(struct pci_bus *bus)
6024 {
6025 return pci_bus_reset(bus, PCI_RESET_PROBE);
6026 }
6027 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
6028
6029 /**
6030 * __pci_reset_bus - Try to reset a PCI bus
6031 * @bus: top level PCI bus to reset
6032 *
6033 * Same as above except return -EAGAIN if the bus cannot be locked
6034 */
6035 static int __pci_reset_bus(struct pci_bus *bus)
6036 {
6037 int rc;
6038
6039 rc = pci_bus_reset(bus, PCI_RESET_PROBE);
6040 if (rc)
6041 return rc;
6042
6043 if (pci_bus_trylock(bus)) {
6044 pci_bus_save_and_disable_locked(bus);
6045 might_sleep();
6046 rc = pci_bridge_secondary_bus_reset(bus->self);
6047 pci_bus_restore_locked(bus);
6048 pci_bus_unlock(bus);
6049 } else
6050 rc = -EAGAIN;
6051
6052 return rc;
6053 }
6054
6055 /**
6056 * pci_reset_bus - Try to reset a PCI bus
6057 * @pdev: top level PCI device to reset via slot/bus
6058 *
6059 * Same as above except return -EAGAIN if the bus cannot be locked
6060 */
6061 int pci_reset_bus(struct pci_dev *pdev)
6062 {
6063 return (!pci_probe_reset_slot(pdev->slot)) ?
6064 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
6065 }
6066 EXPORT_SYMBOL_GPL(pci_reset_bus);
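
/*
 * Usage sketch (illustrative; "pdev" is hypothetical): a caller that
 * owns every function on the bus, e.g. in a device-assignment scenario,
 * might probe first and then let pci_reset_bus() pick slot vs. bus reset:
 *
 *	if (!pci_probe_reset_bus(pdev->bus))
 *		err = pci_reset_bus(pdev);
 */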
6067
6068 /**
6069 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
6070 * @dev: PCI device to query
6071 *
6072 * Returns mmrbc: maximum designed memory read count in bytes or
6073 * appropriate error value.
6074 */
6075 int pcix_get_max_mmrbc(struct pci_dev *dev)
6076 {
6077 int cap;
6078 u32 stat;
6079
6080 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
6081 if (!cap)
6082 return -EINVAL;
6083
6084 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
6085 return -EINVAL;
6086
6087 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
6088 }
6089 EXPORT_SYMBOL(pcix_get_max_mmrbc);
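
/*
 * Worked example of the encoding: the 2-bit MAX_READ field maps to
 * bytes as 512 << v, so a field value of 2 decodes to 2048 bytes.
 * pcix_set_mmrbc() below computes the inverse, v = ffs(mmrbc) - 10
 * (ffs(2048) = 12, hence v = 2).
 */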
6090
6091 /**
6092 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
6093 * @dev: PCI device to query
6094 *
6095 * Returns mmrbc: maximum memory read count in bytes or appropriate error
6096 * value.
6097 */
6098 int pcix_get_mmrbc(struct pci_dev *dev)
6099 {
6100 int cap;
6101 u16 cmd;
6102
6103 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
6104 if (!cap)
6105 return -EINVAL;
6106
6107 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
6108 return -EINVAL;
6109
6110 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
6111 }
6112 EXPORT_SYMBOL(pcix_get_mmrbc);
6113
6114 /**
6115 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
6116 * @dev: PCI device to set
6117 * @mmrbc: maximum memory read count in bytes
6118 * valid values are 512, 1024, 2048, 4096
6119 *
6120 * If possible, sets the maximum memory read byte count; some bridges have
6121 * errata that prevent this.
6122 */
6123 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
6124 {
6125 int cap;
6126 u32 stat, v, o;
6127 u16 cmd;
6128
6129 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
6130 return -EINVAL;
6131
6132 v = ffs(mmrbc) - 10;
6133
6134 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
6135 if (!cap)
6136 return -EINVAL;
6137
6138 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
6139 return -EINVAL;
6140
6141 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
6142 return -E2BIG;
6143
6144 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
6145 return -EINVAL;
6146
6147 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
6148 if (o != v) {
6149 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
6150 return -EIO;
6151
6152 cmd &= ~PCI_X_CMD_MAX_READ;
6153 cmd |= v << 2;
6154 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
6155 return -EIO;
6156 }
6157 return 0;
6158 }
6159 EXPORT_SYMBOL(pcix_set_mmrbc);
6160
6161 /**
6162 * pcie_get_readrq - get PCI Express read request size
6163 * @dev: PCI device to query
6164 *
6165 * Returns maximum memory read request in bytes or appropriate error value.
6166 */
6167 int pcie_get_readrq(struct pci_dev *dev)
6168 {
6169 u16 ctl;
6170
6171 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6172
6173 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6174 }
6175 EXPORT_SYMBOL(pcie_get_readrq);
6176
6177 /**
6178 * pcie_set_readrq - set PCI Express maximum memory read request
6179 * @dev: PCI device to set
6180 * @rq: maximum memory read count in bytes
6181 * valid values are 128, 256, 512, 1024, 2048, 4096
6182 *
6183 * If possible, sets the maximum memory read request size in bytes.
6184 */
6185 int pcie_set_readrq(struct pci_dev *dev, int rq)
6186 {
6187 u16 v;
6188 int ret;
6189 struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
6190
6191 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
6192 return -EINVAL;
6193
6194 /*
6195 * If using the "performance" PCIe config, we clamp the read rq
6196 * size to the max packet size to keep the host bridge from
6197 * generating requests larger than we can cope with.
6198 */
6199 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
6200 int mps = pcie_get_mps(dev);
6201
6202 if (mps < rq)
6203 rq = mps;
6204 }
6205
6206 v = (ffs(rq) - 8) << 12;
6207
6208 if (bridge->no_inc_mrrs) {
6209 int max_mrrs = pcie_get_readrq(dev);
6210
6211 if (rq > max_mrrs) {
6212 pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs);
6213 return -EINVAL;
6214 }
6215 }
6216
6217 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6218 PCI_EXP_DEVCTL_READRQ, v);
6219
6220 return pcibios_err_to_errno(ret);
6221 }
6222 EXPORT_SYMBOL(pcie_set_readrq);
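
/*
 * Usage sketch (illustrative; "pdev" is hypothetical): a driver asking
 * for 256-byte read requests, reporting the effective value on failure:
 *
 *	if (pcie_set_readrq(pdev, 256))
 *		pci_info(pdev, "MRRS left at %d bytes\n",
 *			 pcie_get_readrq(pdev));
 */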
6223
6224 /**
6225 * pcie_get_mps - get PCI Express maximum payload size
6226 * @dev: PCI device to query
6227 *
6228 * Returns maximum payload size in bytes
6229 */
6230 int pcie_get_mps(struct pci_dev *dev)
6231 {
6232 u16 ctl;
6233
6234 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6235
6236 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6237 }
6238 EXPORT_SYMBOL(pcie_get_mps);
6239
6240 /**
6241 * pcie_set_mps - set PCI Express maximum payload size
6242 * @dev: PCI device to set
6243 * @mps: maximum payload size in bytes
6244 * valid values are 128, 256, 512, 1024, 2048, 4096
6245 *
6246 * If possible, sets the maximum payload size.
6247 */
6248 int pcie_set_mps(struct pci_dev *dev, int mps)
6249 {
6250 u16 v;
6251 int ret;
6252
6253 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
6254 return -EINVAL;
6255
6256 v = ffs(mps) - 8;
6257 if (v > dev->pcie_mpss)
6258 return -EINVAL;
6259 v <<= 5;
6260
6261 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6262 PCI_EXP_DEVCTL_PAYLOAD, v);
6263
6264 return pcibios_err_to_errno(ret);
6265 }
6266 EXPORT_SYMBOL(pcie_set_mps);
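
/*
 * Encoding check: v = ffs(mps) - 8, so mps = 256 gives ffs(256) = 9 and
 * v = 1, which after the <<= 5 lands in bits 7:5 of Device Control as
 * required; pcie_get_mps() above decodes with 128 << v.
 */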
6267
6268 /**
6269 * pcie_bandwidth_available - determine minimum link settings of a PCIe
6270 * device and its bandwidth limitation
6271 * @dev: PCI device to query
6272 * @limiting_dev: storage for device causing the bandwidth limitation
6273 * @speed: storage for speed of limiting device
6274 * @width: storage for width of limiting device
6275 *
6276 * Walk up the PCI device chain and find the point where the minimum
6277 * bandwidth is available. Return the bandwidth available there and (if
6278 * limiting_dev, speed, and width pointers are supplied) information about
6279 * that point. The bandwidth returned is in Mb/s, i.e., megabits/second of
6280 * raw bandwidth.
6281 */
6282 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
6283 enum pci_bus_speed *speed,
6284 enum pcie_link_width *width)
6285 {
6286 u16 lnksta;
6287 enum pci_bus_speed next_speed;
6288 enum pcie_link_width next_width;
6289 u32 bw, next_bw;
6290
6291 if (speed)
6292 *speed = PCI_SPEED_UNKNOWN;
6293 if (width)
6294 *width = PCIE_LNK_WIDTH_UNKNOWN;
6295
6296 bw = 0;
6297
6298 while (dev) {
6299 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
6300
6301 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
6302 next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
6303
6304 next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
6305
6306 /* Check if current device limits the total bandwidth */
6307 if (!bw || next_bw <= bw) {
6308 bw = next_bw;
6309
6310 if (limiting_dev)
6311 *limiting_dev = dev;
6312 if (speed)
6313 *speed = next_speed;
6314 if (width)
6315 *width = next_width;
6316 }
6317
6318 dev = pci_upstream_bridge(dev);
6319 }
6320
6321 return bw;
6322 }
6323 EXPORT_SYMBOL(pcie_bandwidth_available);
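
/*
 * Usage sketch (illustrative; "pdev" is hypothetical): report the
 * bottleneck for a device's upstream chain:
 *
 *	enum pci_bus_speed speed;
 *	enum pcie_link_width width;
 *	struct pci_dev *lim = NULL;
 *	u32 bw = pcie_bandwidth_available(pdev, &lim, &speed, &width);
 *
 *	pci_info(pdev, "%u Mb/s available (x%d link)\n", bw, width);
 */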
6324
6325 /**
6326 * pcie_get_speed_cap - query for the PCI device's link speed capability
6327 * @dev: PCI device to query
6328 *
6329 * Query the PCI device speed capability. Return the maximum link speed
6330 * supported by the device.
6331 */
6332 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
6333 {
6334 u32 lnkcap2, lnkcap;
6335
6336 /*
6337 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The
6338 * implementation note there recommends using the Supported Link
6339 * Speeds Vector in Link Capabilities 2 when supported.
6340 *
6341 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
6342 * should use the Supported Link Speeds field in Link Capabilities,
6343 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
6344 */
6345 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
6346
6347 /* PCIe r3.0-compliant */
6348 if (lnkcap2)
6349 return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
6350
6351 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6352 if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
6353 return PCIE_SPEED_5_0GT;
6354 else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
6355 return PCIE_SPEED_2_5GT;
6356
6357 return PCI_SPEED_UNKNOWN;
6358 }
6359 EXPORT_SYMBOL(pcie_get_speed_cap);
6360
6361 /**
6362 * pcie_get_width_cap - query for the PCI device's link width capability
6363 * @dev: PCI device to query
6364 *
6365 * Query the PCI device width capability. Return the maximum link width
6366 * supported by the device.
6367 */
6368 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
6369 {
6370 u32 lnkcap;
6371
6372 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6373 if (lnkcap)
6374 return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
6375
6376 return PCIE_LNK_WIDTH_UNKNOWN;
6377 }
6378 EXPORT_SYMBOL(pcie_get_width_cap);
6379
6380 /**
6381 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
6382 * @dev: PCI device
6383 * @speed: storage for link speed
6384 * @width: storage for link width
6385 *
6386 * Calculate a PCI device's link bandwidth by querying for its link speed
6387 * and width, multiplying them, and applying encoding overhead. The result
6388 * is in Mb/s, i.e., megabits/second of raw bandwidth.
6389 */
6390 u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
6391 enum pcie_link_width *width)
6392 {
6393 *speed = pcie_get_speed_cap(dev);
6394 *width = pcie_get_width_cap(dev);
6395
6396 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
6397 return 0;
6398
6399 return *width * PCIE_SPEED2MBS_ENC(*speed);
6400 }
6401
6402 /**
6403 * __pcie_print_link_status - Report the PCI device's link speed and width
6404 * @dev: PCI device to query
6405 * @verbose: Print info even when enough bandwidth is available
6406 *
6407 * If the available bandwidth at the device is less than the device is
6408 * capable of, report the device's maximum possible bandwidth and the
6409 * upstream link that limits its performance. If @verbose, always print
6410 * the available bandwidth, even if the device isn't constrained.
6411 */
6412 void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
6413 {
6414 enum pcie_link_width width, width_cap;
6415 enum pci_bus_speed speed, speed_cap;
6416 struct pci_dev *limiting_dev = NULL;
6417 u32 bw_avail, bw_cap;
6418
6419 bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
6420 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
6421
6422 if (bw_avail >= bw_cap && verbose)
6423 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
6424 bw_cap / 1000, bw_cap % 1000,
6425 pci_speed_string(speed_cap), width_cap);
6426 else if (bw_avail < bw_cap)
6427 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
6428 bw_avail / 1000, bw_avail % 1000,
6429 pci_speed_string(speed), width,
6430 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
6431 bw_cap / 1000, bw_cap % 1000,
6432 pci_speed_string(speed_cap), width_cap);
6433 }
6434
6435 /**
6436 * pcie_print_link_status - Report the PCI device's link speed and width
6437 * @dev: PCI device to query
6438 *
6439 * Report the available bandwidth at the device.
6440 */
6441 void pcie_print_link_status(struct pci_dev *dev)
6442 {
6443 __pcie_print_link_status(dev, true);
6444 }
6445 EXPORT_SYMBOL(pcie_print_link_status);
6446
6447 /**
6448 * pci_select_bars - Make BAR mask from the type of resource
6449 * @dev: the PCI device for which BAR mask is made
6450 * @flags: resource type mask to be selected
6451 *
6452 * This helper routine makes a BAR mask from the given resource type.
6453 */
6454 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
6455 {
6456 int i, bars = 0;
6457 for (i = 0; i < PCI_NUM_RESOURCES; i++)
6458 if (pci_resource_flags(dev, i) & flags)
6459 bars |= (1 << i);
6460 return bars;
6461 }
6462 EXPORT_SYMBOL(pci_select_bars);
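
/*
 * Usage sketch (illustrative; "pdev" and the region name are
 * hypothetical): request only the memory BARs of a device:
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	err = pci_request_selected_regions(pdev, bars, "my_driver");
 */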
6463
6464 /* Some architectures require additional programming to enable VGA */
6465 static arch_set_vga_state_t arch_set_vga_state;
6466
6467 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
6468 {
6469 arch_set_vga_state = func; /* NULL disables */
6470 }
6471
6472 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
6473 unsigned int command_bits, u32 flags)
6474 {
6475 if (arch_set_vga_state)
6476 return arch_set_vga_state(dev, decode, command_bits,
6477 flags);
6478 return 0;
6479 }
6480
6481 /**
6482 * pci_set_vga_state - set VGA decode state on device and parents if requested
6483 * @dev: the PCI device
6484 * @decode: true = enable decoding, false = disable decoding
6485 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
6486 * @flags: traverse ancestors and change bridges
6487 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
6488 */
6489 int pci_set_vga_state(struct pci_dev *dev, bool decode,
6490 unsigned int command_bits, u32 flags)
6491 {
6492 struct pci_bus *bus;
6493 struct pci_dev *bridge;
6494 u16 cmd;
6495 int rc;
6496
6497 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
6498
6499 /* ARCH specific VGA enables */
6500 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6501 if (rc)
6502 return rc;
6503
6504 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6505 pci_read_config_word(dev, PCI_COMMAND, &cmd);
6506 if (decode)
6507 cmd |= command_bits;
6508 else
6509 cmd &= ~command_bits;
6510 pci_write_config_word(dev, PCI_COMMAND, cmd);
6511 }
6512
6513 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6514 return 0;
6515
6516 bus = dev->bus;
6517 while (bus) {
6518 bridge = bus->self;
6519 if (bridge) {
6520 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6521 &cmd);
6522 if (decode)
6523 cmd |= PCI_BRIDGE_CTL_VGA;
6524 else
6525 cmd &= ~PCI_BRIDGE_CTL_VGA;
6526 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6527 cmd);
6528 }
6529 bus = bus->parent;
6530 }
6531 return 0;
6532 }
6533
6534 #ifdef CONFIG_ACPI
6535 bool pci_pr3_present(struct pci_dev *pdev)
6536 {
6537 struct acpi_device *adev;
6538
6539 if (acpi_disabled)
6540 return false;
6541
6542 adev = ACPI_COMPANION(&pdev->dev);
6543 if (!adev)
6544 return false;
6545
6546 return adev->power.flags.power_resources &&
6547 acpi_has_method(adev->handle, "_PR3");
6548 }
6549 EXPORT_SYMBOL_GPL(pci_pr3_present);
6550 #endif
6551
6552 /**
6553 * pci_add_dma_alias - Add a DMA devfn alias for a device
6554 * @dev: the PCI device for which alias is added
6555 * @devfn_from: alias slot and function
6556 * @nr_devfns: number of subsequent devfns to alias
6557 *
6558 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6559 * which is used to program permissible bus-devfn source addresses for DMA
6560 * requests in an IOMMU. These aliases factor into IOMMU group creation
6561 * and are useful for devices generating DMA requests beyond or different
6562 * from their logical bus-devfn. Examples include device quirks where the
6563 * device simply uses the wrong devfn, as well as non-transparent bridges
6564 * where the alias may be a proxy for devices in another domain.
6565 *
6566 * IOMMU group creation is performed during device discovery or addition,
6567 * prior to any potential DMA mapping and therefore prior to driver probing
6568 * (especially for userspace assigned devices where IOMMU group definition
6569 * cannot be left as a userspace activity). DMA aliases should therefore
6570 * be configured via quirks, such as the PCI fixup header quirk.
6571 */
6572 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
6573 unsigned int nr_devfns)
6574 {
6575 int devfn_to;
6576
6577 nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
6578 devfn_to = devfn_from + nr_devfns - 1;
6579
6580 if (!dev->dma_alias_mask)
6581 dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6582 if (!dev->dma_alias_mask) {
6583 pci_warn(dev, "Unable to allocate DMA alias mask\n");
6584 return;
6585 }
6586
6587 bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6588
6589 if (nr_devfns == 1)
6590 pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6591 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6592 else if (nr_devfns > 1)
6593 pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6594 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6595 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6596 }
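
/*
 * Usage sketch (illustrative; the vendor/device IDs are hypothetical):
 * a header fixup quirk that declares function 1 as a DMA alias of the
 * matched device, following the quirk pattern described above:
 *
 * static void quirk_fn1_dma_alias(struct pci_dev *dev)
 * {
 *	pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
 * }
 * DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_fn1_dma_alias);
 */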
6597
6598 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6599 {
6600 return (dev1->dma_alias_mask &&
6601 test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6602 (dev2->dma_alias_mask &&
6603 test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6604 pci_real_dma_dev(dev1) == dev2 ||
6605 pci_real_dma_dev(dev2) == dev1;
6606 }
6607
6608 bool pci_device_is_present(struct pci_dev *pdev)
6609 {
6610 u32 v;
6611
6612 /* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
6613 pdev = pci_physfn(pdev);
6614 if (pci_dev_is_disconnected(pdev))
6615 return false;
6616 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6617 }
6618 EXPORT_SYMBOL_GPL(pci_device_is_present);
6619
6620 void pci_ignore_hotplug(struct pci_dev *dev)
6621 {
6622 struct pci_dev *bridge = dev->bus->self;
6623
6624 dev->ignore_hotplug = 1;
6625 /* Propagate the "ignore hotplug" setting to the parent bridge. */
6626 if (bridge)
6627 bridge->ignore_hotplug = 1;
6628 }
6629 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6630
6631 /**
6632 * pci_real_dma_dev - Get PCI DMA device for PCI device
6633 * @dev: the PCI device that may have a PCI DMA alias
6634 *
6635 * Permits the platform to provide architecture-specific functionality to
6636 * devices needing to alias DMA to another PCI device on another PCI bus. If
6637 * the PCI device is on the same bus, it is recommended to use
6638 * pci_add_dma_alias(). This is the default implementation. Architecture
6639 * implementations can override this.
6640 */
6641 struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6642 {
6643 return dev;
6644 }
6645
6646 resource_size_t __weak pcibios_default_alignment(void)
6647 {
6648 return 0;
6649 }
6650
6651 /*
6652 * Arches that don't want to expose struct resource to userland as-is in
6653 * sysfs and /proc can implement their own pci_resource_to_user().
6654 */
6655 void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6656 const struct resource *rsrc,
6657 resource_size_t *start, resource_size_t *end)
6658 {
6659 *start = rsrc->start;
6660 *end = rsrc->end;
6661 }
6662
6663 static char *resource_alignment_param;
6664 static DEFINE_SPINLOCK(resource_alignment_lock);
6665
6666 /**
6667 * pci_specified_resource_alignment - get resource alignment specified by user.
6668 * @dev: the PCI device to check
6669 * @resize: whether or not to change resources' size when reassigning alignment
6670 *
6671 * RETURNS: Resource alignment if it is specified.
6672 * Zero if it is not specified.
6673 */
6674 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6675 bool *resize)
6676 {
6677 int align_order, count;
6678 resource_size_t align = pcibios_default_alignment();
6679 const char *p;
6680 int ret;
6681
6682 spin_lock(&resource_alignment_lock);
6683 p = resource_alignment_param;
6684 if (!p || !*p)
6685 goto out;
6686 if (pci_has_flag(PCI_PROBE_ONLY)) {
6687 align = 0;
6688 pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6689 goto out;
6690 }
6691
6692 while (*p) {
6693 count = 0;
6694 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6695 p[count] == '@') {
6696 p += count + 1;
6697 if (align_order > 63) {
6698 pr_err("PCI: Invalid requested alignment (order %d)\n",
6699 align_order);
6700 align_order = PAGE_SHIFT;
6701 }
6702 } else {
6703 align_order = PAGE_SHIFT;
6704 }
6705
6706 ret = pci_dev_str_match(dev, p, &p);
6707 if (ret == 1) {
6708 *resize = true;
6709 align = 1ULL << align_order;
6710 break;
6711 } else if (ret < 0) {
6712 pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6713 p);
6714 break;
6715 }
6716
6717 if (*p != ';' && *p != ',') {
6718 /* End of param or invalid format */
6719 break;
6720 }
6721 p++;
6722 }
6723 out:
6724 spin_unlock(&resource_alignment_lock);
6725 return align;
6726 }
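
/*
 * Example (hypothetical device address): booting with
 * "pci=resource_alignment=12@0000:01:00.0" requests 2^12 = 4096-byte
 * alignment for that device's memory BARs; if the "order@" prefix is
 * omitted, the alignment defaults to PAGE_SHIFT.
 */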
6727
6728 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6729 resource_size_t align, bool resize)
6730 {
6731 struct resource *r = &dev->resource[bar];
6732 resource_size_t size;
6733
6734 if (!(r->flags & IORESOURCE_MEM))
6735 return;
6736
6737 if (r->flags & IORESOURCE_PCI_FIXED) {
6738 pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
6739 bar, r, (unsigned long long)align);
6740 return;
6741 }
6742
6743 size = resource_size(r);
6744 if (size >= align)
6745 return;
6746
6747 /*
6748 * Increase the alignment of the resource. There are two ways we
6749 * can do this:
6750 *
6751 * 1) Increase the size of the resource. BARs are aligned on their
6752 * size, so when we reallocate space for this resource, we'll
6753 * allocate it with the larger alignment. This also prevents
6754 * assignment of any other BARs inside the alignment region, so
6755 * if we're requesting page alignment, this means no other BARs
6756 * will share the page.
6757 *
6758 * The disadvantage is that this makes the resource larger than
6759 * the hardware BAR, which may break drivers that compute things
6760 * based on the resource size, e.g., to find registers at a
6761 * fixed offset before the end of the BAR.
6762 *
6763 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6764 * set r->start to the desired alignment. By itself this
6765 * doesn't prevent other BARs being put inside the alignment
6766 * region, but if we realign *every* resource of every device in
6767 * the system, none of them will share an alignment region.
6768 *
6769 * When the user has requested alignment for only some devices via
6770 * the "pci=resource_alignment" argument, "resize" is true and we
6771 * use the first method. Otherwise we assume we're aligning all
6772 * devices and we use the second.
6773 */
6774
6775 pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
6776 bar, r, (unsigned long long)align);
6777
6778 if (resize) {
6779 r->start = 0;
6780 r->end = align - 1;
6781 } else {
6782 r->flags &= ~IORESOURCE_SIZEALIGN;
6783 r->flags |= IORESOURCE_STARTALIGN;
6784 r->start = align;
6785 r->end = r->start + size - 1;
6786 }
6787 r->flags |= IORESOURCE_UNSET;
6788 }
6789
6790 /*
6791 * This function disables memory decoding and releases memory resources
6792 * of the device specified by the kernel boot parameter
6793 * 'pci=resource_alignment='. It also rounds the resource size up to the
6794 * specified alignment. Later on, the kernel will assign suitably aligned
6795 * memory resources back to the device.
6796 */
6797 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6798 {
6799 int i;
6800 struct resource *r;
6801 resource_size_t align;
6802 u16 command;
6803 bool resize = false;
6804
6805 /*
6806 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6807 * 3.4.1.11. Their resources are allocated from the space
6808 * described by the VF BARx register in the PF's SR-IOV capability.
6809 * We can't influence their alignment here.
6810 */
6811 if (dev->is_virtfn)
6812 return;
6813
6814 /* check if specified PCI is target device to reassign */
6815 align = pci_specified_resource_alignment(dev, &resize);
6816 if (!align)
6817 return;
6818
6819 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6820 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6821 pci_warn(dev, "Can't reassign resources to host bridge\n");
6822 return;
6823 }
6824
6825 pci_read_config_word(dev, PCI_COMMAND, &command);
6826 command &= ~PCI_COMMAND_MEMORY;
6827 pci_write_config_word(dev, PCI_COMMAND, command);
6828
6829 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6830 pci_request_resource_alignment(dev, i, align, resize);
6831
6832 /*
6833 * Need to disable the bridge's resource windows so the kernel
6834 * can reassign new resource windows later on.
6836 */
6837 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6838 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6839 r = &dev->resource[i];
6840 if (!(r->flags & IORESOURCE_MEM))
6841 continue;
6842 r->flags |= IORESOURCE_UNSET;
6843 r->end = resource_size(r) - 1;
6844 r->start = 0;
6845 }
6846 pci_disable_bridge_window(dev);
6847 }
6848 }
6849
6850 static ssize_t resource_alignment_show(const struct bus_type *bus, char *buf)
6851 {
6852 size_t count = 0;
6853
6854 spin_lock(&resource_alignment_lock);
6855 if (resource_alignment_param)
6856 count = sysfs_emit(buf, "%s\n", resource_alignment_param);
6857 spin_unlock(&resource_alignment_lock);
6858
6859 return count;
6860 }
6861
6862 static ssize_t resource_alignment_store(const struct bus_type *bus,
6863 const char *buf, size_t count)
6864 {
6865 char *param, *old, *end;
6866
6867 if (count >= (PAGE_SIZE - 1))
6868 return -EINVAL;
6869
6870 param = kstrndup(buf, count, GFP_KERNEL);
6871 if (!param)
6872 return -ENOMEM;
6873
6874 end = strchr(param, '\n');
6875 if (end)
6876 *end = '\0';
6877
6878 spin_lock(&resource_alignment_lock);
6879 old = resource_alignment_param;
6880 if (strlen(param)) {
6881 resource_alignment_param = param;
6882 } else {
6883 kfree(param);
6884 resource_alignment_param = NULL;
6885 }
6886 spin_unlock(&resource_alignment_lock);
6887
6888 kfree(old);
6889
6890 return count;
6891 }
6892
6893 static BUS_ATTR_RW(resource_alignment);
6894
6895 static int __init pci_resource_alignment_sysfs_init(void)
6896 {
6897 return bus_create_file(&pci_bus_type,
6898 &bus_attr_resource_alignment);
6899 }
6900 late_initcall(pci_resource_alignment_sysfs_init);
6901
6902 static void pci_no_domains(void)
6903 {
6904 #ifdef CONFIG_PCI_DOMAINS
6905 pci_domains_supported = 0;
6906 #endif
6907 }
6908
6909 #ifdef CONFIG_PCI_DOMAINS_GENERIC
6910 static DEFINE_IDA(pci_domain_nr_static_ida);
6911 static DEFINE_IDA(pci_domain_nr_dynamic_ida);
6912
6913 static void of_pci_reserve_static_domain_nr(void)
6914 {
6915 struct device_node *np;
6916 int domain_nr;
6917
6918 for_each_node_by_type(np, "pci") {
6919 domain_nr = of_get_pci_domain_nr(np);
6920 if (domain_nr < 0)
6921 continue;
6922 /*
6923 * Permanently allocate domain_nr in dynamic_ida to prevent it
6924 * from ever being handed out by dynamic allocation.
6925 */
6926 ida_alloc_range(&pci_domain_nr_dynamic_ida,
6927 domain_nr, domain_nr, GFP_KERNEL);
6928 }
6929 }
6930
6931 static int of_pci_bus_find_domain_nr(struct device *parent)
6932 {
6933 static bool static_domains_reserved = false;
6934 int domain_nr;
6935
6936 /* On the first call scan device tree for static allocations. */
6937 if (!static_domains_reserved) {
6938 of_pci_reserve_static_domain_nr();
6939 static_domains_reserved = true;
6940 }
6941
6942 if (parent) {
6943 /*
6944 * If domain is in DT, allocate it in static IDA. This
6945 * prevents duplicate static allocations in case of errors
6946 * in DT.
6947 */
6948 domain_nr = of_get_pci_domain_nr(parent->of_node);
6949 if (domain_nr >= 0)
6950 return ida_alloc_range(&pci_domain_nr_static_ida,
6951 domain_nr, domain_nr,
6952 GFP_KERNEL);
6953 }
6954
6955 /*
6956 * If domain was not specified in DT, choose a free ID from dynamic
6957 * allocations. All domain numbers from DT are permanently in
6958 * dynamic allocations to prevent assigning them to other DT nodes
6959 * without static domain.
6960 */
6961 return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
6962 }
6963
6964 static void of_pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
6965 {
6966 if (bus->domain_nr < 0)
6967 return;
6968
6969 /* Release domain from IDA where it was allocated. */
6970 if (of_get_pci_domain_nr(parent->of_node) == bus->domain_nr)
6971 ida_free(&pci_domain_nr_static_ida, bus->domain_nr);
6972 else
6973 ida_free(&pci_domain_nr_dynamic_ida, bus->domain_nr);
6974 }
6975
6976 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6977 {
6978 return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6979 acpi_pci_bus_find_domain_nr(bus);
6980 }
6981
6982 void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
6983 {
6984 if (!acpi_disabled)
6985 return;
6986 of_pci_bus_release_domain_nr(bus, parent);
6987 }
6988 #endif
6989
6990 /**
6991 * pci_ext_cfg_avail - can we access extended PCI config space?
6992 *
6993 * Returns 1 if we can access PCI extended config space (offsets
6994 * greater than 0xff). This is the default implementation. Architecture
6995 * implementations can override this.
6996 */
6997 int __weak pci_ext_cfg_avail(void)
6998 {
6999 return 1;
7000 }
7001
7002 void __weak pci_fixup_cardbus(struct pci_bus *bus)
7003 {
7004 }
7005 EXPORT_SYMBOL(pci_fixup_cardbus);
7006
7007 static int __init pci_setup(char *str)
7008 {
7009 while (str) {
7010 char *k = strchr(str, ',');
7011 if (k)
7012 *k++ = 0;
7013 if (*str && (str = pcibios_setup(str)) && *str) {
7014 if (!strcmp(str, "nomsi")) {
7015 pci_no_msi();
7016 } else if (!strncmp(str, "noats", 5)) {
7017 pr_info("PCIe: ATS is disabled\n");
7018 pcie_ats_disabled = true;
7019 } else if (!strcmp(str, "noaer")) {
7020 pci_no_aer();
7021 } else if (!strcmp(str, "earlydump")) {
7022 pci_early_dump = true;
7023 } else if (!strncmp(str, "realloc=", 8)) {
7024 pci_realloc_get_opt(str + 8);
7025 } else if (!strncmp(str, "realloc", 7)) {
7026 pci_realloc_get_opt("on");
7027 } else if (!strcmp(str, "nodomains")) {
7028 pci_no_domains();
7029 } else if (!strncmp(str, "noari", 5)) {
7030 pcie_ari_disabled = true;
7031 } else if (!strncmp(str, "cbiosize=", 9)) {
7032 pci_cardbus_io_size = memparse(str + 9, &str);
7033 } else if (!strncmp(str, "cbmemsize=", 10)) {
7034 pci_cardbus_mem_size = memparse(str + 10, &str);
7035 } else if (!strncmp(str, "resource_alignment=", 19)) {
7036 resource_alignment_param = str + 19;
7037 } else if (!strncmp(str, "ecrc=", 5)) {
7038 pcie_ecrc_get_policy(str + 5);
7039 } else if (!strncmp(str, "hpiosize=", 9)) {
7040 pci_hotplug_io_size = memparse(str + 9, &str);
7041 } else if (!strncmp(str, "hpmmiosize=", 11)) {
7042 pci_hotplug_mmio_size = memparse(str + 11, &str);
7043 } else if (!strncmp(str, "hpmmioprefsize=", 15)) {
7044 pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
7045 } else if (!strncmp(str, "hpmemsize=", 10)) {
7046 pci_hotplug_mmio_size = memparse(str + 10, &str);
7047 pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
7048 } else if (!strncmp(str, "hpbussize=", 10)) {
7049 pci_hotplug_bus_size =
7050 simple_strtoul(str + 10, &str, 0);
7051 if (pci_hotplug_bus_size > 0xff)
7052 pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
7053 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
7054 pcie_bus_config = PCIE_BUS_TUNE_OFF;
7055 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
7056 pcie_bus_config = PCIE_BUS_SAFE;
7057 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
7058 pcie_bus_config = PCIE_BUS_PERFORMANCE;
7059 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
7060 pcie_bus_config = PCIE_BUS_PEER2PEER;
7061 } else if (!strncmp(str, "pcie_scan_all", 13)) {
7062 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
7063 } else if (!strncmp(str, "disable_acs_redir=", 18)) {
7064 disable_acs_redir_param = str + 18;
7065 } else {
7066 pr_err("PCI: Unknown option `%s'\n", str);
7067 }
7068 }
7069 str = k;
7070 }
7071 return 0;
7072 }
7073 early_param("pci", pci_setup);
7074
7075 /*
7076 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
7077 * in pci_setup(), above, to point to data in the __initdata section which
7078 * will be freed after the init sequence is complete. We can't allocate memory
7079 * in pci_setup() because some architectures do not have any memory allocation
7080 * service available during an early_param() call. So we allocate memory and
7081 * copy the variable here before the init section is freed.
7083 */
7084 static int __init pci_realloc_setup_params(void)
7085 {
7086 resource_alignment_param = kstrdup(resource_alignment_param,
7087 GFP_KERNEL);
7088 disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
7089
7090 return 0;
7091 }
7092 pure_initcall(pci_realloc_setup_params);
7093