/*
 * probe.c - PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include <linux/aer.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3

static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static LIST_HEAD(pci_domain_busn_res_list);

struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};

static struct resource *get_pci_domain_busn_res(int domain_nr)
{
	struct pci_domain_busn_res *r;

	list_for_each_entry(r, &pci_domain_busn_res_list, list)
		if (r->domain_nr == domain_nr)
			return &r->res;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->domain_nr = domain_nr;
	r->res.start = 0;
	r->res.end = 0xff;
	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;

	list_add_tail(&r->list, &pci_domain_busn_res_list);

	return &r->res;
}

static int find_anything(struct device *dev, void *data)
{
	return 1;
}

/*
 * Some device drivers need to know if PCI has been initialized.
 * Basically, we consider PCI not initialized when there is no
 * device to be found on the pci_bus_type bus.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent.  */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s.  */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}
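
/*
 * Illustrative example (values are hypothetical, not from real hardware):
 * sizing a 1 MB 32-bit memory BAR.  After all 1s are written, the device
 * returns maxbase = 0xfff00000; with mask = 0xfffffff0
 * (PCI_BASE_ADDRESS_MEM_MASK):
 *
 *   size = 0xfffffff0 & 0xfff00000         = 0xfff00000
 *   size = (size & ~(size - 1)) - 1        = 0x00100000 - 1 = 0x000fffff
 *
 * i.e. pci_size() returns the extent (size - 1), so the decoded region
 * spans 0x100000 bytes.
 */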

static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}
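
/*
 * Example (hypothetical register value): a BAR that reads 0xe000000c
 * decodes as memory space (bit 0 clear), 64-bit type (bits 2:1 == 10b)
 * and prefetchable (bit 3 set), so decode_bar() returns the raw low
 * attribute bits (0xc) plus IORESOURCE_MEM | IORESOURCE_PREFETCH |
 * IORESOURCE_MEM_64.
 */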

#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * __pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (sz == 0xffffffff)
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		if (l & PCI_ROM_ADDRESS_ENABLE)
			res->flags |= IORESOURCE_ROM_ENABLE;
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = (u32)PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (!sz64)
		goto fail;

	sz64 = pci_size(l64, sz64, mask64);
	if (!sz64) {
		dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
			 pos);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
		    && sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
				pos, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(pci_bus_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
				 pos, (unsigned long long)l64);
			goto out;
		}
	}

	region.start = l64;
	region.end = l64 + sz64;

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU).  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long)region.start);
	}

	goto out;

fail:
	res->flags = 0;
out:
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
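
/*
 * Worked example of the round-trip check above (offsets are hypothetical):
 * on a host bridge that maps CPU address 0x80000000 to bus address 0x0,
 * a BAR programmed with bus address A = 0x1000000 corresponds to resource
 * address 0x81000000, and converting 0x81000000 back to the bus yields
 * 0x1000000 == A.  If firmware programmed a BAR outside every bridge
 * window, the round trip would not reproduce A and the BAR is marked
 * IORESOURCE_UNSET above.
 */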

static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	if (dev->non_compliant_bars)
		return;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}

static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
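
/*
 * Example of the I/O window decode (hypothetical register values): with
 * 4K granularity, io_base_lo = 0x41 and io_limit_lo = 0x51 give
 *
 *   base  = (0x41 & 0xf0) << 8 = 0x4000
 *   limit = (0x51 & 0xf0) << 8 = 0x5000
 *
 * so the bridge forwards I/O in [0x4000, 0x5fff] (limit + 0x1000 - 1).
 */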

static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
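
/*
 * Example (hypothetical values): mem_base_lo = 0xe000 and
 * mem_limit_lo = 0xe010 decode to base = 0xe0000000 and
 * limit = 0xe0100000, so the non-prefetchable window is
 * [0xe0000000, 0xe01fffff] (limit + 1 MB - 1).
 */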

static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	u64 base64, limit64;
	pci_bus_addr_t base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base64 |= (u64) mem_base_hi << 32;
			limit64 |= (u64) mem_limit_hi << 32;
		}
	}

	base = (pci_bus_addr_t) base64;
	limit = (pci_bus_addr_t) limit64;

	if (base != base64) {
		dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
			(unsigned long long) base64);
		return;
	}

	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
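
/*
 * Example of the 64-bit prefetchable decode (hypothetical values):
 * mem_base_lo = 0xfff1 flags the 64-bit type (low nibble 0x1), so with
 * PCI_PREF_BASE_UPPER32 = 0x4 the window base becomes
 * (0xfff0 << 16) | (0x4ULL << 32) = 0x4fff00000.  On a configuration
 * where pci_bus_addr_t is only 32 bits wide, the cast above would
 * truncate that address, so the window is rejected with "can't handle
 * bridge window above 4GB".
 */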

void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res && res->flags) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}

static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return NULL;

	INIT_LIST_HEAD(&b->node);
	INIT_LIST_HEAD(&b->children);
	INIT_LIST_HEAD(&b->devices);
	INIT_LIST_HEAD(&b->slots);
	INIT_LIST_HEAD(&b->resources);
	b->max_bus_speed = PCI_SPEED_UNKNOWN;
	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	if (parent)
		b->domain_nr = parent->domain_nr;
#endif
	return b;
}

static void pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}

struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);

	return bridge;
}
EXPORT_SYMBOL(pci_alloc_host_bridge);

static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};

void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);

static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
	int index = 0;

	if (agpstat & 4)
		index = 3;
	else if (agpstat & 2)
		index = 2;
	else if (agpstat & 1)
		index = 1;
	else
		goto out;

	if (agp3) {
		index += 2;
		if (index == 5)
			index = 0;
	}

 out:
	return agp_speeds[index];
}
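
/*
 * Example (hypothetical status values): in AGP3 mode a rate field of 0x2
 * selects index 2, then +2 = 4, i.e. AGP_8X; a rate field of 0x4 in AGP3
 * mode is invalid (index 3 + 2 == 5) and collapses back to AGP_UNKNOWN.
 */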

static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}

static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}
#endif

	return d;
}

static void pci_set_bus_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;
	struct pci_bus *b;

	/*
	 * The bus can be a root bus, a subordinate bus, or a virtual bus
	 * created by an SR-IOV device.  Walk up to the first bridge device
	 * found or derive the domain from the host bridge.
	 */
	for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
		if (b->self)
			d = dev_get_msi_domain(&b->self->dev);
	}

	if (!d)
		d = pci_host_bridge_msi_domain(b);

	dev_set_msi_domain(&bus->dev, d);
}

int pci_register_host_bridge(struct pci_host_bridge *bridge)
{
	struct device *parent = bridge->dev.parent;
	struct resource_entry *window, *n;
	struct pci_bus *bus, *b;
	resource_size_t offset;
	LIST_HEAD(resources);
	struct resource *res;
	char addr[64], *fmt;
	const char *name;
	int err;

	bus = pci_alloc_bus(NULL);
	if (!bus)
		return -ENOMEM;

	bridge->bus = bus;

	/* temporarily move resources off the list */
	list_splice_init(&bridge->windows, &resources);
	bus->sysdata = bridge->sysdata;
	bus->msi = bridge->msi;
	bus->ops = bridge->ops;
	bus->number = bus->busn_res.start = bridge->busnr;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
#endif

	b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
	if (b) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b->dev, "bus already known\n");
		err = -EEXIST;
		goto free;
	}

	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
		     bridge->busnr);

	err = pcibios_root_bridge_prepare(bridge);
	if (err)
		goto free;

	err = device_register(&bridge->dev);
	if (err) {
		put_device(&bridge->dev);
		goto free;
	}

	bus->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(bus->bridge);
	pci_set_bus_of_node(bus);
	pci_set_bus_msi_domain(bus);

	if (!parent)
		set_dev_node(bus->bridge, pcibus_to_node(bus));

	bus->dev.class = &pcibus_class;
	bus->dev.parent = bus->bridge;

	dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
	name = dev_name(&bus->dev);

	err = device_register(&bus->dev);
	if (err)
		goto unregister;

	pcibios_add_bus(bus);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(bus);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", name);
	else
		pr_info("PCI host bridge to bus %s\n", name);

	/* Add initial resources to the bus */
	resource_list_for_each_entry_safe(window, n, &resources) {
		list_move_tail(&window->node, &bridge->windows);
		offset = window->offset;
		res = window->res;

		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(bus, bus->number, res->end);
		else
			pci_bus_add_resource(bus, res, 0);

		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";

			snprintf(addr, sizeof(addr), fmt,
				 (unsigned long long)(res->start - offset),
				 (unsigned long long)(res->end - offset));
		} else
			addr[0] = '\0';

		dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&bus->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return 0;

unregister:
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);

free:
	kfree(bus);
	return err;
}
EXPORT_SYMBOL(pci_register_host_bridge);

static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent.
	 */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* Initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	pci_set_bus_msi_domain(child);
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	if (child->ops->add_bus) {
		ret = child->ops->add_bus(child);
		if (WARN_ON(ret < 0))
			dev_err(&child->dev, "failed to add bus: %d\n", ret);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}

struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
				int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}
EXPORT_SYMBOL(pci_add_new_bus);

static void pci_enable_crs(struct pci_dev *pdev)
{
	u16 root_cap = 0;

	/* Enable CRS Software Visibility if supported */
	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_CRSSVE);
}

/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
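/*
 * Illustrative scenario (bus numbers are hypothetical): if firmware left
 * one bridge configured as [bus 01-05] and a second bridge unconfigured,
 * pass 0 records [01-05] as-is, and pass 1 then assigns the second bridge
 * a fresh range starting at max+1 (here bus 06), so the old and new
 * ranges cannot overlap.
 */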
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	/*
	 * Make sure the bridge is powered on to be able to access config
	 * space of devices below it.
	 */
	pm_runtime_get_sync(&dev->dev);

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	pci_enable_crs(dev);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * The bus might already exist for two reasons: Either we are
		 * rescanning the bus or the bus is reachable through more than
		 * one bridge. The second case can happen with the i450NX
		 * chipset.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > subordinate)
			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
				 subordinate, cmax);
		/* subordinate should equal child->busn_res.end */
		if (subordinate > max)
			max = subordinate;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken || is_cardbus)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus. */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, max+1);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max+1, 0xff);
		}
		max++;
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)     <<  0)
		      | ((unsigned int)(child->busn_res.start)   <<  8)
		      | ((unsigned int)(child->busn_res.end) << 16);
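
		/*
		 * Layout of the PCI_PRIMARY_BUS dword, for reference:
		 * bits 31:24 secondary latency timer, 23:16 subordinate
		 * bus, 15:8 secondary bus, 7:0 primary bus.  Hypothetical
		 * example: primary 00, secondary 02, subordinate ff
		 * composes to 0xff0200 in the low 24 bits.
		 */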

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			max = pci_scan_child_bus(child);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
							max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
				&child->busn_res,
				(bus->number > child->busn_res.end &&
				 bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				bus->self->transparent ? " transparent" : "",
				dev_name(&bus->dev),
				&bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	pm_runtime_put(&dev->dev);

	return max;
}
EXPORT_SYMBOL(pci_scan_bridge);

/*
 * Read the interrupt line and pin registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;
	int type;
	struct pci_dev *parent;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;
	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;

	/*
	 * A Root Port is always the upstream end of a Link.  No PCIe
	 * component has two Links.  Two Links are connected by a Switch
	 * that has a Port on each Link and internal logic to connect the
	 * two Ports.
	 */
	type = pci_pcie_type(pdev);
	if (type == PCI_EXP_TYPE_ROOT_PORT)
		pdev->has_secondary_link = 1;
	else if (type == PCI_EXP_TYPE_UPSTREAM ||
		 type == PCI_EXP_TYPE_DOWNSTREAM) {
		parent = pci_upstream_bridge(pdev);

		/*
		 * Usually there's an upstream device (Root Port or Switch
		 * Downstream Port), but we can't assume one exists.
		 */
		if (parent && !parent->has_secondary_link)
			pdev->has_secondary_link = 1;
	}
}

void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}

/**
 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
 * @dev: PCI device
 *
 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
 * when forwarding a type1 configuration request the bridge must check that
 * the extended register address field is zero.  The bridge is not permitted
 * to forward the transaction and must handle it as an Unsupported Request.
 * Some bridges do not follow this rule and simply drop the extended register
 * bits, resulting in the standard config space being aliased, every 256
 * bytes across the entire configuration space.  Test for this condition by
 * comparing the first dword of each potential alias to the vendor/device ID.
 * Known offenders:
 *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
 *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
 */
static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_QUIRKS
	int pos;
	u32 header, tmp;

	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);

	for (pos = PCI_CFG_SPACE_SIZE;
	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
		    || header != tmp)
			return false;
	}

	return true;
#else
	return false;
#endif
}

/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
static int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		return PCI_CFG_SPACE_SIZE;
	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
		return PCI_CFG_SPACE_SIZE;

	return PCI_CFG_SPACE_EXP_SIZE;
}

int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

	class = dev->class >> 8;
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	if (pci_is_pcie(dev))
		return pci_cfg_space_size_ext(dev);

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return PCI_CFG_SPACE_SIZE;

	pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
	if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
		return pci_cfg_space_size_ext(dev);

	return PCI_CFG_SPACE_SIZE;
}
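
/*
 * Decision summary for the config space size (informal): host bridges and
 * PCIe devices get the 4096-byte probe; a conventional PCI device without
 * a PCI-X Mode 2 capability is assumed to have 256 bytes; and even a
 * capable device falls back to 256 bytes when the dword at 0x100 reads
 * back as all 1s or as an alias of offset 0x0.
 */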

#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

static void pci_msi_setup_pci_dev(struct pci_dev *dev)
{
	/*
	 * Disable the MSI hardware to avoid screaming interrupts
	 * during boot.  This is the power on reset default so
	 * usually this should be a noop.
	 */
	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (dev->msi_cap)
		pci_msi_set_enable(dev, 0);

	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (dev->msix_cap)
		pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
}

/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and IO-space addresses, IRQ lines, etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u16 cmd;
	u8 hdr_type;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	pci_dev_assign_slot(dev);
	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		    /* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	if (dev->non_compliant_bars) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
			cmd &= ~PCI_COMMAND_IO;
			cmd &= ~PCI_COMMAND_MEMORY;
			pci_write_config_word(dev, PCI_COMMAND, cmd);
		}
	}

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code. Legacy mode ATA controllers have fixed
		 * addresses. These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
					 res);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
					 res);
			}
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
					 res);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
					 res);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
			dev->hdr_type);
		return -EIO;

	bad:
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
			dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED << 8;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}

static void pci_configure_mps(struct pci_dev *dev)
{
	struct pci_dev *bridge = pci_upstream_bridge(dev);
	int mps, p_mps, rc;

	if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
		return;

	mps = pcie_get_mps(dev);
	p_mps = pcie_get_mps(bridge);

	if (mps == p_mps)
		return;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 mps, pci_name(bridge), p_mps);
		return;
	}

	/*
	 * Fancier MPS configuration is done later by
	 * pcie_bus_configure_settings()
	 */
	if (pcie_bus_config != PCIE_BUS_DEFAULT)
		return;

	rc = pcie_set_mps(dev, p_mps);
	if (rc) {
		dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 p_mps);
		return;
	}

	dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
		 p_mps, mps, 128 << dev->pcie_mpss);
}
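
/*
 * Example (hypothetical): a hot-added endpoint comes up with MPS 128
 * while its upstream bridge runs at MPS 256.  Under PCIE_BUS_DEFAULT the
 * code above sets the endpoint to 256 to match; with PCIE_BUS_TUNE_OFF it
 * only warns, since an MPS mismatch can lead to malformed-TLP errors.
 */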

static struct hpp_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};

static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
{
	u16 pci_cmd, pci_bctl;

	if (!hpp)
		hpp = &pci_default_type0;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev,
			 "PCI settings rev %d not supported; using defaults\n",
			 hpp->revision);
		hpp = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpp->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpp->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpp->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpp->enable_serr)
			pci_bctl |= PCI_BRIDGE_CTL_SERR;
		if (hpp->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}

static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
{
	if (hpp)
		dev_warn(&dev->dev, "PCI-X settings not supported\n");
}

static bool pcie_root_rcb_set(struct pci_dev *dev)
{
	struct pci_dev *rp = pcie_find_root_port(dev);
	u16 lnkctl;

	if (!rp)
		return false;

	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
	if (lnkctl & PCI_EXP_LNKCTL_RCB)
		return true;

	return false;
}

static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
	int pos;
	u32 reg32;

	if (!hpp)
		return;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
			 hpp->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ;
	hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {

		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}
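
/*
 * The _HPX type 2 entries use an and/or mask pair per register:
 * new = (old & mask_and) | mask_or.  Hypothetical example: with
 * unc_err_mask_and = ~0x10 and unc_err_mask_or = 0x20, bit 4 of the
 * Uncorrectable Error Mask is always cleared and bit 5 always set,
 * while every other bit keeps its firmware-programmed value.
 */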

static void pci_configure_device(struct pci_dev *dev)
{
	struct hotplug_params hpp;
	int ret;

	pci_configure_mps(dev);

	memset(&hpp, 0, sizeof(hpp));
	ret = pci_get_hp_params(dev, &hpp);
	if (ret)
		return;

	program_hpp_type2(dev, hpp.t2);
	program_hpp_type1(dev, hpp.t1);
	program_hpp_type0(dev, hpp.t0);
}

static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}

/**
 * pci_release_dev - free a pci device structure when all users of it are finished.
 * @dev: device that's been disconnected
 *
 * Will be called only by the device core when all users of this pci device are
 * done.
 */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);
	pcibios_release_device(pci_dev);
	pci_bus_put(pci_dev->bus);
	kfree(pci_dev->driver_override);
	kfree(pci_dev->dma_alias_mask);
	kfree(pci_dev);
}

struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
{
	struct pci_dev *dev;

	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	INIT_LIST_HEAD(&dev->bus_list);
	dev->dev.type = &pci_dev_type;
	dev->bus = pci_bus_get(bus);

	return dev;
}
EXPORT_SYMBOL(pci_alloc_dev);

bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int crs_timeout)
{
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	/*
	 * Configuration Request Retry Status.  Some root ports return the
	 * actual device ID instead of the synthetic ID (0xFFFF) required
	 * by the PCIe spec.  Ignore the device ID and only check for
	 * (vendor id == 1).
	 */
	while ((*l & 0xffff) == 0x0001) {
		if (!crs_timeout)
			return false;

		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
		/* Device hasn't responded within crs_timeout?  Must be stuck. */
		if (delay > crs_timeout) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
			       pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
			       PCI_FUNC(devfn));
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
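
/*
 * The CRS loop above backs off exponentially: it sleeps 1 ms, 2 ms, 4 ms,
 * ... and gives up once the next delay would exceed crs_timeout.  With the
 * 60*1000 ms timeout passed by pci_scan_device() below, that is roughly 16
 * retries totalling about a minute.
 */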

/*
 * Read the config data for a PCI device, sanity-check it
 * and fill in the dev structure...
 */
static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	u32 l;

	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
		return NULL;

	dev = pci_alloc_dev(bus);
	if (!dev)
		return NULL;

	dev->devfn = devfn;
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;

	pci_set_of_node(dev);

	if (pci_setup_device(dev)) {
		pci_bus_put(dev->bus);
		kfree(dev);
		return NULL;
	}

	return dev;
}

static void pci_init_capabilities(struct pci_dev *dev)
{
	/* Enhanced Allocation */
	pci_ea_init(dev);

	/* Setup MSI caps & disable MSI/MSI-X interrupts */
	pci_msi_setup_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Address Translation Services */
	pci_ats_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);

	/* Precision Time Measurement */
	pci_ptm_init(dev);

	/* Advanced Error Reporting */
	pci_aer_init(dev);
}
1820 
1821 /*
1822  * This is the equivalent of pci_host_bridge_msi_domain that acts on
1823  * devices. Firmware interfaces that can select the MSI domain on a
1824  * per-device basis should be called from here.
1825  */
1826 static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
1827 {
1828 	struct irq_domain *d;
1829 
1830 	/*
1831 	 * If a domain has been set through the pcibios_add_device
1832 	 * callback, then this is the one (platform code knows best).
1833 	 */
1834 	d = dev_get_msi_domain(&dev->dev);
1835 	if (d)
1836 		return d;
1837 
1838 	/*
1839 	 * Let's see if we have a firmware interface able to provide
1840 	 * the domain.
1841 	 */
1842 	d = pci_msi_get_device_domain(dev);
1843 	if (d)
1844 		return d;
1845 
1846 	return NULL;
1847 }
1848 
1849 static void pci_set_msi_domain(struct pci_dev *dev)
1850 {
1851 	struct irq_domain *d;
1852 
1853 	/*
1854 	 * If the platform or firmware interfaces cannot supply a
1855 	 * device-specific MSI domain, then inherit the default domain
1856 	 * from the host bridge itself.
1857 	 */
1858 	d = pci_dev_msi_domain(dev);
1859 	if (!d)
1860 		d = dev_get_msi_domain(&dev->bus->dev);
1861 
1862 	dev_set_msi_domain(&dev->dev, d);
1863 }
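
/*
 * Sketch of the fallback above (hypothetical driver code): a host
 * bridge driver with no per-device firmware method can hang its MSI
 * domain off the root bus, and devices then inherit it here:
 *
 *	struct irq_domain *d = my_msi_domain;	(driver-created domain)
 *
 *	dev_set_msi_domain(&bridge->bus->dev, d);
 */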
1864 
1865 /**
1866  * pci_dma_configure - Setup DMA configuration
1867  * @dev: ptr to pci_dev struct of the PCI device
1868  *
1869  * Update the PCI device's DMA configuration using information from
1870  * the OF node or the ACPI companion of the host bridge's parent (if any).
1871  */
1872 static void pci_dma_configure(struct pci_dev *dev)
1873 {
1874 	struct device *bridge = pci_get_host_bridge_device(dev);
1875 
1876 	if (IS_ENABLED(CONFIG_OF) && bridge->parent &&
1877 	    bridge->parent->of_node) {
1878 		of_dma_configure(&dev->dev, bridge->parent->of_node);
1879 	} else if (has_acpi_companion(bridge)) {
1880 		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
1881 		enum dev_dma_attr attr = acpi_get_dma_attr(adev);
1882 
1883 		if (attr == DEV_DMA_NOT_SUPPORTED)
1884 			dev_warn(&dev->dev, "DMA not supported.\n");
1885 		else
1886 			acpi_dma_configure(&dev->dev, attr);
1887 	}
1888 
1889 	pci_put_host_bridge_device(bridge);
1890 }
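
/*
 * Illustrative outcome of the ACPI branch above (hypothetical
 * platform): with _CCA = 1 on the host bridge's companion,
 * acpi_get_dma_attr() returns DEV_DMA_COHERENT and acpi_dma_configure()
 * sets up coherent DMA; _CCA = 0 yields DEV_DMA_NON_COHERENT instead.
 */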
1891 
1892 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1893 {
1894 	int ret;
1895 
1896 	pci_configure_device(dev);
1897 
1898 	device_initialize(&dev->dev);
1899 	dev->dev.release = pci_release_dev;
1900 
1901 	set_dev_node(&dev->dev, pcibus_to_node(bus));
1902 	dev->dev.dma_mask = &dev->dma_mask;
1903 	dev->dev.dma_parms = &dev->dma_parms;
1904 	dev->dev.coherent_dma_mask = 0xffffffffull;
1905 	pci_dma_configure(dev);
1906 
1907 	pci_set_dma_max_seg_size(dev, 65536);
1908 	pci_set_dma_seg_boundary(dev, 0xffffffff);
1909 
1910 	/* Fix up broken headers */
1911 	pci_fixup_device(pci_fixup_header, dev);
1912 
1913 	/* Apply resource alignment requests; moved out of the header fixup quirks */
1914 	pci_reassigndev_resource_alignment(dev);
1915 
1916 	/* Clear the state_saved flag. */
1917 	dev->state_saved = false;
1918 
1919 	/* Initialize various capabilities */
1920 	pci_init_capabilities(dev);
1921 
1922 	/*
1923 	 * Add the device to our list of discovered devices
1924 	 * and the bus list for fixup functions, etc.
1925 	 */
1926 	down_write(&pci_bus_sem);
1927 	list_add_tail(&dev->bus_list, &bus->devices);
1928 	up_write(&pci_bus_sem);
1929 
1930 	ret = pcibios_add_device(dev);
1931 	WARN_ON(ret < 0);
1932 
1933 	/* Setup MSI irq domain */
1934 	pci_set_msi_domain(dev);
1935 
1936 	/* Notifiers fired by device_add() may use PCI capabilities */
1937 	dev->match_driver = false;
1938 	ret = device_add(&dev->dev);
1939 	WARN_ON(ret < 0);
1940 }
1941 
1942 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
1943 {
1944 	struct pci_dev *dev;
1945 
1946 	dev = pci_get_slot(bus, devfn);
1947 	if (dev) {
1948 		pci_dev_put(dev);
1949 		return dev;
1950 	}
1951 
1952 	dev = pci_scan_device(bus, devfn);
1953 	if (!dev)
1954 		return NULL;
1955 
1956 	pci_device_add(dev, bus);
1957 
1958 	return dev;
1959 }
1960 EXPORT_SYMBOL(pci_scan_single_device);
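
/*
 * Usage sketch (hypothetical hotplug path): callers outside the normal
 * bus scan discover a single function and then make it visible, all
 * under the rescan/remove lock:
 *
 *	pci_lock_rescan_remove();
 *	dev = pci_scan_single_device(bus, devfn);
 *	if (dev)
 *		pci_bus_add_device(dev);
 *	pci_unlock_rescan_remove();
 */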
1961 
1962 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
1963 {
1964 	int pos;
1965 	u16 cap = 0;
1966 	unsigned next_fn;
1967 
1968 	if (pci_ari_enabled(bus)) {
1969 		if (!dev)
1970 			return 0;
1971 		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1972 		if (!pos)
1973 			return 0;
1974 
1975 		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
1976 		next_fn = PCI_ARI_CAP_NFN(cap);
1977 		if (next_fn <= fn)
1978 			return 0;	/* protect against malformed list */
1979 
1980 		return next_fn;
1981 	}
1982 
1983 	/* dev may be NULL for non-contiguous multifunction devices */
1984 	if (!dev || dev->multifunction)
1985 		return (fn + 1) % 8;
1986 
1987 	return 0;
1988 }
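
/*
 * Worked example of the ARI path above: if function 0's ARI capability
 * register reads cap == 0x0300, then PCI_ARI_CAP_NFN(cap) == 3 and the
 * scan jumps straight to function 3, skipping 1 and 2; a Next Function
 * Number that does not increase ends the walk.
 */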
1989 
1990 static int only_one_child(struct pci_bus *bus)
1991 {
1992 	struct pci_dev *parent = bus->self;
1993 
1994 	if (!parent || !pci_is_pcie(parent))
1995 		return 0;
1996 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1997 		return 1;
1998 
1999 	/*
2000 	 * PCIe downstream ports are bridges that normally lead only to
2001 	 * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all
2002 	 * possible devices, not just device 0.  See PCIe spec r3.0,
2003 	 * sec 7.3.1.
2004 	 */
2005 	if (parent->has_secondary_link &&
2006 	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
2007 		return 1;
2008 	return 0;
2009 }
2010 
2011 /**
2012  * pci_scan_slot - scan a PCI slot on a bus for devices.
2013  * @bus: PCI bus to scan
2014  * @devfn: slot number to scan (must have function number zero)
2015  *
2016  * Scan a PCI slot on the specified PCI bus for devices, adding
2017  * discovered devices to the @bus->devices list.  New devices
2018  * will not have is_added set.
2019  *
2020  * Returns the number of new devices found.
2021  */
2022 int pci_scan_slot(struct pci_bus *bus, int devfn)
2023 {
2024 	unsigned fn, nr = 0;
2025 	struct pci_dev *dev;
2026 
2027 	if (only_one_child(bus) && (devfn > 0))
2028 		return 0; /* Already scanned the entire slot */
2029 
2030 	dev = pci_scan_single_device(bus, devfn);
2031 	if (!dev)
2032 		return 0;
2033 	if (!dev->is_added)
2034 		nr++;
2035 
2036 	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
2037 		dev = pci_scan_single_device(bus, devfn + fn);
2038 		if (dev) {
2039 			if (!dev->is_added)
2040 				nr++;
2041 			dev->multifunction = 1;
2042 		}
2043 	}
2044 
2045 	/* Only one slot on a PCIe link has devices; safe to init its ASPM state */
2046 	if (bus->self && nr)
2047 		pcie_aspm_init_link_state(bus->self);
2048 
2049 	return nr;
2050 }
2051 EXPORT_SYMBOL(pci_scan_slot);
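
/*
 * Worked example of the devfn encoding used above: devfn packs the
 * device (slot) number in bits 7:3 and the function in bits 2:0, so
 * devfn == 0x48 means PCI_SLOT() == 9 and PCI_FUNC() == 0, and the
 * function loop above visits devfn 0x48..0x4f for that slot.
 */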
2052 
2053 static int pcie_find_smpss(struct pci_dev *dev, void *data)
2054 {
2055 	u8 *smpss = data;
2056 
2057 	if (!pci_is_pcie(dev))
2058 		return 0;
2059 
2060 	/*
2061 	 * We don't have a way to change MPS settings on devices that have
2062 	 * drivers attached.  A hot-added device might support only the minimum
2063 	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
2064 	 * where devices may be hot-added, we limit the fabric MPS to 128 so
2065 	 * hot-added devices will work correctly.
2066 	 *
2067 	 * However, if we hot-add a device to a slot directly below a Root
2068 	 * Port, it's impossible for there to be other existing devices below
2069 	 * the port.  We don't limit the MPS in this case because we can
2070 	 * reconfigure MPS on both the Root Port and the hot-added device,
2071 	 * and there are no other devices involved.
2072 	 *
2073 	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
2074 	 */
2075 	if (dev->is_hotplug_bridge &&
2076 	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
2077 		*smpss = 0;
2078 
2079 	if (*smpss > dev->pcie_mpss)
2080 		*smpss = dev->pcie_mpss;
2081 
2082 	return 0;
2083 }
2084 
2085 static void pcie_write_mps(struct pci_dev *dev, int mps)
2086 {
2087 	int rc;
2088 
2089 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
2090 		mps = 128 << dev->pcie_mpss;
2091 
2092 		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
2093 		    dev->bus->self)
2094 			/* In "Performance" mode we assume downstream
2095 			 * communication is never larger than the MRRS, so
2096 			 * only the upstream direction needs its MPS
2097 			 * configured.  Therefore, walk from the top down and
2098 			 * set the MPS of each child to that of its parent
2099 			 * bus.
2100 			 *
2101 			 * Configure the device MPS with the smaller of the
2102 			 * device MPSS or the bridge MPS (which is assumed to
2103 			 * be properly configured at this point to the largest
2104 			 * allowable MPS based on its parent bus).
2105 			 */
2106 			mps = min(mps, pcie_get_mps(dev->bus->self));
2107 	}
2108 
2109 	rc = pcie_set_mps(dev, mps);
2110 	if (rc)
2111 		dev_err(&dev->dev, "Failed to set the MPS\n");
2112 }
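
/*
 * Worked example of the encoding above: pcie_mpss is the 3-bit Max
 * Payload Size Supported field, so 128 << pcie_mpss maps 0 -> 128
 * bytes, 2 -> 512 and 5 -> 4096.  pcie_set_mps() rejects any value
 * that is not a power of two in [128, 4096] or exceeds the MPSS.
 */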
2113 
2114 static void pcie_write_mrrs(struct pci_dev *dev)
2115 {
2116 	int rc, mrrs;
2117 
2118 	/* In the "safe" case, do not configure the MRRS.  There appear to be
2119 	 * issues with setting MRRS to 0 on a number of devices.
2120 	 */
2121 	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
2122 		return;
2123 
2124 	/* For Max performance, the MRRS must be set to the largest supported
2125 	 * value.  However, it cannot be configured larger than the MPS the
2126 	 * device or the bus can support.  This should already be properly
2127 	 * configured by a prior call to pcie_write_mps.
2128 	 */
2129 	mrrs = pcie_get_mps(dev);
2130 
2131 	/* MRRS is a R/W register.  Invalid values can be written, but a
2132 	 * subsequent read will verify if the value is acceptable or not.
2133 	 * If the MRRS value provided is not acceptable (e.g., too large),
2134 	 * shrink the value until it is acceptable to the HW.
2135 	 */
2136 	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
2137 		rc = pcie_set_readrq(dev, mrrs);
2138 		if (!rc)
2139 			break;
2140 
2141 		dev_warn(&dev->dev, "Failed to set the MRRS\n");
2142 		mrrs /= 2;
2143 	}
2144 
2145 	if (mrrs < 128)
2146 		dev_err(&dev->dev, "Failed to configure the MRRS to a safe value.  If problems occur, try booting with pci=pcie_bus_safe\n");
2147 }
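
/*
 * Worked example of the shrink loop above: if mrrs starts at an
 * MPS-derived 1024 and pcie_set_readrq() keeps failing, we warn and
 * halve it (1024 -> 512 -> 256 -> 128); if even 128 fails, mrrs drops
 * to 64, the loop exits and the error above is printed.
 */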
2148 
2149 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
2150 {
2151 	int mps, orig_mps;
2152 
2153 	if (!pci_is_pcie(dev))
2154 		return 0;
2155 
2156 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
2157 	    pcie_bus_config == PCIE_BUS_DEFAULT)
2158 		return 0;
2159 
2160 	mps = 128 << *(u8 *)data;
2161 	orig_mps = pcie_get_mps(dev);
2162 
2163 	pcie_write_mps(dev, mps);
2164 	pcie_write_mrrs(dev);
2165 
2166 	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
2167 		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
2168 		 orig_mps, pcie_get_readrq(dev));
2169 
2170 	return 0;
2171 }
2172 
2173 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
2174  * parents-then-children fashion.  If this changes, this code will not
2175  * work as designed.
2176  */
2177 void pcie_bus_configure_settings(struct pci_bus *bus)
2178 {
2179 	u8 smpss = 0;
2180 
2181 	if (!bus->self)
2182 		return;
2183 
2184 	if (!pci_is_pcie(bus->self))
2185 		return;
2186 
2187 	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
2188 	 * to be aware of the MPS of the destination.  To work around this,
2189 	 * simply force the MPS of the entire system to the smallest possible value.
2190 	 */
2191 	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
2192 		smpss = 0;
2193 
2194 	if (pcie_bus_config == PCIE_BUS_SAFE) {
2195 		smpss = bus->self->pcie_mpss;
2196 
2197 		pcie_find_smpss(bus->self, &smpss);
2198 		pci_walk_bus(bus, pcie_find_smpss, &smpss);
2199 	}
2200 
2201 	pcie_bus_configure_set(bus->self, &smpss);
2202 	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
2203 }
2204 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
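
/*
 * Usage sketch: resource assignment code typically applies this to
 * each child bus once scanning is complete, e.g. (hypothetical
 * caller):
 *
 *	struct pci_bus *child;
 *
 *	list_for_each_entry(child, &bus->children, node)
 *		pcie_bus_configure_settings(child);
 */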
2205 
2206 unsigned int pci_scan_child_bus(struct pci_bus *bus)
2207 {
2208 	unsigned int devfn, pass, max = bus->busn_res.start;
2209 	struct pci_dev *dev;
2210 
2211 	dev_dbg(&bus->dev, "scanning bus\n");
2212 
2213 	/* Go find them, Rover! */
2214 	for (devfn = 0; devfn < 0x100; devfn += 8)
2215 		pci_scan_slot(bus, devfn);
2216 
2217 	/* Reserve buses for SR-IOV capability. */
2218 	max += pci_iov_bus_range(bus);
2219 
2220 	/*
2221 	 * After performing arch-dependent fixup of the bus, look behind
2222 	 * all PCI-to-PCI bridges on this bus.
2223 	 */
2224 	if (!bus->is_added) {
2225 		dev_dbg(&bus->dev, "fixups for bus\n");
2226 		pcibios_fixup_bus(bus);
2227 		bus->is_added = 1;
2228 	}
2229 
2230 	for (pass = 0; pass < 2; pass++)
2231 		list_for_each_entry(dev, &bus->devices, bus_list) {
2232 			if (pci_is_bridge(dev))
2233 				max = pci_scan_bridge(bus, dev, max, pass);
2234 		}
2235 
2236 	/*
2237 	 * Make sure a hotplug bridge has at least the minimum requested
2238 	 * number of buses.
2239 	 */
2240 	if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
2241 		if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
2242 			max = bus->busn_res.start + pci_hotplug_bus_size - 1;
2243 	}
2244 
2245 	/*
2246 	 * We've scanned the bus, so we know about all the devices on
2247 	 * it and about everything on the other side of any bridges it
2248 	 * may carry.
2249 	 *
2250 	 * Return how far we've got finding sub-buses.
2251 	 */
2252 	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
2253 	return max;
2254 }
2255 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
2256 
2257 /**
2258  * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
2259  * @bridge: Host bridge to set up.
2260  *
2261  * Default empty implementation.  Replace with an architecture-specific setup
2262  * routine, if necessary.
2263  */
2264 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
2265 {
2266 	return 0;
2267 }
2268 
2269 void __weak pcibios_add_bus(struct pci_bus *bus)
2270 {
2271 }
2272 
2273 void __weak pcibios_remove_bus(struct pci_bus *bus)
2274 {
2275 }
2276 
2277 static struct pci_bus *pci_create_root_bus_msi(struct device *parent,
2278 		int bus, struct pci_ops *ops, void *sysdata,
2279 		struct list_head *resources, struct msi_controller *msi)
2280 {
2281 	int error;
2282 	struct pci_host_bridge *bridge;
2283 
2284 	bridge = pci_alloc_host_bridge(0);
2285 	if (!bridge)
2286 		return NULL;
2287 
2288 	bridge->dev.parent = parent;
2289 	bridge->dev.release = pci_release_host_bridge_dev;
2290 
2291 	list_splice_init(resources, &bridge->windows);
2292 	bridge->sysdata = sysdata;
2293 	bridge->busnr = bus;
2294 	bridge->ops = ops;
2295 	bridge->msi = msi;
2296 
2297 	error = pci_register_host_bridge(bridge);
2298 	if (error < 0)
2299 		goto err_out;
2300 
2301 	return bridge->bus;
2302 
2303 err_out:
2304 	kfree(bridge);
2305 	return NULL;
2306 }
2307 
2308 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
2309 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
2310 {
2311 	return pci_create_root_bus_msi(parent, bus, ops, sysdata, resources,
2312 				       NULL);
2313 }
2314 EXPORT_SYMBOL_GPL(pci_create_root_bus);
2315 
2316 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
2317 {
2318 	struct resource *res = &b->busn_res;
2319 	struct resource *parent_res, *conflict;
2320 
2321 	res->start = bus;
2322 	res->end = bus_max;
2323 	res->flags = IORESOURCE_BUS;
2324 
2325 	if (!pci_is_root_bus(b))
2326 		parent_res = &b->parent->busn_res;
2327 	else {
2328 		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
2329 		res->flags |= IORESOURCE_PCI_FIXED;
2330 	}
2331 
2332 	conflict = request_resource_conflict(parent_res, res);
2333 
2334 	if (conflict)
2335 		dev_printk(KERN_DEBUG, &b->dev,
2336 			   "busn_res: cannot insert %pR under %s%pR (conflicts with %s %pR)\n",
2337 			    res, pci_is_root_bus(b) ? "domain " : "",
2338 			    parent_res, conflict->name, conflict);
2339 
2340 	return conflict == NULL;
2341 }
2342 
2343 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
2344 {
2345 	struct resource *res = &b->busn_res;
2346 	struct resource old_res = *res;
2347 	resource_size_t size;
2348 	int ret;
2349 
2350 	if (res->start > bus_max)
2351 		return -EINVAL;
2352 
2353 	size = bus_max - res->start + 1;
2354 	ret = adjust_resource(res, res->start, size);
2355 	dev_printk(KERN_DEBUG, &b->dev,
2356 			"busn_res: %pR end %s updated to %02x\n",
2357 			&old_res, ret ? "can not be" : "is", bus_max);
2358 			&old_res, ret ? "cannot be" : "is", bus_max);
2359 	if (!ret && !res->parent)
2360 		pci_bus_insert_busn_res(b, res->start, res->end);
2361 
2362 	return ret;
2363 }
2364 
2365 void pci_bus_release_busn_res(struct pci_bus *b)
2366 {
2367 	struct resource *res = &b->busn_res;
2368 	int ret;
2369 
2370 	if (!res->flags || !res->parent)
2371 		return;
2372 
2373 	ret = release_resource(res);
2374 	dev_printk(KERN_DEBUG, &b->dev,
2375 			"busn_res: %pR %s released\n",
2376 			res, ret ? "cannot be" : "is");
2377 }
2378 
2379 struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
2380 		struct pci_ops *ops, void *sysdata,
2381 		struct list_head *resources, struct msi_controller *msi)
2382 {
2383 	struct resource_entry *window;
2384 	bool found = false;
2385 	struct pci_bus *b;
2386 	int max;
2387 
2388 	resource_list_for_each_entry(window, resources)
2389 		if (window->res->flags & IORESOURCE_BUS) {
2390 			found = true;
2391 			break;
2392 		}
2393 
2394 	b = pci_create_root_bus_msi(parent, bus, ops, sysdata, resources, msi);
2395 	if (!b)
2396 		return NULL;
2397 
2398 	if (!found) {
2399 		dev_info(&b->dev,
2400 			 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2401 			 bus);
2402 		pci_bus_insert_busn_res(b, bus, 255);
2403 	}
2404 
2405 	max = pci_scan_child_bus(b);
2406 
2407 	if (!found)
2408 		pci_bus_update_busn_res_end(b, max);
2409 
2410 	return b;
2411 }
2412 
2413 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
2414 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
2415 {
2416 	return pci_scan_root_bus_msi(parent, bus, ops, sysdata, resources,
2417 				     NULL);
2418 }
2419 EXPORT_SYMBOL(pci_scan_root_bus);
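
/*
 * Usage sketch (hypothetical host controller probe; my_* names are
 * placeholders): build the window list, scan, then add the devices:
 *
 *	LIST_HEAD(res);
 *	struct pci_bus *b;
 *
 *	pci_add_resource(&res, &my_io_window);
 *	pci_add_resource(&res, &my_mem_window);
 *	pci_add_resource(&res, &my_busn_window);
 *	b = pci_scan_root_bus(&pdev->dev, 0, &my_pci_ops, priv, &res);
 *	if (b)
 *		pci_bus_add_devices(b);
 */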
2420 
2421 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
2422 					void *sysdata)
2423 {
2424 	LIST_HEAD(resources);
2425 	struct pci_bus *b;
2426 
2427 	pci_add_resource(&resources, &ioport_resource);
2428 	pci_add_resource(&resources, &iomem_resource);
2429 	pci_add_resource(&resources, &busn_resource);
2430 	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2431 	if (b)
2432 		pci_scan_child_bus(b);
2433 	else
2434 		pci_free_resource_list(&resources);
2436 	return b;
2437 }
2438 EXPORT_SYMBOL(pci_scan_bus);
2439 
2440 /**
2441  * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2442  * @bridge: PCI bridge for the bus to scan
2443  *
2444  * Scan a PCI bus and child buses for new devices, add them,
2445  * and enable them, resizing bridge MMIO/IO resources if necessary
2446  * and possible.  The caller must ensure the child devices are already
2447  * removed for resizing to occur.
2448  *
2449  * Returns the highest subordinate bus number discovered.
2450  */
2451 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2452 {
2453 	unsigned int max;
2454 	struct pci_bus *bus = bridge->subordinate;
2455 
2456 	max = pci_scan_child_bus(bus);
2457 
2458 	pci_assign_unassigned_bridge_resources(bridge);
2459 
2460 	pci_bus_add_devices(bus);
2461 
2462 	return max;
2463 }
2464 
2465 /**
2466  * pci_rescan_bus - scan a PCI bus for devices.
2467  * @bus: PCI bus to scan
2468  *
2469  * Scan a PCI bus and child buses for new devices, add them,
2470  * and enable them.
2471  *
2472  * Returns the highest subordinate bus number discovered.
2473  */
2474 unsigned int pci_rescan_bus(struct pci_bus *bus)
2475 {
2476 	unsigned int max;
2477 
2478 	max = pci_scan_child_bus(bus);
2479 	pci_assign_unassigned_bus_resources(bus);
2480 	pci_bus_add_devices(bus);
2481 
2482 	return max;
2483 }
2484 EXPORT_SYMBOL_GPL(pci_rescan_bus);
2485 
2486 /*
2487  * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2488  * routines should always be executed under this mutex.
2489  */
2490 static DEFINE_MUTEX(pci_rescan_remove_lock);
2491 
2492 void pci_lock_rescan_remove(void)
2493 {
2494 	mutex_lock(&pci_rescan_remove_lock);
2495 }
2496 EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2497 
2498 void pci_unlock_rescan_remove(void)
2499 {
2500 	mutex_unlock(&pci_rescan_remove_lock);
2501 }
2502 EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
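
/*
 * Usage sketch: rescan paths (e.g. a sysfs "rescan" handler) bracket
 * the operation with this lock:
 *
 *	pci_lock_rescan_remove();
 *	pci_rescan_bus(bus);
 *	pci_unlock_rescan_remove();
 */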
2503 
2504 static int __init pci_sort_bf_cmp(const struct device *d_a,
2505 				  const struct device *d_b)
2506 {
2507 	const struct pci_dev *a = to_pci_dev(d_a);
2508 	const struct pci_dev *b = to_pci_dev(d_b);
2509 
2510 	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2511 	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
2512 
2513 	if      (a->bus->number < b->bus->number) return -1;
2514 	else if (a->bus->number > b->bus->number) return  1;
2515 
2516 	if      (a->devfn < b->devfn) return -1;
2517 	else if (a->devfn > b->devfn) return  1;
2518 
2519 	return 0;
2520 }
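
/*
 * Worked example of the ordering above: 0000:00:02.0 sorts before
 * 0000:00:1f.0 (same domain and bus, lower devfn), and both sort
 * before 0000:01:00.0 (lower bus number wins).
 */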
2521 
2522 void __init pci_sort_breadthfirst(void)
2523 {
2524 	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
2525 }
2526