xref: /openbmc/linux/drivers/pci/probe.c (revision bb0eb050)
1 /*
2  * probe.c - PCI detection and setup code
3  */
4 
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/of_device.h>
10 #include <linux/of_pci.h>
11 #include <linux/pci_hotplug.h>
12 #include <linux/slab.h>
13 #include <linux/module.h>
14 #include <linux/cpumask.h>
15 #include <linux/pci-aspm.h>
16 #include <linux/aer.h>
17 #include <linux/acpi.h>
18 #include <linux/irqdomain.h>
19 #include <linux/pm_runtime.h>
20 #include "pci.h"
21 
22 #define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
23 #define CARDBUS_RESERVE_BUSNR	3
24 
/*
 * Default bus-number resource covering the full 0-255 range; used when
 * no per-domain bus-number resource has been set up.
 */
static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};
31 
/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static LIST_HEAD(pci_domain_busn_res_list);

/* Per-domain bus-number resource, linked on pci_domain_busn_res_list */
struct pci_domain_busn_res {
	struct list_head list;	/* entry in pci_domain_busn_res_list */
	struct resource res;	/* bus numbers available in this domain */
	int domain_nr;		/* PCI domain the entry belongs to */
};
43 
44 static struct resource *get_pci_domain_busn_res(int domain_nr)
45 {
46 	struct pci_domain_busn_res *r;
47 
48 	list_for_each_entry(r, &pci_domain_busn_res_list, list)
49 		if (r->domain_nr == domain_nr)
50 			return &r->res;
51 
52 	r = kzalloc(sizeof(*r), GFP_KERNEL);
53 	if (!r)
54 		return NULL;
55 
56 	r->domain_nr = domain_nr;
57 	r->res.start = 0;
58 	r->res.end = 0xff;
59 	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
60 
61 	list_add_tail(&r->list, &pci_domain_busn_res_list);
62 
63 	return &r->res;
64 }
65 
/* bus_find_device() match callback that accepts every device */
static int find_anything(struct device *dev, void *data)
{
	return 1;
}
70 
/*
 * Some device drivers need to know whether PCI has been initialized.
 * Basically, we consider PCI uninitialized when there is no device
 * to be found on the pci_bus_type.
 */
76 int no_pci_devices(void)
77 {
78 	struct device *dev;
79 	int no_devices;
80 
81 	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
82 	no_devices = (dev == NULL);
83 	put_device(dev);
84 	return no_devices;
85 }
86 EXPORT_SYMBOL(no_pci_devices);
87 
88 /*
89  * PCI Bus Class
90  */
/*
 * Device release callback for a pci_bus: drop the reference held on the
 * bridge device, detach the bus resources and OF node, free the bus.
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}
100 
/* sysfs class for PCI buses; instances are freed by release_pcibus_dev() */
static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};
106 
/* Register the pci_bus class early, before PCI enumeration needs it */
static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);
112 
113 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
114 {
115 	u64 size = mask & maxbase;	/* Find the significant bits */
116 	if (!size)
117 		return 0;
118 
119 	/* Get the lowest of them to find the decode size, and
120 	   from that the extent.  */
121 	size = (size & ~(size-1)) - 1;
122 
123 	/* base == maxbase can be valid only if the BAR has
124 	   already been programmed with all 1s.  */
125 	if (base == maxbase && ((base | size) & mask) != mask)
126 		return 0;
127 
128 	return size;
129 }
130 
131 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
132 {
133 	u32 mem_type;
134 	unsigned long flags;
135 
136 	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
137 		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
138 		flags |= IORESOURCE_IO;
139 		return flags;
140 	}
141 
142 	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
143 	flags |= IORESOURCE_MEM;
144 	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
145 		flags |= IORESOURCE_PREFETCH;
146 
147 	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
148 	switch (mem_type) {
149 	case PCI_BASE_ADDRESS_MEM_TYPE_32:
150 		break;
151 	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
152 		/* 1M mem BAR treated as 32-bit BAR */
153 		break;
154 	case PCI_BASE_ADDRESS_MEM_TYPE_64:
155 		flags |= IORESOURCE_MEM_64;
156 		break;
157 	default:
158 		/* mem unknown type treated as 32-bit BAR */
159 		break;
160 	}
161 	return flags;
162 }
163 
#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Sizes the BAR with the classic probe (write all 1s, read back which
 * bits stick, restore the original value) and fills in @res with the
 * CPU-side resource.  Decoding is temporarily disabled around the probe
 * unless the device requires MMIO to stay on.
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l = 0, sz = 0, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;

	/* ROM BARs keep an enable bit in bit 0, so they use a narrower mask */
	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	/* Sizing probe: save, write all 1s, read back, restore */
	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (sz == 0xffffffff)
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		/* Expansion ROM BAR */
		if (l & PCI_ROM_ADDRESS_ENABLE)
			res->flags |= IORESOURCE_ROM_ENABLE;
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = PCI_ROM_ADDRESS_MASK;
	}

	/* 64-bit BAR: repeat the probe on the upper dword */
	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	/* Re-enable decoding if we turned it off above */
	if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (!sz64)
		goto fail;

	sz64 = pci_size(l64, sz64, mask64);
	if (!sz64) {
		dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
			 pos);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		/* Can't represent the size on a 32-bit kernel: disable BAR */
		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
		    && sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
				pos, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(pci_bus_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
				 pos, (unsigned long long)l64);
			goto out;
		}
	}

	region.start = l64;
	region.end = l64 + sz64;

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU).  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long)region.start);
	}

	goto out;


fail:
	res->flags = 0;
out:
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
319 
/*
 * pci_read_bases - size and record a device's BARs and expansion ROM
 * @dev: device whose BARs to probe
 * @howmany: number of 32-bit BAR slots in the header
 * @rom: config-space offset of the expansion ROM BAR, or 0 if none
 */
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	/* Leave devices with known-broken BARs completely alone */
	if (dev->non_compliant_bars)
		return;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		/* a 64-bit BAR occupies two slots; skip the upper half */
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}
341 
342 static void pci_read_bridge_io(struct pci_bus *child)
343 {
344 	struct pci_dev *dev = child->self;
345 	u8 io_base_lo, io_limit_lo;
346 	unsigned long io_mask, io_granularity, base, limit;
347 	struct pci_bus_region region;
348 	struct resource *res;
349 
350 	io_mask = PCI_IO_RANGE_MASK;
351 	io_granularity = 0x1000;
352 	if (dev->io_window_1k) {
353 		/* Support 1K I/O space granularity */
354 		io_mask = PCI_IO_1K_RANGE_MASK;
355 		io_granularity = 0x400;
356 	}
357 
358 	res = child->resource[0];
359 	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
360 	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
361 	base = (io_base_lo & io_mask) << 8;
362 	limit = (io_limit_lo & io_mask) << 8;
363 
364 	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
365 		u16 io_base_hi, io_limit_hi;
366 
367 		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
368 		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
369 		base |= ((unsigned long) io_base_hi << 16);
370 		limit |= ((unsigned long) io_limit_hi << 16);
371 	}
372 
373 	if (base <= limit) {
374 		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
375 		region.start = base;
376 		region.end = limit + io_granularity - 1;
377 		pcibios_bus_to_resource(dev->bus, res, &region);
378 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
379 	}
380 }
381 
382 static void pci_read_bridge_mmio(struct pci_bus *child)
383 {
384 	struct pci_dev *dev = child->self;
385 	u16 mem_base_lo, mem_limit_lo;
386 	unsigned long base, limit;
387 	struct pci_bus_region region;
388 	struct resource *res;
389 
390 	res = child->resource[1];
391 	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
392 	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
393 	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
394 	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
395 	if (base <= limit) {
396 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
397 		region.start = base;
398 		region.end = limit + 0xfffff;
399 		pcibios_bus_to_resource(dev->bus, res, &region);
400 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
401 	}
402 }
403 
/*
 * Read the bridge's prefetchable memory window (optionally 64-bit) and
 * fill in the child bus's prefetchable resource (resource[2]).  Windows
 * that cannot be represented in pci_bus_addr_t are rejected.
 */
static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	u64 base64, limit64;
	pci_bus_addr_t base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	/* A 64-bit window carries its upper 32 bits in separate registers */
	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base64 |= (u64) mem_base_hi << 32;
			limit64 |= (u64) mem_limit_hi << 32;
		}
	}

	base = (pci_bus_addr_t) base64;
	limit = (pci_bus_addr_t) limit64;

	/* Truncation here means the bus address doesn't fit on this arch */
	if (base != base64) {
		dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
			(unsigned long long) base64);
		return;
	}

	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
456 
/*
 * pci_read_bridge_bases - read the windows of the bridge above @child
 * @child: bus whose bridge windows to read
 *
 * Attaches the bridge's I/O, memory and prefetchable windows as the
 * child bus's resources.  For transparent bridges, the parent bus's
 * resources are additionally offered for subtractive decode.
 */
void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	/* Point the child's resources at the bridge's window resources */
	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res && res->flags) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}
490 
491 static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
492 {
493 	struct pci_bus *b;
494 
495 	b = kzalloc(sizeof(*b), GFP_KERNEL);
496 	if (!b)
497 		return NULL;
498 
499 	INIT_LIST_HEAD(&b->node);
500 	INIT_LIST_HEAD(&b->children);
501 	INIT_LIST_HEAD(&b->devices);
502 	INIT_LIST_HEAD(&b->slots);
503 	INIT_LIST_HEAD(&b->resources);
504 	b->max_bus_speed = PCI_SPEED_UNKNOWN;
505 	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
506 #ifdef CONFIG_PCI_DOMAINS_GENERIC
507 	if (parent)
508 		b->domain_nr = parent->domain_nr;
509 #endif
510 	return b;
511 }
512 
/*
 * Device release callback for a pci_host_bridge: run the owner's
 * release hook (if any), free the window list, then the bridge itself.
 */
static void pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}
524 
525 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
526 {
527 	struct pci_host_bridge *bridge;
528 
529 	bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
530 	if (!bridge)
531 		return NULL;
532 
533 	INIT_LIST_HEAD(&bridge->windows);
534 
535 	return bridge;
536 }
537 EXPORT_SYMBOL(pci_alloc_host_bridge);
538 
/* PCI-X bus speed, indexed by the 4-bit PCI_X_SSTATUS_FREQ field */
static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

/* PCIe link speed, indexed by the link-speed field of LNKCAP/LNKSTA */
const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};
576 
/* Cache the negotiated link speed from a Link Status register value */
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
582 
/* AGP transfer rates, indexed by the value computed in agp_speed() */
static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};
590 
591 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
592 {
593 	int index = 0;
594 
595 	if (agpstat & 4)
596 		index = 3;
597 	else if (agpstat & 2)
598 		index = 2;
599 	else if (agpstat & 1)
600 		index = 1;
601 	else
602 		goto out;
603 
604 	if (agp3) {
605 		index += 2;
606 		if (index == 5)
607 			index = 0;
608 	}
609 
610  out:
611 	return agp_speeds[index];
612 }
613 
/*
 * pci_set_bus_speed - derive max/current speed of the bus below a bridge
 * @bus: bus whose ->max_bus_speed and ->cur_bus_speed to fill in
 *
 * Checks the bridge for AGP, PCI-X and PCIe capabilities in that order;
 * a PCI-X or PCIe result overrides an earlier AGP one.
 */
static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		/* bit 3 selects AGP3 mode, low 3 bits are the rate */
		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		/* secondary bus mode/frequency field indexes the table */
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}
671 
/*
 * Resolve the MSI IRQ domain for a root bus: try OF first, then ACPI,
 * and finally (when MSI IRQ domains are enabled) a direct fwnode
 * lookup.  Returns NULL if no domain could be found.
 */
static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}
#endif

	return d;
}
700 
/*
 * Assign @bus an MSI IRQ domain: inherit from the nearest bridge above
 * it that has one, or fall back to the host bridge's domain.
 */
static void pci_set_bus_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;
	struct pci_bus *b;

	/*
	 * The bus can be a root bus, a subordinate bus, or a virtual bus
	 * created by an SR-IOV device.  Walk up to the first bridge device
	 * found or derive the domain from the host bridge.
	 */
	for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
		if (b->self)
			d = dev_get_msi_domain(&b->self->dev);
	}

	if (!d)
		d = pci_host_bridge_msi_domain(b);

	dev_set_msi_domain(&bus->dev, d);
}
721 
722 int pci_register_host_bridge(struct pci_host_bridge *bridge)
723 {
724 	struct device *parent = bridge->dev.parent;
725 	struct resource_entry *window, *n;
726 	struct pci_bus *bus, *b;
727 	resource_size_t offset;
728 	LIST_HEAD(resources);
729 	struct resource *res;
730 	char addr[64], *fmt;
731 	const char *name;
732 	int err;
733 
734 	bus = pci_alloc_bus(NULL);
735 	if (!bus)
736 		return -ENOMEM;
737 
738 	bridge->bus = bus;
739 
740 	/* temporarily move resources off the list */
741 	list_splice_init(&bridge->windows, &resources);
742 	bus->sysdata = bridge->sysdata;
743 	bus->msi = bridge->msi;
744 	bus->ops = bridge->ops;
745 	bus->number = bus->busn_res.start = bridge->busnr;
746 #ifdef CONFIG_PCI_DOMAINS_GENERIC
747 	bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
748 #endif
749 
750 	b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
751 	if (b) {
752 		/* If we already got to this bus through a different bridge, ignore it */
753 		dev_dbg(&b->dev, "bus already known\n");
754 		err = -EEXIST;
755 		goto free;
756 	}
757 
758 	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
759 		     bridge->busnr);
760 
761 	err = pcibios_root_bridge_prepare(bridge);
762 	if (err)
763 		goto free;
764 
765 	err = device_register(&bridge->dev);
766 	if (err)
767 		put_device(&bridge->dev);
768 
769 	bus->bridge = get_device(&bridge->dev);
770 	device_enable_async_suspend(bus->bridge);
771 	pci_set_bus_of_node(bus);
772 	pci_set_bus_msi_domain(bus);
773 
774 	if (!parent)
775 		set_dev_node(bus->bridge, pcibus_to_node(bus));
776 
777 	bus->dev.class = &pcibus_class;
778 	bus->dev.parent = bus->bridge;
779 
780 	dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
781 	name = dev_name(&bus->dev);
782 
783 	err = device_register(&bus->dev);
784 	if (err)
785 		goto unregister;
786 
787 	pcibios_add_bus(bus);
788 
789 	/* Create legacy_io and legacy_mem files for this bus */
790 	pci_create_legacy_files(bus);
791 
792 	if (parent)
793 		dev_info(parent, "PCI host bridge to bus %s\n", name);
794 	else
795 		pr_info("PCI host bridge to bus %s\n", name);
796 
797 	/* Add initial resources to the bus */
798 	resource_list_for_each_entry_safe(window, n, &resources) {
799 		list_move_tail(&window->node, &bridge->windows);
800 		offset = window->offset;
801 		res = window->res;
802 
803 		if (res->flags & IORESOURCE_BUS)
804 			pci_bus_insert_busn_res(bus, bus->number, res->end);
805 		else
806 			pci_bus_add_resource(bus, res, 0);
807 
808 		if (offset) {
809 			if (resource_type(res) == IORESOURCE_IO)
810 				fmt = " (bus address [%#06llx-%#06llx])";
811 			else
812 				fmt = " (bus address [%#010llx-%#010llx])";
813 
814 			snprintf(addr, sizeof(addr), fmt,
815 				 (unsigned long long)(res->start - offset),
816 				 (unsigned long long)(res->end - offset));
817 		} else
818 			addr[0] = '\0';
819 
820 		dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
821 	}
822 
823 	down_write(&pci_bus_sem);
824 	list_add_tail(&bus->node, &pci_root_buses);
825 	up_write(&pci_bus_sem);
826 
827 	return 0;
828 
829 unregister:
830 	put_device(&bridge->dev);
831 	device_unregister(&bridge->dev);
832 
833 free:
834 	kfree(bus);
835 	return err;
836 }
837 EXPORT_SYMBOL(pci_register_host_bridge);
838 
/*
 * pci_alloc_child_bus - allocate and register a bus below @parent
 * @parent: bus the new bus hangs off
 * @bridge: bridge device leading to the new bus, or NULL for a virtual
 *          bus with no bridge of its own
 * @busnr: bus number to assign to the new bus
 *
 * Returns the new bus, or NULL on allocation failure.  The caller is
 * responsible for linking the bus into @parent's children list (see
 * pci_add_new_bus()).
 */
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	/* A virtual bus (no bridge device) parents directly to the bus device */
	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	pci_set_bus_msi_domain(child);
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	/* Give the host-controller driver a chance to set up the new bus */
	if (child->ops->add_bus) {
		ret = child->ops->add_bus(child);
		if (WARN_ON(ret < 0))
			dev_err(&child->dev, "failed to add bus: %d\n", ret);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}
909 
910 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
911 				int busnr)
912 {
913 	struct pci_bus *child;
914 
915 	child = pci_alloc_child_bus(parent, dev, busnr);
916 	if (child) {
917 		down_write(&pci_bus_sem);
918 		list_add_tail(&child->node, &parent->children);
919 		up_write(&pci_bus_sem);
920 	}
921 	return child;
922 }
923 EXPORT_SYMBOL(pci_add_new_bus);
924 
/*
 * Enable Configuration Request Retry Status (CRS) Software Visibility
 * on a root port when the hardware advertises support for it.
 */
static void pci_enable_crs(struct pci_dev *pdev)
{
	u16 root_cap = 0;

	/* Enable CRS Software Visibility if supported */
	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_CRSSVE);
}
935 
936 /*
937  * If it's a bridge, configure it and scan the bus behind it.
938  * For CardBus bridges, we don't scan behind as the devices will
939  * be handled by the bridge driver itself.
940  *
941  * We need to process bridges in two passes -- first we scan those
942  * already configured by the BIOS and after we are done with all of
943  * them, we proceed to assigning numbers to the remaining buses in
944  * order to avoid overlaps between old and new bus numbers.
945  */
946 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
947 {
948 	struct pci_bus *child;
949 	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
950 	u32 buses, i, j = 0;
951 	u16 bctl;
952 	u8 primary, secondary, subordinate;
953 	int broken = 0;
954 
955 	/*
956 	 * Make sure the bridge is powered on to be able to access config
957 	 * space of devices below it.
958 	 */
959 	pm_runtime_get_sync(&dev->dev);
960 
961 	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
962 	primary = buses & 0xFF;
963 	secondary = (buses >> 8) & 0xFF;
964 	subordinate = (buses >> 16) & 0xFF;
965 
966 	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
967 		secondary, subordinate, pass);
968 
969 	if (!primary && (primary != bus->number) && secondary && subordinate) {
970 		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
971 		primary = bus->number;
972 	}
973 
974 	/* Check if setup is sensible at all */
975 	if (!pass &&
976 	    (primary != bus->number || secondary <= bus->number ||
977 	     secondary > subordinate)) {
978 		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
979 			 secondary, subordinate);
980 		broken = 1;
981 	}
982 
983 	/* Disable MasterAbortMode during probing to avoid reporting
984 	   of bus errors (in some architectures) */
985 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
986 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
987 			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
988 
989 	pci_enable_crs(dev);
990 
991 	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
992 	    !is_cardbus && !broken) {
993 		unsigned int cmax;
994 		/*
995 		 * Bus already configured by firmware, process it in the first
996 		 * pass and just note the configuration.
997 		 */
998 		if (pass)
999 			goto out;
1000 
1001 		/*
1002 		 * The bus might already exist for two reasons: Either we are
1003 		 * rescanning the bus or the bus is reachable through more than
1004 		 * one bridge. The second case can happen with the i450NX
1005 		 * chipset.
1006 		 */
1007 		child = pci_find_bus(pci_domain_nr(bus), secondary);
1008 		if (!child) {
1009 			child = pci_add_new_bus(bus, dev, secondary);
1010 			if (!child)
1011 				goto out;
1012 			child->primary = primary;
1013 			pci_bus_insert_busn_res(child, secondary, subordinate);
1014 			child->bridge_ctl = bctl;
1015 		}
1016 
1017 		cmax = pci_scan_child_bus(child);
1018 		if (cmax > subordinate)
1019 			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
1020 				 subordinate, cmax);
1021 		/* subordinate should equal child->busn_res.end */
1022 		if (subordinate > max)
1023 			max = subordinate;
1024 	} else {
1025 		/*
1026 		 * We need to assign a number to this bus which we always
1027 		 * do in the second pass.
1028 		 */
1029 		if (!pass) {
1030 			if (pcibios_assign_all_busses() || broken || is_cardbus)
1031 				/* Temporarily disable forwarding of the
1032 				   configuration cycles on all bridges in
1033 				   this bus segment to avoid possible
1034 				   conflicts in the second pass between two
1035 				   bridges programmed with overlapping
1036 				   bus ranges. */
1037 				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
1038 						       buses & ~0xffffff);
1039 			goto out;
1040 		}
1041 
1042 		/* Clear errors */
1043 		pci_write_config_word(dev, PCI_STATUS, 0xffff);
1044 
1045 		/* Prevent assigning a bus number that already exists.
1046 		 * This can happen when a bridge is hot-plugged, so in
1047 		 * this case we only re-scan this bus. */
1048 		child = pci_find_bus(pci_domain_nr(bus), max+1);
1049 		if (!child) {
1050 			child = pci_add_new_bus(bus, dev, max+1);
1051 			if (!child)
1052 				goto out;
1053 			pci_bus_insert_busn_res(child, max+1, 0xff);
1054 		}
1055 		max++;
1056 		buses = (buses & 0xff000000)
1057 		      | ((unsigned int)(child->primary)     <<  0)
1058 		      | ((unsigned int)(child->busn_res.start)   <<  8)
1059 		      | ((unsigned int)(child->busn_res.end) << 16);
1060 
1061 		/*
1062 		 * yenta.c forces a secondary latency timer of 176.
1063 		 * Copy that behaviour here.
1064 		 */
1065 		if (is_cardbus) {
1066 			buses &= ~0xff000000;
1067 			buses |= CARDBUS_LATENCY_TIMER << 24;
1068 		}
1069 
1070 		/*
1071 		 * We need to blast all three values with a single write.
1072 		 */
1073 		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
1074 
1075 		if (!is_cardbus) {
1076 			child->bridge_ctl = bctl;
1077 			max = pci_scan_child_bus(child);
1078 		} else {
1079 			/*
1080 			 * For CardBus bridges, we leave 4 bus numbers
1081 			 * as cards with a PCI-to-PCI bridge can be
1082 			 * inserted later.
1083 			 */
1084 			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
1085 				struct pci_bus *parent = bus;
1086 				if (pci_find_bus(pci_domain_nr(bus),
1087 							max+i+1))
1088 					break;
1089 				while (parent->parent) {
1090 					if ((!pcibios_assign_all_busses()) &&
1091 					    (parent->busn_res.end > max) &&
1092 					    (parent->busn_res.end <= max+i)) {
1093 						j = 1;
1094 					}
1095 					parent = parent->parent;
1096 				}
1097 				if (j) {
1098 					/*
1099 					 * Often, there are two cardbus bridges
1100 					 * -- try to leave one valid bus number
1101 					 * for each one.
1102 					 */
1103 					i /= 2;
1104 					break;
1105 				}
1106 			}
1107 			max += i;
1108 		}
1109 		/*
1110 		 * Set the subordinate bus number to its real value.
1111 		 */
1112 		pci_bus_update_busn_res_end(child, max);
1113 		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
1114 	}
1115 
1116 	sprintf(child->name,
1117 		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
1118 		pci_domain_nr(bus), child->number);
1119 
1120 	/* Has only triggered on CardBus, fixup is in yenta_socket */
1121 	while (bus->parent) {
1122 		if ((child->busn_res.end > bus->busn_res.end) ||
1123 		    (child->number > bus->busn_res.end) ||
1124 		    (child->number < bus->number) ||
1125 		    (child->busn_res.end < bus->number)) {
1126 			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
1127 				&child->busn_res,
1128 				(bus->number > child->busn_res.end &&
1129 				 bus->busn_res.end < child->number) ?
1130 					"wholly" : "partially",
1131 				bus->self->transparent ? " transparent" : "",
1132 				dev_name(&bus->dev),
1133 				&bus->busn_res);
1134 		}
1135 		bus = bus->parent;
1136 	}
1137 
1138 out:
1139 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
1140 
1141 	pm_runtime_put(&dev->dev);
1142 
1143 	return max;
1144 }
1145 EXPORT_SYMBOL(pci_scan_bridge);
1146 
1147 /*
1148  * Read interrupt line and base address registers.
1149  * The architecture-dependent code can tweak these, of course.
1150  */
1151 static void pci_read_irq(struct pci_dev *dev)
1152 {
1153 	unsigned char irq;
1154 
1155 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
1156 	dev->pin = irq;
1157 	if (irq)
1158 		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
1159 	dev->irq = irq;
1160 }
1161 
/*
 * set_pcie_port_type - cache PCIe capability information in the pci_dev
 * @pdev: device to interrogate
 *
 * Records the PCI Express capability offset, flags register, supported
 * Max Payload Size encoding, and whether this port is the upstream end
 * of a Link (has_secondary_link).  Does nothing for non-PCIe devices.
 */
void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;
	int type;
	struct pci_dev *parent;

	/* No Express capability means this is a conventional PCI device */
	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;

	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	/* Keep only the Max Payload Size Supported field of DEVCAP */
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;

	/*
	 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
	 * of a Link.  No PCIe component has two Links.  Two Links are
	 * connected by a Switch that has a Port on each Link and internal
	 * logic to connect the two Ports.
	 */
	type = pci_pcie_type(pdev);
	if (type == PCI_EXP_TYPE_ROOT_PORT ||
	    type == PCI_EXP_TYPE_PCIE_BRIDGE)
		pdev->has_secondary_link = 1;
	else if (type == PCI_EXP_TYPE_UPSTREAM ||
		 type == PCI_EXP_TYPE_DOWNSTREAM) {
		parent = pci_upstream_bridge(pdev);

		/*
		 * Usually there's an upstream device (Root Port or Switch
		 * Downstream Port), but we can't assume one exists.
		 */
		if (parent && !parent->has_secondary_link)
			pdev->has_secondary_link = 1;
	}
}
1201 
1202 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
1203 {
1204 	u32 reg32;
1205 
1206 	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
1207 	if (reg32 & PCI_EXP_SLTCAP_HPC)
1208 		pdev->is_hotplug_bridge = 1;
1209 }
1210 
1211 static void set_pcie_thunderbolt(struct pci_dev *dev)
1212 {
1213 	int vsec = 0;
1214 	u32 header;
1215 
1216 	while ((vsec = pci_find_next_ext_capability(dev, vsec,
1217 						    PCI_EXT_CAP_ID_VNDR))) {
1218 		pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
1219 
1220 		/* Is the device part of a Thunderbolt controller? */
1221 		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
1222 		    PCI_VNDR_HEADER_ID(header) == PCI_VSEC_ID_INTEL_TBT) {
1223 			dev->is_thunderbolt = 1;
1224 			return;
1225 		}
1226 	}
1227 }
1228 
1229 /**
1230  * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
1231  * @dev: PCI device
1232  *
1233  * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
1234  * when forwarding a type1 configuration request the bridge must check that
1235  * the extended register address field is zero.  The bridge is not permitted
1236  * to forward the transactions and must handle it as an Unsupported Request.
1237  * Some bridges do not follow this rule and simply drop the extended register
1238  * bits, resulting in the standard config space being aliased, every 256
1239  * bytes across the entire configuration space.  Test for this condition by
1240  * comparing the first dword of each potential alias to the vendor/device ID.
1241  * Known offenders:
1242  *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1243  *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
1244  */
static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_QUIRKS
	int pos;
	u32 header, tmp;

	/* Reference dword: the vendor/device ID at offset 0 */
	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);

	/*
	 * Probe the first dword of each 256-byte alias candidate; any
	 * failed read or mismatch proves extended space is real.
	 */
	for (pos = PCI_CFG_SPACE_SIZE;
	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
		    || header != tmp)
			return false;
	}

	/* Every alias offset echoed the ID dword: config space is aliased */
	return true;
#else
	return false;
#endif
}
1265 
1266 /**
1267  * pci_cfg_space_size - get the configuration space size of the PCI device.
1268  * @dev: PCI device
1269  *
1270  * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1271  * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
1272  * access it.  Maybe we don't have a way to generate extended config space
1273  * accesses, or the device is behind a reverse Express bridge.  So we try
1274  * reading the dword at 0x100 which must either be 0 or a valid extended
1275  * capability header.
1276  */
1277 static int pci_cfg_space_size_ext(struct pci_dev *dev)
1278 {
1279 	u32 status;
1280 	int pos = PCI_CFG_SPACE_SIZE;
1281 
1282 	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1283 		return PCI_CFG_SPACE_SIZE;
1284 	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
1285 		return PCI_CFG_SPACE_SIZE;
1286 
1287 	return PCI_CFG_SPACE_EXP_SIZE;
1288 }
1289 
1290 int pci_cfg_space_size(struct pci_dev *dev)
1291 {
1292 	int pos;
1293 	u32 status;
1294 	u16 class;
1295 
1296 	class = dev->class >> 8;
1297 	if (class == PCI_CLASS_BRIDGE_HOST)
1298 		return pci_cfg_space_size_ext(dev);
1299 
1300 	if (pci_is_pcie(dev))
1301 		return pci_cfg_space_size_ext(dev);
1302 
1303 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1304 	if (!pos)
1305 		return PCI_CFG_SPACE_SIZE;
1306 
1307 	pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1308 	if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
1309 		return pci_cfg_space_size_ext(dev);
1310 
1311 	return PCI_CFG_SPACE_SIZE;
1312 }
1313 
1314 #define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1315 
static void pci_msi_setup_pci_dev(struct pci_dev *dev)
{
	/*
	 * Disable the MSI hardware to avoid screaming interrupts
	 * during boot.  This is the power on reset default so
	 * usually this should be a noop.
	 */
	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (dev->msi_cap)
		pci_msi_set_enable(dev, 0);

	/* Same for MSI-X: cache the capability offset and disable it */
	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (dev->msix_cap)
		pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
}
1331 
1332 /**
1333  * pci_setup_device - fill in class and map information of a device
1334  * @dev: the device structure to fill
1335  *
1336  * Initialize the device structure with information about the device's
1337  * vendor,class,memory and IO-space addresses,IRQ lines etc.
1338  * Called at initialisation of the PCI subsystem and by CardBus services.
1339  * Returns 0 on success and negative if unknown type of device (not normal,
1340  * bridge or CardBus).
1341  */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u16 cmd;
	u8 hdr_type;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	/* Low 7 bits are the header layout; bit 7 flags multifunction */
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	pci_dev_assign_slot(dev);
	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;

	/* Canonical device name: domain:bus:slot.function */
	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	/* One dword holds revision (low byte) and class code (upper 3) */
	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		    /* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* need to have dev->cfg_size ready */
	set_pcie_thunderbolt(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	if (dev->non_compliant_bars) {
		/* Stop the device decoding before we probe its broken BARs */
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
			cmd &= ~PCI_COMMAND_IO;
			cmd &= ~PCI_COMMAND_MEMORY;
			pci_write_config_word(dev, PCI_COMMAND, cmd);
		}
	}

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code. Legacy mode ATA controllers have fixed
		 * addresses. These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			/* Primary channel in legacy mode: fixed 0x1F0/0x3F6 */
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
					 res);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
					 res);
			}
			/* Secondary channel in legacy mode: fixed 0x170/0x376 */
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
					 res);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
					 res);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		/* Bridges carry subsystem IDs in the SSVID capability */
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
			dev->hdr_type);
		return -EIO;

	bad:
		/* Class and header type disagree: keep the device but
		 * neutralize its class so nothing binds on bad data. */
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
			dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED << 8;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}
1495 
/*
 * pci_configure_mps - make the device's Max Payload Size match upstream
 * @dev: device just discovered
 *
 * In the default bus-config mode, sets the device's MPS to its upstream
 * bridge's value so the link pair agrees; otherwise only warns about
 * mismatches (TUNE_OFF) or defers to pcie_bus_configure_settings().
 */
static void pci_configure_mps(struct pci_dev *dev)
{
	struct pci_dev *bridge = pci_upstream_bridge(dev);
	int mps, p_mps, rc;

	/* MPS is a PCIe concept; both ends of the link must be PCIe */
	if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
		return;

	mps = pcie_get_mps(dev);
	p_mps = pcie_get_mps(bridge);

	if (mps == p_mps)
		return;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 mps, pci_name(bridge), p_mps);
		return;
	}

	/*
	 * Fancier MPS configuration is done later by
	 * pcie_bus_configure_settings()
	 */
	if (pcie_bus_config != PCIE_BUS_DEFAULT)
		return;

	rc = pcie_set_mps(dev, p_mps);
	if (rc) {
		dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 p_mps);
		return;
	}

	dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
		 p_mps, mps, 128 << dev->pcie_mpss);
}
1533 
/*
 * Default Type 0 settings applied by program_hpp_type0() when the
 * platform supplies no hotplug parameters (hpp == NULL) or an
 * unsupported revision.
 */
static struct hpp_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,	/* written to PCI_CACHE_LINE_SIZE */
	.latency_timer = 0x40,	/* written to PCI_LATENCY_TIMER */
	.enable_serr = 0,
	.enable_perr = 0,
};
1541 
/*
 * program_hpp_type0 - apply Type 0 (PCI) hotplug parameters to a device
 * @dev: device to program
 * @hpp: platform-provided settings, or NULL to use pci_default_type0
 *
 * Programs cache line size, latency timer, and SERR/PERR reporting in
 * the command register; bridges additionally get the secondary latency
 * timer and bridge-control SERR/PERR bits.
 */
static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
{
	u16 pci_cmd, pci_bctl;

	if (!hpp)
		hpp = &pci_default_type0;

	/* Only revision <= 1 layouts are understood; fall back otherwise */
	if (hpp->revision > 1) {
		dev_warn(&dev->dev,
			 "PCI settings rev %d not supported; using defaults\n",
			 hpp->revision);
		hpp = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpp->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpp->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpp->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpp->enable_serr)
			pci_bctl |= PCI_BRIDGE_CTL_SERR;
		if (hpp->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}
1577 
1578 static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1579 {
1580 	int pos;
1581 
1582 	if (!hpp)
1583 		return;
1584 
1585 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1586 	if (!pos)
1587 		return;
1588 
1589 	dev_warn(&dev->dev, "PCI-X settings not supported\n");
1590 }
1591 
1592 static bool pcie_root_rcb_set(struct pci_dev *dev)
1593 {
1594 	struct pci_dev *rp = pcie_find_root_port(dev);
1595 	u16 lnkctl;
1596 
1597 	if (!rp)
1598 		return false;
1599 
1600 	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
1601 	if (lnkctl & PCI_EXP_LNKCTL_RCB)
1602 		return true;
1603 
1604 	return false;
1605 }
1606 
/*
 * program_hpp_type2 - apply Type 2 (PCIe) hotplug parameters to a device
 * @dev: PCIe device to program
 * @hpp: platform-provided settings, or NULL for a no-op
 *
 * Applies the and/or masks to the Device Control and Link Control
 * registers and to the AER mask/severity/capability registers.  Note
 * that @hpp is modified in place to mask out fields the kernel manages
 * itself (MPS, MRRS, RCB).
 */
static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
	int pos;
	u32 reg32;

	if (!hpp)
		return;

	if (!pci_is_pcie(dev))
		return;

	/* Only revision <= 1 layouts are understood */
	if (hpp->revision > 1) {
		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
			 hpp->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ;
	hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {

		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}
1686 
1687 static void pci_configure_extended_tags(struct pci_dev *dev)
1688 {
1689 	u32 dev_cap;
1690 	int ret;
1691 
1692 	if (!pci_is_pcie(dev))
1693 		return;
1694 
1695 	ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &dev_cap);
1696 	if (ret)
1697 		return;
1698 
1699 	if (dev_cap & PCI_EXP_DEVCAP_EXT_TAG)
1700 		pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
1701 					 PCI_EXP_DEVCTL_EXT_TAG);
1702 }
1703 
/*
 * pci_configure_device - one-time configuration of a newly found device
 * @dev: device to configure
 *
 * Sets MPS and extended tags, then applies any platform hotplug
 * parameters (type 2 first, matching the t2/t1/t0 priority below).
 */
static void pci_configure_device(struct pci_dev *dev)
{
	struct hotplug_params hpp;
	int ret;

	pci_configure_mps(dev);
	pci_configure_extended_tags(dev);

	/* No platform hotplug parameters: nothing more to do */
	memset(&hpp, 0, sizeof(hpp));
	ret = pci_get_hp_params(dev, &hpp);
	if (ret)
		return;

	program_hpp_type2(dev, hpp.t2);
	program_hpp_type1(dev, hpp.t1);
	program_hpp_type0(dev, hpp.t0);
}
1721 
/* Release capability-related state allocated during device setup */
static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}
1728 
1729 /**
1730  * pci_release_dev - free a pci device structure when all users of it are finished.
1731  * @dev: device that's been disconnected
1732  *
1733  * Will be called only by the device core when all users of this pci device are
1734  * done.
1735  */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);
	pcibios_release_device(pci_dev);
	/* Drop the bus reference taken in pci_alloc_dev() */
	pci_bus_put(pci_dev->bus);
	kfree(pci_dev->driver_override);
	kfree(pci_dev->dma_alias_mask);
	kfree(pci_dev);
}
1749 
1750 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1751 {
1752 	struct pci_dev *dev;
1753 
1754 	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1755 	if (!dev)
1756 		return NULL;
1757 
1758 	INIT_LIST_HEAD(&dev->bus_list);
1759 	dev->dev.type = &pci_dev_type;
1760 	dev->bus = pci_bus_get(bus);
1761 
1762 	return dev;
1763 }
1764 EXPORT_SYMBOL(pci_alloc_dev);
1765 
/*
 * pci_bus_read_dev_vendor_id - read vendor/device ID, waiting out CRS
 * @bus: bus to probe
 * @devfn: device/function number
 * @l: filled with the vendor/device ID dword on success
 * @crs_timeout: how long (ms) to retry a CRS-completing device; 0 means
 *               don't retry at all
 *
 * Returns true if a device responded with a plausible ID.
 */
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int crs_timeout)
{
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	/*
	 * Configuration Request Retry Status.  Some root ports return the
	 * actual device ID instead of the synthetic ID (0xFFFF) required
	 * by the PCIe spec.  Ignore the device ID and only check for
	 * (vendor id == 1).
	 */
	while ((*l & 0xffff) == 0x0001) {
		if (!crs_timeout)
			return false;

		/* Exponential backoff between retries */
		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
		/* Card hasn't responded within crs_timeout ms?  Must be stuck. */
		if (delay > crs_timeout) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
			       pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
			       PCI_FUNC(devfn));
			return false;
		}
	}

	return true;
}
1804 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1805 
1806 /*
1807  * Read the config data for a PCI device, sanity-check it
1808  * and fill in the dev structure...
1809  */
1810 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1811 {
1812 	struct pci_dev *dev;
1813 	u32 l;
1814 
1815 	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1816 		return NULL;
1817 
1818 	dev = pci_alloc_dev(bus);
1819 	if (!dev)
1820 		return NULL;
1821 
1822 	dev->devfn = devfn;
1823 	dev->vendor = l & 0xffff;
1824 	dev->device = (l >> 16) & 0xffff;
1825 
1826 	pci_set_of_node(dev);
1827 
1828 	if (pci_setup_device(dev)) {
1829 		pci_bus_put(dev->bus);
1830 		kfree(dev);
1831 		return NULL;
1832 	}
1833 
1834 	return dev;
1835 }
1836 
/* Initialize every optional capability the device advertises */
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* Enhanced Allocation */
	pci_ea_init(dev);

	/* Setup MSI caps & disable MSI/MSI-X interrupts */
	pci_msi_setup_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Address Translation Services */
	pci_ats_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);

	/* Precision Time Measurement */
	pci_ptm_init(dev);

	/* Advanced Error Reporting */
	pci_aer_init(dev);
}
1872 
1873 /*
1874  * This is the equivalent of pci_host_bridge_msi_domain that acts on
1875  * devices. Firmware interfaces that can select the MSI domain on a
1876  * per-device basis should be called from here.
1877  */
1878 static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
1879 {
1880 	struct irq_domain *d;
1881 
1882 	/*
1883 	 * If a domain has been set through the pcibios_add_device
1884 	 * callback, then this is the one (platform code knows best).
1885 	 */
1886 	d = dev_get_msi_domain(&dev->dev);
1887 	if (d)
1888 		return d;
1889 
1890 	/*
1891 	 * Let's see if we have a firmware interface able to provide
1892 	 * the domain.
1893 	 */
1894 	d = pci_msi_get_device_domain(dev);
1895 	if (d)
1896 		return d;
1897 
1898 	return NULL;
1899 }
1900 
1901 static void pci_set_msi_domain(struct pci_dev *dev)
1902 {
1903 	struct irq_domain *d;
1904 
1905 	/*
1906 	 * If the platform or firmware interfaces cannot supply a
1907 	 * device-specific MSI domain, then inherit the default domain
1908 	 * from the host bridge itself.
1909 	 */
1910 	d = pci_dev_msi_domain(dev);
1911 	if (!d)
1912 		d = dev_get_msi_domain(&dev->bus->dev);
1913 
1914 	dev_set_msi_domain(&dev->dev, d);
1915 }
1916 
/*
 * pci_device_add - configure and register a discovered device
 * @dev: device produced by pci_scan_device()
 * @bus: bus the device sits on
 *
 * Finishes device setup (DMA defaults, header fixups, capabilities),
 * links the device into @bus->devices, and registers it with the
 * driver core with driver matching deferred (match_driver = false).
 */
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	pci_configure_device(dev);

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	/* Default to 32-bit coherent DMA, mirroring dev->dma_mask */
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Setup MSI irq domain */
	pci_set_msi_domain(dev);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);
}
1965 
1966 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
1967 {
1968 	struct pci_dev *dev;
1969 
1970 	dev = pci_get_slot(bus, devfn);
1971 	if (dev) {
1972 		pci_dev_put(dev);
1973 		return dev;
1974 	}
1975 
1976 	dev = pci_scan_device(bus, devfn);
1977 	if (!dev)
1978 		return NULL;
1979 
1980 	pci_device_add(dev, bus);
1981 
1982 	return dev;
1983 }
1984 EXPORT_SYMBOL(pci_scan_single_device);
1985 
1986 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
1987 {
1988 	int pos;
1989 	u16 cap = 0;
1990 	unsigned next_fn;
1991 
1992 	if (pci_ari_enabled(bus)) {
1993 		if (!dev)
1994 			return 0;
1995 		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1996 		if (!pos)
1997 			return 0;
1998 
1999 		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
2000 		next_fn = PCI_ARI_CAP_NFN(cap);
2001 		if (next_fn <= fn)
2002 			return 0;	/* protect against malformed list */
2003 
2004 		return next_fn;
2005 	}
2006 
2007 	/* dev may be NULL for non-contiguous multifunction devices */
2008 	if (!dev || dev->multifunction)
2009 		return (fn + 1) % 8;
2010 
2011 	return 0;
2012 }
2013 
2014 static int only_one_child(struct pci_bus *bus)
2015 {
2016 	struct pci_dev *parent = bus->self;
2017 
2018 	if (!parent || !pci_is_pcie(parent))
2019 		return 0;
2020 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
2021 		return 1;
2022 
2023 	/*
2024 	 * PCIe downstream ports are bridges that normally lead to only a
2025 	 * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all
2026 	 * possible devices, not just device 0.  See PCIe spec r3.0,
2027 	 * sec 7.3.1.
2028 	 */
2029 	if (parent->has_secondary_link &&
2030 	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
2031 		return 1;
2032 	return 0;
2033 }
2034 
2035 /**
2036  * pci_scan_slot - scan a PCI slot on a bus for devices.
2037  * @bus: PCI bus to scan
2038  * @devfn: slot number to scan (must have zero function.)
2039  *
2040  * Scan a PCI slot on the specified PCI bus for devices, adding
2041  * discovered devices to the @bus->devices list.  New devices
2042  * will not have is_added set.
2043  *
2044  * Returns the number of new devices found.
2045  */
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
	unsigned fn, nr = 0;
	struct pci_dev *dev;

	if (only_one_child(bus) && (devfn > 0))
		return 0; /* Already scanned the entire slot */

	/* Function 0 must respond before any other function is probed */
	dev = pci_scan_single_device(bus, devfn);
	if (!dev)
		return 0;
	if (!dev->is_added)
		nr++;

	/* Walk the remaining functions (sequentially, or via ARI) */
	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
		dev = pci_scan_single_device(bus, devfn + fn);
		if (dev) {
			if (!dev->is_added)
				nr++;
			/* A responding function > 0 implies multifunction */
			dev->multifunction = 1;
		}
	}

	/* only one slot has pcie device */
	if (bus->self && nr)
		pcie_aspm_init_link_state(bus->self);

	return nr;
}
2075 EXPORT_SYMBOL(pci_scan_slot);
2076 
/*
 * pcie_find_smpss - bus-walk callback computing the smallest MPSS
 * @dev: current device in the walk
 * @data: pointer to a u8 accumulator holding the minimum so far
 *
 * Lowers *data to this device's supported MPS encoding, and clamps it
 * to 0 (MPS=128) when a non-Root-Port hotplug bridge is in the fabric.
 */
static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/*
	 * We don't have a way to change MPS settings on devices that have
	 * drivers attached.  A hot-added device might support only the minimum
	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
	 * where devices may be hot-added, we limit the fabric MPS to 128 so
	 * hot-added devices will work correctly.
	 *
	 * However, if we hot-add a device to a slot directly below a Root
	 * Port, it's impossible for there to be other existing devices below
	 * the port.  We don't limit the MPS in this case because we can
	 * reconfigure MPS on both the Root Port and the hot-added device,
	 * and there are no other devices involved.
	 *
	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
	 */
	if (dev->is_hotplug_bridge &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		*smpss = 0;

	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}
2108 
2109 static void pcie_write_mps(struct pci_dev *dev, int mps)
2110 {
2111 	int rc;
2112 
2113 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
2114 		mps = 128 << dev->pcie_mpss;
2115 
2116 		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
2117 		    dev->bus->self)
2118 			/* For "Performance", the assumption is made that
2119 			 * downstream communication will never be larger than
2120 			 * the MRRS.  So, the MPS only needs to be configured
2121 			 * for the upstream communication.  This being the case,
2122 			 * walk from the top down and set the MPS of the child
2123 			 * to that of the parent bus.
2124 			 *
2125 			 * Configure the device MPS with the smaller of the
2126 			 * device MPSS or the bridge MPS (which is assumed to be
2127 			 * properly configured at this point to the largest
2128 			 * allowable MPS based on its parent bus).
2129 			 */
2130 			mps = min(mps, pcie_get_mps(dev->bus->self));
2131 	}
2132 
2133 	rc = pcie_set_mps(dev, mps);
2134 	if (rc)
2135 		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
2136 }
2137 
2138 static void pcie_write_mrrs(struct pci_dev *dev)
2139 {
2140 	int rc, mrrs;
2141 
2142 	/* In the "safe" case, do not configure the MRRS.  There appear to be
2143 	 * issues with setting MRRS to 0 on a number of devices.
2144 	 */
2145 	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
2146 		return;
2147 
2148 	/* For Max performance, the MRRS must be set to the largest supported
2149 	 * value.  However, it cannot be configured larger than the MPS the
2150 	 * device or the bus can support.  This should already be properly
2151 	 * configured by a prior call to pcie_write_mps.
2152 	 */
2153 	mrrs = pcie_get_mps(dev);
2154 
2155 	/* MRRS is a R/W register.  Invalid values can be written, but a
2156 	 * subsequent read will verify if the value is acceptable or not.
2157 	 * If the MRRS value provided is not acceptable (e.g., too large),
2158 	 * shrink the value until it is acceptable to the HW.
2159 	 */
2160 	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
2161 		rc = pcie_set_readrq(dev, mrrs);
2162 		if (!rc)
2163 			break;
2164 
2165 		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
2166 		mrrs /= 2;
2167 	}
2168 
2169 	if (mrrs < 128)
2170 		dev_err(&dev->dev, "MRRS was unable to be configured with a safe value.  If problems are experienced, try running with pci=pcie_bus_safe\n");
2171 }
2172 
2173 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
2174 {
2175 	int mps, orig_mps;
2176 
2177 	if (!pci_is_pcie(dev))
2178 		return 0;
2179 
2180 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
2181 	    pcie_bus_config == PCIE_BUS_DEFAULT)
2182 		return 0;
2183 
2184 	mps = 128 << *(u8 *)data;
2185 	orig_mps = pcie_get_mps(dev);
2186 
2187 	pcie_write_mps(dev, mps);
2188 	pcie_write_mrrs(dev);
2189 
2190 	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
2191 		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
2192 		 orig_mps, pcie_get_readrq(dev));
2193 
2194 	return 0;
2195 }
2196 
2197 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
2198  * parents then children fashion.  If this changes, then this code will not
2199  * work as designed.
2200  */
2201 void pcie_bus_configure_settings(struct pci_bus *bus)
2202 {
2203 	u8 smpss = 0;
2204 
2205 	if (!bus->self)
2206 		return;
2207 
2208 	if (!pci_is_pcie(bus->self))
2209 		return;
2210 
2211 	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
2212 	 * to be aware of the MPS of the destination.  To work around this,
2213 	 * simply force the MPS of the entire system to the smallest possible.
2214 	 */
2215 	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
2216 		smpss = 0;
2217 
2218 	if (pcie_bus_config == PCIE_BUS_SAFE) {
2219 		smpss = bus->self->pcie_mpss;
2220 
2221 		pcie_find_smpss(bus->self, &smpss);
2222 		pci_walk_bus(bus, pcie_find_smpss, &smpss);
2223 	}
2224 
2225 	pcie_bus_configure_set(bus->self, &smpss);
2226 	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
2227 }
2228 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
2229 
/**
 * pci_scan_child_bus - scan for devices and bridges below a PCI bus
 * @bus: bus to scan below
 *
 * Scans all 32 device slots of @bus, reserves extra bus numbers for
 * SR-IOV where needed, applies arch fixups once per bus, then walks
 * any bridges found in two passes (see pci_scan_bridge()).
 *
 * Returns the highest subordinate bus number used below @bus.
 */
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	/* Two passes; pass semantics are defined by pci_scan_bridge() */
	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (pci_is_bridge(dev))
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * Make sure a hotplug bridge has at least the minimum requested
	 * number of buses.
	 */
	if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
		if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
			max = bus->busn_res.start + pci_hotplug_bus_size - 1;
	}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
2280 
2281 /**
2282  * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
2283  * @bridge: Host bridge to set up.
2284  *
2285  * Default empty implementation.  Replace with an architecture-specific setup
2286  * routine, if necessary.
2287  */
2288 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
2289 {
2290 	return 0;
2291 }
2292 
/* Weak arch hook for newly added buses; default implementation does nothing. */
void __weak pcibios_add_bus(struct pci_bus *bus)
{
}
2296 
/* Weak arch hook for bus removal; default implementation does nothing. */
void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}
2300 
2301 static struct pci_bus *pci_create_root_bus_msi(struct device *parent,
2302 		int bus, struct pci_ops *ops, void *sysdata,
2303 		struct list_head *resources, struct msi_controller *msi)
2304 {
2305 	int error;
2306 	struct pci_host_bridge *bridge;
2307 
2308 	bridge = pci_alloc_host_bridge(0);
2309 	if (!bridge)
2310 		return NULL;
2311 
2312 	bridge->dev.parent = parent;
2313 	bridge->dev.release = pci_release_host_bridge_dev;
2314 
2315 	list_splice_init(resources, &bridge->windows);
2316 	bridge->sysdata = sysdata;
2317 	bridge->busnr = bus;
2318 	bridge->ops = ops;
2319 	bridge->msi = msi;
2320 
2321 	error = pci_register_host_bridge(bridge);
2322 	if (error < 0)
2323 		goto err_out;
2324 
2325 	return bridge->bus;
2326 
2327 err_out:
2328 	kfree(bridge);
2329 	return NULL;
2330 }
2331 
/* Create a root bus without an MSI controller; see pci_create_root_bus_msi(). */
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	return pci_create_root_bus_msi(parent, bus, ops, sysdata, resources,
				       NULL);
}
EXPORT_SYMBOL_GPL(pci_create_root_bus);
2339 
2340 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
2341 {
2342 	struct resource *res = &b->busn_res;
2343 	struct resource *parent_res, *conflict;
2344 
2345 	res->start = bus;
2346 	res->end = bus_max;
2347 	res->flags = IORESOURCE_BUS;
2348 
2349 	if (!pci_is_root_bus(b))
2350 		parent_res = &b->parent->busn_res;
2351 	else {
2352 		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
2353 		res->flags |= IORESOURCE_PCI_FIXED;
2354 	}
2355 
2356 	conflict = request_resource_conflict(parent_res, res);
2357 
2358 	if (conflict)
2359 		dev_printk(KERN_DEBUG, &b->dev,
2360 			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
2361 			    res, pci_is_root_bus(b) ? "domain " : "",
2362 			    parent_res, conflict->name, conflict);
2363 
2364 	return conflict == NULL;
2365 }
2366 
2367 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
2368 {
2369 	struct resource *res = &b->busn_res;
2370 	struct resource old_res = *res;
2371 	resource_size_t size;
2372 	int ret;
2373 
2374 	if (res->start > bus_max)
2375 		return -EINVAL;
2376 
2377 	size = bus_max - res->start + 1;
2378 	ret = adjust_resource(res, res->start, size);
2379 	dev_printk(KERN_DEBUG, &b->dev,
2380 			"busn_res: %pR end %s updated to %02x\n",
2381 			&old_res, ret ? "can not be" : "is", bus_max);
2382 
2383 	if (!ret && !res->parent)
2384 		pci_bus_insert_busn_res(b, res->start, res->end);
2385 
2386 	return ret;
2387 }
2388 
2389 void pci_bus_release_busn_res(struct pci_bus *b)
2390 {
2391 	struct resource *res = &b->busn_res;
2392 	int ret;
2393 
2394 	if (!res->flags || !res->parent)
2395 		return;
2396 
2397 	ret = release_resource(res);
2398 	dev_printk(KERN_DEBUG, &b->dev,
2399 			"busn_res: %pR %s released\n",
2400 			res, ret ? "can not be" : "is");
2401 }
2402 
2403 struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
2404 		struct pci_ops *ops, void *sysdata,
2405 		struct list_head *resources, struct msi_controller *msi)
2406 {
2407 	struct resource_entry *window;
2408 	bool found = false;
2409 	struct pci_bus *b;
2410 	int max;
2411 
2412 	resource_list_for_each_entry(window, resources)
2413 		if (window->res->flags & IORESOURCE_BUS) {
2414 			found = true;
2415 			break;
2416 		}
2417 
2418 	b = pci_create_root_bus_msi(parent, bus, ops, sysdata, resources, msi);
2419 	if (!b)
2420 		return NULL;
2421 
2422 	if (!found) {
2423 		dev_info(&b->dev,
2424 		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2425 			bus);
2426 		pci_bus_insert_busn_res(b, bus, 255);
2427 	}
2428 
2429 	max = pci_scan_child_bus(b);
2430 
2431 	if (!found)
2432 		pci_bus_update_busn_res_end(b, max);
2433 
2434 	return b;
2435 }
2436 
/* Create and scan a root bus without an MSI controller; see pci_scan_root_bus_msi(). */
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	return pci_scan_root_bus_msi(parent, bus, ops, sysdata, resources,
				     NULL);
}
EXPORT_SYMBOL(pci_scan_root_bus);
2444 
2445 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
2446 					void *sysdata)
2447 {
2448 	LIST_HEAD(resources);
2449 	struct pci_bus *b;
2450 
2451 	pci_add_resource(&resources, &ioport_resource);
2452 	pci_add_resource(&resources, &iomem_resource);
2453 	pci_add_resource(&resources, &busn_resource);
2454 	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2455 	if (b) {
2456 		pci_scan_child_bus(b);
2457 	} else {
2458 		pci_free_resource_list(&resources);
2459 	}
2460 	return b;
2461 }
2462 EXPORT_SYMBOL(pci_scan_bus);
2463 
2464 /**
2465  * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2466  * @bridge: PCI bridge for the bus to scan
2467  *
2468  * Scan a PCI bus and child buses for new devices, add them,
2469  * and enable them, resizing bridge mmio/io resource if necessary
2470  * and possible.  The caller must ensure the child devices are already
2471  * removed for resizing to occur.
2472  *
2473  * Returns the max number of subordinate bus discovered.
2474  */
2475 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2476 {
2477 	unsigned int max;
2478 	struct pci_bus *bus = bridge->subordinate;
2479 
2480 	max = pci_scan_child_bus(bus);
2481 
2482 	pci_assign_unassigned_bridge_resources(bridge);
2483 
2484 	pci_bus_add_devices(bus);
2485 
2486 	return max;
2487 }
2488 
2489 /**
2490  * pci_rescan_bus - scan a PCI bus for devices.
2491  * @bus: PCI bus to scan
2492  *
2493  * Scan a PCI bus and child buses for new devices, adds them,
2494  * and enables them.
2495  *
2496  * Returns the max number of subordinate bus discovered.
2497  */
2498 unsigned int pci_rescan_bus(struct pci_bus *bus)
2499 {
2500 	unsigned int max;
2501 
2502 	max = pci_scan_child_bus(bus);
2503 	pci_assign_unassigned_bus_resources(bus);
2504 	pci_bus_add_devices(bus);
2505 
2506 	return max;
2507 }
2508 EXPORT_SYMBOL_GPL(pci_rescan_bus);
2509 
2510 /*
2511  * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2512  * routines should always be executed under this mutex.
2513  */
2514 static DEFINE_MUTEX(pci_rescan_remove_lock);
2515 
2516 void pci_lock_rescan_remove(void)
2517 {
2518 	mutex_lock(&pci_rescan_remove_lock);
2519 }
2520 EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2521 
2522 void pci_unlock_rescan_remove(void)
2523 {
2524 	mutex_unlock(&pci_rescan_remove_lock);
2525 }
2526 EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
2527 
2528 static int __init pci_sort_bf_cmp(const struct device *d_a,
2529 				  const struct device *d_b)
2530 {
2531 	const struct pci_dev *a = to_pci_dev(d_a);
2532 	const struct pci_dev *b = to_pci_dev(d_b);
2533 
2534 	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2535 	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
2536 
2537 	if      (a->bus->number < b->bus->number) return -1;
2538 	else if (a->bus->number > b->bus->number) return  1;
2539 
2540 	if      (a->devfn < b->devfn) return -1;
2541 	else if (a->devfn > b->devfn) return  1;
2542 
2543 	return 0;
2544 }
2545 
/*
 * Re-sort all PCI devices breadth-first: by domain, then bus number,
 * then devfn (ordering defined by pci_sort_bf_cmp()).
 */
void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}
2550