xref: /openbmc/linux/drivers/pci/probe.c (revision 0d456bad)
1 /*
2  * probe.c - PCI detection and setup code
3  */
4 
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/cpumask.h>
12 #include <linux/pci-aspm.h>
13 #include <asm-generic/pci-bridge.h>
14 #include "pci.h"
15 
16 #define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
17 #define CARDBUS_RESERVE_BUSNR	3
18 
19 struct resource busn_resource = {
20 	.name	= "PCI busn",
21 	.start	= 0,
22 	.end	= 255,
23 	.flags	= IORESOURCE_BUS,
24 };
25 
26 /* Ugh.  Need to stop exporting this to modules. */
27 LIST_HEAD(pci_root_buses);
28 EXPORT_SYMBOL(pci_root_buses);
29 
30 static LIST_HEAD(pci_domain_busn_res_list);
31 
32 struct pci_domain_busn_res {
33 	struct list_head list;
34 	struct resource res;
35 	int domain_nr;
36 };
37 
38 static struct resource *get_pci_domain_busn_res(int domain_nr)
39 {
40 	struct pci_domain_busn_res *r;
41 
42 	list_for_each_entry(r, &pci_domain_busn_res_list, list)
43 		if (r->domain_nr == domain_nr)
44 			return &r->res;
45 
46 	r = kzalloc(sizeof(*r), GFP_KERNEL);
47 	if (!r)
48 		return NULL;
49 
50 	r->domain_nr = domain_nr;
51 	r->res.start = 0;
52 	r->res.end = 0xff;
53 	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
54 
55 	list_add_tail(&r->list, &pci_domain_busn_res_list);
56 
57 	return &r->res;
58 }
59 
60 static int find_anything(struct device *dev, void *data)
61 {
62 	return 1;
63 }
64 
65 /*
66  * Some device drivers need to know if PCI is initialized.
67  * Basically, we consider PCI not initialized when there
68  * is no device to be found on the pci_bus_type bus.
69  */
70 int no_pci_devices(void)
71 {
72 	struct device *dev;
73 	int no_devices;
74 
75 	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
76 	no_devices = (dev == NULL);
77 	put_device(dev);
78 	return no_devices;
79 }
80 EXPORT_SYMBOL(no_pci_devices);
81 
82 /*
83  * PCI Bus Class
84  */
85 static void release_pcibus_dev(struct device *dev)
86 {
87 	struct pci_bus *pci_bus = to_pci_bus(dev);
88 
89 	if (pci_bus->bridge)
90 		put_device(pci_bus->bridge);
91 	pci_bus_remove_resources(pci_bus);
92 	pci_release_bus_of_node(pci_bus);
93 	kfree(pci_bus);
94 }
95 
96 static struct class pcibus_class = {
97 	.name		= "pci_bus",
98 	.dev_release	= &release_pcibus_dev,
99 	.dev_attrs	= pcibus_dev_attrs,
100 };
101 
102 static int __init pcibus_class_init(void)
103 {
104 	return class_register(&pcibus_class);
105 }
106 postcore_initcall(pcibus_class_init);
107 
108 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
109 {
110 	u64 size = mask & maxbase;	/* Find the significant bits */
111 	if (!size)
112 		return 0;
113 
114 	/* Get the lowest of them to find the decode size, and
115 	   from that the extent.  */
116 	size = (size & ~(size-1)) - 1;
117 
118 	/* base == maxbase can be valid only if the BAR has
119 	   already been programmed with all 1s.  */
120 	if (base == maxbase && ((base | size) & mask) != mask)
121 		return 0;
122 
123 	return size;
124 }
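/*
 * Worked example (illustrative): sizing a 1 MB 32-bit memory BAR.  After
 * writing all 1s, the readback masked with PCI_BASE_ADDRESS_MEM_MASK is
 * maxbase = 0xfff00000; size & ~(size-1) isolates the lowest set bit,
 * 0x00100000, and subtracting 1 gives 0x000fffff, so the BAR decodes
 * base .. base + 0xfffff.
 */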
125 
126 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
127 {
128 	u32 mem_type;
129 	unsigned long flags;
130 
131 	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
132 		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
133 		flags |= IORESOURCE_IO;
134 		return flags;
135 	}
136 
137 	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
138 	flags |= IORESOURCE_MEM;
139 	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
140 		flags |= IORESOURCE_PREFETCH;
141 
142 	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
143 	switch (mem_type) {
144 	case PCI_BASE_ADDRESS_MEM_TYPE_32:
145 		break;
146 	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
147 		/* 1M mem BAR treated as 32-bit BAR */
148 		break;
149 	case PCI_BASE_ADDRESS_MEM_TYPE_64:
150 		flags |= IORESOURCE_MEM_64;
151 		break;
152 	default:
153 		/* mem unknown type treated as 32-bit BAR */
154 		break;
155 	}
156 	return flags;
157 }
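/*
 * For reference (per the PCI spec): bit 0 of a BAR selects I/O (1) vs.
 * memory (0) space, bits 2:1 give the memory type decoded above (32-bit,
 * below-1M, or 64-bit), and bit 3 marks the region prefetchable.
 */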
158 
159 /**
160  * __pci_read_base - read a PCI BAR
161  * @dev: the PCI device
162  * @type: type of the BAR
163  * @res: resource buffer to be filled in
164  * @pos: BAR position in the config space
165  *
166  * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
167  */
168 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
169 			struct resource *res, unsigned int pos)
170 {
171 	u32 l, sz, mask;
172 	u16 orig_cmd;
173 	struct pci_bus_region region;
174 	bool bar_too_big = false, bar_disabled = false;
175 
176 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
177 
178 	/* No printks while decoding is disabled! */
179 	if (!dev->mmio_always_on) {
180 		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
181 		pci_write_config_word(dev, PCI_COMMAND,
182 			orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));
183 	}
184 
185 	res->name = pci_name(dev);
186 
187 	pci_read_config_dword(dev, pos, &l);
188 	pci_write_config_dword(dev, pos, l | mask);
189 	pci_read_config_dword(dev, pos, &sz);
190 	pci_write_config_dword(dev, pos, l);
191 
192 	/*
193 	 * All bits set in sz means the device isn't working properly.
194 	 * If the BAR isn't implemented, all bits must be 0.  If it's a
195 	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
196 	 * 1 must be clear.
197 	 */
198 	if (!sz || sz == 0xffffffff)
199 		goto fail;
200 
201 	/*
202 	 * I don't know how l can have all bits set.  Copied from old code.
203 	 * Maybe it fixes a bug on some ancient platform.
204 	 */
205 	if (l == 0xffffffff)
206 		l = 0;
207 
208 	if (type == pci_bar_unknown) {
209 		res->flags = decode_bar(dev, l);
210 		res->flags |= IORESOURCE_SIZEALIGN;
211 		if (res->flags & IORESOURCE_IO) {
212 			l &= PCI_BASE_ADDRESS_IO_MASK;
213 			mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
214 		} else {
215 			l &= PCI_BASE_ADDRESS_MEM_MASK;
216 			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
217 		}
218 	} else {
219 		res->flags |= (l & IORESOURCE_ROM_ENABLE);
220 		l &= PCI_ROM_ADDRESS_MASK;
221 		mask = (u32)PCI_ROM_ADDRESS_MASK;
222 	}
223 
224 	if (res->flags & IORESOURCE_MEM_64) {
225 		u64 l64 = l;
226 		u64 sz64 = sz;
227 		u64 mask64 = mask | (u64)~0 << 32;
228 
229 		pci_read_config_dword(dev, pos + 4, &l);
230 		pci_write_config_dword(dev, pos + 4, ~0);
231 		pci_read_config_dword(dev, pos + 4, &sz);
232 		pci_write_config_dword(dev, pos + 4, l);
233 
234 		l64 |= ((u64)l << 32);
235 		sz64 |= ((u64)sz << 32);
236 
237 		sz64 = pci_size(l64, sz64, mask64);
238 
239 		if (!sz64)
240 			goto fail;
241 
242 		if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
243 			bar_too_big = true;
244 			goto fail;
245 		}
246 
247 		if ((sizeof(resource_size_t) < 8) && l) {
248 			/* Address above 32-bit boundary; disable the BAR */
249 			pci_write_config_dword(dev, pos, 0);
250 			pci_write_config_dword(dev, pos + 4, 0);
251 			region.start = 0;
252 			region.end = sz64;
253 			pcibios_bus_to_resource(dev, res, &region);
254 			bar_disabled = true;
255 		} else {
256 			region.start = l64;
257 			region.end = l64 + sz64;
258 			pcibios_bus_to_resource(dev, res, &region);
259 		}
260 	} else {
261 		sz = pci_size(l, sz, mask);
262 
263 		if (!sz)
264 			goto fail;
265 
266 		region.start = l;
267 		region.end = l + sz;
268 		pcibios_bus_to_resource(dev, res, &region);
269 	}
270 
271 	goto out;
272 
273 
274 fail:
275 	res->flags = 0;
276 out:
277 	if (!dev->mmio_always_on)
278 		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
279 
280 	if (bar_too_big)
281 		dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos);
282 	if (res->flags && !bar_disabled)
283 		dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
284 
285 	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
286 }
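/*
 * The read/write/read/write sequence above is the standard BAR sizing
 * probe: save the original value, write all 1s, read back the size mask,
 * then restore.  Decode is disabled around it (unless mmio_always_on) so
 * the device does not transiently respond at the all-ones address.
 */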
287 
288 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
289 {
290 	unsigned int pos, reg;
291 
292 	for (pos = 0; pos < howmany; pos++) {
293 		struct resource *res = &dev->resource[pos];
294 		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
295 		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
296 	}
297 
298 	if (rom) {
299 		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
300 		dev->rom_base_reg = rom;
301 		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
302 				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
303 				IORESOURCE_SIZEALIGN;
304 		__pci_read_base(dev, pci_bar_mem32, res, rom);
305 	}
306 }
307 
308 static void pci_read_bridge_io(struct pci_bus *child)
309 {
310 	struct pci_dev *dev = child->self;
311 	u8 io_base_lo, io_limit_lo;
312 	unsigned long io_mask, io_granularity, base, limit;
313 	struct pci_bus_region region;
314 	struct resource *res;
315 
316 	io_mask = PCI_IO_RANGE_MASK;
317 	io_granularity = 0x1000;
318 	if (dev->io_window_1k) {
319 		/* Support 1K I/O space granularity */
320 		io_mask = PCI_IO_1K_RANGE_MASK;
321 		io_granularity = 0x400;
322 	}
323 
324 	res = child->resource[0];
325 	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
326 	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
327 	base = (io_base_lo & io_mask) << 8;
328 	limit = (io_limit_lo & io_mask) << 8;
329 
330 	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
331 		u16 io_base_hi, io_limit_hi;
332 
333 		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
334 		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
335 		base |= ((unsigned long) io_base_hi << 16);
336 		limit |= ((unsigned long) io_limit_hi << 16);
337 	}
338 
339 	if (base <= limit) {
340 		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
341 		region.start = base;
342 		region.end = limit + io_granularity - 1;
343 		pcibios_bus_to_resource(dev, res, &region);
344 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
345 	}
346 }
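/*
 * Example (illustrative): with 4K granularity, io_base_lo = 0x20 and
 * io_limit_lo = 0x30 decode to base = 0x2000 and limit = 0x3000, giving
 * a forwarded window of [io 0x2000-0x3fff].
 */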
347 
348 static void pci_read_bridge_mmio(struct pci_bus *child)
349 {
350 	struct pci_dev *dev = child->self;
351 	u16 mem_base_lo, mem_limit_lo;
352 	unsigned long base, limit;
353 	struct pci_bus_region region;
354 	struct resource *res;
355 
356 	res = child->resource[1];
357 	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
358 	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
359 	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
360 	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
361 	if (base <= limit) {
362 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
363 		region.start = base;
364 		region.end = limit + 0xfffff;
365 		pcibios_bus_to_resource(dev, res, &region);
366 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
367 	}
368 }
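/*
 * Example (illustrative): mem_base_lo = 0xe000 and mem_limit_lo = 0xe7f0
 * decode to base = 0xe0000000 and limit = 0xe7f00000, giving a forwarded
 * window of [mem 0xe0000000-0xe7ffffff] (1 MB granularity).
 */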
369 
370 static void pci_read_bridge_mmio_pref(struct pci_bus *child)
371 {
372 	struct pci_dev *dev = child->self;
373 	u16 mem_base_lo, mem_limit_lo;
374 	unsigned long base, limit;
375 	struct pci_bus_region region;
376 	struct resource *res;
377 
378 	res = child->resource[2];
379 	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
380 	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
381 	base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
382 	limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
383 
384 	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
385 		u32 mem_base_hi, mem_limit_hi;
386 
387 		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
388 		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
389 
390 		/*
391 		 * Some bridges set the base > limit by default, and some
392 		 * (broken) BIOSes do not initialize them.  If we find
393 		 * this, just assume they are not being used.
394 		 */
395 		if (mem_base_hi <= mem_limit_hi) {
396 #if BITS_PER_LONG == 64
397 			base |= ((unsigned long) mem_base_hi) << 32;
398 			limit |= ((unsigned long) mem_limit_hi) << 32;
399 #else
400 			if (mem_base_hi || mem_limit_hi) {
401 				dev_err(&dev->dev, "can't handle 64-bit "
402 					"address space for bridge\n");
403 				return;
404 			}
405 #endif
406 		}
407 	}
408 	if (base <= limit) {
409 		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
410 					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
411 		if (res->flags & PCI_PREF_RANGE_TYPE_64)
412 			res->flags |= IORESOURCE_MEM_64;
413 		region.start = base;
414 		region.end = limit + 0xfffff;
415 		pcibios_bus_to_resource(dev, res, &region);
416 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
417 	}
418 }
419 
420 void pci_read_bridge_bases(struct pci_bus *child)
421 {
422 	struct pci_dev *dev = child->self;
423 	struct resource *res;
424 	int i;
425 
426 	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
427 		return;
428 
429 	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
430 		 &child->busn_res,
431 		 dev->transparent ? " (subtractive decode)" : "");
432 
433 	pci_bus_remove_resources(child);
434 	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
435 		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
436 
437 	pci_read_bridge_io(child);
438 	pci_read_bridge_mmio(child);
439 	pci_read_bridge_mmio_pref(child);
440 
441 	if (dev->transparent) {
442 		pci_bus_for_each_resource(child->parent, res, i) {
443 			if (res) {
444 				pci_bus_add_resource(child, res,
445 						     PCI_SUBTRACTIVE_DECODE);
446 				dev_printk(KERN_DEBUG, &dev->dev,
447 					   "  bridge window %pR (subtractive decode)\n",
448 					   res);
449 			}
450 		}
451 	}
452 }
453 
454 static struct pci_bus *pci_alloc_bus(void)
455 {
456 	struct pci_bus *b;
457 
458 	b = kzalloc(sizeof(*b), GFP_KERNEL);
459 	if (b) {
460 		INIT_LIST_HEAD(&b->node);
461 		INIT_LIST_HEAD(&b->children);
462 		INIT_LIST_HEAD(&b->devices);
463 		INIT_LIST_HEAD(&b->slots);
464 		INIT_LIST_HEAD(&b->resources);
465 		b->max_bus_speed = PCI_SPEED_UNKNOWN;
466 		b->cur_bus_speed = PCI_SPEED_UNKNOWN;
467 	}
468 	return b;
469 }
470 
471 static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
472 {
473 	struct pci_host_bridge *bridge;
474 
475 	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
476 	if (bridge) {
477 		INIT_LIST_HEAD(&bridge->windows);
478 		bridge->bus = b;
479 	}
480 
481 	return bridge;
482 }
483 
484 static unsigned char pcix_bus_speed[] = {
485 	PCI_SPEED_UNKNOWN,		/* 0 */
486 	PCI_SPEED_66MHz_PCIX,		/* 1 */
487 	PCI_SPEED_100MHz_PCIX,		/* 2 */
488 	PCI_SPEED_133MHz_PCIX,		/* 3 */
489 	PCI_SPEED_UNKNOWN,		/* 4 */
490 	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
491 	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
492 	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
493 	PCI_SPEED_UNKNOWN,		/* 8 */
494 	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
495 	PCI_SPEED_100MHz_PCIX_266,	/* A */
496 	PCI_SPEED_133MHz_PCIX_266,	/* B */
497 	PCI_SPEED_UNKNOWN,		/* C */
498 	PCI_SPEED_66MHz_PCIX_533,	/* D */
499 	PCI_SPEED_100MHz_PCIX_533,	/* E */
500 	PCI_SPEED_133MHz_PCIX_533	/* F */
501 };
502 
503 static unsigned char pcie_link_speed[] = {
504 	PCI_SPEED_UNKNOWN,		/* 0 */
505 	PCIE_SPEED_2_5GT,		/* 1 */
506 	PCIE_SPEED_5_0GT,		/* 2 */
507 	PCIE_SPEED_8_0GT,		/* 3 */
508 	PCI_SPEED_UNKNOWN,		/* 4 */
509 	PCI_SPEED_UNKNOWN,		/* 5 */
510 	PCI_SPEED_UNKNOWN,		/* 6 */
511 	PCI_SPEED_UNKNOWN,		/* 7 */
512 	PCI_SPEED_UNKNOWN,		/* 8 */
513 	PCI_SPEED_UNKNOWN,		/* 9 */
514 	PCI_SPEED_UNKNOWN,		/* A */
515 	PCI_SPEED_UNKNOWN,		/* B */
516 	PCI_SPEED_UNKNOWN,		/* C */
517 	PCI_SPEED_UNKNOWN,		/* D */
518 	PCI_SPEED_UNKNOWN,		/* E */
519 	PCI_SPEED_UNKNOWN		/* F */
520 };
521 
522 void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
523 {
524 	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
525 }
526 EXPORT_SYMBOL_GPL(pcie_update_link_speed);
527 
528 static unsigned char agp_speeds[] = {
529 	AGP_UNKNOWN,
530 	AGP_1X,
531 	AGP_2X,
532 	AGP_4X,
533 	AGP_8X
534 };
535 
536 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
537 {
538 	int index = 0;
539 
540 	if (agpstat & 4)
541 		index = 3;
542 	else if (agpstat & 2)
543 		index = 2;
544 	else if (agpstat & 1)
545 		index = 1;
546 	else
547 		goto out;
548 
549 	if (agp3) {
550 		index += 2;
551 		if (index == 5)
552 			index = 0;
553 	}
554 
555  out:
556 	return agp_speeds[index];
557 }
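/*
 * In AGP 2.x mode the low RATE bits select 1x/2x/4x directly; in AGP 3.0
 * mode (agp3 set) the same bits mean 4x and 8x, hence the index shift by
 * two above (an out-of-range combination falls back to AGP_UNKNOWN).
 */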
558 
559 
560 static void pci_set_bus_speed(struct pci_bus *bus)
561 {
562 	struct pci_dev *bridge = bus->self;
563 	int pos;
564 
565 	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
566 	if (!pos)
567 		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
568 	if (pos) {
569 		u32 agpstat, agpcmd;
570 
571 		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
572 		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
573 
574 		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
575 		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
576 	}
577 
578 	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
579 	if (pos) {
580 		u16 status;
581 		enum pci_bus_speed max;
582 
583 		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
584 				     &status);
585 
586 		if (status & PCI_X_SSTATUS_533MHZ) {
587 			max = PCI_SPEED_133MHz_PCIX_533;
588 		} else if (status & PCI_X_SSTATUS_266MHZ) {
589 			max = PCI_SPEED_133MHz_PCIX_266;
590 		} else if (status & PCI_X_SSTATUS_133MHZ) {
591 			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2) {
592 				max = PCI_SPEED_133MHz_PCIX_ECC;
593 			} else {
594 				max = PCI_SPEED_133MHz_PCIX;
595 			}
596 		} else {
597 			max = PCI_SPEED_66MHz_PCIX;
598 		}
599 
600 		bus->max_bus_speed = max;
601 		bus->cur_bus_speed = pcix_bus_speed[
602 			(status & PCI_X_SSTATUS_FREQ) >> 6];
603 
604 		return;
605 	}
606 
607 	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
608 	if (pos) {
609 		u32 linkcap;
610 		u16 linksta;
611 
612 		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
613 		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
614 
615 		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
616 		pcie_update_link_speed(bus, linksta);
617 	}
618 }
619 
620 
621 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
622 					   struct pci_dev *bridge, int busnr)
623 {
624 	struct pci_bus *child;
625 	int i;
626 
627 	/*
628 	 * Allocate a new bus, and inherit stuff from the parent..
629 	 */
630 	child = pci_alloc_bus();
631 	if (!child)
632 		return NULL;
633 
634 	child->parent = parent;
635 	child->ops = parent->ops;
636 	child->sysdata = parent->sysdata;
637 	child->bus_flags = parent->bus_flags;
638 
639 	/* initialize some portions of the bus device, but don't register it
640 	 * now as the parent is not properly set up yet.  This device will get
641 	 * registered later in pci_bus_add_devices()
642 	 */
643 	child->dev.class = &pcibus_class;
644 	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
645 
646 	/*
647 	 * Set up the primary, secondary and subordinate
648 	 * bus numbers.
649 	 */
650 	child->number = child->busn_res.start = busnr;
651 	child->primary = parent->busn_res.start;
652 	child->busn_res.end = 0xff;
653 
654 	if (!bridge)
655 		return child;
656 
657 	child->self = bridge;
658 	child->bridge = get_device(&bridge->dev);
659 	pci_set_bus_of_node(child);
660 	pci_set_bus_speed(child);
661 
662 	/* Set up default resource pointers and names.. */
663 	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
664 		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
665 		child->resource[i]->name = child->name;
666 	}
667 	bridge->subordinate = child;
668 
669 	return child;
670 }
671 
672 struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
673 {
674 	struct pci_bus *child;
675 
676 	child = pci_alloc_child_bus(parent, dev, busnr);
677 	if (child) {
678 		down_write(&pci_bus_sem);
679 		list_add_tail(&child->node, &parent->children);
680 		up_write(&pci_bus_sem);
681 	}
682 	return child;
683 }
684 
685 static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
686 {
687 	struct pci_bus *parent = child->parent;
688 
689 	/* Attempts to fix that up are really dangerous unless
690 	   we're going to re-assign all bus numbers. */
691 	if (!pcibios_assign_all_busses())
692 		return;
693 
694 	while (parent->parent && parent->busn_res.end < max) {
695 		parent->busn_res.end = max;
696 		pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
697 		parent = parent->parent;
698 	}
699 }
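/*
 * Example (illustrative): if scanning behind a bridge pushes max to 0x08
 * while a parent bridge still has subordinate bus 0x05, each parent's
 * PCI_SUBORDINATE_BUS is raised to 0x08 on the way up, but only when the
 * platform reassigns bus numbers (pcibios_assign_all_busses()).
 */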
700 
701 /*
702  * If it's a bridge, configure it and scan the bus behind it.
703  * For CardBus bridges, we don't scan behind as the devices will
704  * be handled by the bridge driver itself.
705  *
706  * We need to process bridges in two passes -- first we scan those
707  * already configured by the BIOS and after we are done with all of
708  * them, we proceed to assigning numbers to the remaining buses in
709  * order to avoid overlaps between old and new bus numbers.
710  */
711 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
712 {
713 	struct pci_bus *child;
714 	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
715 	u32 buses, i, j = 0;
716 	u16 bctl;
717 	u8 primary, secondary, subordinate;
718 	int broken = 0;
719 
720 	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
721 	primary = buses & 0xFF;
722 	secondary = (buses >> 8) & 0xFF;
723 	subordinate = (buses >> 16) & 0xFF;
724 
725 	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
726 		secondary, subordinate, pass);
727 
728 	if (!primary && (primary != bus->number) && secondary && subordinate) {
729 		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
730 		primary = bus->number;
731 	}
732 
733 	/* Check if setup is sensible at all */
734 	if (!pass &&
735 	    (primary != bus->number || secondary <= bus->number ||
736 	     secondary > subordinate)) {
737 		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
738 			 secondary, subordinate);
739 		broken = 1;
740 	}
741 
742 	/* Disable Master Abort Mode during probing to avoid reporting
743 	   of bus errors (on some architectures) */
744 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
745 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
746 			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
747 
748 	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
749 	    !is_cardbus && !broken) {
750 		unsigned int cmax;
751 		/*
752 		 * Bus already configured by firmware, process it in the first
753 		 * pass and just note the configuration.
754 		 */
755 		if (pass)
756 			goto out;
757 
758 		/*
759 		 * If we already got to this bus through a different bridge,
760 		 * don't re-add it. This can happen with the i450NX chipset.
761 		 *
762 		 * However, we continue to descend down the hierarchy and
763 		 * scan remaining child buses.
764 		 */
765 		child = pci_find_bus(pci_domain_nr(bus), secondary);
766 		if (!child) {
767 			child = pci_add_new_bus(bus, dev, secondary);
768 			if (!child)
769 				goto out;
770 			child->primary = primary;
771 			pci_bus_insert_busn_res(child, secondary, subordinate);
772 			child->bridge_ctl = bctl;
773 		}
774 
775 		cmax = pci_scan_child_bus(child);
776 		if (cmax > max)
777 			max = cmax;
778 		if (child->busn_res.end > max)
779 			max = child->busn_res.end;
780 	} else {
781 		/*
782 		 * We need to assign a number to this bus which we always
783 		 * do in the second pass.
784 		 */
785 		if (!pass) {
786 			if (pcibios_assign_all_busses() || broken)
787 				/* Temporarily disable forwarding of the
788 				   configuration cycles on all bridges in
789 				   this bus segment to avoid possible
790 				   conflicts in the second pass between two
791 				   bridges programmed with overlapping
792 				   bus ranges. */
793 				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
794 						       buses & ~0xffffff);
795 			goto out;
796 		}
797 
798 		/* Clear errors */
799 		pci_write_config_word(dev, PCI_STATUS, 0xffff);
800 
801 		/* Prevent assigning a bus number that already exists.
802 		 * This can happen when a bridge is hot-plugged, so in
803 		 * this case we only re-scan this bus. */
804 		child = pci_find_bus(pci_domain_nr(bus), max+1);
805 		if (!child) {
806 			child = pci_add_new_bus(bus, dev, ++max);
807 			if (!child)
808 				goto out;
809 			pci_bus_insert_busn_res(child, max, 0xff);
810 		}
811 		buses = (buses & 0xff000000)
812 		      | ((unsigned int)(child->primary)     <<  0)
813 		      | ((unsigned int)(child->busn_res.start)   <<  8)
814 		      | ((unsigned int)(child->busn_res.end) << 16);
815 
816 		/*
817 		 * yenta.c forces a secondary latency timer of 176.
818 		 * Copy that behaviour here.
819 		 */
820 		if (is_cardbus) {
821 			buses &= ~0xff000000;
822 			buses |= CARDBUS_LATENCY_TIMER << 24;
823 		}
824 
825 		/*
826 		 * We need to blast all three values with a single write.
827 		 */
828 		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
829 
830 		if (!is_cardbus) {
831 			child->bridge_ctl = bctl;
832 			/*
833 			 * Adjust subordinate busnr in parent buses.
834 			 * We do this before scanning for children because
835 			 * some devices may not be detected if the BIOS
836 			 * was lazy.
837 			 */
838 			pci_fixup_parent_subordinate_busnr(child, max);
839 			/* Now we can scan all subordinate buses... */
840 			max = pci_scan_child_bus(child);
841 			/*
842 			 * now fix it up again since we have found
843 			 * the real value of max.
844 			 */
845 			pci_fixup_parent_subordinate_busnr(child, max);
846 		} else {
847 			/*
848 			 * For CardBus bridges, we leave 4 bus numbers
849 			 * as cards with a PCI-to-PCI bridge can be
850 			 * inserted later.
851 			 */
852 			for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
853 				struct pci_bus *parent = bus;
854 				if (pci_find_bus(pci_domain_nr(bus),
855 							max+i+1))
856 					break;
857 				while (parent->parent) {
858 					if ((!pcibios_assign_all_busses()) &&
859 					    (parent->busn_res.end > max) &&
860 					    (parent->busn_res.end <= max+i)) {
861 						j = 1;
862 					}
863 					parent = parent->parent;
864 				}
865 				if (j) {
866 					/*
867 					 * Often, there are two cardbus bridges
868 					 * -- try to leave one valid bus number
869 					 * for each one.
870 					 */
871 					i /= 2;
872 					break;
873 				}
874 			}
875 			max += i;
876 			pci_fixup_parent_subordinate_busnr(child, max);
877 		}
878 		/*
879 		 * Set the subordinate bus number to its real value.
880 		 */
881 		pci_bus_update_busn_res_end(child, max);
882 		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
883 	}
884 
885 	sprintf(child->name,
886 		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
887 		pci_domain_nr(bus), child->number);
888 
889 	/* Has only triggered on CardBus, fixup is in yenta_socket */
890 	while (bus->parent) {
891 		if ((child->busn_res.end > bus->busn_res.end) ||
892 		    (child->number > bus->busn_res.end) ||
893 		    (child->number < bus->number) ||
894 		    (child->busn_res.end < bus->number)) {
895 			dev_info(&child->dev, "%pR %s "
896 				"hidden behind%s bridge %s %pR\n",
897 				&child->busn_res,
898 				(bus->number > child->busn_res.end &&
899 				 bus->busn_res.end < child->number) ?
900 					"wholly" : "partially",
901 				bus->self->transparent ? " transparent" : "",
902 				dev_name(&bus->dev),
903 				&bus->busn_res);
904 		}
905 		bus = bus->parent;
906 	}
907 
908 out:
909 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
910 
911 	return max;
912 }
913 
914 /*
915  * Read interrupt line and base address registers.
916  * The architecture-dependent code can tweak these, of course.
917  */
918 static void pci_read_irq(struct pci_dev *dev)
919 {
920 	unsigned char irq;
921 
922 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
923 	dev->pin = irq;
924 	if (irq)
925 		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
926 	dev->irq = irq;
927 }
928 
929 void set_pcie_port_type(struct pci_dev *pdev)
930 {
931 	int pos;
932 	u16 reg16;
933 
934 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
935 	if (!pos)
936 		return;
937 	pdev->is_pcie = 1;
938 	pdev->pcie_cap = pos;
939 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
940 	pdev->pcie_flags_reg = reg16;
941 	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
942 	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
943 }
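/*
 * Note: pcie_flags_reg caches the PCI Express Capabilities register, from
 * which pci_pcie_type() later derives the port type, and pcie_mpss holds
 * the encoded Max Payload Size Supported (the actual size is
 * 128 << pcie_mpss bytes, as used by the MPS code further down).
 */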
944 
945 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
946 {
947 	u32 reg32;
948 
949 	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
950 	if (reg32 & PCI_EXP_SLTCAP_HPC)
951 		pdev->is_hotplug_bridge = 1;
952 }
953 
954 #define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)
955 
956 /**
957  * pci_setup_device - fill in class and map information of a device
958  * @dev: the device structure to fill
959  *
960  * Initialize the device structure with information about the device's
961  * vendor, class, memory and I/O space addresses, IRQ lines, etc.
962  * Called at initialisation of the PCI subsystem and by CardBus services.
963  * Returns 0 on success, or a negative value if the device type is unknown
964  * (not normal, bridge or CardBus).
965  */
966 int pci_setup_device(struct pci_dev *dev)
967 {
968 	u32 class;
969 	u8 hdr_type;
970 	struct pci_slot *slot;
971 	int pos = 0;
972 	struct pci_bus_region region;
973 	struct resource *res;
974 
975 	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
976 		return -EIO;
977 
978 	dev->sysdata = dev->bus->sysdata;
979 	dev->dev.parent = dev->bus->bridge;
980 	dev->dev.bus = &pci_bus_type;
981 	dev->dev.type = &pci_dev_type;
982 	dev->hdr_type = hdr_type & 0x7f;
983 	dev->multifunction = !!(hdr_type & 0x80);
984 	dev->error_state = pci_channel_io_normal;
985 	set_pcie_port_type(dev);
986 
987 	list_for_each_entry(slot, &dev->bus->slots, list)
988 		if (PCI_SLOT(dev->devfn) == slot->number)
989 			dev->slot = slot;
990 
991 	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
992 	   set this higher, assuming the system even supports it.  */
993 	dev->dma_mask = 0xffffffff;
994 
995 	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
996 		     dev->bus->number, PCI_SLOT(dev->devfn),
997 		     PCI_FUNC(dev->devfn));
998 
999 	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
1000 	dev->revision = class & 0xff;
1001 	dev->class = class >> 8;		    /* upper 3 bytes */
1002 
1003 	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1004 		   dev->vendor, dev->device, dev->hdr_type, dev->class);
1005 
1006 	/* need to have dev->class ready */
1007 	dev->cfg_size = pci_cfg_space_size(dev);
1008 
1009 	/* "Unknown power state" */
1010 	dev->current_state = PCI_UNKNOWN;
1011 
1012 	/* Early fixups, before probing the BARs */
1013 	pci_fixup_device(pci_fixup_early, dev);
1014 	/* device class may be changed after fixup */
1015 	class = dev->class >> 8;
1016 
1017 	switch (dev->hdr_type) {		    /* header type */
1018 	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
1019 		if (class == PCI_CLASS_BRIDGE_PCI)
1020 			goto bad;
1021 		pci_read_irq(dev);
1022 		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1023 		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1024 		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
1025 
1026 		/*
1027 		 *	Do the ugly legacy mode stuff here rather than in the broken chip
1028 		 *	quirk code. Legacy mode ATA controllers have fixed
1029 		 *	addresses. These are not always echoed in BAR0-3, and
1030 		 *	BAR0-3 in a few cases contain junk!
1031 		 */
1032 		if (class == PCI_CLASS_STORAGE_IDE) {
1033 			u8 progif;
1034 			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1035 			if ((progif & 1) == 0) {
1036 				region.start = 0x1F0;
1037 				region.end = 0x1F7;
1038 				res = &dev->resource[0];
1039 				res->flags = LEGACY_IO_RESOURCE;
1040 				pcibios_bus_to_resource(dev, res, &region);
1041 				region.start = 0x3F6;
1042 				region.end = 0x3F6;
1043 				res = &dev->resource[1];
1044 				res->flags = LEGACY_IO_RESOURCE;
1045 				pcibios_bus_to_resource(dev, res, &region);
1046 			}
1047 			if ((progif & 4) == 0) {
1048 				region.start = 0x170;
1049 				region.end = 0x177;
1050 				res = &dev->resource[2];
1051 				res->flags = LEGACY_IO_RESOURCE;
1052 				pcibios_bus_to_resource(dev, res, &region);
1053 				region.start = 0x376;
1054 				region.end = 0x376;
1055 				res = &dev->resource[3];
1056 				res->flags = LEGACY_IO_RESOURCE;
1057 				pcibios_bus_to_resource(dev, res, &region);
1058 			}
1059 		}
1060 		break;
1061 
1062 	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
1063 		if (class != PCI_CLASS_BRIDGE_PCI)
1064 			goto bad;
1065 		/* The PCI-to-PCI bridge spec requires that a subtractive
1066 		   decode (i.e. transparent) bridge have a programming
1067 		   interface code of 0x01. */
1068 		pci_read_irq(dev);
1069 		dev->transparent = ((dev->class & 0xff) == 1);
1070 		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1071 		set_pcie_hotplug_bridge(dev);
1072 		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1073 		if (pos) {
1074 			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1075 			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1076 		}
1077 		break;
1078 
1079 	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
1080 		if (class != PCI_CLASS_BRIDGE_CARDBUS)
1081 			goto bad;
1082 		pci_read_irq(dev);
1083 		pci_read_bases(dev, 1, 0);
1084 		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1085 		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1086 		break;
1087 
1088 	default:				    /* unknown header */
1089 		dev_err(&dev->dev, "unknown header type %02x, "
1090 			"ignoring device\n", dev->hdr_type);
1091 		return -EIO;
1092 
1093 	bad:
1094 		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header "
1095 			"type %02x)\n", dev->class, dev->hdr_type);
1096 		dev->class = PCI_CLASS_NOT_DEFINED;
1097 	}
1098 
1099 	/* We found a fine healthy device, go go go... */
1100 	return 0;
1101 }
1102 
1103 static void pci_release_capabilities(struct pci_dev *dev)
1104 {
1105 	pci_vpd_release(dev);
1106 	pci_iov_release(dev);
1107 	pci_free_cap_save_buffers(dev);
1108 }
1109 
1110 /**
1111  * pci_release_dev - free a pci device structure when all users of it are finished.
1112  * @dev: device that's been disconnected
1113  *
1114  * Will be called only by the device core when all users of this pci device are
1115  * done.
1116  */
1117 static void pci_release_dev(struct device *dev)
1118 {
1119 	struct pci_dev *pci_dev;
1120 
1121 	pci_dev = to_pci_dev(dev);
1122 	pci_release_capabilities(pci_dev);
1123 	pci_release_of_node(pci_dev);
1124 	kfree(pci_dev);
1125 }
1126 
1127 /**
1128  * pci_cfg_space_size - get the configuration space size of the PCI device.
1129  * @dev: PCI device
1130  *
1131  * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1132  * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
1133  * access it.  Maybe we don't have a way to generate extended config space
1134  * accesses, or the device is behind a reverse Express bridge.  So we try
1135  * reading the dword at 0x100 which must either be 0 or a valid extended
1136  * capability header.
1137  */
1138 int pci_cfg_space_size_ext(struct pci_dev *dev)
1139 {
1140 	u32 status;
1141 	int pos = PCI_CFG_SPACE_SIZE;
1142 
1143 	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1144 		goto fail;
1145 	if (status == 0xffffffff)
1146 		goto fail;
1147 
1148 	return PCI_CFG_SPACE_EXP_SIZE;
1149 
1150  fail:
1151 	return PCI_CFG_SPACE_SIZE;
1152 }
1153 
1154 int pci_cfg_space_size(struct pci_dev *dev)
1155 {
1156 	int pos;
1157 	u32 status;
1158 	u16 class;
1159 
1160 	class = dev->class >> 8;
1161 	if (class == PCI_CLASS_BRIDGE_HOST)
1162 		return pci_cfg_space_size_ext(dev);
1163 
1164 	if (!pci_is_pcie(dev)) {
1165 		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1166 		if (!pos)
1167 			goto fail;
1168 
1169 		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1170 		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
1171 			goto fail;
1172 	}
1173 
1174 	return pci_cfg_space_size_ext(dev);
1175 
1176  fail:
1177 	return PCI_CFG_SPACE_SIZE;
1178 }
1179 
1180 static void pci_release_bus_bridge_dev(struct device *dev)
1181 {
1182 	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
1183 
1184 	if (bridge->release_fn)
1185 		bridge->release_fn(bridge);
1186 
1187 	pci_free_resource_list(&bridge->windows);
1188 
1189 	kfree(bridge);
1190 }
1191 
1192 struct pci_dev *alloc_pci_dev(void)
1193 {
1194 	struct pci_dev *dev;
1195 
1196 	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1197 	if (!dev)
1198 		return NULL;
1199 
1200 	INIT_LIST_HEAD(&dev->bus_list);
1201 
1202 	return dev;
1203 }
1204 EXPORT_SYMBOL(alloc_pci_dev);
1205 
1206 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1207 				 int crs_timeout)
1208 {
1209 	int delay = 1;
1210 
1211 	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1212 		return false;
1213 
1214 	/* some broken boards return 0 or ~0 if a slot is empty: */
1215 	if (*l == 0xffffffff || *l == 0x00000000 ||
1216 	    *l == 0x0000ffff || *l == 0xffff0000)
1217 		return false;
1218 
1219 	/* Configuration Request Retry Status; CRS makes the vendor ID read as 0x0001 */
1220 	while (*l == 0xffff0001) {
1221 		if (!crs_timeout)
1222 			return false;
1223 
1224 		msleep(delay);
1225 		delay *= 2;
1226 		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1227 			return false;
1228 		/* Card hasn't responded in 60 seconds?  Must be stuck. */
1229 		if (delay > crs_timeout) {
1230 			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
1231 					"responding\n", pci_domain_nr(bus),
1232 					bus->number, PCI_SLOT(devfn),
1233 					PCI_FUNC(devfn));
1234 			return false;
1235 		}
1236 	}
1237 
1238 	return true;
1239 }
1240 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1241 
1242 /*
1243  * Read the config data for a PCI device, sanity-check it
1244  * and fill in the dev structure...
1245  */
1246 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1247 {
1248 	struct pci_dev *dev;
1249 	u32 l;
1250 
1251 	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1252 		return NULL;
1253 
1254 	dev = alloc_pci_dev();
1255 	if (!dev)
1256 		return NULL;
1257 
1258 	dev->bus = bus;
1259 	dev->devfn = devfn;
1260 	dev->vendor = l & 0xffff;
1261 	dev->device = (l >> 16) & 0xffff;
1262 
1263 	pci_set_of_node(dev);
1264 
1265 	if (pci_setup_device(dev)) {
1266 		kfree(dev);
1267 		return NULL;
1268 	}
1269 
1270 	return dev;
1271 }
1272 
1273 static void pci_init_capabilities(struct pci_dev *dev)
1274 {
1275 	/* MSI/MSI-X list */
1276 	pci_msi_init_pci_dev(dev);
1277 
1278 	/* Buffers for saving PCIe and PCI-X capabilities */
1279 	pci_allocate_cap_save_buffers(dev);
1280 
1281 	/* Power Management */
1282 	pci_pm_init(dev);
1283 	platform_pci_wakeup_init(dev);
1284 
1285 	/* Vital Product Data */
1286 	pci_vpd_pci22_init(dev);
1287 
1288 	/* Alternative Routing-ID Forwarding */
1289 	pci_enable_ari(dev);
1290 
1291 	/* Single Root I/O Virtualization */
1292 	pci_iov_init(dev);
1293 
1294 	/* Enable ACS P2P upstream forwarding */
1295 	pci_enable_acs(dev);
1296 }
1297 
1298 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1299 {
1300 	device_initialize(&dev->dev);
1301 	dev->dev.release = pci_release_dev;
1302 	pci_dev_get(dev);
1303 
1304 	dev->dev.dma_mask = &dev->dma_mask;
1305 	dev->dev.dma_parms = &dev->dma_parms;
1306 	dev->dev.coherent_dma_mask = 0xffffffffull;
1307 
1308 	pci_set_dma_max_seg_size(dev, 65536);
1309 	pci_set_dma_seg_boundary(dev, 0xffffffff);
1310 
1311 	/* Fix up broken headers */
1312 	pci_fixup_device(pci_fixup_header, dev);
1313 
1314 	/* moved out from quirk header fixup code */
1315 	pci_reassigndev_resource_alignment(dev);
1316 
1317 	/* Clear the state_saved flag. */
1318 	dev->state_saved = false;
1319 
1320 	/* Initialize various capabilities */
1321 	pci_init_capabilities(dev);
1322 
1323 	/*
1324 	 * Add the device to our list of discovered devices
1325 	 * and the bus list for fixup functions, etc.
1326 	 */
1327 	down_write(&pci_bus_sem);
1328 	list_add_tail(&dev->bus_list, &bus->devices);
1329 	up_write(&pci_bus_sem);
1330 }
1331 
1332 struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
1333 {
1334 	struct pci_dev *dev;
1335 
1336 	dev = pci_get_slot(bus, devfn);
1337 	if (dev) {
1338 		pci_dev_put(dev);
1339 		return dev;
1340 	}
1341 
1342 	dev = pci_scan_device(bus, devfn);
1343 	if (!dev)
1344 		return NULL;
1345 
1346 	pci_device_add(dev, bus);
1347 
1348 	return dev;
1349 }
1350 EXPORT_SYMBOL(pci_scan_single_device);
1351 
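/*
 * With ARI, the device itself advertises its next implemented function:
 * offset 4 of the ARI extended capability is the ARI Capability register,
 * whose upper byte holds the Next Function Number read below.
 */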
1352 static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn)
1353 {
1354 	u16 cap;
1355 	unsigned pos, next_fn;
1356 
1357 	if (!dev)
1358 		return 0;
1359 
1360 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1361 	if (!pos)
1362 		return 0;
1363 	pci_read_config_word(dev, pos + 4, &cap);
1364 	next_fn = cap >> 8;
1365 	if (next_fn <= fn)
1366 		return 0;
1367 	return next_fn;
1368 }
1369 
1370 static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn)
1371 {
1372 	return (fn + 1) % 8;
1373 }
1374 
1375 static unsigned no_next_fn(struct pci_dev *dev, unsigned fn)
1376 {
1377 	return 0;
1378 }
1379 
1380 static int only_one_child(struct pci_bus *bus)
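/*
 * PCIe links are point-to-point, so below a root port or a switch
 * downstream port only device 0 can exist; the other device numbers are
 * not scanned unless PCI_SCAN_ALL_PCIE_DEVS is set.
 */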
1381 {
1382 	struct pci_dev *parent = bus->self;
1383 
1384 	if (!parent || !pci_is_pcie(parent))
1385 		return 0;
1386 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1387 		return 1;
1388 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
1389 	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1390 		return 1;
1391 	return 0;
1392 }
1393 
1394 /**
1395  * pci_scan_slot - scan a PCI slot on a bus for devices.
1396  * @bus: PCI bus to scan
1397  * @devfn: slot number to scan (must have function number 0)
1398  *
1399  * Scan a PCI slot on the specified PCI bus for devices, adding
1400  * discovered devices to the @bus->devices list.  New devices
1401  * will not have is_added set.
1402  *
1403  * Returns the number of new devices found.
1404  */
1405 int pci_scan_slot(struct pci_bus *bus, int devfn)
1406 {
1407 	unsigned fn, nr = 0;
1408 	struct pci_dev *dev;
1409 	unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn;
1410 
1411 	if (only_one_child(bus) && (devfn > 0))
1412 		return 0; /* Already scanned the entire slot */
1413 
1414 	dev = pci_scan_single_device(bus, devfn);
1415 	if (!dev)
1416 		return 0;
1417 	if (!dev->is_added)
1418 		nr++;
1419 
1420 	if (pci_ari_enabled(bus))
1421 		next_fn = next_ari_fn;
1422 	else if (dev->multifunction)
1423 		next_fn = next_trad_fn;
1424 
1425 	for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) {
1426 		dev = pci_scan_single_device(bus, devfn + fn);
1427 		if (dev) {
1428 			if (!dev->is_added)
1429 				nr++;
1430 			dev->multifunction = 1;
1431 		}
1432 	}
1433 
1434 	/* Set up ASPM on the upstream link once devices have been found */
1435 	if (bus->self && nr)
1436 		pcie_aspm_init_link_state(bus->self);
1437 
1438 	return nr;
1439 }
1440 
1441 static int pcie_find_smpss(struct pci_dev *dev, void *data)
1442 {
1443 	u8 *smpss = data;
1444 
1445 	if (!pci_is_pcie(dev))
1446 		return 0;
1447 
1448 	/* For PCIe hotplug-enabled slots not connected directly to a
1449 	 * PCIe root port, there can be problems when hotplugging
1450 	 * devices.  This is due to the possibility of hotplugging a
1451 	 * device into the fabric with a smaller MPS than the devices
1452 	 * currently running have configured.  Modifying the MPS on the
1453 	 * running devices could cause a fatal bus error due to an
1454 	 * incoming frame being larger than the newly configured MPS.
1455 	 * To work around this, the MPS for the entire fabric must be
1456 	 * set to the minimum size.  Any devices hotplugged into this
1457 	 * fabric will have the minimum MPS set.  If the PCI hotplug
1458 	 * slot is directly connected to the root port and there are no
1459 	 * other devices on the fabric (which seems to be the most
1460 	 * common case), then this is not an issue and MPS discovery
1461 	 * will occur as normal.
1462 	 */
1463 	if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
1464 	     (dev->bus->self &&
1465 	      pci_pcie_type(dev->bus->self) != PCI_EXP_TYPE_ROOT_PORT)))
1466 		*smpss = 0;
1467 
1468 	if (*smpss > dev->pcie_mpss)
1469 		*smpss = dev->pcie_mpss;
1470 
1471 	return 0;
1472 }
1473 
1474 static void pcie_write_mps(struct pci_dev *dev, int mps)
1475 {
1476 	int rc;
1477 
1478 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
1479 		mps = 128 << dev->pcie_mpss;
1480 
1481 		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
1482 		    dev->bus->self)
1483 			/* For "Performance", the assumption is made that
1484 			 * downstream communication will never be larger than
1485 			 * the MRRS.  So, the MPS only needs to be configured
1486 			 * for the upstream communication.  This being the case,
1487 			 * walk from the top down and set the MPS of the child
1488 			 * to that of the parent bus.
1489 			 *
1490 			 * Configure the device MPS with the smaller of the
1491 			 * device MPSS or the bridge MPS (which is assumed to be
1492 			 * properly configured at this point to the largest
1493 			 * allowable MPS based on its parent bus).
1494 			 */
1495 			mps = min(mps, pcie_get_mps(dev->bus->self));
1496 	}
1497 
1498 	rc = pcie_set_mps(dev, mps);
1499 	if (rc)
1500 		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
1501 }
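/*
 * Example (illustrative): a device with pcie_mpss = 2 supports an MPS of
 * 128 << 2 = 512 bytes; in PCIE_BUS_PERFORMANCE mode that is further
 * clamped to the MPS already configured on the parent bridge (for
 * non-root-port devices below a bridge).
 */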
1502 
1503 static void pcie_write_mrrs(struct pci_dev *dev)
1504 {
1505 	int rc, mrrs;
1506 
1507 	/* In the "safe" case, do not configure the MRRS.  There appear to be
1508 	 * issues with setting MRRS to 0 on a number of devices.
1509 	 */
1510 	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1511 		return;
1512 
1513 	/* For Max performance, the MRRS must be set to the largest supported
1514 	 * value.  However, it cannot be configured larger than the MPS the
1515 	 * device or the bus can support.  This should already be properly
1516 	 * configured by a prior call to pcie_write_mps.
1517 	 */
1518 	mrrs = pcie_get_mps(dev);
1519 
1520 	/* MRRS is an R/W register.  Invalid values can be written, but a
1521 	 * subsequent read will verify if the value is acceptable or not.
1522 	 * If the MRRS value provided is not acceptable (e.g., too large),
1523 	 * shrink the value until it is acceptable to the HW.
1524 	 */
1525 	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1526 		rc = pcie_set_readrq(dev, mrrs);
1527 		if (!rc)
1528 			break;
1529 
1530 		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
1531 		mrrs /= 2;
1532 	}
1533 
1534 	if (mrrs < 128)
1535 		dev_err(&dev->dev, "MRRS was unable to be configured with a "
1536 			"safe value.  If problems are experienced, try running "
1537 			"with pci=pcie_bus_safe.\n");
1538 }
1539 
1540 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1541 {
1542 	int mps, orig_mps;
1543 
1544 	if (!pci_is_pcie(dev))
1545 		return 0;
1546 
1547 	mps = 128 << *(u8 *)data;
1548 	orig_mps = pcie_get_mps(dev);
1549 
1550 	pcie_write_mps(dev, mps);
1551 	pcie_write_mrrs(dev);
1552 
1553 	dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), "
1554 		 "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
1555 		 orig_mps, pcie_get_readrq(dev));
1556 
1557 	return 0;
1558 }
1559 
1560 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
1561  * parents then children fashion.  If this changes, then this code will not
1562  * work as designed.
1563  */
1564 void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
1565 {
1566 	u8 smpss;
1567 
1568 	if (!pci_is_pcie(bus->self))
1569 		return;
1570 
1571 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
1572 		return;
1573 
1574 	/* FIXME - Peer-to-peer DMA is possible, though the endpoint would need
1575 	 * to be aware of the MPS of the destination.  To work around this,
1576 	 * simply force the MPS of the entire system to the smallest possible.
1577 	 */
1578 	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
1579 		smpss = 0;
1580 
1581 	if (pcie_bus_config == PCIE_BUS_SAFE) {
1582 		smpss = mpss;
1583 
1584 		pcie_find_smpss(bus->self, &smpss);
1585 		pci_walk_bus(bus, pcie_find_smpss, &smpss);
1586 	}
1587 
1588 	pcie_bus_configure_set(bus->self, &smpss);
1589 	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1590 }
1591 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
1592 
1593 unsigned int pci_scan_child_bus(struct pci_bus *bus)
1594 {
1595 	unsigned int devfn, pass, max = bus->busn_res.start;
1596 	struct pci_dev *dev;
1597 
1598 	dev_dbg(&bus->dev, "scanning bus\n");
1599 
1600 	/* Go find them, Rover! */
1601 	for (devfn = 0; devfn < 0x100; devfn += 8)
1602 		pci_scan_slot(bus, devfn);
1603 
1604 	/* Reserve buses for SR-IOV capability. */
1605 	max += pci_iov_bus_range(bus);
1606 
1607 	/*
1608 	 * After performing arch-dependent fixup of the bus, look behind
1609 	 * all PCI-to-PCI bridges on this bus.
1610 	 */
1611 	if (!bus->is_added) {
1612 		dev_dbg(&bus->dev, "fixups for bus\n");
1613 		pcibios_fixup_bus(bus);
1614 		if (pci_is_root_bus(bus))
1615 			bus->is_added = 1;
1616 	}
1617 
1618 	for (pass=0; pass < 2; pass++)
1619 		list_for_each_entry(dev, &bus->devices, bus_list) {
1620 			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
1621 			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
1622 				max = pci_scan_bridge(bus, dev, max, pass);
1623 		}
1624 
1625 	/*
1626 	 * We've scanned the bus and so we know all about what's on
1627 	 * the other side of any bridges that may be on this bus plus
1628 	 * any devices.
1629 	 *
1630 	 * Return how far we've got finding sub-buses.
1631 	 */
1632 	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
1633 	return max;
1634 }
1635 
1636 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1637 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
1638 {
1639 	int error;
1640 	struct pci_host_bridge *bridge;
1641 	struct pci_bus *b, *b2;
1642 	struct pci_host_bridge_window *window, *n;
1643 	struct resource *res;
1644 	resource_size_t offset;
1645 	char bus_addr[64];
1646 	char *fmt;
1647 
1648 
1649 	b = pci_alloc_bus();
1650 	if (!b)
1651 		return NULL;
1652 
1653 	b->sysdata = sysdata;
1654 	b->ops = ops;
1655 	b2 = pci_find_bus(pci_domain_nr(b), bus);
1656 	if (b2) {
1657 		/* If we already got to this bus through a different bridge, ignore it */
1658 		dev_dbg(&b2->dev, "bus already known\n");
1659 		goto err_out;
1660 	}
1661 
1662 	bridge = pci_alloc_host_bridge(b);
1663 	if (!bridge)
1664 		goto err_out;
1665 
1666 	bridge->dev.parent = parent;
1667 	bridge->dev.release = pci_release_bus_bridge_dev;
1668 	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
1669 	error = device_register(&bridge->dev);
1670 	if (error)
1671 		goto bridge_dev_reg_err;
1672 	b->bridge = get_device(&bridge->dev);
1673 	device_enable_async_suspend(b->bridge);
1674 	pci_set_bus_of_node(b);
1675 
1676 	if (!parent)
1677 		set_dev_node(b->bridge, pcibus_to_node(b));
1678 
1679 	b->dev.class = &pcibus_class;
1680 	b->dev.parent = b->bridge;
1681 	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
1682 	error = device_register(&b->dev);
1683 	if (error)
1684 		goto class_dev_reg_err;
1685 
1686 	/* Create legacy_io and legacy_mem files for this bus */
1687 	pci_create_legacy_files(b);
1688 
1689 	b->number = b->busn_res.start = bus;
1690 
1691 	if (parent)
1692 		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
1693 	else
1694 		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
1695 
1696 	/* Add initial resources to the bus */
1697 	list_for_each_entry_safe(window, n, resources, list) {
1698 		list_move_tail(&window->list, &bridge->windows);
1699 		res = window->res;
1700 		offset = window->offset;
1701 		if (res->flags & IORESOURCE_BUS)
1702 			pci_bus_insert_busn_res(b, bus, res->end);
1703 		else
1704 			pci_bus_add_resource(b, res, 0);
1705 		if (offset) {
1706 			if (resource_type(res) == IORESOURCE_IO)
1707 				fmt = " (bus address [%#06llx-%#06llx])";
1708 			else
1709 				fmt = " (bus address [%#010llx-%#010llx])";
1710 			snprintf(bus_addr, sizeof(bus_addr), fmt,
1711 				 (unsigned long long) (res->start - offset),
1712 				 (unsigned long long) (res->end - offset));
1713 		} else
1714 			bus_addr[0] = '\0';
1715 		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
1716 	}
1717 
1718 	down_write(&pci_bus_sem);
1719 	list_add_tail(&b->node, &pci_root_buses);
1720 	up_write(&pci_bus_sem);
1721 
1722 	return b;
1723 
1724 class_dev_reg_err:
1725 	put_device(&bridge->dev);
1726 	device_unregister(&bridge->dev);
1727 bridge_dev_reg_err:
1728 	kfree(bridge);
1729 err_out:
1730 	kfree(b);
1731 	return NULL;
1732 }
1733 
1734 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
1735 {
1736 	struct resource *res = &b->busn_res;
1737 	struct resource *parent_res, *conflict;
1738 
1739 	res->start = bus;
1740 	res->end = bus_max;
1741 	res->flags = IORESOURCE_BUS;
1742 
1743 	if (!pci_is_root_bus(b))
1744 		parent_res = &b->parent->busn_res;
1745 	else {
1746 		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
1747 		res->flags |= IORESOURCE_PCI_FIXED;
1748 	}
1749 
1750 	conflict = insert_resource_conflict(parent_res, res);
1751 
1752 	if (conflict)
1753 		dev_printk(KERN_DEBUG, &b->dev,
1754 			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
1755 			    res, pci_is_root_bus(b) ? "domain " : "",
1756 			    parent_res, conflict->name, conflict);
1757 
1758 	return conflict == NULL;
1759 }
1760 
1761 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
1762 {
1763 	struct resource *res = &b->busn_res;
1764 	struct resource old_res = *res;
1765 	resource_size_t size;
1766 	int ret;
1767 
1768 	if (res->start > bus_max)
1769 		return -EINVAL;
1770 
1771 	size = bus_max - res->start + 1;
1772 	ret = adjust_resource(res, res->start, size);
1773 	dev_printk(KERN_DEBUG, &b->dev,
1774 			"busn_res: %pR end %s updated to %02x\n",
1775 			&old_res, ret ? "can not be" : "is", bus_max);
1776 
1777 	if (!ret && !res->parent)
1778 		pci_bus_insert_busn_res(b, res->start, res->end);
1779 
1780 	return ret;
1781 }
1782 
1783 void pci_bus_release_busn_res(struct pci_bus *b)
1784 {
1785 	struct resource *res = &b->busn_res;
1786 	int ret;
1787 
1788 	if (!res->flags || !res->parent)
1789 		return;
1790 
1791 	ret = release_resource(res);
1792 	dev_printk(KERN_DEBUG, &b->dev,
1793 			"busn_res: %pR %s released\n",
1794 			res, ret ? "can not be" : "is");
1795 }
1796 
1797 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1798 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
1799 {
1800 	struct pci_host_bridge_window *window;
1801 	bool found = false;
1802 	struct pci_bus *b;
1803 	int max;
1804 
1805 	list_for_each_entry(window, resources, list)
1806 		if (window->res->flags & IORESOURCE_BUS) {
1807 			found = true;
1808 			break;
1809 		}
1810 
1811 	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
1812 	if (!b)
1813 		return NULL;
1814 
1815 	if (!found) {
1816 		dev_info(&b->dev,
1817 		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
1818 			bus);
1819 		pci_bus_insert_busn_res(b, bus, 255);
1820 	}
1821 
1822 	max = pci_scan_child_bus(b);
1823 
1824 	if (!found)
1825 		pci_bus_update_busn_res_end(b, max);
1826 
1827 	pci_bus_add_devices(b);
1828 	return b;
1829 }
1830 EXPORT_SYMBOL(pci_scan_root_bus);
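/*
 * Illustrative use (a sketch, not taken from a real driver): a host bridge
 * driver typically builds a resource list and hands it to
 * pci_scan_root_bus(), much as pci_scan_bus() below does:
 *
 *	LIST_HEAD(resources);
 *	struct pci_bus *b;
 *
 *	pci_add_resource(&resources, &ioport_resource);
 *	pci_add_resource(&resources, &iomem_resource);
 *	pci_add_resource(&resources, &busn_resource);
 *	b = pci_scan_root_bus(NULL, 0, &my_pci_ops, my_sysdata, &resources);
 *
 * Here my_pci_ops and my_sysdata stand in for driver-provided values.
 */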
1831 
1832 /* Deprecated; use pci_scan_root_bus() instead */
1833 struct pci_bus *pci_scan_bus_parented(struct device *parent,
1834 		int bus, struct pci_ops *ops, void *sysdata)
1835 {
1836 	LIST_HEAD(resources);
1837 	struct pci_bus *b;
1838 
1839 	pci_add_resource(&resources, &ioport_resource);
1840 	pci_add_resource(&resources, &iomem_resource);
1841 	pci_add_resource(&resources, &busn_resource);
1842 	b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
1843 	if (b)
1844 		pci_scan_child_bus(b);
1845 	else
1846 		pci_free_resource_list(&resources);
1847 	return b;
1848 }
1849 EXPORT_SYMBOL(pci_scan_bus_parented);
1850 
1851 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
1852 					void *sysdata)
1853 {
1854 	LIST_HEAD(resources);
1855 	struct pci_bus *b;
1856 
1857 	pci_add_resource(&resources, &ioport_resource);
1858 	pci_add_resource(&resources, &iomem_resource);
1859 	pci_add_resource(&resources, &busn_resource);
1860 	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
1861 	if (b) {
1862 		pci_scan_child_bus(b);
1863 		pci_bus_add_devices(b);
1864 	} else {
1865 		pci_free_resource_list(&resources);
1866 	}
1867 	return b;
1868 }
1869 EXPORT_SYMBOL(pci_scan_bus);
1870 
1871 /**
1872  * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
1873  * @bridge: PCI bridge for the bus to scan
1874  *
1875  * Scan a PCI bus and child buses for new devices, add them,
1876  * and enable them, resizing the bridge MMIO/IO resources if necessary
1877  * and possible.  The caller must ensure the child devices are already
1878  * removed for resizing to occur.
1879  *
1880  * Returns the highest subordinate bus number discovered.
1881  */
1882 unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
1883 {
1884 	unsigned int max;
1885 	struct pci_bus *bus = bridge->subordinate;
1886 
1887 	max = pci_scan_child_bus(bus);
1888 
1889 	pci_assign_unassigned_bridge_resources(bridge);
1890 
1891 	pci_bus_add_devices(bus);
1892 
1893 	return max;
1894 }
1895 
1896 /**
1897  * pci_rescan_bus - scan a PCI bus for devices.
1898  * @bus: PCI bus to scan
1899  *
1900  * Scan a PCI bus and child buses for new devices, add them,
1901  * and enable them.
1902  *
1903  * Returns the highest subordinate bus number discovered.
1904  */
1905 unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
1906 {
1907 	unsigned int max;
1908 
1909 	max = pci_scan_child_bus(bus);
1910 	pci_assign_unassigned_bus_resources(bus);
1911 	pci_enable_bridges(bus);
1912 	pci_bus_add_devices(bus);
1913 
1914 	return max;
1915 }
1916 EXPORT_SYMBOL_GPL(pci_rescan_bus);
1917 
1918 EXPORT_SYMBOL(pci_add_new_bus);
1919 EXPORT_SYMBOL(pci_scan_slot);
1920 EXPORT_SYMBOL(pci_scan_bridge);
1921 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1922 
1923 static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
1924 {
1925 	const struct pci_dev *a = to_pci_dev(d_a);
1926 	const struct pci_dev *b = to_pci_dev(d_b);
1927 
1928 	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
1929 	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
1930 
1931 	if      (a->bus->number < b->bus->number) return -1;
1932 	else if (a->bus->number > b->bus->number) return  1;
1933 
1934 	if      (a->devfn < b->devfn) return -1;
1935 	else if (a->devfn > b->devfn) return  1;
1936 
1937 	return 0;
1938 }
1939 
1940 void __init pci_sort_breadthfirst(void)
1941 {
1942 	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
1943 }
1944