xref: /openbmc/linux/drivers/pci/probe.c (revision 9344dade)
1 /*
2  * probe.c - PCI detection and setup code
3  */
4 
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/cpumask.h>
12 #include <linux/pci-aspm.h>
13 #include <asm-generic/pci-bridge.h>
14 #include "pci.h"
15 
16 #define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
17 #define CARDBUS_RESERVE_BUSNR	3
18 
/* Default bus-number aperture: the full 00-ff range of a PCI domain */
struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};
25 
26 /* Ugh.  Need to stop exporting this to modules. */
27 LIST_HEAD(pci_root_buses);
28 EXPORT_SYMBOL(pci_root_buses);
29 
/* Lazily-created per-domain bus number apertures; see get_pci_domain_busn_res() */
static LIST_HEAD(pci_domain_busn_res_list);

/* One bus-number aperture per PCI domain */
struct pci_domain_busn_res {
	struct list_head list;	/* entry on pci_domain_busn_res_list */
	struct resource res;	/* the [bus 00-ff] aperture itself */
	int domain_nr;		/* domain this aperture belongs to */
};
37 
38 static struct resource *get_pci_domain_busn_res(int domain_nr)
39 {
40 	struct pci_domain_busn_res *r;
41 
42 	list_for_each_entry(r, &pci_domain_busn_res_list, list)
43 		if (r->domain_nr == domain_nr)
44 			return &r->res;
45 
46 	r = kzalloc(sizeof(*r), GFP_KERNEL);
47 	if (!r)
48 		return NULL;
49 
50 	r->domain_nr = domain_nr;
51 	r->res.start = 0;
52 	r->res.end = 0xff;
53 	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
54 
55 	list_add_tail(&r->list, &pci_domain_busn_res_list);
56 
57 	return &r->res;
58 }
59 
/* bus_find_device() match callback that accepts every device */
static int find_anything(struct device *unused_dev, void *unused_data)
{
	return 1;
}
64 
65 /*
66  * Some device drivers need know if pci is initiated.
67  * Basically, we think pci is not initiated when there
68  * is no device to be found on the pci_bus_type.
69  */
70 int no_pci_devices(void)
71 {
72 	struct device *dev;
73 	int no_devices;
74 
75 	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
76 	no_devices = (dev == NULL);
77 	put_device(dev);
78 	return no_devices;
79 }
80 EXPORT_SYMBOL(no_pci_devices);
81 
82 /*
83  * PCI Bus Class
84  */
85 static void release_pcibus_dev(struct device *dev)
86 {
87 	struct pci_bus *pci_bus = to_pci_bus(dev);
88 
89 	if (pci_bus->bridge)
90 		put_device(pci_bus->bridge);
91 	pci_bus_remove_resources(pci_bus);
92 	pci_release_bus_of_node(pci_bus);
93 	kfree(pci_bus);
94 }
95 
/* sysfs class ("pci_bus") under which every PCI bus device is registered */
static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_attrs	= pcibus_dev_attrs,
};
101 
/* Register the pci_bus class early, before buses are scanned */
static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);
107 
108 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
109 {
110 	u64 size = mask & maxbase;	/* Find the significant bits */
111 	if (!size)
112 		return 0;
113 
114 	/* Get the lowest of them to find the decode size, and
115 	   from that the extent.  */
116 	size = (size & ~(size-1)) - 1;
117 
118 	/* base == maxbase can be valid only if the BAR has
119 	   already been programmed with all 1s.  */
120 	if (base == maxbase && ((base | size) & mask) != mask)
121 		return 0;
122 
123 	return size;
124 }
125 
126 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
127 {
128 	u32 mem_type;
129 	unsigned long flags;
130 
131 	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
132 		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
133 		flags |= IORESOURCE_IO;
134 		return flags;
135 	}
136 
137 	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
138 	flags |= IORESOURCE_MEM;
139 	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
140 		flags |= IORESOURCE_PREFETCH;
141 
142 	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
143 	switch (mem_type) {
144 	case PCI_BASE_ADDRESS_MEM_TYPE_32:
145 		break;
146 	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
147 		/* 1M mem BAR treated as 32-bit BAR */
148 		break;
149 	case PCI_BASE_ADDRESS_MEM_TYPE_64:
150 		flags |= IORESOURCE_MEM_64;
151 		break;
152 	default:
153 		/* mem unknown type treated as 32-bit BAR */
154 		break;
155 	}
156 	return flags;
157 }
158 
/**
 * __pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Sizes the BAR by writing all ones and reading back the size mask,
 * decodes its type, and converts the bus address range into @res.
 * Decoding is temporarily turned off around the sizing writes unless
 * the device has mmio_always_on set.  On failure @res->flags is cleared.
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
			struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;
	bool bar_too_big = false, bar_disabled = false;

	/* ROM BARs have a reserved enable bit; size only the address bits */
	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		pci_write_config_word(dev, PCI_COMMAND,
			orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));
	}

	res->name = pci_name(dev);

	/* Size the BAR: save, write all ones, read size mask, restore */
	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (!sz || sz == 0xffffffff)
		goto fail;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l &= PCI_BASE_ADDRESS_IO_MASK;
			mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
		} else {
			l &= PCI_BASE_ADDRESS_MEM_MASK;
			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		/* ROM BAR: keep the enable bit in the flags */
		res->flags |= (l & IORESOURCE_ROM_ENABLE);
		l &= PCI_ROM_ADDRESS_MASK;
		mask = (u32)PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		u64 l64 = l;
		u64 sz64 = sz;
		u64 mask64 = mask | (u64)~0 << 32;

		/* Size the upper dword of the 64-bit BAR the same way */
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);

		sz64 = pci_size(l64, sz64, mask64);

		if (!sz64)
			goto fail;

		/* A >4GB BAR cannot be represented in a 32-bit resource */
		if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
			bar_too_big = true;
			goto fail;
		}

		if ((sizeof(resource_size_t) < 8) && l) {
			/* Address above 32-bit boundary; disable the BAR */
			pci_write_config_dword(dev, pos, 0);
			pci_write_config_dword(dev, pos + 4, 0);
			region.start = 0;
			region.end = sz64;
			bar_disabled = true;
		} else {
			region.start = l64;
			region.end = l64 + sz64;
		}
	} else {
		sz = pci_size(l, sz, mask);

		if (!sz)
			goto fail;

		/* pci_size() returns "size - 1", so this is inclusive */
		region.start = l;
		region.end = l + sz;
	}

	pcibios_bus_to_resource(dev, res, &region);
	pcibios_resource_to_bus(dev, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU.  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %pa invalid; forcing reassignment\n",
			 pos, &region.start);
		res->flags |= IORESOURCE_UNSET;
		res->end -= res->start;
		res->start = 0;
	}

	goto out;


fail:
	res->flags = 0;
out:
	/* Re-enable decoding if we turned it off above */
	if (!dev->mmio_always_on)
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (bar_too_big)
		dev_err(&dev->dev, "reg 0x%x: can't handle 64-bit BAR\n", pos);
	if (res->flags && !bar_disabled)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
306 
307 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
308 {
309 	unsigned int pos, reg;
310 
311 	for (pos = 0; pos < howmany; pos++) {
312 		struct resource *res = &dev->resource[pos];
313 		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
314 		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
315 	}
316 
317 	if (rom) {
318 		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
319 		dev->rom_base_reg = rom;
320 		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
321 				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
322 				IORESOURCE_SIZEALIGN;
323 		__pci_read_base(dev, pci_bar_mem32, res, rom);
324 	}
325 }
326 
/*
 * Decode the I/O window of the bridge leading to @child and fill in the
 * child's I/O bridge-window resource.  A window with base > limit is
 * disabled and left untouched.
 */
static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	/* The byte registers carry the upper bits of a 16-bit I/O address */
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		/* 32-bit I/O addressing: bits 31:16 come from the upper regs */
		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		/* The limit addresses the last granule of the window */
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
366 
367 static void pci_read_bridge_mmio(struct pci_bus *child)
368 {
369 	struct pci_dev *dev = child->self;
370 	u16 mem_base_lo, mem_limit_lo;
371 	unsigned long base, limit;
372 	struct pci_bus_region region;
373 	struct resource *res;
374 
375 	res = child->resource[1];
376 	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
377 	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
378 	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
379 	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
380 	if (base <= limit) {
381 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
382 		region.start = base;
383 		region.end = limit + 0xfffff;
384 		pcibios_bus_to_resource(dev, res, &region);
385 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
386 	}
387 }
388 
/*
 * Decode the prefetchable memory window of the bridge leading to
 * @child, including the optional 64-bit upper halves, and fill in the
 * child's prefetchable bridge-window resource.
 */
static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	/* The registers carry the upper bits of a 1MB-granular window */
	base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		/* 64-bit window: bits 63:32 come from the upper registers */
		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
#if BITS_PER_LONG == 64
			base |= ((unsigned long) mem_base_hi) << 32;
			limit |= ((unsigned long) mem_limit_hi) << 32;
#else
			/* Can't represent a >32-bit window on a 32-bit kernel */
			if (mem_base_hi || mem_limit_hi) {
				dev_err(&dev->dev, "can't handle 64-bit "
					"address space for bridge\n");
				return;
			}
#endif
		}
	}
	/* base > limit means the window is disabled */
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
		/* The 64-bit type bit survives in the flags set above */
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
438 
/*
 * pci_read_bridge_bases - read bridge windows into a child bus's resources
 * @child: bus on the secondary side of a bridge
 *
 * Points the child's window resources at the bridge device's resources
 * and decodes the I/O, memory and prefetchable-memory windows.  A
 * transparent bridge additionally inherits its parent bus's resources
 * as subtractive-decode windows.  Does nothing for a root bus.
 */
void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	/* Replace any existing resources with the bridge's own windows */
	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		/* Forward every parent resource as a subtractive window */
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}
472 
473 static struct pci_bus *pci_alloc_bus(void)
474 {
475 	struct pci_bus *b;
476 
477 	b = kzalloc(sizeof(*b), GFP_KERNEL);
478 	if (!b)
479 		return NULL;
480 
481 	INIT_LIST_HEAD(&b->node);
482 	INIT_LIST_HEAD(&b->children);
483 	INIT_LIST_HEAD(&b->devices);
484 	INIT_LIST_HEAD(&b->slots);
485 	INIT_LIST_HEAD(&b->resources);
486 	b->max_bus_speed = PCI_SPEED_UNKNOWN;
487 	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
488 	return b;
489 }
490 
491 static void pci_release_host_bridge_dev(struct device *dev)
492 {
493 	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
494 
495 	if (bridge->release_fn)
496 		bridge->release_fn(bridge);
497 
498 	pci_free_resource_list(&bridge->windows);
499 
500 	kfree(bridge);
501 }
502 
503 static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
504 {
505 	struct pci_host_bridge *bridge;
506 
507 	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
508 	if (!bridge)
509 		return NULL;
510 
511 	INIT_LIST_HEAD(&bridge->windows);
512 	bridge->bus = b;
513 	return bridge;
514 }
515 
/* Map the 4-bit PCI-X secondary bus frequency code to a pci_bus_speed */
static unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};
534 
/* Map a PCIe link speed code (LNKCAP/LNKSTA low bits) to a pci_bus_speed */
static unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};
553 
/*
 * pcie_update_link_speed - cache the current PCIe link speed on @bus
 * @bus: the bus whose cur_bus_speed is updated
 * @linksta: PCIe Link Status register value for the upstream port
 */
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
559 
/* Indexed by the rate code computed in agp_speed() below */
static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};
567 
568 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
569 {
570 	int index = 0;
571 
572 	if (agpstat & 4)
573 		index = 3;
574 	else if (agpstat & 2)
575 		index = 2;
576 	else if (agpstat & 1)
577 		index = 1;
578 	else
579 		goto out;
580 
581 	if (agp3) {
582 		index += 2;
583 		if (index == 5)
584 			index = 0;
585 	}
586 
587  out:
588 	return agp_speeds[index];
589 }
590 
591 
/*
 * Determine the maximum and current speed of @bus from its upstream
 * bridge's AGP, PCI-X or PCIe capability, whichever is present.
 */
static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		/* Max speed from the status register ... */
		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		/* ... current speed from the rate negotiated in the command */
		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		/* Derive max speed from the highest supported mode */
		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2) {
				max = PCI_SPEED_133MHz_PCIX_ECC;
			} else {
				max = PCI_SPEED_133MHz_PCIX;
			}
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		/* Current speed comes from the secondary bus frequency field */
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
	if (pos) {
		u32 linkcap;
		u16 linksta;

		/* PCIe: supported speed from LNKCAP, current from LNKSTA */
		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}
651 
652 
/*
 * Allocate a child bus of @parent at bus number @busnr, behind @bridge
 * (NULL for a bus with no upstream bridge device).  The bus device is
 * registered here, but the bus is NOT added to the parent's children
 * list -- pci_add_new_bus() does that under the bus lock.
 */
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus();
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;	/* caller narrows this later */

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);	/* released in release_pcibus_dev() */
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}
715 
716 struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
717 {
718 	struct pci_bus *child;
719 
720 	child = pci_alloc_child_bus(parent, dev, busnr);
721 	if (child) {
722 		down_write(&pci_bus_sem);
723 		list_add_tail(&child->node, &parent->children);
724 		up_write(&pci_bus_sem);
725 	}
726 	return child;
727 }
728 
729 static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
730 {
731 	struct pci_bus *parent = child->parent;
732 
733 	/* Attempts to fix that up are really dangerous unless
734 	   we're going to re-assign all bus numbers. */
735 	if (!pcibios_assign_all_busses())
736 		return;
737 
738 	while (parent->parent && parent->busn_res.end < max) {
739 		parent->busn_res.end = max;
740 		pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
741 		parent = parent->parent;
742 	}
743 }
744 
/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
/**
 * pci_scan_bridge - scan buses behind a bridge
 * @bus: the parent bus the bridge sits on
 * @dev: the bridge itself
 * @max: highest bus number known so far
 * @pass: 0 handles firmware-configured bridges, 1 assigns new numbers
 *
 * Returns the (possibly increased) highest subordinate bus number in
 * use behind the bridge.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	/* Primary/secondary/subordinate share one dword in the header */
	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * If we already got to this bus through a different bridge,
		 * don't re-add it. This can happen with the i450NX chipset.
		 *
		 * However, we continue to descend down the hierarchy and
		 * scan remaining child buses.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > max)
			max = cmax;
		if (child->busn_res.end > max)
			max = child->busn_res.end;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus. */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, ++max);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max, 0xff);
		}
		/* Rebuild the bus-number dword, keeping the latency timer */
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)     <<  0)
		      | ((unsigned int)(child->busn_res.start)   <<  8)
		      | ((unsigned int)(child->busn_res.end) << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			/*
			 * Adjust subordinate busnr in parent buses.
			 * We do this before scanning for children because
			 * some devices may not be detected if the bios
			 * was lazy.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
			/* Now we can scan all subordinate buses... */
			max = pci_scan_child_bus(child);
			/*
			 * now fix it up again since we have found
			 * the real value of max.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				/* Stop reserving if the number is taken */
				if (pci_find_bus(pci_domain_nr(bus),
							max+i+1))
					break;
				/* Would an ancestor's range end inside the reservation? */
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
			pci_fixup_parent_subordinate_busnr(child, max);
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s "
				"hidden behind%s bridge %s %pR\n",
				&child->busn_res,
				(bus->number > child->busn_res.end &&
				 bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				bus->self->transparent ? " transparent" : "",
				dev_name(&bus->dev),
				&bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	/* Restore the bridge control value saved before probing */
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}
957 
958 /*
959  * Read interrupt line and base address registers.
960  * The architecture-dependent code can tweak these, of course.
961  */
962 static void pci_read_irq(struct pci_dev *dev)
963 {
964 	unsigned char irq;
965 
966 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
967 	dev->pin = irq;
968 	if (irq)
969 		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
970 	dev->irq = irq;
971 }
972 
973 void set_pcie_port_type(struct pci_dev *pdev)
974 {
975 	int pos;
976 	u16 reg16;
977 
978 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
979 	if (!pos)
980 		return;
981 	pdev->is_pcie = 1;
982 	pdev->pcie_cap = pos;
983 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
984 	pdev->pcie_flags_reg = reg16;
985 	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
986 	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
987 }
988 
989 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
990 {
991 	u32 reg32;
992 
993 	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
994 	if (reg32 & PCI_EXP_SLTCAP_HPC)
995 		pdev->is_hotplug_bridge = 1;
996 }
997 
998 #define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)
999 
1000 /**
1001  * pci_setup_device - fill in class and map information of a device
1002  * @dev: the device structure to fill
1003  *
1004  * Initialize the device structure with information about the device's
1005  * vendor,class,memory and IO-space addresses,IRQ lines etc.
1006  * Called at initialisation of the PCI subsystem and by CardBus services.
1007  * Returns 0 on success and negative if unknown type of device (not normal,
1008  * bridge or CardBus).
1009  */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u8 hdr_type;
	struct pci_slot *slot;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	/* Header type byte also carries the multifunction flag in bit 7. */
	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	/* Attach the device to a registered slot with a matching device number */
	list_for_each_entry(slot, &dev->bus->slots, list)
		if (PCI_SLOT(dev->devfn) == slot->number)
			dev->slot = slot;

	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	/* One dword read gives revision (byte 0) plus the 3-byte class code */
	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		    /* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		/* Type 0 headers have 6 BARs plus an expansion ROM BAR */
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 *	Do the ugly legacy mode stuff here rather than broken chip
		 *	quirk code. Legacy mode ATA controllers have fixed
		 *	addresses. These are not always echoed in BAR0-3, and
		 *	BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				/* Primary channel in legacy mode: fixed
				 * 0x1F0-0x1F7 command and 0x3F6 control ports */
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
			}
			if ((progif & 4) == 0) {
				/* Secondary channel in legacy mode: fixed
				 * 0x170-0x177 command and 0x376 control ports */
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		/* Type 1 headers have 2 BARs plus an expansion ROM BAR */
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		/* Bridges carry subsystem IDs in the SSVID capability */
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		/* Type 2 headers have a single BAR and no expansion ROM */
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, "
			"ignoring device\n", dev->hdr_type);
		return -EIO;

	bad:
		/* Class/header mismatch: keep the device but mark its class
		 * as undefined rather than failing the scan */
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header "
			"type %02x)\n", dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}
1145 
/*
 * Release capability-related state held by @dev: VPD, SR-IOV and the
 * buffers allocated for saving PCIe/PCI-X capability registers.
 * Called from pci_release_dev() when the last reference is dropped.
 */
static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}
1152 
1153 /**
1154  * pci_release_dev - free a pci device structure when all users of it are finished.
1155  * @dev: device that's been disconnected
1156  *
1157  * Will be called only by the device core when all users of this pci device are
1158  * done.
1159  */
1160 static void pci_release_dev(struct device *dev)
1161 {
1162 	struct pci_dev *pci_dev;
1163 
1164 	pci_dev = to_pci_dev(dev);
1165 	pci_release_capabilities(pci_dev);
1166 	pci_release_of_node(pci_dev);
1167 	pcibios_release_device(pci_dev);
1168 	pci_bus_put(pci_dev->bus);
1169 	kfree(pci_dev);
1170 }
1171 
1172 /**
1173  * pci_cfg_space_size - get the configuration space size of the PCI device.
1174  * @dev: PCI device
1175  *
1176  * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1177  * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
1178  * access it.  Maybe we don't have a way to generate extended config space
1179  * accesses, or the device is behind a reverse Express bridge.  So we try
1180  * reading the dword at 0x100 which must either be 0 or a valid extended
1181  * capability header.
1182  */
1183 int pci_cfg_space_size_ext(struct pci_dev *dev)
1184 {
1185 	u32 status;
1186 	int pos = PCI_CFG_SPACE_SIZE;
1187 
1188 	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1189 		goto fail;
1190 	if (status == 0xffffffff)
1191 		goto fail;
1192 
1193 	return PCI_CFG_SPACE_EXP_SIZE;
1194 
1195  fail:
1196 	return PCI_CFG_SPACE_SIZE;
1197 }
1198 
1199 int pci_cfg_space_size(struct pci_dev *dev)
1200 {
1201 	int pos;
1202 	u32 status;
1203 	u16 class;
1204 
1205 	class = dev->class >> 8;
1206 	if (class == PCI_CLASS_BRIDGE_HOST)
1207 		return pci_cfg_space_size_ext(dev);
1208 
1209 	if (!pci_is_pcie(dev)) {
1210 		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1211 		if (!pos)
1212 			goto fail;
1213 
1214 		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1215 		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
1216 			goto fail;
1217 	}
1218 
1219 	return pci_cfg_space_size_ext(dev);
1220 
1221  fail:
1222 	return PCI_CFG_SPACE_SIZE;
1223 }
1224 
1225 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1226 {
1227 	struct pci_dev *dev;
1228 
1229 	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1230 	if (!dev)
1231 		return NULL;
1232 
1233 	INIT_LIST_HEAD(&dev->bus_list);
1234 	dev->dev.type = &pci_dev_type;
1235 	dev->bus = pci_bus_get(bus);
1236 
1237 	return dev;
1238 }
1239 EXPORT_SYMBOL(pci_alloc_dev);
1240 
/* Legacy wrapper: allocates a pci_dev with no bus. Prefer pci_alloc_dev(). */
struct pci_dev *alloc_pci_dev(void)
{
	return pci_alloc_dev(NULL);
}
EXPORT_SYMBOL(alloc_pci_dev);
1246 
/*
 * Read the Vendor/Device ID dword for @devfn on @bus into *@l and decide
 * whether a device is present.  @crs_timeout is how long (in ms) to keep
 * retrying while the device returns Configuration Request Retry Status;
 * pass 0 to treat CRS as "no device".  Returns true iff a device responded
 * with a plausible ID.
 */
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				 int crs_timeout)
{
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	/* Configuration request Retry Status */
	while (*l == 0xffff0001) {
		if (!crs_timeout)
			return false;

		/* Exponential backoff: 1, 2, 4, ... ms between retries */
		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (delay > crs_timeout) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
					"responding\n", pci_domain_nr(bus),
					bus->number, PCI_SLOT(devfn),
					PCI_FUNC(devfn));
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1282 
1283 /*
1284  * Read the config data for a PCI device, sanity-check it
1285  * and fill in the dev structure...
1286  */
1287 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1288 {
1289 	struct pci_dev *dev;
1290 	u32 l;
1291 
1292 	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1293 		return NULL;
1294 
1295 	dev = pci_alloc_dev(bus);
1296 	if (!dev)
1297 		return NULL;
1298 
1299 	dev->devfn = devfn;
1300 	dev->vendor = l & 0xffff;
1301 	dev->device = (l >> 16) & 0xffff;
1302 
1303 	pci_set_of_node(dev);
1304 
1305 	if (pci_setup_device(dev)) {
1306 		pci_bus_put(dev->bus);
1307 		kfree(dev);
1308 		return NULL;
1309 	}
1310 
1311 	return dev;
1312 }
1313 
/*
 * One-time capability setup for a newly scanned device; called from
 * pci_device_add() before the device joins the bus device list.
 */
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* MSI/MSI-X list */
	pci_msi_init_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_pci22_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);
}
1337 
/*
 * Finish initializing @dev and register it with the driver core and
 * on @bus's device list.  Runs header fixups and capability init first,
 * so notifier callbacks triggered by device_add() see a complete device.
 */
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Notifier could use PCI capabilities */
	/* match_driver stays false so no driver binds during device_add() */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);

	pci_proc_attach_device(dev);
}
1383 
1384 struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
1385 {
1386 	struct pci_dev *dev;
1387 
1388 	dev = pci_get_slot(bus, devfn);
1389 	if (dev) {
1390 		pci_dev_put(dev);
1391 		return dev;
1392 	}
1393 
1394 	dev = pci_scan_device(bus, devfn);
1395 	if (!dev)
1396 		return NULL;
1397 
1398 	pci_device_add(dev, bus);
1399 
1400 	return dev;
1401 }
1402 EXPORT_SYMBOL(pci_scan_single_device);
1403 
1404 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
1405 {
1406 	int pos;
1407 	u16 cap = 0;
1408 	unsigned next_fn;
1409 
1410 	if (pci_ari_enabled(bus)) {
1411 		if (!dev)
1412 			return 0;
1413 		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1414 		if (!pos)
1415 			return 0;
1416 
1417 		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
1418 		next_fn = PCI_ARI_CAP_NFN(cap);
1419 		if (next_fn <= fn)
1420 			return 0;	/* protect against malformed list */
1421 
1422 		return next_fn;
1423 	}
1424 
1425 	/* dev may be NULL for non-contiguous multifunction devices */
1426 	if (!dev || dev->multifunction)
1427 		return (fn + 1) % 8;
1428 
1429 	return 0;
1430 }
1431 
1432 static int only_one_child(struct pci_bus *bus)
1433 {
1434 	struct pci_dev *parent = bus->self;
1435 
1436 	if (!parent || !pci_is_pcie(parent))
1437 		return 0;
1438 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1439 		return 1;
1440 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
1441 	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1442 		return 1;
1443 	return 0;
1444 }
1445 
1446 /**
1447  * pci_scan_slot - scan a PCI slot on a bus for devices.
1448  * @bus: PCI bus to scan
1449  * @devfn: slot number to scan (must have zero function.)
1450  *
1451  * Scan a PCI slot on the specified PCI bus for devices, adding
1452  * discovered devices to the @bus->devices list.  New devices
1453  * will not have is_added set.
1454  *
1455  * Returns the number of new devices found.
1456  */
1457 int pci_scan_slot(struct pci_bus *bus, int devfn)
1458 {
1459 	unsigned fn, nr = 0;
1460 	struct pci_dev *dev;
1461 
1462 	if (only_one_child(bus) && (devfn > 0))
1463 		return 0; /* Already scanned the entire slot */
1464 
1465 	dev = pci_scan_single_device(bus, devfn);
1466 	if (!dev)
1467 		return 0;
1468 	if (!dev->is_added)
1469 		nr++;
1470 
1471 	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
1472 		dev = pci_scan_single_device(bus, devfn + fn);
1473 		if (dev) {
1474 			if (!dev->is_added)
1475 				nr++;
1476 			dev->multifunction = 1;
1477 		}
1478 	}
1479 
1480 	/* only one slot has pcie device */
1481 	if (bus->self && nr)
1482 		pcie_aspm_init_link_state(bus->self);
1483 
1484 	return nr;
1485 }
1486 
/*
 * pci_walk_bus() callback: shrink *@data — the running smallest MPSS
 * (in the 128 << n encoding) — to account for @dev.
 */
static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/* For PCIE hotplug enabled slots not connected directly to a
	 * PCI-E root port, there can be problems when hotplugging
	 * devices.  This is due to the possibility of hotplugging a
	 * device into the fabric with a smaller MPS that the devices
	 * currently running have configured.  Modifying the MPS on the
	 * running devices could cause a fatal bus error due to an
	 * incoming frame being larger than the newly configured MPS.
	 * To work around this, the MPS for the entire fabric must be
	 * set to the minimum size.  Any devices hotplugged into this
	 * fabric will have the minimum MPS set.  If the PCI hotplug
	 * slot is directly connected to the root port and there are not
	 * other devices on the fabric (which seems to be the most
	 * common case), then this is not an issue and MPS discovery
	 * will occur as normal.
	 */
	if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
	     (dev->bus->self &&
	      pci_pcie_type(dev->bus->self) != PCI_EXP_TYPE_ROOT_PORT)))
		*smpss = 0;

	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}
1519 
/*
 * Program @dev's Max Payload Size.  @mps is in bytes; in "performance"
 * mode the request is replaced by the device's own maximum, capped by
 * the parent bridge's already-configured MPS.
 */
static void pcie_write_mps(struct pci_dev *dev, int mps)
{
	int rc;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		mps = 128 << dev->pcie_mpss;

		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
		    dev->bus->self)
			/* For "Performance", the assumption is made that
			 * downstream communication will never be larger than
			 * the MRRS.  So, the MPS only needs to be configured
			 * for the upstream communication.  This being the case,
			 * walk from the top down and set the MPS of the child
			 * to that of the parent bus.
			 *
			 * Configure the device MPS with the smaller of the
			 * device MPSS or the bridge MPS (which is assumed to be
			 * properly configured at this point to the largest
			 * allowable MPS based on its parent bus).
			 */
			mps = min(mps, pcie_get_mps(dev->bus->self));
	}

	rc = pcie_set_mps(dev, mps);
	if (rc)
		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
}
1548 
/*
 * Program @dev's Max Read Request Size.  Only active in "performance"
 * mode, where MRRS starts at the device's current MPS and is halved
 * until the hardware accepts the value.
 */
static void pcie_write_mrrs(struct pci_dev *dev)
{
	int rc, mrrs;

	/* In the "safe" case, do not configure the MRRS.  There appear to be
	 * issues with setting MRRS to 0 on a number of devices.
	 */
	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
		return;

	/* For Max performance, the MRRS must be set to the largest supported
	 * value.  However, it cannot be configured larger than the MPS the
	 * device or the bus can support.  This should already be properly
	 * configured by a prior call to pcie_write_mps.
	 */
	mrrs = pcie_get_mps(dev);

	/* MRRS is a R/W register.  Invalid values can be written, but a
	 * subsequent read will verify if the value is acceptable or not.
	 * If the MRRS value provided is not acceptable (e.g., too large),
	 * shrink the value until it is acceptable to the HW.
	 */
	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
		rc = pcie_set_readrq(dev, mrrs);
		if (!rc)
			break;

		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
		mrrs /= 2;
	}

	/* 128 bytes is the smallest legal MRRS; below it we gave up */
	if (mrrs < 128)
		dev_err(&dev->dev, "MRRS was unable to be configured with a "
			"safe value.  If problems are experienced, try running "
			"with pci=pcie_bus_safe.\n");
}
1585 
/*
 * pci_walk_bus() callback: apply the chosen MPS (128 << *@data bytes)
 * and the MRRS to @dev, then log the resulting settings.
 */
static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
	int mps, orig_mps;

	if (!pci_is_pcie(dev))
		return 0;

	mps = 128 << *(u8 *)data;
	orig_mps = pcie_get_mps(dev);

	pcie_write_mps(dev, mps);
	pcie_write_mrrs(dev);

	dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), "
		 "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
		 orig_mps, pcie_get_readrq(dev));

	return 0;
}
1605 
1606 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
1607  * parents then children fashion.  If this changes, then this code will not
1608  * work as designed.
1609  */
1610 void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
1611 {
1612 	u8 smpss;
1613 
1614 	if (!pci_is_pcie(bus->self))
1615 		return;
1616 
1617 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
1618 		return;
1619 
1620 	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
1621 	 * to be aware to the MPS of the destination.  To work around this,
1622 	 * simply force the MPS of the entire system to the smallest possible.
1623 	 */
1624 	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
1625 		smpss = 0;
1626 
1627 	if (pcie_bus_config == PCIE_BUS_SAFE) {
1628 		smpss = mpss;
1629 
1630 		pcie_find_smpss(bus->self, &smpss);
1631 		pci_walk_bus(bus, pcie_find_smpss, &smpss);
1632 	}
1633 
1634 	pcie_bus_configure_set(bus->self, &smpss);
1635 	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1636 }
1637 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
1638 
/*
 * Scan @bus for devices and recurse behind any bridges found.
 * Returns the highest bus number reached (the subordinate bus number).
 */
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	/* devfn steps by 8: one step per device, pci_scan_slot() covers
	 * the functions */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	/* Two passes over the bridges: pci_scan_bridge() takes the pass
	 * number and behaves accordingly */
	for (pass=0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}
1680 
1681 /**
1682  * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
1683  * @bridge: Host bridge to set up.
1684  *
1685  * Default empty implementation.  Replace with an architecture-specific setup
1686  * routine, if necessary.
1687  */
1688 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
1689 {
1690 	return 0;
1691 }
1692 
/* Arch hook invoked when a bus is registered; default does nothing. */
void __weak pcibios_add_bus(struct pci_bus *bus)
{
}
1696 
/* Arch hook invoked when a bus is removed; default does nothing. */
void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}
1700 
/*
 * Create and register a root bus (and its host bridge device) numbered
 * @bus, taking ownership of the resource windows in @resources.  Does
 * not scan for devices.  Returns the new bus, or NULL on any failure
 * (including the bus number already being known).
 */
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;
	struct pci_bus *b, *b2;
	struct pci_host_bridge_window *window, *n;
	struct resource *res;
	resource_size_t offset;
	char bus_addr[64];
	char *fmt;

	b = pci_alloc_bus();
	if (!b)
		return NULL;

	b->sysdata = sysdata;
	b->ops = ops;
	b->number = b->busn_res.start = bus;
	b2 = pci_find_bus(pci_domain_nr(b), bus);
	if (b2) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b2->dev, "bus already known\n");
		goto err_out;
	}

	bridge = pci_alloc_host_bridge(b);
	if (!bridge)
		goto err_out;

	bridge->dev.parent = parent;
	bridge->dev.release = pci_release_host_bridge_dev;
	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = pcibios_root_bridge_prepare(bridge);
	if (error) {
		/* Not yet registered: plain kfree is the correct teardown */
		kfree(bridge);
		goto err_out;
	}

	error = device_register(&bridge->dev);
	if (error) {
		/* device_register() failed: drop the initial reference */
		put_device(&bridge->dev);
		goto err_out;
	}
	/* b->bridge holds its own reference to the host bridge device */
	b->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(b->bridge);
	pci_set_bus_of_node(b);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;

	pcibios_add_bus(b);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
	else
		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));

	/* Add initial resources to the bus */
	list_for_each_entry_safe(window, n, resources, list) {
		/* Bridge takes ownership of each window */
		list_move_tail(&window->list, &bridge->windows);
		res = window->res;
		offset = window->offset;
		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(b, bus, res->end);
		else
			pci_bus_add_resource(b, res, 0);
		if (offset) {
			/* Report the CPU->bus address translation, 16-bit
			 * wide for I/O windows, 32-bit for memory */
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";
			snprintf(bus_addr, sizeof(bus_addr), fmt,
				 (unsigned long long) (res->start - offset),
				 (unsigned long long) (res->end - offset));
		} else
			bus_addr[0] = '\0';
		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return b;

class_dev_reg_err:
	/* Drop the b->bridge reference, then unregister the bridge itself */
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);
err_out:
	kfree(b);
	return NULL;
}
1804 
/*
 * Register @b's bus-number aperture [@bus, @bus_max] in the resource
 * tree: under the parent bridge's busn_res for child buses, or under
 * the per-domain bus-number resource for root buses.
 *
 * Returns nonzero on success, 0 when the range conflicts with an
 * existing resource.
 */
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource *parent_res, *conflict;

	res->start = bus;
	res->end = bus_max;
	res->flags = IORESOURCE_BUS;

	if (!pci_is_root_bus(b))
		parent_res = &b->parent->busn_res;
	else {
		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
		res->flags |= IORESOURCE_PCI_FIXED;
	}

	conflict = insert_resource_conflict(parent_res, res);

	if (conflict)
		dev_printk(KERN_DEBUG, &b->dev,
			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
			    res, pci_is_root_bus(b) ? "domain " : "",
			    parent_res, conflict->name, conflict);

	return conflict == NULL;
}
1831 
/*
 * Resize @b's bus-number aperture so it ends at @bus_max (start is
 * unchanged).  Returns -EINVAL if @bus_max is below the current start,
 * otherwise the result of adjust_resource() (0 on success).
 */
int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource old_res = *res;
	resource_size_t size;
	int ret;

	if (res->start > bus_max)
		return -EINVAL;

	size = bus_max - res->start + 1;
	ret = adjust_resource(res, res->start, size);
	dev_printk(KERN_DEBUG, &b->dev,
			"busn_res: %pR end %s updated to %02x\n",
			&old_res, ret ? "can not be" : "is", bus_max);

	/* If the resource was never inserted into the tree, do it now */
	if (!ret && !res->parent)
		pci_bus_insert_busn_res(b, res->start, res->end);

	return ret;
}
1853 
/*
 * Release @b's bus-number aperture from the resource tree, if it was
 * ever set up and inserted.
 */
void pci_bus_release_busn_res(struct pci_bus *b)
{
	struct resource *res = &b->busn_res;
	int ret;

	/* Nothing to do if the resource was never configured/inserted */
	if (!res->flags || !res->parent)
		return;

	ret = release_resource(res);
	dev_printk(KERN_DEBUG, &b->dev,
			"busn_res: %pR %s released\n",
			res, ret ? "can not be" : "is");
}
1867 
/*
 * Create root bus @bus, scan it, and add the discovered devices.  If
 * @resources contains no IORESOURCE_BUS window, a default [bus, 0xff]
 * aperture is inserted and then trimmed to the highest bus number the
 * scan actually reached.  Returns the new bus or NULL on failure.
 */
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	struct pci_host_bridge_window *window;
	bool found = false;
	struct pci_bus *b;
	int max;

	/* Did the caller supply a bus-number window? */
	list_for_each_entry(window, resources, list)
		if (window->res->flags & IORESOURCE_BUS) {
			found = true;
			break;
		}

	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
	if (!b)
		return NULL;

	if (!found) {
		dev_info(&b->dev,
		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
			bus);
		pci_bus_insert_busn_res(b, bus, 255);
	}

	max = pci_scan_child_bus(b);

	/* Shrink the default window to what the scan actually used */
	if (!found)
		pci_bus_update_busn_res_end(b, max);

	pci_bus_add_devices(b);
	return b;
}
EXPORT_SYMBOL(pci_scan_root_bus);
1902 
/* Deprecated; use pci_scan_root_bus() instead.
 *
 * Creates and scans a root bus using the global ioport/iomem/busn
 * resources.  Note: unlike pci_scan_bus(), this does NOT call
 * pci_bus_add_devices(); the caller is expected to do that.
 */
struct pci_bus *pci_scan_bus_parented(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	pci_add_resource(&resources, &busn_resource);
	b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
	if (b)
		pci_scan_child_bus(b);
	else
		/* Bus creation failed; the windows were not consumed */
		pci_free_resource_list(&resources);
	return b;
}
EXPORT_SYMBOL(pci_scan_bus_parented);
1921 
1922 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
1923 					void *sysdata)
1924 {
1925 	LIST_HEAD(resources);
1926 	struct pci_bus *b;
1927 
1928 	pci_add_resource(&resources, &ioport_resource);
1929 	pci_add_resource(&resources, &iomem_resource);
1930 	pci_add_resource(&resources, &busn_resource);
1931 	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
1932 	if (b) {
1933 		pci_scan_child_bus(b);
1934 		pci_bus_add_devices(b);
1935 	} else {
1936 		pci_free_resource_list(&resources);
1937 	}
1938 	return b;
1939 }
1940 EXPORT_SYMBOL(pci_scan_bus);
1941 
1942 /**
1943  * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
1944  * @bridge: PCI bridge for the bus to scan
1945  *
1946  * Scan a PCI bus and child buses for new devices, add them,
1947  * and enable them, resizing bridge mmio/io resource if necessary
1948  * and possible.  The caller must ensure the child devices are already
1949  * removed for resizing to occur.
1950  *
1951  * Returns the max number of subordinate bus discovered.
1952  */
1953 unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
1954 {
1955 	unsigned int max;
1956 	struct pci_bus *bus = bridge->subordinate;
1957 
1958 	max = pci_scan_child_bus(bus);
1959 
1960 	pci_assign_unassigned_bridge_resources(bridge);
1961 
1962 	pci_bus_add_devices(bus);
1963 
1964 	return max;
1965 }
1966 
1967 /**
1968  * pci_rescan_bus - scan a PCI bus for devices.
1969  * @bus: PCI bus to scan
1970  *
1971  * Scan a PCI bus and child buses for new devices, adds them,
1972  * and enables them.
1973  *
1974  * Returns the max number of subordinate bus discovered.
1975  */
1976 unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
1977 {
1978 	unsigned int max;
1979 
1980 	max = pci_scan_child_bus(bus);
1981 	pci_assign_unassigned_bus_resources(bus);
1982 	pci_enable_bridges(bus);
1983 	pci_bus_add_devices(bus);
1984 
1985 	return max;
1986 }
1987 EXPORT_SYMBOL_GPL(pci_rescan_bus);
1988 
/* Exports for scan helpers whose definitions live earlier in this file */
EXPORT_SYMBOL(pci_add_new_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1993 
1994 static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
1995 {
1996 	const struct pci_dev *a = to_pci_dev(d_a);
1997 	const struct pci_dev *b = to_pci_dev(d_b);
1998 
1999 	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2000 	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
2001 
2002 	if      (a->bus->number < b->bus->number) return -1;
2003 	else if (a->bus->number > b->bus->number) return  1;
2004 
2005 	if      (a->devfn < b->devfn) return -1;
2006 	else if (a->devfn > b->devfn) return  1;
2007 
2008 	return 0;
2009 }
2010 
/* Re-order all devices on the PCI bus type into breadth-first order. */
void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}
2015