xref: /openbmc/linux/drivers/pci/probe.c (revision afb46f79)
1 /*
2  * probe.c - PCI detection and setup code
3  */
4 
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/cpumask.h>
12 #include <linux/pci-aspm.h>
13 #include <asm-generic/pci-bridge.h>
14 #include "pci.h"
15 
16 #define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
17 #define CARDBUS_RESERVE_BUSNR	3
18 
19 static struct resource busn_resource = {
20 	.name	= "PCI busn",
21 	.start	= 0,
22 	.end	= 255,
23 	.flags	= IORESOURCE_BUS,
24 };
25 
26 /* Ugh.  Need to stop exporting this to modules. */
27 LIST_HEAD(pci_root_buses);
28 EXPORT_SYMBOL(pci_root_buses);
29 
30 static LIST_HEAD(pci_domain_busn_res_list);
31 
32 struct pci_domain_busn_res {
33 	struct list_head list;
34 	struct resource res;
35 	int domain_nr;
36 };
37 
38 static struct resource *get_pci_domain_busn_res(int domain_nr)
39 {
40 	struct pci_domain_busn_res *r;
41 
42 	list_for_each_entry(r, &pci_domain_busn_res_list, list)
43 		if (r->domain_nr == domain_nr)
44 			return &r->res;
45 
46 	r = kzalloc(sizeof(*r), GFP_KERNEL);
47 	if (!r)
48 		return NULL;
49 
50 	r->domain_nr = domain_nr;
51 	r->res.start = 0;
52 	r->res.end = 0xff;
53 	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
54 
55 	list_add_tail(&r->list, &pci_domain_busn_res_list);
56 
57 	return &r->res;
58 }
59 
60 static int find_anything(struct device *dev, void *data)
61 {
62 	return 1;
63 }
64 
65 /*
66  * Some device drivers need to know if PCI has been initialized.
67  * We consider PCI uninitialized when no device can be found on
68  * the pci_bus_type bus.
69  */
70 int no_pci_devices(void)
71 {
72 	struct device *dev;
73 	int no_devices;
74 
75 	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
76 	no_devices = (dev == NULL);
77 	put_device(dev);
78 	return no_devices;
79 }
80 EXPORT_SYMBOL(no_pci_devices);
81 
82 /*
83  * PCI Bus Class
84  */
85 static void release_pcibus_dev(struct device *dev)
86 {
87 	struct pci_bus *pci_bus = to_pci_bus(dev);
88 
89 	if (pci_bus->bridge)
90 		put_device(pci_bus->bridge);
91 	pci_bus_remove_resources(pci_bus);
92 	pci_release_bus_of_node(pci_bus);
93 	kfree(pci_bus);
94 }
95 
96 static struct class pcibus_class = {
97 	.name		= "pci_bus",
98 	.dev_release	= &release_pcibus_dev,
99 	.dev_groups	= pcibus_groups,
100 };
101 
102 static int __init pcibus_class_init(void)
103 {
104 	return class_register(&pcibus_class);
105 }
106 postcore_initcall(pcibus_class_init);
107 
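/*
 * Worked example for pci_size() below (illustrative values only): if a
 * 32-bit memory BAR reads back maxbase = 0xfff00000 after all 1s were
 * written, then with mask = 0xfffffff0 the significant bits are
 * 0xfff00000, the lowest set bit is 0x00100000, and pci_size() returns
 * 0x000fffff, i.e. a 1 MB decode window.
 */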
108 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
109 {
110 	u64 size = mask & maxbase;	/* Find the significant bits */
111 	if (!size)
112 		return 0;
113 
114 	/* Get the lowest of them to find the decode size, and
115 	   from that the extent.  */
116 	size = (size & ~(size-1)) - 1;
117 
118 	/* base == maxbase can be valid only if the BAR has
119 	   already been programmed with all 1s.  */
120 	if (base == maxbase && ((base | size) & mask) != mask)
121 		return 0;
122 
123 	return size;
124 }
125 
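/*
 * Example of the flag decoding done by decode_bar() below (values are
 * illustrative): a raw BAR of 0x00000001 is I/O space and maps to
 * IORESOURCE_IO; 0x00000004 is a 64-bit, non-prefetchable memory BAR
 * and maps to IORESOURCE_MEM | IORESOURCE_MEM_64; 0x0000000c adds the
 * prefetchable bit and so also gets IORESOURCE_PREFETCH.
 */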
126 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
127 {
128 	u32 mem_type;
129 	unsigned long flags;
130 
131 	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
132 		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
133 		flags |= IORESOURCE_IO;
134 		return flags;
135 	}
136 
137 	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
138 	flags |= IORESOURCE_MEM;
139 	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
140 		flags |= IORESOURCE_PREFETCH;
141 
142 	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
143 	switch (mem_type) {
144 	case PCI_BASE_ADDRESS_MEM_TYPE_32:
145 		break;
146 	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
147 		/* 1M mem BAR treated as 32-bit BAR */
148 		break;
149 	case PCI_BASE_ADDRESS_MEM_TYPE_64:
150 		flags |= IORESOURCE_MEM_64;
151 		break;
152 	default:
153 		/* unknown mem type treated as 32-bit BAR */
154 		break;
155 	}
156 	return flags;
157 }
158 
159 #define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
160 
161 /**
162  * pci_read_base - read a PCI BAR
163  * @dev: the PCI device
164  * @type: type of the BAR
165  * @res: resource buffer to be filled in
166  * @pos: BAR position in the config space
167  *
168  * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
169  */
170 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
171 			struct resource *res, unsigned int pos)
172 {
173 	u32 l, sz, mask;
174 	u16 orig_cmd;
175 	struct pci_bus_region region, inverted_region;
176 	bool bar_too_big = false, bar_disabled = false;
177 
178 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
179 
180 	/* No printks while decoding is disabled! */
181 	if (!dev->mmio_always_on) {
182 		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
183 		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
184 			pci_write_config_word(dev, PCI_COMMAND,
185 				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
186 		}
187 	}
188 
189 	res->name = pci_name(dev);
190 
191 	pci_read_config_dword(dev, pos, &l);
192 	pci_write_config_dword(dev, pos, l | mask);
193 	pci_read_config_dword(dev, pos, &sz);
194 	pci_write_config_dword(dev, pos, l);
195 
196 	/*
197 	 * All bits set in sz means the device isn't working properly.
198 	 * If the BAR isn't implemented, all bits must be 0.  If it's a
199 	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
200 	 * 1 must be clear.
201 	 */
202 	if (!sz || sz == 0xffffffff)
203 		goto fail;
204 
205 	/*
206 	 * I don't know how l can have all bits set.  Copied from old code.
207 	 * Maybe it fixes a bug on some ancient platform.
208 	 */
209 	if (l == 0xffffffff)
210 		l = 0;
211 
212 	if (type == pci_bar_unknown) {
213 		res->flags = decode_bar(dev, l);
214 		res->flags |= IORESOURCE_SIZEALIGN;
215 		if (res->flags & IORESOURCE_IO) {
216 			l &= PCI_BASE_ADDRESS_IO_MASK;
217 			mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
218 		} else {
219 			l &= PCI_BASE_ADDRESS_MEM_MASK;
220 			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
221 		}
222 	} else {
223 		res->flags |= (l & IORESOURCE_ROM_ENABLE);
224 		l &= PCI_ROM_ADDRESS_MASK;
225 		mask = (u32)PCI_ROM_ADDRESS_MASK;
226 	}
227 
228 	if (res->flags & IORESOURCE_MEM_64) {
229 		u64 l64 = l;
230 		u64 sz64 = sz;
231 		u64 mask64 = mask | (u64)~0 << 32;
232 
233 		pci_read_config_dword(dev, pos + 4, &l);
234 		pci_write_config_dword(dev, pos + 4, ~0);
235 		pci_read_config_dword(dev, pos + 4, &sz);
236 		pci_write_config_dword(dev, pos + 4, l);
237 
238 		l64 |= ((u64)l << 32);
239 		sz64 |= ((u64)sz << 32);
240 
241 		sz64 = pci_size(l64, sz64, mask64);
242 
243 		if (!sz64)
244 			goto fail;
245 
246 		if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
247 			bar_too_big = true;
248 			goto fail;
249 		}
250 
251 		if ((sizeof(resource_size_t) < 8) && l) {
252 			/* Address above 32-bit boundary; disable the BAR */
253 			pci_write_config_dword(dev, pos, 0);
254 			pci_write_config_dword(dev, pos + 4, 0);
255 			res->flags |= IORESOURCE_UNSET;
256 			region.start = 0;
257 			region.end = sz64;
258 			bar_disabled = true;
259 		} else {
260 			region.start = l64;
261 			region.end = l64 + sz64;
262 		}
263 	} else {
264 		sz = pci_size(l, sz, mask);
265 
266 		if (!sz)
267 			goto fail;
268 
269 		region.start = l;
270 		region.end = l + sz;
271 	}
272 
273 	pcibios_bus_to_resource(dev->bus, res, &region);
274 	pcibios_resource_to_bus(dev->bus, &inverted_region, res);
275 
276 	/*
277 	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
278 	 * the corresponding resource address (the physical address used by
279 	 * the CPU).  Converting that resource address back to a bus address
280 	 * should yield the original BAR value:
281 	 *
282 	 *     resource_to_bus(bus_to_resource(A)) == A
283 	 *
284 	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
285 	 * be claimed by the device.
286 	 */
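	/*
	 * Illustration (hypothetical host bridge with a fixed offset): if
	 * the bridge maps CPU address 0x90001000 to bus address
	 * 0x80001000, a BAR value of A = 0x80001000 converts to resource
	 * address 0x90001000, and converting back yields 0x80001000 == A,
	 * so the check below is satisfied.
	 */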
287 	if (inverted_region.start != region.start) {
288 		dev_info(&dev->dev, "reg 0x%x: initial BAR value %pa invalid; forcing reassignment\n",
289 			 pos, &region.start);
290 		res->flags |= IORESOURCE_UNSET;
291 		res->end -= res->start;
292 		res->start = 0;
293 	}
294 
295 	goto out;
296 
297 
298 fail:
299 	res->flags = 0;
300 out:
301 	if (!dev->mmio_always_on &&
302 	    (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
303 		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
304 
305 	if (bar_too_big)
306 		dev_err(&dev->dev, "reg 0x%x: can't handle 64-bit BAR\n", pos);
307 	if (res->flags && !bar_disabled)
308 		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);
309 
310 	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
311 }
312 
313 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
314 {
315 	unsigned int pos, reg;
316 
317 	for (pos = 0; pos < howmany; pos++) {
318 		struct resource *res = &dev->resource[pos];
319 		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
320 		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
321 	}
322 
323 	if (rom) {
324 		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
325 		dev->rom_base_reg = rom;
326 		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
327 				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
328 				IORESOURCE_SIZEALIGN;
329 		__pci_read_base(dev, pci_bar_mem32, res, rom);
330 	}
331 }
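
/*
 * The BAR resources filled in by pci_read_bases() above are what
 * drivers eventually consume through the standard helpers, e.g.
 * (sketch only, error handling omitted, BAR index 0 is arbitrary):
 *
 *	start = pci_resource_start(pdev, 0);
 *	len   = pci_resource_len(pdev, 0);
 *	regs  = pci_iomap(pdev, 0, 0);
 */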
332 
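/*
 * Decode example for pci_read_bridge_io() below (illustrative register
 * values): with 16-bit I/O decode, PCI_IO_BASE = 0x20 and
 * PCI_IO_LIMIT = 0x30 describe the window 0x2000-0x3fff, since the
 * upper nibble of each register supplies bits 15:12 of the address and
 * the limit is rounded up by the 4K granularity.
 */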
333 static void pci_read_bridge_io(struct pci_bus *child)
334 {
335 	struct pci_dev *dev = child->self;
336 	u8 io_base_lo, io_limit_lo;
337 	unsigned long io_mask, io_granularity, base, limit;
338 	struct pci_bus_region region;
339 	struct resource *res;
340 
341 	io_mask = PCI_IO_RANGE_MASK;
342 	io_granularity = 0x1000;
343 	if (dev->io_window_1k) {
344 		/* Support 1K I/O space granularity */
345 		io_mask = PCI_IO_1K_RANGE_MASK;
346 		io_granularity = 0x400;
347 	}
348 
349 	res = child->resource[0];
350 	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
351 	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
352 	base = (io_base_lo & io_mask) << 8;
353 	limit = (io_limit_lo & io_mask) << 8;
354 
355 	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
356 		u16 io_base_hi, io_limit_hi;
357 
358 		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
359 		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
360 		base |= ((unsigned long) io_base_hi << 16);
361 		limit |= ((unsigned long) io_limit_hi << 16);
362 	}
363 
364 	if (base <= limit) {
365 		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
366 		region.start = base;
367 		region.end = limit + io_granularity - 1;
368 		pcibios_bus_to_resource(dev->bus, res, &region);
369 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
370 	}
371 }
372 
373 static void pci_read_bridge_mmio(struct pci_bus *child)
374 {
375 	struct pci_dev *dev = child->self;
376 	u16 mem_base_lo, mem_limit_lo;
377 	unsigned long base, limit;
378 	struct pci_bus_region region;
379 	struct resource *res;
380 
381 	res = child->resource[1];
382 	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
383 	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
384 	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
385 	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
386 	if (base <= limit) {
387 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
388 		region.start = base;
389 		region.end = limit + 0xfffff;
390 		pcibios_bus_to_resource(dev->bus, res, &region);
391 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
392 	}
393 }
394 
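/*
 * Decode example for pci_read_bridge_mmio_pref() below (illustrative
 * values): PCI_PREF_MEMORY_BASE = 0xfff1 and PCI_PREF_MEMORY_LIMIT =
 * 0xfff1 with type bits indicating 64-bit decode, plus upper-32 base
 * and limit registers of 0x4, describe the 1 MB window
 * 0x4fff00000-0x4ffffffff above the 4 GB boundary (on 64-bit kernels).
 */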
395 static void pci_read_bridge_mmio_pref(struct pci_bus *child)
396 {
397 	struct pci_dev *dev = child->self;
398 	u16 mem_base_lo, mem_limit_lo;
399 	unsigned long base, limit;
400 	struct pci_bus_region region;
401 	struct resource *res;
402 
403 	res = child->resource[2];
404 	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
405 	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
406 	base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
407 	limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
408 
409 	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
410 		u32 mem_base_hi, mem_limit_hi;
411 
412 		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
413 		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
414 
415 		/*
416 		 * Some bridges set the base > limit by default, and some
417 		 * (broken) BIOSes do not initialize them.  If we find
418 		 * this, just assume they are not being used.
419 		 */
420 		if (mem_base_hi <= mem_limit_hi) {
421 #if BITS_PER_LONG == 64
422 			base |= ((unsigned long) mem_base_hi) << 32;
423 			limit |= ((unsigned long) mem_limit_hi) << 32;
424 #else
425 			if (mem_base_hi || mem_limit_hi) {
426 				dev_err(&dev->dev,
427 					"can't handle 64-bit address space for bridge\n");
428 				return;
429 			}
430 #endif
431 		}
432 	}
433 	if (base <= limit) {
434 		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
435 					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
436 		if (res->flags & PCI_PREF_RANGE_TYPE_64)
437 			res->flags |= IORESOURCE_MEM_64;
438 		region.start = base;
439 		region.end = limit + 0xfffff;
440 		pcibios_bus_to_resource(dev->bus, res, &region);
441 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
442 	}
443 }
444 
445 void pci_read_bridge_bases(struct pci_bus *child)
446 {
447 	struct pci_dev *dev = child->self;
448 	struct resource *res;
449 	int i;
450 
451 	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
452 		return;
453 
454 	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
455 		 &child->busn_res,
456 		 dev->transparent ? " (subtractive decode)" : "");
457 
458 	pci_bus_remove_resources(child);
459 	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
460 		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
461 
462 	pci_read_bridge_io(child);
463 	pci_read_bridge_mmio(child);
464 	pci_read_bridge_mmio_pref(child);
465 
466 	if (dev->transparent) {
467 		pci_bus_for_each_resource(child->parent, res, i) {
468 			if (res) {
469 				pci_bus_add_resource(child, res,
470 						     PCI_SUBTRACTIVE_DECODE);
471 				dev_printk(KERN_DEBUG, &dev->dev,
472 					   "  bridge window %pR (subtractive decode)\n",
473 					   res);
474 			}
475 		}
476 	}
477 }
478 
479 static struct pci_bus *pci_alloc_bus(void)
480 {
481 	struct pci_bus *b;
482 
483 	b = kzalloc(sizeof(*b), GFP_KERNEL);
484 	if (!b)
485 		return NULL;
486 
487 	INIT_LIST_HEAD(&b->node);
488 	INIT_LIST_HEAD(&b->children);
489 	INIT_LIST_HEAD(&b->devices);
490 	INIT_LIST_HEAD(&b->slots);
491 	INIT_LIST_HEAD(&b->resources);
492 	b->max_bus_speed = PCI_SPEED_UNKNOWN;
493 	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
494 	return b;
495 }
496 
497 static void pci_release_host_bridge_dev(struct device *dev)
498 {
499 	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
500 
501 	if (bridge->release_fn)
502 		bridge->release_fn(bridge);
503 
504 	pci_free_resource_list(&bridge->windows);
505 
506 	kfree(bridge);
507 }
508 
509 static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
510 {
511 	struct pci_host_bridge *bridge;
512 
513 	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
514 	if (!bridge)
515 		return NULL;
516 
517 	INIT_LIST_HEAD(&bridge->windows);
518 	bridge->bus = b;
519 	return bridge;
520 }
521 
522 static const unsigned char pcix_bus_speed[] = {
523 	PCI_SPEED_UNKNOWN,		/* 0 */
524 	PCI_SPEED_66MHz_PCIX,		/* 1 */
525 	PCI_SPEED_100MHz_PCIX,		/* 2 */
526 	PCI_SPEED_133MHz_PCIX,		/* 3 */
527 	PCI_SPEED_UNKNOWN,		/* 4 */
528 	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
529 	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
530 	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
531 	PCI_SPEED_UNKNOWN,		/* 8 */
532 	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
533 	PCI_SPEED_100MHz_PCIX_266,	/* A */
534 	PCI_SPEED_133MHz_PCIX_266,	/* B */
535 	PCI_SPEED_UNKNOWN,		/* C */
536 	PCI_SPEED_66MHz_PCIX_533,	/* D */
537 	PCI_SPEED_100MHz_PCIX_533,	/* E */
538 	PCI_SPEED_133MHz_PCIX_533	/* F */
539 };
540 
541 const unsigned char pcie_link_speed[] = {
542 	PCI_SPEED_UNKNOWN,		/* 0 */
543 	PCIE_SPEED_2_5GT,		/* 1 */
544 	PCIE_SPEED_5_0GT,		/* 2 */
545 	PCIE_SPEED_8_0GT,		/* 3 */
546 	PCI_SPEED_UNKNOWN,		/* 4 */
547 	PCI_SPEED_UNKNOWN,		/* 5 */
548 	PCI_SPEED_UNKNOWN,		/* 6 */
549 	PCI_SPEED_UNKNOWN,		/* 7 */
550 	PCI_SPEED_UNKNOWN,		/* 8 */
551 	PCI_SPEED_UNKNOWN,		/* 9 */
552 	PCI_SPEED_UNKNOWN,		/* A */
553 	PCI_SPEED_UNKNOWN,		/* B */
554 	PCI_SPEED_UNKNOWN,		/* C */
555 	PCI_SPEED_UNKNOWN,		/* D */
556 	PCI_SPEED_UNKNOWN,		/* E */
557 	PCI_SPEED_UNKNOWN		/* F */
558 };
559 
560 void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
561 {
562 	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
563 }
564 EXPORT_SYMBOL_GPL(pcie_update_link_speed);
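
/*
 * Example: a Link Status register with PCI_EXP_LNKSTA_CLS == 3 indexes
 * entry 3 of pcie_link_speed[] above and reports PCIE_SPEED_8_0GT
 * (PCIe 3.0); unrecognized encodings fall back to PCI_SPEED_UNKNOWN.
 */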
565 
566 static unsigned char agp_speeds[] = {
567 	AGP_UNKNOWN,
568 	AGP_1X,
569 	AGP_2X,
570 	AGP_4X,
571 	AGP_8X
572 };
573 
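/*
 * Example of the mapping done by agp_speed() below: in AGP 3.0 mode
 * (agp3 != 0), status bit 0 means 4x and bit 1 means 8x, so agpstat = 2
 * yields index 2 + 2 = 4 and AGP_8X; in AGP 2.0 mode the same status
 * would map to AGP_2X.
 */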
574 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
575 {
576 	int index = 0;
577 
578 	if (agpstat & 4)
579 		index = 3;
580 	else if (agpstat & 2)
581 		index = 2;
582 	else if (agpstat & 1)
583 		index = 1;
584 	else
585 		goto out;
586 
587 	if (agp3) {
588 		index += 2;
589 		if (index == 5)
590 			index = 0;
591 	}
592 
593  out:
594 	return agp_speeds[index];
595 }
596 
597 
598 static void pci_set_bus_speed(struct pci_bus *bus)
599 {
600 	struct pci_dev *bridge = bus->self;
601 	int pos;
602 
603 	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
604 	if (!pos)
605 		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
606 	if (pos) {
607 		u32 agpstat, agpcmd;
608 
609 		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
610 		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
611 
612 		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
613 		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
614 	}
615 
616 	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
617 	if (pos) {
618 		u16 status;
619 		enum pci_bus_speed max;
620 
621 		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
622 				     &status);
623 
624 		if (status & PCI_X_SSTATUS_533MHZ) {
625 			max = PCI_SPEED_133MHz_PCIX_533;
626 		} else if (status & PCI_X_SSTATUS_266MHZ) {
627 			max = PCI_SPEED_133MHz_PCIX_266;
628 		} else if (status & PCI_X_SSTATUS_133MHZ) {
629 			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2) {
630 				max = PCI_SPEED_133MHz_PCIX_ECC;
631 			} else {
632 				max = PCI_SPEED_133MHz_PCIX;
633 			}
634 		} else {
635 			max = PCI_SPEED_66MHz_PCIX;
636 		}
637 
638 		bus->max_bus_speed = max;
639 		bus->cur_bus_speed = pcix_bus_speed[
640 			(status & PCI_X_SSTATUS_FREQ) >> 6];
641 
642 		return;
643 	}
644 
645 	if (pci_is_pcie(bridge)) {
646 		u32 linkcap;
647 		u16 linksta;
648 
649 		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
650 		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
651 
652 		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
653 		pcie_update_link_speed(bus, linksta);
654 	}
655 }
656 
657 
658 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
659 					   struct pci_dev *bridge, int busnr)
660 {
661 	struct pci_bus *child;
662 	int i;
663 	int ret;
664 
665 	/*
666 	 * Allocate a new bus and inherit settings from the parent.
667 	 */
668 	child = pci_alloc_bus();
669 	if (!child)
670 		return NULL;
671 
672 	child->parent = parent;
673 	child->ops = parent->ops;
674 	child->msi = parent->msi;
675 	child->sysdata = parent->sysdata;
676 	child->bus_flags = parent->bus_flags;
677 
678 	/* initialize some portions of the bus device, but don't register it
679 	 * now as the parent is not properly set up yet.
680 	 */
681 	child->dev.class = &pcibus_class;
682 	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
683 
684 	/*
685 	 * Set up the primary, secondary and subordinate
686 	 * bus numbers.
687 	 */
688 	child->number = child->busn_res.start = busnr;
689 	child->primary = parent->busn_res.start;
690 	child->busn_res.end = 0xff;
691 
692 	if (!bridge) {
693 		child->dev.parent = parent->bridge;
694 		goto add_dev;
695 	}
696 
697 	child->self = bridge;
698 	child->bridge = get_device(&bridge->dev);
699 	child->dev.parent = child->bridge;
700 	pci_set_bus_of_node(child);
701 	pci_set_bus_speed(child);
702 
703 	/* Set up default resource pointers and names. */
704 	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
705 		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
706 		child->resource[i]->name = child->name;
707 	}
708 	bridge->subordinate = child;
709 
710 add_dev:
711 	ret = device_register(&child->dev);
712 	WARN_ON(ret < 0);
713 
714 	pcibios_add_bus(child);
715 
716 	/* Create legacy_io and legacy_mem files for this bus */
717 	pci_create_legacy_files(child);
718 
719 	return child;
720 }
721 
722 struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
723 {
724 	struct pci_bus *child;
725 
726 	child = pci_alloc_child_bus(parent, dev, busnr);
727 	if (child) {
728 		down_write(&pci_bus_sem);
729 		list_add_tail(&child->node, &parent->children);
730 		up_write(&pci_bus_sem);
731 	}
732 	return child;
733 }
734 
735 /*
736  * If it's a bridge, configure it and scan the bus behind it.
737  * For CardBus bridges, we don't scan behind as the devices will
738  * be handled by the bridge driver itself.
739  *
740  * We need to process bridges in two passes -- first we scan those
741  * already configured by the BIOS and after we are done with all of
742  * them, we proceed to assigning numbers to the remaining buses in
743  * order to avoid overlaps between old and new bus numbers.
744  */
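/*
 * pci_scan_child_bus() drives both passes: it walks the device list
 * once with pass == 0 and again with pass == 1, calling this function
 * for every bridge it finds on the bus.
 */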
745 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
746 {
747 	struct pci_bus *child;
748 	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
749 	u32 buses, i, j = 0;
750 	u16 bctl;
751 	u8 primary, secondary, subordinate;
752 	int broken = 0;
753 
754 	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
755 	primary = buses & 0xFF;
756 	secondary = (buses >> 8) & 0xFF;
757 	subordinate = (buses >> 16) & 0xFF;
758 
759 	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
760 		secondary, subordinate, pass);
761 
762 	if (!primary && (primary != bus->number) && secondary && subordinate) {
763 		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
764 		primary = bus->number;
765 	}
766 
767 	/* Check if setup is sensible at all */
768 	if (!pass &&
769 	    (primary != bus->number || secondary <= bus->number ||
770 	     secondary > subordinate || subordinate > bus->busn_res.end)) {
771 		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
772 			 secondary, subordinate);
773 		broken = 1;
774 	}
775 
776 	/* Disable MasterAbortMode during probing to avoid reporting
777 	   of bus errors (in some architectures) */
778 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
779 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
780 			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
781 
782 	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
783 	    !is_cardbus && !broken) {
784 		unsigned int cmax;
785 		/*
786 		 * Bus already configured by firmware, process it in the first
787 		 * pass and just note the configuration.
788 		 */
789 		if (pass)
790 			goto out;
791 
792 		/*
793 		 * The bus might already exist for two reasons: Either we are
794 		 * rescanning the bus or the bus is reachable through more than
795 		 * one bridge. The second case can happen with the i450NX
796 		 * chipset.
797 		 */
798 		child = pci_find_bus(pci_domain_nr(bus), secondary);
799 		if (!child) {
800 			child = pci_add_new_bus(bus, dev, secondary);
801 			if (!child)
802 				goto out;
803 			child->primary = primary;
804 			pci_bus_insert_busn_res(child, secondary, subordinate);
805 			child->bridge_ctl = bctl;
806 		}
807 
808 		cmax = pci_scan_child_bus(child);
809 		if (cmax > subordinate)
810 			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
811 				 subordinate, cmax);
812 		/* subordinate should equal child->busn_res.end */
813 		if (subordinate > max)
814 			max = subordinate;
815 	} else {
816 		/*
817 		 * We need to assign a number to this bus which we always
818 		 * do in the second pass.
819 		 */
820 		if (!pass) {
821 			if (pcibios_assign_all_busses() || broken || is_cardbus)
822 				/* Temporarily disable forwarding of the
823 				   configuration cycles on all bridges in
824 				   this bus segment to avoid possible
825 				   conflicts in the second pass between two
826 				   bridges programmed with overlapping
827 				   bus ranges. */
828 				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
829 						       buses & ~0xffffff);
830 			goto out;
831 		}
832 
833 		if (max >= bus->busn_res.end) {
834 			dev_warn(&dev->dev, "can't allocate child bus %02x from %pR\n",
835 				 max, &bus->busn_res);
836 			goto out;
837 		}
838 
839 		/* Clear errors */
840 		pci_write_config_word(dev, PCI_STATUS, 0xffff);
841 
842 		/* The bus will already exist if we are rescanning */
843 		child = pci_find_bus(pci_domain_nr(bus), max+1);
844 		if (!child) {
845 			child = pci_add_new_bus(bus, dev, max+1);
846 			if (!child)
847 				goto out;
848 			pci_bus_insert_busn_res(child, max+1,
849 						bus->busn_res.end);
850 		}
851 		max++;
852 		buses = (buses & 0xff000000)
853 		      | ((unsigned int)(child->primary)     <<  0)
854 		      | ((unsigned int)(child->busn_res.start)   <<  8)
855 		      | ((unsigned int)(child->busn_res.end) << 16);
856 
857 		/*
858 		 * yenta.c forces a secondary latency timer of 176.
859 		 * Copy that behaviour here.
860 		 */
861 		if (is_cardbus) {
862 			buses &= ~0xff000000;
863 			buses |= CARDBUS_LATENCY_TIMER << 24;
864 		}
865 
866 		/*
867 		 * We need to blast all three values with a single write.
868 		 */
869 		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
870 
871 		if (!is_cardbus) {
872 			child->bridge_ctl = bctl;
873 			max = pci_scan_child_bus(child);
874 		} else {
875 			/*
876 			 * For CardBus bridges, we leave 4 bus numbers
877 			 * as cards with a PCI-to-PCI bridge can be
878 			 * inserted later.
879 			 */
880 			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
881 				struct pci_bus *parent = bus;
882 				if (pci_find_bus(pci_domain_nr(bus),
883 							max+i+1))
884 					break;
885 				while (parent->parent) {
886 					if ((!pcibios_assign_all_busses()) &&
887 					    (parent->busn_res.end > max) &&
888 					    (parent->busn_res.end <= max+i)) {
889 						j = 1;
890 					}
891 					parent = parent->parent;
892 				}
893 				if (j) {
894 					/*
895 					 * Often, there are two cardbus bridges
896 					 * -- try to leave one valid bus number
897 					 * for each one.
898 					 */
899 					i /= 2;
900 					break;
901 				}
902 			}
903 			max += i;
904 		}
905 		/*
906 		 * Set the subordinate bus number to its real value.
907 		 */
908 		if (max > bus->busn_res.end) {
909 			dev_warn(&dev->dev, "max busn %02x is outside %pR\n",
910 				 max, &bus->busn_res);
911 			max = bus->busn_res.end;
912 		}
913 		pci_bus_update_busn_res_end(child, max);
914 		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
915 	}
916 
917 	sprintf(child->name,
918 		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
919 		pci_domain_nr(bus), child->number);
920 
921 	/* Has only triggered on CardBus, fixup is in yenta_socket */
922 	while (bus->parent) {
923 		if ((child->busn_res.end > bus->busn_res.end) ||
924 		    (child->number > bus->busn_res.end) ||
925 		    (child->number < bus->number) ||
926 		    (child->busn_res.end < bus->number)) {
927 			dev_info(&child->dev,
928 				"%pR %s hidden behind%s bridge %s %pR\n",
929 				&child->busn_res,
930 				(bus->number > child->busn_res.end &&
931 				 bus->busn_res.end < child->number) ?
932 					"wholly" : "partially",
933 				bus->self->transparent ? " transparent" : "",
934 				dev_name(&bus->dev),
935 				&bus->busn_res);
936 		}
937 		bus = bus->parent;
938 	}
939 
940 out:
941 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
942 
943 	return max;
944 }
945 
946 /*
947  * Read interrupt line and base address registers.
948  * The architecture-dependent code can tweak these, of course.
949  */
950 static void pci_read_irq(struct pci_dev *dev)
951 {
952 	unsigned char irq;
953 
954 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
955 	dev->pin = irq;
956 	if (irq)
957 		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
958 	dev->irq = irq;
959 }
960 
961 void set_pcie_port_type(struct pci_dev *pdev)
962 {
963 	int pos;
964 	u16 reg16;
965 
966 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
967 	if (!pos)
968 		return;
969 	pdev->pcie_cap = pos;
970 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
971 	pdev->pcie_flags_reg = reg16;
972 	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
973 	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
974 }
975 
976 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
977 {
978 	u32 reg32;
979 
980 	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
981 	if (reg32 & PCI_EXP_SLTCAP_HPC)
982 		pdev->is_hotplug_bridge = 1;
983 }
984 
985 
986 /**
987  * pci_cfg_space_size - get the configuration space size of the PCI device.
988  * @dev: PCI device
989  *
990  * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
991  * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
992  * access it.  Maybe we don't have a way to generate extended config space
993  * accesses, or the device is behind a reverse Express bridge.  So we try
994  * reading the dword at 0x100 which must either be 0 or a valid extended
995  * capability header.
996  */
997 static int pci_cfg_space_size_ext(struct pci_dev *dev)
998 {
999 	u32 status;
1000 	int pos = PCI_CFG_SPACE_SIZE;
1001 
1002 	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1003 		goto fail;
1004 	if (status == 0xffffffff)
1005 		goto fail;
1006 
1007 	return PCI_CFG_SPACE_EXP_SIZE;
1008 
1009  fail:
1010 	return PCI_CFG_SPACE_SIZE;
1011 }
1012 
1013 int pci_cfg_space_size(struct pci_dev *dev)
1014 {
1015 	int pos;
1016 	u32 status;
1017 	u16 class;
1018 
1019 	class = dev->class >> 8;
1020 	if (class == PCI_CLASS_BRIDGE_HOST)
1021 		return pci_cfg_space_size_ext(dev);
1022 
1023 	if (!pci_is_pcie(dev)) {
1024 		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1025 		if (!pos)
1026 			goto fail;
1027 
1028 		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1029 		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
1030 			goto fail;
1031 	}
1032 
1033 	return pci_cfg_space_size_ext(dev);
1034 
1035  fail:
1036 	return PCI_CFG_SPACE_SIZE;
1037 }
1038 
1039 #define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1040 
1041 /**
1042  * pci_setup_device - fill in class and map information of a device
1043  * @dev: the device structure to fill
1044  *
1045  * Initialize the device structure with information about the device's
1046  * vendor, class, memory and I/O-space addresses, IRQ lines, etc.
1047  * Called at initialisation of the PCI subsystem and by CardBus services.
1048  * Returns 0 on success and negative if unknown type of device (not normal,
1049  * bridge or CardBus).
1050  */
1051 int pci_setup_device(struct pci_dev *dev)
1052 {
1053 	u32 class;
1054 	u8 hdr_type;
1055 	struct pci_slot *slot;
1056 	int pos = 0;
1057 	struct pci_bus_region region;
1058 	struct resource *res;
1059 
1060 	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
1061 		return -EIO;
1062 
1063 	dev->sysdata = dev->bus->sysdata;
1064 	dev->dev.parent = dev->bus->bridge;
1065 	dev->dev.bus = &pci_bus_type;
1066 	dev->hdr_type = hdr_type & 0x7f;
1067 	dev->multifunction = !!(hdr_type & 0x80);
1068 	dev->error_state = pci_channel_io_normal;
1069 	set_pcie_port_type(dev);
1070 
1071 	list_for_each_entry(slot, &dev->bus->slots, list)
1072 		if (PCI_SLOT(dev->devfn) == slot->number)
1073 			dev->slot = slot;
1074 
1075 	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1076 	   set this higher, assuming the system even supports it.  */
1077 	dev->dma_mask = 0xffffffff;
1078 
1079 	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
1080 		     dev->bus->number, PCI_SLOT(dev->devfn),
1081 		     PCI_FUNC(dev->devfn));
1082 
1083 	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
1084 	dev->revision = class & 0xff;
1085 	dev->class = class >> 8;		    /* upper 3 bytes */
1086 
1087 	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1088 		   dev->vendor, dev->device, dev->hdr_type, dev->class);
1089 
1090 	/* need to have dev->class ready */
1091 	dev->cfg_size = pci_cfg_space_size(dev);
1092 
1093 	/* "Unknown power state" */
1094 	dev->current_state = PCI_UNKNOWN;
1095 
1096 	/* Early fixups, before probing the BARs */
1097 	pci_fixup_device(pci_fixup_early, dev);
1098 	/* device class may be changed after fixup */
1099 	class = dev->class >> 8;
1100 
1101 	switch (dev->hdr_type) {		    /* header type */
1102 	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
1103 		if (class == PCI_CLASS_BRIDGE_PCI)
1104 			goto bad;
1105 		pci_read_irq(dev);
1106 		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1107 		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1108 		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
1109 
1110 		/*
1111 		 * Do the ugly legacy mode stuff here rather than broken chip
1112 		 * quirk code. Legacy mode ATA controllers have fixed
1113 		 * addresses. These are not always echoed in BAR0-3, and
1114 		 * BAR0-3 in a few cases contain junk!
1115 		 */
1116 		if (class == PCI_CLASS_STORAGE_IDE) {
1117 			u8 progif;
1118 			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1119 			if ((progif & 1) == 0) {
1120 				region.start = 0x1F0;
1121 				region.end = 0x1F7;
1122 				res = &dev->resource[0];
1123 				res->flags = LEGACY_IO_RESOURCE;
1124 				pcibios_bus_to_resource(dev->bus, res, &region);
1125 				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
1126 					 res);
1127 				region.start = 0x3F6;
1128 				region.end = 0x3F6;
1129 				res = &dev->resource[1];
1130 				res->flags = LEGACY_IO_RESOURCE;
1131 				pcibios_bus_to_resource(dev->bus, res, &region);
1132 				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
1133 					 res);
1134 			}
1135 			if ((progif & 4) == 0) {
1136 				region.start = 0x170;
1137 				region.end = 0x177;
1138 				res = &dev->resource[2];
1139 				res->flags = LEGACY_IO_RESOURCE;
1140 				pcibios_bus_to_resource(dev->bus, res, &region);
1141 				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
1142 					 res);
1143 				region.start = 0x376;
1144 				region.end = 0x376;
1145 				res = &dev->resource[3];
1146 				res->flags = LEGACY_IO_RESOURCE;
1147 				pcibios_bus_to_resource(dev->bus, res, &region);
1148 				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
1149 					 res);
1150 			}
1151 		}
1152 		break;
1153 
1154 	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
1155 		if (class != PCI_CLASS_BRIDGE_PCI)
1156 			goto bad;
1157 		/* The PCI-to-PCI bridge spec requires that a subtractive
1158 		   decode (i.e. transparent) bridge have a programming
1159 		   interface code of 0x01. */
1160 		pci_read_irq(dev);
1161 		dev->transparent = ((dev->class & 0xff) == 1);
1162 		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1163 		set_pcie_hotplug_bridge(dev);
1164 		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1165 		if (pos) {
1166 			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1167 			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1168 		}
1169 		break;
1170 
1171 	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
1172 		if (class != PCI_CLASS_BRIDGE_CARDBUS)
1173 			goto bad;
1174 		pci_read_irq(dev);
1175 		pci_read_bases(dev, 1, 0);
1176 		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1177 		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1178 		break;
1179 
1180 	default:				    /* unknown header */
1181 		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
1182 			dev->hdr_type);
1183 		return -EIO;
1184 
1185 	bad:
1186 		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
1187 			dev->class, dev->hdr_type);
1188 		dev->class = PCI_CLASS_NOT_DEFINED;
1189 	}
1190 
1191 	/* We found a fine healthy device, go go go... */
1192 	return 0;
1193 }
1194 
1195 static void pci_release_capabilities(struct pci_dev *dev)
1196 {
1197 	pci_vpd_release(dev);
1198 	pci_iov_release(dev);
1199 	pci_free_cap_save_buffers(dev);
1200 }
1201 
1202 /**
1203  * pci_release_dev - free a pci device structure when all users of it are finished.
1204  * @dev: device that's been disconnected
1205  *
1206  * Will be called only by the device core when all users of this pci device are
1207  * done.
1208  */
1209 static void pci_release_dev(struct device *dev)
1210 {
1211 	struct pci_dev *pci_dev;
1212 
1213 	pci_dev = to_pci_dev(dev);
1214 	pci_release_capabilities(pci_dev);
1215 	pci_release_of_node(pci_dev);
1216 	pcibios_release_device(pci_dev);
1217 	pci_bus_put(pci_dev->bus);
1218 	kfree(pci_dev);
1219 }
1220 
1221 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1222 {
1223 	struct pci_dev *dev;
1224 
1225 	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1226 	if (!dev)
1227 		return NULL;
1228 
1229 	INIT_LIST_HEAD(&dev->bus_list);
1230 	dev->dev.type = &pci_dev_type;
1231 	dev->bus = pci_bus_get(bus);
1232 
1233 	return dev;
1234 }
1235 EXPORT_SYMBOL(pci_alloc_dev);
1236 
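/*
 * Note on the 0xffff0001 check below: when a device responds with
 * Configuration Request Retry Status (CRS) and CRS software visibility
 * is enabled, the read is completed with vendor ID 0x0001, so the dword
 * reads back as 0xffff0001 until the device is ready.
 */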
1237 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1238 				 int crs_timeout)
1239 {
1240 	int delay = 1;
1241 
1242 	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1243 		return false;
1244 
1245 	/* some broken boards return 0 or ~0 if a slot is empty: */
1246 	if (*l == 0xffffffff || *l == 0x00000000 ||
1247 	    *l == 0x0000ffff || *l == 0xffff0000)
1248 		return false;
1249 
1250 	/* Configuration request Retry Status */
1251 	while (*l == 0xffff0001) {
1252 		if (!crs_timeout)
1253 			return false;
1254 
1255 		msleep(delay);
1256 		delay *= 2;
1257 		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1258 			return false;
1259 		/* Card hasn't responded in 60 seconds?  Must be stuck. */
1260 		if (delay > crs_timeout) {
1261 			printk(KERN_WARNING
1262 			       "pci %04x:%02x:%02x.%d: not responding\n",
1263 			       pci_domain_nr(bus), bus->number,
1264 			       PCI_SLOT(devfn), PCI_FUNC(devfn));
1265 			return false;
1266 		}
1267 	}
1268 
1269 	return true;
1270 }
1271 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1272 
1273 /*
1274  * Read the config data for a PCI device, sanity-check it
1275  * and fill in the dev structure...
1276  */
1277 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1278 {
1279 	struct pci_dev *dev;
1280 	u32 l;
1281 
1282 	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1283 		return NULL;
1284 
1285 	dev = pci_alloc_dev(bus);
1286 	if (!dev)
1287 		return NULL;
1288 
1289 	dev->devfn = devfn;
1290 	dev->vendor = l & 0xffff;
1291 	dev->device = (l >> 16) & 0xffff;
1292 
1293 	pci_set_of_node(dev);
1294 
1295 	if (pci_setup_device(dev)) {
1296 		pci_bus_put(dev->bus);
1297 		kfree(dev);
1298 		return NULL;
1299 	}
1300 
1301 	return dev;
1302 }
1303 
1304 static void pci_init_capabilities(struct pci_dev *dev)
1305 {
1306 	/* MSI/MSI-X list */
1307 	pci_msi_init_pci_dev(dev);
1308 
1309 	/* Buffers for saving PCIe and PCI-X capabilities */
1310 	pci_allocate_cap_save_buffers(dev);
1311 
1312 	/* Power Management */
1313 	pci_pm_init(dev);
1314 
1315 	/* Vital Product Data */
1316 	pci_vpd_pci22_init(dev);
1317 
1318 	/* Alternative Routing-ID Forwarding */
1319 	pci_configure_ari(dev);
1320 
1321 	/* Single Root I/O Virtualization */
1322 	pci_iov_init(dev);
1323 
1324 	/* Enable ACS P2P upstream forwarding */
1325 	pci_enable_acs(dev);
1326 }
1327 
1328 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1329 {
1330 	int ret;
1331 
1332 	device_initialize(&dev->dev);
1333 	dev->dev.release = pci_release_dev;
1334 
1335 	set_dev_node(&dev->dev, pcibus_to_node(bus));
1336 	dev->dev.dma_mask = &dev->dma_mask;
1337 	dev->dev.dma_parms = &dev->dma_parms;
1338 	dev->dev.coherent_dma_mask = 0xffffffffull;
1339 
1340 	pci_set_dma_max_seg_size(dev, 65536);
1341 	pci_set_dma_seg_boundary(dev, 0xffffffff);
1342 
1343 	/* Fix up broken headers */
1344 	pci_fixup_device(pci_fixup_header, dev);
1345 
1346 	/* moved out from quirk header fixup code */
1347 	pci_reassigndev_resource_alignment(dev);
1348 
1349 	/* Clear the state_saved flag. */
1350 	dev->state_saved = false;
1351 
1352 	/* Initialize various capabilities */
1353 	pci_init_capabilities(dev);
1354 
1355 	/*
1356 	 * Add the device to our list of discovered devices
1357 	 * and the bus list for fixup functions, etc.
1358 	 */
1359 	down_write(&pci_bus_sem);
1360 	list_add_tail(&dev->bus_list, &bus->devices);
1361 	up_write(&pci_bus_sem);
1362 
1363 	ret = pcibios_add_device(dev);
1364 	WARN_ON(ret < 0);
1365 
1366 	/* Notifier could use PCI capabilities */
1367 	dev->match_driver = false;
1368 	ret = device_add(&dev->dev);
1369 	WARN_ON(ret < 0);
1370 }
1371 
1372 struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
1373 {
1374 	struct pci_dev *dev;
1375 
1376 	dev = pci_get_slot(bus, devfn);
1377 	if (dev) {
1378 		pci_dev_put(dev);
1379 		return dev;
1380 	}
1381 
1382 	dev = pci_scan_device(bus, devfn);
1383 	if (!dev)
1384 		return NULL;
1385 
1386 	pci_device_add(dev, bus);
1387 
1388 	return dev;
1389 }
1390 EXPORT_SYMBOL(pci_scan_single_device);
1391 
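/*
 * Function traversal sketch for next_fn() below: with ARI enabled, each
 * function's ARI capability names the next implemented function number,
 * so a device might chain 0 -> 8 -> 16 -> 0 (0 terminates the walk);
 * without ARI, functions 0-7 of a multifunction device are probed in
 * turn.
 */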
1392 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
1393 {
1394 	int pos;
1395 	u16 cap = 0;
1396 	unsigned next_fn;
1397 
1398 	if (pci_ari_enabled(bus)) {
1399 		if (!dev)
1400 			return 0;
1401 		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1402 		if (!pos)
1403 			return 0;
1404 
1405 		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
1406 		next_fn = PCI_ARI_CAP_NFN(cap);
1407 		if (next_fn <= fn)
1408 			return 0;	/* protect against malformed list */
1409 
1410 		return next_fn;
1411 	}
1412 
1413 	/* dev may be NULL for non-contiguous multifunction devices */
1414 	if (!dev || dev->multifunction)
1415 		return (fn + 1) % 8;
1416 
1417 	return 0;
1418 }
1419 
1420 static int only_one_child(struct pci_bus *bus)
1421 {
1422 	struct pci_dev *parent = bus->self;
1423 
1424 	if (!parent || !pci_is_pcie(parent))
1425 		return 0;
1426 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1427 		return 1;
1428 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
1429 	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1430 		return 1;
1431 	return 0;
1432 }
1433 
1434 /**
1435  * pci_scan_slot - scan a PCI slot on a bus for devices.
1436  * @bus: PCI bus to scan
1437  * @devfn: slot number to scan (function number must be zero)
1438  *
1439  * Scan a PCI slot on the specified PCI bus for devices, adding
1440  * discovered devices to the @bus->devices list.  New devices
1441  * will not have is_added set.
1442  *
1443  * Returns the number of new devices found.
1444  */
1445 int pci_scan_slot(struct pci_bus *bus, int devfn)
1446 {
1447 	unsigned fn, nr = 0;
1448 	struct pci_dev *dev;
1449 
1450 	if (only_one_child(bus) && (devfn > 0))
1451 		return 0; /* Already scanned the entire slot */
1452 
1453 	dev = pci_scan_single_device(bus, devfn);
1454 	if (!dev)
1455 		return 0;
1456 	if (!dev->is_added)
1457 		nr++;
1458 
1459 	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
1460 		dev = pci_scan_single_device(bus, devfn + fn);
1461 		if (dev) {
1462 			if (!dev->is_added)
1463 				nr++;
1464 			dev->multifunction = 1;
1465 		}
1466 	}
1467 
1468 	/* If new devices were found below a bridge, initialize its ASPM link state */
1469 	if (bus->self && nr)
1470 		pcie_aspm_init_link_state(bus->self);
1471 
1472 	return nr;
1473 }
1474 
1475 static int pcie_find_smpss(struct pci_dev *dev, void *data)
1476 {
1477 	u8 *smpss = data;
1478 
1479 	if (!pci_is_pcie(dev))
1480 		return 0;
1481 
1482 	/*
1483 	 * We don't have a way to change MPS settings on devices that have
1484 	 * drivers attached.  A hot-added device might support only the minimum
1485 	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
1486 	 * where devices may be hot-added, we limit the fabric MPS to 128 so
1487 	 * hot-added devices will work correctly.
1488 	 *
1489 	 * However, if we hot-add a device to a slot directly below a Root
1490 	 * Port, it's impossible for there to be other existing devices below
1491 	 * the port.  We don't limit the MPS in this case because we can
1492 	 * reconfigure MPS on both the Root Port and the hot-added device,
1493 	 * and there are no other devices involved.
1494 	 *
1495 	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
1496 	 */
1497 	if (dev->is_hotplug_bridge &&
1498 	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
1499 		*smpss = 0;
1500 
1501 	if (*smpss > dev->pcie_mpss)
1502 		*smpss = dev->pcie_mpss;
1503 
1504 	return 0;
1505 }
1506 
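/*
 * MPS encoding note: pcie_mpss holds the 3-bit Max_Payload_Size
 * Supported field from the Device Capabilities register, so
 * 128 << pcie_mpss gives the size in bytes (0 -> 128, 1 -> 256, ...,
 * 5 -> 4096), which is the arithmetic used below and in
 * pcie_bus_configure_set().
 */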
1507 static void pcie_write_mps(struct pci_dev *dev, int mps)
1508 {
1509 	int rc;
1510 
1511 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
1512 		mps = 128 << dev->pcie_mpss;
1513 
1514 		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
1515 		    dev->bus->self)
1516 			/* For "Performance", the assumption is made that
1517 			 * downstream communication will never be larger than
1518 			 * the MRRS.  So, the MPS only needs to be configured
1519 			 * for the upstream communication.  This being the case,
1520 			 * walk from the top down and set the MPS of the child
1521 			 * to that of the parent bus.
1522 			 *
1523 			 * Configure the device MPS with the smaller of the
1524 			 * device MPSS or the bridge MPS (which is assumed to be
1525 			 * properly configured at this point to the largest
1526 			 * allowable MPS based on its parent bus).
1527 			 */
1528 			mps = min(mps, pcie_get_mps(dev->bus->self));
1529 	}
1530 
1531 	rc = pcie_set_mps(dev, mps);
1532 	if (rc)
1533 		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
1534 }
1535 
1536 static void pcie_write_mrrs(struct pci_dev *dev)
1537 {
1538 	int rc, mrrs;
1539 
1540 	/* In the "safe" case, do not configure the MRRS.  There appear to be
1541 	 * issues with setting MRRS to 0 on a number of devices.
1542 	 */
1543 	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1544 		return;
1545 
1546 	/* For Max performance, the MRRS must be set to the largest supported
1547 	 * value.  However, it cannot be configured larger than the MPS the
1548 	 * device or the bus can support.  This should already be properly
1549 	 * configured by a prior call to pcie_write_mps.
1550 	 */
1551 	mrrs = pcie_get_mps(dev);
1552 
1553 	/* MRRS is a R/W register.  Invalid values can be written, but a
1554 	/* MRRS is an R/W register.  Invalid values can be written, but a
1555 	 * If the MRRS value provided is not acceptable (e.g., too large),
1556 	 * shrink the value until it is acceptable to the HW.
1557 	 */
1558 	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1559 		rc = pcie_set_readrq(dev, mrrs);
1560 		if (!rc)
1561 			break;
1562 
1563 		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
1564 		mrrs /= 2;
1565 	}
1566 
1567 	if (mrrs < 128)
1568 		dev_err(&dev->dev, "MRRS could not be set to a safe value; "
1569 			"if problems are experienced, try running "
1570 			"with pci=pcie_bus_safe\n");
1571 }
1572 
1573 static void pcie_bus_detect_mps(struct pci_dev *dev)
1574 {
1575 	struct pci_dev *bridge = dev->bus->self;
1576 	int mps, p_mps;
1577 
1578 	if (!bridge)
1579 		return;
1580 
1581 	mps = pcie_get_mps(dev);
1582 	p_mps = pcie_get_mps(bridge);
1583 
1584 	if (mps != p_mps)
1585 		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1586 			 mps, pci_name(bridge), p_mps);
1587 }
1588 
1589 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1590 {
1591 	int mps, orig_mps;
1592 
1593 	if (!pci_is_pcie(dev))
1594 		return 0;
1595 
1596 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
1597 		pcie_bus_detect_mps(dev);
1598 		return 0;
1599 	}
1600 
1601 	mps = 128 << *(u8 *)data;
1602 	orig_mps = pcie_get_mps(dev);
1603 
1604 	pcie_write_mps(dev, mps);
1605 	pcie_write_mrrs(dev);
1606 
1607 	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
1608 		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
1609 		 orig_mps, pcie_get_readrq(dev));
1610 
1611 	return 0;
1612 }
1613 
1614 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
1615  * parents then children fashion.  If this changes, then this code will not
1616  * work as designed.
1617  */
1618 void pcie_bus_configure_settings(struct pci_bus *bus)
1619 {
1620 	u8 smpss;
1621 
1622 	if (!bus->self)
1623 		return;
1624 
1625 	if (!pci_is_pcie(bus->self))
1626 		return;
1627 
1628 	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
1629 	 * to be aware of the MPS of the destination.  To work around this,
1630 	 * simply force the MPS of the entire system to the smallest possible.
1631 	 */
1632 	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
1633 		smpss = 0;
1634 
1635 	if (pcie_bus_config == PCIE_BUS_SAFE) {
1636 		smpss = bus->self->pcie_mpss;
1637 
1638 		pcie_find_smpss(bus->self, &smpss);
1639 		pci_walk_bus(bus, pcie_find_smpss, &smpss);
1640 	}
1641 
1642 	pcie_bus_configure_set(bus->self, &smpss);
1643 	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1644 }
1645 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
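
/*
 * Typical usage of pcie_bus_configure_settings() (sketch; real call
 * sites live in arch and hotplug code): after a root bus has been
 * scanned and resources assigned, the caller walks the child buses
 * top-down, e.g.
 *
 *	list_for_each_entry(child, &bus->children, node)
 *		pcie_bus_configure_settings(child);
 */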
1646 
1647 unsigned int pci_scan_child_bus(struct pci_bus *bus)
1648 {
1649 	unsigned int devfn, pass, max = bus->busn_res.start;
1650 	struct pci_dev *dev;
1651 
1652 	dev_dbg(&bus->dev, "scanning bus\n");
1653 
1654 	/* Go find them, Rover! */
1655 	for (devfn = 0; devfn < 0x100; devfn += 8)
1656 		pci_scan_slot(bus, devfn);
1657 
1658 	/* Reserve buses for SR-IOV capability. */
1659 	max += pci_iov_bus_range(bus);
1660 
1661 	/*
1662 	 * After performing arch-dependent fixup of the bus, look behind
1663 	 * all PCI-to-PCI bridges on this bus.
1664 	 */
1665 	if (!bus->is_added) {
1666 		dev_dbg(&bus->dev, "fixups for bus\n");
1667 		pcibios_fixup_bus(bus);
1668 		bus->is_added = 1;
1669 	}
1670 
1671 	for (pass = 0; pass < 2; pass++)
1672 		list_for_each_entry(dev, &bus->devices, bus_list) {
1673 			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
1674 			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
1675 				max = pci_scan_bridge(bus, dev, max, pass);
1676 		}
1677 
1678 	/*
1679 	 * We've scanned the bus and so we know all about what's on
1680 	 * the other side of any bridges that may be on this bus plus
1681 	 * any devices.
1682 	 *
1683 	 * Return how far we've got finding sub-buses.
1684 	 */
1685 	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
1686 	return max;
1687 }
1688 
1689 /**
1690  * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
1691  * @bridge: Host bridge to set up.
1692  *
1693  * Default empty implementation.  Replace with an architecture-specific setup
1694  * routine, if necessary.
1695  */
1696 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
1697 {
1698 	return 0;
1699 }
1700 
1701 void __weak pcibios_add_bus(struct pci_bus *bus)
1702 {
1703 }
1704 
1705 void __weak pcibios_remove_bus(struct pci_bus *bus)
1706 {
1707 }
1708 
1709 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1710 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
1711 {
1712 	int error;
1713 	struct pci_host_bridge *bridge;
1714 	struct pci_bus *b, *b2;
1715 	struct pci_host_bridge_window *window, *n;
1716 	struct resource *res;
1717 	resource_size_t offset;
1718 	char bus_addr[64];
1719 	char *fmt;
1720 
1721 	b = pci_alloc_bus();
1722 	if (!b)
1723 		return NULL;
1724 
1725 	b->sysdata = sysdata;
1726 	b->ops = ops;
1727 	b->number = b->busn_res.start = bus;
1728 	b2 = pci_find_bus(pci_domain_nr(b), bus);
1729 	if (b2) {
1730 		/* If we already got to this bus through a different bridge, ignore it */
1731 		dev_dbg(&b2->dev, "bus already known\n");
1732 		goto err_out;
1733 	}
1734 
1735 	bridge = pci_alloc_host_bridge(b);
1736 	if (!bridge)
1737 		goto err_out;
1738 
1739 	bridge->dev.parent = parent;
1740 	bridge->dev.release = pci_release_host_bridge_dev;
1741 	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
1742 	error = pcibios_root_bridge_prepare(bridge);
1743 	if (error) {
1744 		kfree(bridge);
1745 		goto err_out;
1746 	}
1747 
1748 	error = device_register(&bridge->dev);
1749 	if (error) {
1750 		put_device(&bridge->dev);
1751 		goto err_out;
1752 	}
1753 	b->bridge = get_device(&bridge->dev);
1754 	device_enable_async_suspend(b->bridge);
1755 	pci_set_bus_of_node(b);
1756 
1757 	if (!parent)
1758 		set_dev_node(b->bridge, pcibus_to_node(b));
1759 
1760 	b->dev.class = &pcibus_class;
1761 	b->dev.parent = b->bridge;
1762 	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
1763 	error = device_register(&b->dev);
1764 	if (error)
1765 		goto class_dev_reg_err;
1766 
1767 	pcibios_add_bus(b);
1768 
1769 	/* Create legacy_io and legacy_mem files for this bus */
1770 	pci_create_legacy_files(b);
1771 
1772 	if (parent)
1773 		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
1774 	else
1775 		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
1776 
1777 	/* Add initial resources to the bus */
1778 	list_for_each_entry_safe(window, n, resources, list) {
1779 		list_move_tail(&window->list, &bridge->windows);
1780 		res = window->res;
1781 		offset = window->offset;
1782 		if (res->flags & IORESOURCE_BUS)
1783 			pci_bus_insert_busn_res(b, bus, res->end);
1784 		else
1785 			pci_bus_add_resource(b, res, 0);
1786 		if (offset) {
1787 			if (resource_type(res) == IORESOURCE_IO)
1788 				fmt = " (bus address [%#06llx-%#06llx])";
1789 			else
1790 				fmt = " (bus address [%#010llx-%#010llx])";
1791 			snprintf(bus_addr, sizeof(bus_addr), fmt,
1792 				 (unsigned long long) (res->start - offset),
1793 				 (unsigned long long) (res->end - offset));
1794 		} else
1795 			bus_addr[0] = '\0';
1796 		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
1797 	}
1798 
1799 	down_write(&pci_bus_sem);
1800 	list_add_tail(&b->node, &pci_root_buses);
1801 	up_write(&pci_bus_sem);
1802 
1803 	return b;
1804 
1805 class_dev_reg_err:
1806 	put_device(&bridge->dev);
1807 	device_unregister(&bridge->dev);
1808 err_out:
1809 	kfree(b);
1810 	return NULL;
1811 }
1812 
1813 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
1814 {
1815 	struct resource *res = &b->busn_res;
1816 	struct resource *parent_res, *conflict;
1817 
1818 	res->start = bus;
1819 	res->end = bus_max;
1820 	res->flags = IORESOURCE_BUS;
1821 
1822 	if (!pci_is_root_bus(b))
1823 		parent_res = &b->parent->busn_res;
1824 	else {
1825 		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
1826 		res->flags |= IORESOURCE_PCI_FIXED;
1827 	}
1828 
1829 	conflict = request_resource_conflict(parent_res, res);
1830 
1831 	if (conflict)
1832 		dev_printk(KERN_DEBUG, &b->dev,
1833 			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
1834 			    res, pci_is_root_bus(b) ? "domain " : "",
1835 			    parent_res, conflict->name, conflict);
1836 
1837 	return conflict == NULL;
1838 }
1839 
1840 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
1841 {
1842 	struct resource *res = &b->busn_res;
1843 	struct resource old_res = *res;
1844 	resource_size_t size;
1845 	int ret;
1846 
1847 	if (res->start > bus_max)
1848 		return -EINVAL;
1849 
1850 	size = bus_max - res->start + 1;
1851 	ret = adjust_resource(res, res->start, size);
1852 	dev_printk(KERN_DEBUG, &b->dev,
1853 			"busn_res: %pR end %s updated to %02x\n",
1854 			&old_res, ret ? "can not be" : "is", bus_max);
1855 
1856 	if (!ret && !res->parent)
1857 		pci_bus_insert_busn_res(b, res->start, res->end);
1858 
1859 	return ret;
1860 }
1861 
1862 void pci_bus_release_busn_res(struct pci_bus *b)
1863 {
1864 	struct resource *res = &b->busn_res;
1865 	int ret;
1866 
1867 	if (!res->flags || !res->parent)
1868 		return;
1869 
1870 	ret = release_resource(res);
1871 	dev_printk(KERN_DEBUG, &b->dev,
1872 			"busn_res: %pR %s released\n",
1873 			res, ret ? "can not be" : "is");
1874 }
1875 
1876 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1877 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
1878 {
1879 	struct pci_host_bridge_window *window;
1880 	bool found = false;
1881 	struct pci_bus *b;
1882 	int max;
1883 
1884 	list_for_each_entry(window, resources, list)
1885 		if (window->res->flags & IORESOURCE_BUS) {
1886 			found = true;
1887 			break;
1888 		}
1889 
1890 	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
1891 	if (!b)
1892 		return NULL;
1893 
1894 	if (!found) {
1895 		dev_info(&b->dev,
1896 			 "No busn resource found for root bus, will use [bus %02x-ff]\n",
1897 			 bus);
1898 		pci_bus_insert_busn_res(b, bus, 255);
1899 	}
1900 
1901 	max = pci_scan_child_bus(b);
1902 
1903 	if (!found)
1904 		pci_bus_update_busn_res_end(b, max);
1905 
1906 	pci_bus_add_devices(b);
1907 	return b;
1908 }
1909 EXPORT_SYMBOL(pci_scan_root_bus);
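
/*
 * pci_scan_bus_parented() and pci_scan_bus() below illustrate the
 * minimal calling convention: build a resource list with
 * pci_add_resource() and hand it to pci_create_root_bus() or
 * pci_scan_root_bus(); host controller drivers normally pass their own
 * windows instead of the global ioport/iomem resources.
 */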
1910 
1911 /* Deprecated; use pci_scan_root_bus() instead */
1912 struct pci_bus *pci_scan_bus_parented(struct device *parent,
1913 		int bus, struct pci_ops *ops, void *sysdata)
1914 {
1915 	LIST_HEAD(resources);
1916 	struct pci_bus *b;
1917 
1918 	pci_add_resource(&resources, &ioport_resource);
1919 	pci_add_resource(&resources, &iomem_resource);
1920 	pci_add_resource(&resources, &busn_resource);
1921 	b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
1922 	if (b)
1923 		pci_scan_child_bus(b);
1924 	else
1925 		pci_free_resource_list(&resources);
1926 	return b;
1927 }
1928 EXPORT_SYMBOL(pci_scan_bus_parented);
1929 
1930 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
1931 					void *sysdata)
1932 {
1933 	LIST_HEAD(resources);
1934 	struct pci_bus *b;
1935 
1936 	pci_add_resource(&resources, &ioport_resource);
1937 	pci_add_resource(&resources, &iomem_resource);
1938 	pci_add_resource(&resources, &busn_resource);
1939 	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
1940 	if (b) {
1941 		pci_scan_child_bus(b);
1942 		pci_bus_add_devices(b);
1943 	} else {
1944 		pci_free_resource_list(&resources);
1945 	}
1946 	return b;
1947 }
1948 EXPORT_SYMBOL(pci_scan_bus);
1949 
1950 /**
1951  * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
1952  * @bridge: PCI bridge for the bus to scan
1953  *
1954  * Scan a PCI bus and child buses for new devices, add them,
1955  * and enable them, resizing bridge mmio/io resource if necessary
1956  * and possible.  The caller must ensure the child devices are already
1957  * removed for resizing to occur.
1958  *
1959  * Returns the max number of subordinate bus discovered.
1960  */
1961 unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
1962 {
1963 	unsigned int max;
1964 	struct pci_bus *bus = bridge->subordinate;
1965 
1966 	max = pci_scan_child_bus(bus);
1967 
1968 	pci_assign_unassigned_bridge_resources(bridge);
1969 
1970 	pci_bus_add_devices(bus);
1971 
1972 	return max;
1973 }
1974 
1975 /**
1976  * pci_rescan_bus - scan a PCI bus for devices.
1977  * @bus: PCI bus to scan
1978  *
1979  * Scan a PCI bus and child buses for new devices, add them,
1980  * and enable them.
1981  *
1982  * Returns the max number of subordinate bus discovered.
1983  */
1984 unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
1985 {
1986 	unsigned int max;
1987 
1988 	max = pci_scan_child_bus(bus);
1989 	pci_assign_unassigned_bus_resources(bus);
1990 	pci_bus_add_devices(bus);
1991 
1992 	return max;
1993 }
1994 EXPORT_SYMBOL_GPL(pci_rescan_bus);
1995 
1996 EXPORT_SYMBOL(pci_add_new_bus);
1997 EXPORT_SYMBOL(pci_scan_slot);
1998 EXPORT_SYMBOL(pci_scan_bridge);
1999 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
2000 
2001 /*
2002  * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2003  * routines should always be executed under this mutex.
2004  */
2005 static DEFINE_MUTEX(pci_rescan_remove_lock);
2006 
2007 void pci_lock_rescan_remove(void)
2008 {
2009 	mutex_lock(&pci_rescan_remove_lock);
2010 }
2011 EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2012 
2013 void pci_unlock_rescan_remove(void)
2014 {
2015 	mutex_unlock(&pci_rescan_remove_lock);
2016 }
2017 EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
2018 
2019 static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
2020 {
2021 	const struct pci_dev *a = to_pci_dev(d_a);
2022 	const struct pci_dev *b = to_pci_dev(d_b);
2023 
2024 	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2025 	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
2026 
2027 	if      (a->bus->number < b->bus->number) return -1;
2028 	else if (a->bus->number > b->bus->number) return  1;
2029 
2030 	if      (a->devfn < b->devfn) return -1;
2031 	else if (a->devfn > b->devfn) return  1;
2032 
2033 	return 0;
2034 }
2035 
2036 void __init pci_sort_breadthfirst(void)
2037 {
2038 	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
2039 }
2040