xref: /openbmc/linux/drivers/pci/probe.c (revision 7b7dfdd2)
1 /*
2  * probe.c - PCI detection and setup code
3  */
4 
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/cpumask.h>
12 #include <linux/pci-aspm.h>
13 #include <asm-generic/pci-bridge.h>
14 #include "pci.h"
15 
16 #define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
17 #define CARDBUS_RESERVE_BUSNR	3
18 
19 static struct resource busn_resource = {
20 	.name	= "PCI busn",
21 	.start	= 0,
22 	.end	= 255,
23 	.flags	= IORESOURCE_BUS,
24 };
25 
26 /* Ugh.  Need to stop exporting this to modules. */
27 LIST_HEAD(pci_root_buses);
28 EXPORT_SYMBOL(pci_root_buses);
29 
30 static LIST_HEAD(pci_domain_busn_res_list);
31 
32 struct pci_domain_busn_res {
33 	struct list_head list;
34 	struct resource res;
35 	int domain_nr;
36 };
37 
38 static struct resource *get_pci_domain_busn_res(int domain_nr)
39 {
40 	struct pci_domain_busn_res *r;
41 
42 	list_for_each_entry(r, &pci_domain_busn_res_list, list)
43 		if (r->domain_nr == domain_nr)
44 			return &r->res;
45 
46 	r = kzalloc(sizeof(*r), GFP_KERNEL);
47 	if (!r)
48 		return NULL;
49 
50 	r->domain_nr = domain_nr;
51 	r->res.start = 0;
52 	r->res.end = 0xff;
53 	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
54 
55 	list_add_tail(&r->list, &pci_domain_busn_res_list);
56 
57 	return &r->res;
58 }
59 
60 static int find_anything(struct device *dev, void *data)
61 {
62 	return 1;
63 }
64 
65 /*
66  * Some device drivers need to know if PCI has been initialized.
67  * Basically, we consider PCI uninitialized when there is
68  * no device to be found on the pci_bus_type.
69  */
70 int no_pci_devices(void)
71 {
72 	struct device *dev;
73 	int no_devices;
74 
75 	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
76 	no_devices = (dev == NULL);
77 	put_device(dev);
78 	return no_devices;
79 }
80 EXPORT_SYMBOL(no_pci_devices);
81 
82 /*
83  * PCI Bus Class
84  */
85 static void release_pcibus_dev(struct device *dev)
86 {
87 	struct pci_bus *pci_bus = to_pci_bus(dev);
88 
89 	if (pci_bus->bridge)
90 		put_device(pci_bus->bridge);
91 	pci_bus_remove_resources(pci_bus);
92 	pci_release_bus_of_node(pci_bus);
93 	kfree(pci_bus);
94 }
95 
96 static struct class pcibus_class = {
97 	.name		= "pci_bus",
98 	.dev_release	= &release_pcibus_dev,
99 	.dev_groups	= pcibus_groups,
100 };
101 
102 static int __init pcibus_class_init(void)
103 {
104 	return class_register(&pcibus_class);
105 }
106 postcore_initcall(pcibus_class_init);
107 
108 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
109 {
110 	u64 size = mask & maxbase;	/* Find the significant bits */
111 	if (!size)
112 		return 0;
113 
114 	/* Get the lowest of them to find the decode size, and
115 	   from that the extent.  */
116 	size = (size & ~(size-1)) - 1;
117 
118 	/* base == maxbase can be valid only if the BAR has
119 	   already been programmed with all 1s.  */
120 	if (base == maxbase && ((base | size) & mask) != mask)
121 		return 0;
122 
123 	return size;
124 }
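/*
 * Worked example: a 1 MB 32-bit memory BAR reads back 0xfff00000 (plus
 * attribute bits) after all 1s are written.  mask & maxbase is 0xfff00000,
 * its lowest set bit is 0x00100000, so pci_size() returns 0x000fffff and
 * the caller sizes the resource to base..base + 0xfffff.
 */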
125 
126 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
127 {
128 	u32 mem_type;
129 	unsigned long flags;
130 
131 	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
132 		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
133 		flags |= IORESOURCE_IO;
134 		return flags;
135 	}
136 
137 	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
138 	flags |= IORESOURCE_MEM;
139 	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
140 		flags |= IORESOURCE_PREFETCH;
141 
142 	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
143 	switch (mem_type) {
144 	case PCI_BASE_ADDRESS_MEM_TYPE_32:
145 		break;
146 	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
147 		/* 1M mem BAR treated as 32-bit BAR */
148 		break;
149 	case PCI_BASE_ADDRESS_MEM_TYPE_64:
150 		flags |= IORESOURCE_MEM_64;
151 		break;
152 	default:
153 		/* mem unknown type treated as 32-bit BAR */
154 		break;
155 	}
156 	return flags;
157 }
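/*
 * Example: a BAR that reads 0xf000000c decodes as memory space (bit 0
 * clear), prefetchable (bit 3 set) and 64-bit (type bits 2:1 == 10b), so
 * the flags become IORESOURCE_MEM | IORESOURCE_PREFETCH |
 * IORESOURCE_MEM_64 plus the low attribute bits kept from the BAR.
 */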
158 
159 #define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
160 
161 /**
162  * __pci_read_base - read a PCI BAR
163  * @dev: the PCI device
164  * @type: type of the BAR
165  * @res: resource buffer to be filled in
166  * @pos: BAR position in the config space
167  *
168  * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
169  */
170 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
171 		    struct resource *res, unsigned int pos)
172 {
173 	u32 l, sz, mask;
174 	u64 l64, sz64, mask64;
175 	u16 orig_cmd;
176 	struct pci_bus_region region, inverted_region;
177 	bool bar_too_big = false, bar_too_high = false, bar_invalid = false;
178 
179 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
180 
181 	/* No printks while decoding is disabled! */
182 	if (!dev->mmio_always_on) {
183 		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
184 		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
185 			pci_write_config_word(dev, PCI_COMMAND,
186 				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
187 		}
188 	}
189 
190 	res->name = pci_name(dev);
191 
192 	pci_read_config_dword(dev, pos, &l);
193 	pci_write_config_dword(dev, pos, l | mask);
194 	pci_read_config_dword(dev, pos, &sz);
195 	pci_write_config_dword(dev, pos, l);
196 
197 	/*
198 	 * All bits set in sz means the device isn't working properly.
199 	 * If the BAR isn't implemented, all bits must be 0.  If it's a
200 	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
201 	 * 1 must be clear.
202 	 */
203 	if (!sz || sz == 0xffffffff)
204 		goto fail;
205 
206 	/*
207 	 * I don't know how l can have all bits set.  Copied from old code.
208 	 * Maybe it fixes a bug on some ancient platform.
209 	 */
210 	if (l == 0xffffffff)
211 		l = 0;
212 
213 	if (type == pci_bar_unknown) {
214 		res->flags = decode_bar(dev, l);
215 		res->flags |= IORESOURCE_SIZEALIGN;
216 		if (res->flags & IORESOURCE_IO) {
217 			l &= PCI_BASE_ADDRESS_IO_MASK;
218 			mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
219 		} else {
220 			l &= PCI_BASE_ADDRESS_MEM_MASK;
221 			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
222 		}
223 	} else {
224 		res->flags |= (l & IORESOURCE_ROM_ENABLE);
225 		l &= PCI_ROM_ADDRESS_MASK;
226 		mask = (u32)PCI_ROM_ADDRESS_MASK;
227 	}
228 
229 	if (res->flags & IORESOURCE_MEM_64) {
230 		l64 = l;
231 		sz64 = sz;
232 		mask64 = mask | (u64)~0 << 32;
233 
234 		pci_read_config_dword(dev, pos + 4, &l);
235 		pci_write_config_dword(dev, pos + 4, ~0);
236 		pci_read_config_dword(dev, pos + 4, &sz);
237 		pci_write_config_dword(dev, pos + 4, l);
238 
239 		l64 |= ((u64)l << 32);
240 		sz64 |= ((u64)sz << 32);
241 
242 		sz64 = pci_size(l64, sz64, mask64);
243 
244 		if (!sz64)
245 			goto fail;
246 
247 		if ((sizeof(dma_addr_t) < 8 || sizeof(resource_size_t) < 8) &&
248 		    sz64 > 0x100000000ULL) {
249 			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
250 			res->start = 0;
251 			res->end = 0;
252 			bar_too_big = true;
253 			goto out;
254 		}
255 
256 		if ((sizeof(dma_addr_t) < 8) && l) {
257 			/* Above 32-bit boundary; try to reallocate */
258 			res->flags |= IORESOURCE_UNSET;
259 			res->start = 0;
260 			res->end = sz64;
261 			bar_too_high = true;
262 			goto out;
263 		} else {
264 			region.start = l64;
265 			region.end = l64 + sz64;
266 		}
267 	} else {
268 		sz = pci_size(l, sz, mask);
269 
270 		if (!sz)
271 			goto fail;
272 
273 		region.start = l;
274 		region.end = l + sz;
275 	}
276 
277 	pcibios_bus_to_resource(dev->bus, res, &region);
278 	pcibios_resource_to_bus(dev->bus, &inverted_region, res);
279 
280 	/*
281 	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
282 	 * the corresponding resource address (the physical address used by
283  * the CPU).  Converting that resource address back to a bus address
284 	 * should yield the original BAR value:
285 	 *
286 	 *     resource_to_bus(bus_to_resource(A)) == A
287 	 *
288 	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
289 	 * be claimed by the device.
290 	 */
291 	if (inverted_region.start != region.start) {
292 		res->flags |= IORESOURCE_UNSET;
293 		res->start = 0;
294 		res->end = region.end - region.start;
295 		bar_invalid = true;
296 	}
297 
298 	goto out;
299 
300 
301 fail:
302 	res->flags = 0;
303 out:
304 	if (!dev->mmio_always_on &&
305 	    (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
306 		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
307 
308 	if (bar_too_big)
309 		dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
310 			pos, (unsigned long long) sz64);
311 	if (bar_too_high)
312 		dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4G (bus address %#010llx)\n",
313 			 pos, (unsigned long long) l64);
314 	if (bar_invalid)
315 		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
316 			 pos, (unsigned long long) region.start);
317 	if (res->flags)
318 		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);
319 
320 	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
321 }
322 
323 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
324 {
325 	unsigned int pos, reg;
326 
327 	for (pos = 0; pos < howmany; pos++) {
328 		struct resource *res = &dev->resource[pos];
329 		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
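		/*
		 * __pci_read_base() returns 1 for a 64-bit BAR, so the extra
		 * increment below skips the BAR slot holding its upper 32
		 * bits; e.g. a 64-bit BAR 0 occupies slots 0-1 and the next
		 * pass continues at BAR 2 (offset 0x18).
		 */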
330 		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
331 	}
332 
333 	if (rom) {
334 		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
335 		dev->rom_base_reg = rom;
336 		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
337 				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
338 				IORESOURCE_SIZEALIGN;
339 		__pci_read_base(dev, pci_bar_mem32, res, rom);
340 	}
341 }
342 
343 static void pci_read_bridge_io(struct pci_bus *child)
344 {
345 	struct pci_dev *dev = child->self;
346 	u8 io_base_lo, io_limit_lo;
347 	unsigned long io_mask, io_granularity, base, limit;
348 	struct pci_bus_region region;
349 	struct resource *res;
350 
351 	io_mask = PCI_IO_RANGE_MASK;
352 	io_granularity = 0x1000;
353 	if (dev->io_window_1k) {
354 		/* Support 1K I/O space granularity */
355 		io_mask = PCI_IO_1K_RANGE_MASK;
356 		io_granularity = 0x400;
357 	}
358 
359 	res = child->resource[0];
360 	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
361 	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
362 	base = (io_base_lo & io_mask) << 8;
363 	limit = (io_limit_lo & io_mask) << 8;
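	/*
	 * Example: io_base_lo = 0x20 and io_limit_lo = 0x30 (16-bit decode,
	 * 4K granularity) give base = 0x2000 and limit = 0x3000, so the
	 * window below is [io 0x2000-0x3fff].
	 */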
364 
365 	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
366 		u16 io_base_hi, io_limit_hi;
367 
368 		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
369 		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
370 		base |= ((unsigned long) io_base_hi << 16);
371 		limit |= ((unsigned long) io_limit_hi << 16);
372 	}
373 
374 	if (base <= limit) {
375 		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
376 		region.start = base;
377 		region.end = limit + io_granularity - 1;
378 		pcibios_bus_to_resource(dev->bus, res, &region);
379 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
380 	}
381 }
382 
383 static void pci_read_bridge_mmio(struct pci_bus *child)
384 {
385 	struct pci_dev *dev = child->self;
386 	u16 mem_base_lo, mem_limit_lo;
387 	unsigned long base, limit;
388 	struct pci_bus_region region;
389 	struct resource *res;
390 
391 	res = child->resource[1];
392 	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
393 	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
394 	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
395 	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
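	/*
	 * Example: mem_base_lo = 0xe000 and mem_limit_lo = 0xe1f0 decode to
	 * base = 0xe0000000 and limit = 0xe1f00000; with the 1 MB
	 * granularity below, the window is [mem 0xe0000000-0xe1ffffff].
	 */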
396 	if (base <= limit) {
397 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
398 		region.start = base;
399 		region.end = limit + 0xfffff;
400 		pcibios_bus_to_resource(dev->bus, res, &region);
401 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
402 	}
403 }
404 
405 static void pci_read_bridge_mmio_pref(struct pci_bus *child)
406 {
407 	struct pci_dev *dev = child->self;
408 	u16 mem_base_lo, mem_limit_lo;
409 	unsigned long base, limit;
410 	struct pci_bus_region region;
411 	struct resource *res;
412 
413 	res = child->resource[2];
414 	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
415 	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
416 	base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
417 	limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
418 
419 	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
420 		u32 mem_base_hi, mem_limit_hi;
421 
422 		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
423 		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
424 
425 		/*
426 		 * Some bridges set the base > limit by default, and some
427 		 * (broken) BIOSes do not initialize them.  If we find
428 		 * this, just assume they are not being used.
429 		 */
430 		if (mem_base_hi <= mem_limit_hi) {
431 #if BITS_PER_LONG == 64
432 			base |= ((unsigned long) mem_base_hi) << 32;
433 			limit |= ((unsigned long) mem_limit_hi) << 32;
434 #else
435 			if (mem_base_hi || mem_limit_hi) {
436 				dev_err(&dev->dev, "can't handle 64-bit address space for bridge\n");
437 				return;
438 			}
439 #endif
440 		}
441 	}
442 	if (base <= limit) {
443 		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
444 					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
445 		if (res->flags & PCI_PREF_RANGE_TYPE_64)
446 			res->flags |= IORESOURCE_MEM_64;
447 		region.start = base;
448 		region.end = limit + 0xfffff;
449 		pcibios_bus_to_resource(dev->bus, res, &region);
450 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
451 	}
452 }
453 
454 void pci_read_bridge_bases(struct pci_bus *child)
455 {
456 	struct pci_dev *dev = child->self;
457 	struct resource *res;
458 	int i;
459 
460 	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
461 		return;
462 
463 	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
464 		 &child->busn_res,
465 		 dev->transparent ? " (subtractive decode)" : "");
466 
467 	pci_bus_remove_resources(child);
468 	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
469 		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
470 
471 	pci_read_bridge_io(child);
472 	pci_read_bridge_mmio(child);
473 	pci_read_bridge_mmio_pref(child);
474 
475 	if (dev->transparent) {
476 		pci_bus_for_each_resource(child->parent, res, i) {
477 			if (res && res->flags) {
478 				pci_bus_add_resource(child, res,
479 						     PCI_SUBTRACTIVE_DECODE);
480 				dev_printk(KERN_DEBUG, &dev->dev,
481 					   "  bridge window %pR (subtractive decode)\n",
482 					   res);
483 			}
484 		}
485 	}
486 }
487 
488 static struct pci_bus *pci_alloc_bus(void)
489 {
490 	struct pci_bus *b;
491 
492 	b = kzalloc(sizeof(*b), GFP_KERNEL);
493 	if (!b)
494 		return NULL;
495 
496 	INIT_LIST_HEAD(&b->node);
497 	INIT_LIST_HEAD(&b->children);
498 	INIT_LIST_HEAD(&b->devices);
499 	INIT_LIST_HEAD(&b->slots);
500 	INIT_LIST_HEAD(&b->resources);
501 	b->max_bus_speed = PCI_SPEED_UNKNOWN;
502 	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
503 	return b;
504 }
505 
506 static void pci_release_host_bridge_dev(struct device *dev)
507 {
508 	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
509 
510 	if (bridge->release_fn)
511 		bridge->release_fn(bridge);
512 
513 	pci_free_resource_list(&bridge->windows);
514 
515 	kfree(bridge);
516 }
517 
518 static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
519 {
520 	struct pci_host_bridge *bridge;
521 
522 	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
523 	if (!bridge)
524 		return NULL;
525 
526 	INIT_LIST_HEAD(&bridge->windows);
527 	bridge->bus = b;
528 	return bridge;
529 }
530 
531 static const unsigned char pcix_bus_speed[] = {
532 	PCI_SPEED_UNKNOWN,		/* 0 */
533 	PCI_SPEED_66MHz_PCIX,		/* 1 */
534 	PCI_SPEED_100MHz_PCIX,		/* 2 */
535 	PCI_SPEED_133MHz_PCIX,		/* 3 */
536 	PCI_SPEED_UNKNOWN,		/* 4 */
537 	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
538 	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
539 	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
540 	PCI_SPEED_UNKNOWN,		/* 8 */
541 	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
542 	PCI_SPEED_100MHz_PCIX_266,	/* A */
543 	PCI_SPEED_133MHz_PCIX_266,	/* B */
544 	PCI_SPEED_UNKNOWN,		/* C */
545 	PCI_SPEED_66MHz_PCIX_533,	/* D */
546 	PCI_SPEED_100MHz_PCIX_533,	/* E */
547 	PCI_SPEED_133MHz_PCIX_533	/* F */
548 };
549 
550 const unsigned char pcie_link_speed[] = {
551 	PCI_SPEED_UNKNOWN,		/* 0 */
552 	PCIE_SPEED_2_5GT,		/* 1 */
553 	PCIE_SPEED_5_0GT,		/* 2 */
554 	PCIE_SPEED_8_0GT,		/* 3 */
555 	PCI_SPEED_UNKNOWN,		/* 4 */
556 	PCI_SPEED_UNKNOWN,		/* 5 */
557 	PCI_SPEED_UNKNOWN,		/* 6 */
558 	PCI_SPEED_UNKNOWN,		/* 7 */
559 	PCI_SPEED_UNKNOWN,		/* 8 */
560 	PCI_SPEED_UNKNOWN,		/* 9 */
561 	PCI_SPEED_UNKNOWN,		/* A */
562 	PCI_SPEED_UNKNOWN,		/* B */
563 	PCI_SPEED_UNKNOWN,		/* C */
564 	PCI_SPEED_UNKNOWN,		/* D */
565 	PCI_SPEED_UNKNOWN,		/* E */
566 	PCI_SPEED_UNKNOWN		/* F */
567 };
568 
569 void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
570 {
571 	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
572 }
573 EXPORT_SYMBOL_GPL(pcie_update_link_speed);
574 
575 static unsigned char agp_speeds[] = {
576 	AGP_UNKNOWN,
577 	AGP_1X,
578 	AGP_2X,
579 	AGP_4X,
580 	AGP_8X
581 };
582 
583 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
584 {
585 	int index = 0;
586 
587 	if (agpstat & 4)
588 		index = 3;
589 	else if (agpstat & 2)
590 		index = 2;
591 	else if (agpstat & 1)
592 		index = 1;
593 	else
594 		goto out;
595 
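	/*
	 * In AGP3 mode the same rate bits mean 4x/8x rather than 1x/2x/4x,
	 * hence the +2 adjustment below; e.g. bit 1 set maps to AGP_8X,
	 * while the reserved bit 2 falls off the table and reads as unknown.
	 */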
596 	if (agp3) {
597 		index += 2;
598 		if (index == 5)
599 			index = 0;
600 	}
601 
602  out:
603 	return agp_speeds[index];
604 }
605 
606 static void pci_set_bus_speed(struct pci_bus *bus)
607 {
608 	struct pci_dev *bridge = bus->self;
609 	int pos;
610 
611 	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
612 	if (!pos)
613 		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
614 	if (pos) {
615 		u32 agpstat, agpcmd;
616 
617 		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
618 		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
619 
620 		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
621 		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
622 	}
623 
624 	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
625 	if (pos) {
626 		u16 status;
627 		enum pci_bus_speed max;
628 
629 		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
630 				     &status);
631 
632 		if (status & PCI_X_SSTATUS_533MHZ) {
633 			max = PCI_SPEED_133MHz_PCIX_533;
634 		} else if (status & PCI_X_SSTATUS_266MHZ) {
635 			max = PCI_SPEED_133MHz_PCIX_266;
636 		} else if (status & PCI_X_SSTATUS_133MHZ) {
637 			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
638 				max = PCI_SPEED_133MHz_PCIX_ECC;
639 			else
640 				max = PCI_SPEED_133MHz_PCIX;
641 		} else {
642 			max = PCI_SPEED_66MHz_PCIX;
643 		}
644 
645 		bus->max_bus_speed = max;
646 		bus->cur_bus_speed = pcix_bus_speed[
647 			(status & PCI_X_SSTATUS_FREQ) >> 6];
648 
649 		return;
650 	}
651 
652 	if (pci_is_pcie(bridge)) {
653 		u32 linkcap;
654 		u16 linksta;
655 
656 		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
657 		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
658 
659 		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
660 		pcie_update_link_speed(bus, linksta);
661 	}
662 }
663 
664 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
665 					   struct pci_dev *bridge, int busnr)
666 {
667 	struct pci_bus *child;
668 	int i;
669 	int ret;
670 
671 	/*
672 	 * Allocate a new bus, and inherit stuff from the parent..
673 	 */
674 	child = pci_alloc_bus();
675 	if (!child)
676 		return NULL;
677 
678 	child->parent = parent;
679 	child->ops = parent->ops;
680 	child->msi = parent->msi;
681 	child->sysdata = parent->sysdata;
682 	child->bus_flags = parent->bus_flags;
683 
684 	/* initialize some portions of the bus device, but don't register it
685 	 * now as the parent is not properly set up yet.
686 	 */
687 	child->dev.class = &pcibus_class;
688 	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
689 
690 	/*
691 	 * Set up the primary, secondary and subordinate
692 	 * bus numbers.
693 	 */
694 	child->number = child->busn_res.start = busnr;
695 	child->primary = parent->busn_res.start;
696 	child->busn_res.end = 0xff;
697 
698 	if (!bridge) {
699 		child->dev.parent = parent->bridge;
700 		goto add_dev;
701 	}
702 
703 	child->self = bridge;
704 	child->bridge = get_device(&bridge->dev);
705 	child->dev.parent = child->bridge;
706 	pci_set_bus_of_node(child);
707 	pci_set_bus_speed(child);
708 
709 	/* Set up default resource pointers and names.. */
710 	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
711 		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
712 		child->resource[i]->name = child->name;
713 	}
714 	bridge->subordinate = child;
715 
716 add_dev:
717 	ret = device_register(&child->dev);
718 	WARN_ON(ret < 0);
719 
720 	pcibios_add_bus(child);
721 
722 	/* Create legacy_io and legacy_mem files for this bus */
723 	pci_create_legacy_files(child);
724 
725 	return child;
726 }
727 
728 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
729 				int busnr)
730 {
731 	struct pci_bus *child;
732 
733 	child = pci_alloc_child_bus(parent, dev, busnr);
734 	if (child) {
735 		down_write(&pci_bus_sem);
736 		list_add_tail(&child->node, &parent->children);
737 		up_write(&pci_bus_sem);
738 	}
739 	return child;
740 }
741 EXPORT_SYMBOL(pci_add_new_bus);
742 
743 /*
744  * If it's a bridge, configure it and scan the bus behind it.
745  * For CardBus bridges, we don't scan behind as the devices will
746  * be handled by the bridge driver itself.
747  *
748  * We need to process bridges in two passes -- first we scan those
749  * already configured by the BIOS and after we are done with all of
750  * them, we proceed to assigning numbers to the remaining buses in
751  * order to avoid overlaps between old and new bus numbers.
752  */
753 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
754 {
755 	struct pci_bus *child;
756 	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
757 	u32 buses, i, j = 0;
758 	u16 bctl;
759 	u8 primary, secondary, subordinate;
760 	int broken = 0;
761 
762 	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
763 	primary = buses & 0xFF;
764 	secondary = (buses >> 8) & 0xFF;
765 	subordinate = (buses >> 16) & 0xFF;
766 
767 	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
768 		secondary, subordinate, pass);
769 
770 	if (!primary && (primary != bus->number) && secondary && subordinate) {
771 		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
772 		primary = bus->number;
773 	}
774 
775 	/* Check if setup is sensible at all */
776 	if (!pass &&
777 	    (primary != bus->number || secondary <= bus->number ||
778 	     secondary > subordinate || subordinate > bus->busn_res.end)) {
779 		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
780 			 secondary, subordinate);
781 		broken = 1;
782 	}
783 
784 	/* Disable MasterAbortMode during probing to avoid reporting
785 	   of bus errors (on some architectures) */
786 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
787 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
788 			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
789 
790 	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
791 	    !is_cardbus && !broken) {
792 		unsigned int cmax;
793 		/*
794 		 * Bus already configured by firmware, process it in the first
795 		 * pass and just note the configuration.
796 		 */
797 		if (pass)
798 			goto out;
799 
800 		/*
801 		 * The bus might already exist for two reasons: Either we are
802 		 * rescanning the bus or the bus is reachable through more than
803 		 * one bridge. The second case can happen with the i450NX
804 		 * chipset.
805 		 */
806 		child = pci_find_bus(pci_domain_nr(bus), secondary);
807 		if (!child) {
808 			child = pci_add_new_bus(bus, dev, secondary);
809 			if (!child)
810 				goto out;
811 			child->primary = primary;
812 			pci_bus_insert_busn_res(child, secondary, subordinate);
813 			child->bridge_ctl = bctl;
814 		}
815 
816 		cmax = pci_scan_child_bus(child);
817 		if (cmax > subordinate)
818 			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
819 				 subordinate, cmax);
820 		/* subordinate should equal child->busn_res.end */
821 		if (subordinate > max)
822 			max = subordinate;
823 	} else {
824 		/*
825 		 * We need to assign a number to this bus which we always
826 		 * do in the second pass.
827 		 */
828 		if (!pass) {
829 			if (pcibios_assign_all_busses() || broken || is_cardbus)
830 				/* Temporarily disable forwarding of the
831 				   configuration cycles on all bridges in
832 				   this bus segment to avoid possible
833 				   conflicts in the second pass between two
834 				   bridges programmed with overlapping
835 				   bus ranges. */
836 				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
837 						       buses & ~0xffffff);
838 			goto out;
839 		}
840 
841 		if (max >= bus->busn_res.end) {
842 			dev_warn(&dev->dev, "can't allocate child bus %02x from %pR\n",
843 				 max, &bus->busn_res);
844 			goto out;
845 		}
846 
847 		/* Clear errors */
848 		pci_write_config_word(dev, PCI_STATUS, 0xffff);
849 
850 		/* The bus will already exist if we are rescanning */
851 		child = pci_find_bus(pci_domain_nr(bus), max+1);
852 		if (!child) {
853 			child = pci_add_new_bus(bus, dev, max+1);
854 			if (!child)
855 				goto out;
856 			pci_bus_insert_busn_res(child, max+1,
857 						bus->busn_res.end);
858 		}
859 		max++;
860 		buses = (buses & 0xff000000)
861 		      | ((unsigned int)(child->primary)     <<  0)
862 		      | ((unsigned int)(child->busn_res.start)   <<  8)
863 		      | ((unsigned int)(child->busn_res.end) << 16);
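		/*
		 * PCI_PRIMARY_BUS dword layout: primary bus in bits 7:0,
		 * secondary in 15:8, subordinate in 23:16, secondary latency
		 * timer in 31:24.  E.g. primary 0x00, secondary 0x01 and
		 * subordinate 0x05 pack to 0x00050100 below the preserved
		 * latency timer byte.
		 */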
864 
865 		/*
866 		 * yenta.c forces a secondary latency timer of 176.
867 		 * Copy that behaviour here.
868 		 */
869 		if (is_cardbus) {
870 			buses &= ~0xff000000;
871 			buses |= CARDBUS_LATENCY_TIMER << 24;
872 		}
873 
874 		/*
875 		 * We need to blast all three values with a single write.
876 		 */
877 		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
878 
879 		if (!is_cardbus) {
880 			child->bridge_ctl = bctl;
881 			max = pci_scan_child_bus(child);
882 		} else {
883 			/*
884 			 * For CardBus bridges, we leave 4 bus numbers
885 			 * as cards with a PCI-to-PCI bridge can be
886 			 * inserted later.
887 			 */
888 			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
889 				struct pci_bus *parent = bus;
890 				if (pci_find_bus(pci_domain_nr(bus),
891 							max+i+1))
892 					break;
893 				while (parent->parent) {
894 					if ((!pcibios_assign_all_busses()) &&
895 					    (parent->busn_res.end > max) &&
896 					    (parent->busn_res.end <= max+i)) {
897 						j = 1;
898 					}
899 					parent = parent->parent;
900 				}
901 				if (j) {
902 					/*
903 					 * Often, there are two cardbus bridges
904 					 * -- try to leave one valid bus number
905 					 * for each one.
906 					 */
907 					i /= 2;
908 					break;
909 				}
910 			}
911 			max += i;
912 		}
913 		/*
914 		 * Set the subordinate bus number to its real value.
915 		 */
916 		if (max > bus->busn_res.end) {
917 			dev_warn(&dev->dev, "max busn %02x is outside %pR\n",
918 				 max, &bus->busn_res);
919 			max = bus->busn_res.end;
920 		}
921 		pci_bus_update_busn_res_end(child, max);
922 		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
923 	}
924 
925 	sprintf(child->name,
926 		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
927 		pci_domain_nr(bus), child->number);
928 
929 	/* Has only triggered on CardBus, fixup is in yenta_socket */
930 	while (bus->parent) {
931 		if ((child->busn_res.end > bus->busn_res.end) ||
932 		    (child->number > bus->busn_res.end) ||
933 		    (child->number < bus->number) ||
934 		    (child->busn_res.end < bus->number)) {
935 			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
936 				&child->busn_res,
937 				(bus->number > child->busn_res.end &&
938 				 bus->busn_res.end < child->number) ?
939 					"wholly" : "partially",
940 				bus->self->transparent ? " transparent" : "",
941 				dev_name(&bus->dev),
942 				&bus->busn_res);
943 		}
944 		bus = bus->parent;
945 	}
946 
947 out:
948 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
949 
950 	return max;
951 }
952 EXPORT_SYMBOL(pci_scan_bridge);
953 
954 /*
955  * Read interrupt line and base address registers.
956  * The architecture-dependent code can tweak these, of course.
957  */
958 static void pci_read_irq(struct pci_dev *dev)
959 {
960 	unsigned char irq;
961 
962 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
963 	dev->pin = irq;
964 	if (irq)
965 		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
966 	dev->irq = irq;
967 }
968 
969 void set_pcie_port_type(struct pci_dev *pdev)
970 {
971 	int pos;
972 	u16 reg16;
973 
974 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
975 	if (!pos)
976 		return;
977 	pdev->pcie_cap = pos;
978 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
979 	pdev->pcie_flags_reg = reg16;
980 	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
981 	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
982 }
983 
984 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
985 {
986 	u32 reg32;
987 
988 	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
989 	if (reg32 & PCI_EXP_SLTCAP_HPC)
990 		pdev->is_hotplug_bridge = 1;
991 }
992 
993 /**
994  * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
995  * @dev: PCI device
996  *
997  * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
998  * when forwarding a Type 1 configuration request, the bridge must check that
999  * the extended register address field is zero.  If it is not, the bridge must
1000  * not forward the transaction and must handle it as an Unsupported Request.
1001  * Some bridges do not follow this rule and simply drop the extended register
1002  * bits, resulting in the standard config space being aliased every 256
1003  * bytes across the entire configuration space.  Test for this condition by
1004  * comparing the first dword of each potential alias to the vendor/device ID.
1005  * Known offenders:
1006  *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1007  *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
1008  */
1009 static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
1010 {
1011 #ifdef CONFIG_PCI_QUIRKS
1012 	int pos;
1013 	u32 header, tmp;
1014 
1015 	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
1016 
1017 	for (pos = PCI_CFG_SPACE_SIZE;
1018 	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
1019 		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
1020 		    || header != tmp)
1021 			return false;
1022 	}
1023 
1024 	return true;
1025 #else
1026 	return false;
1027 #endif
1028 }
1029 
1030 /**
1031  * pci_cfg_space_size_ext - get the configuration space size of the PCI device.
1032  * @dev: PCI device
1033  *
1034  * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1035  * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
1036  * access it.  Maybe we don't have a way to generate extended config space
1037  * accesses, or the device is behind a reverse Express bridge.  So we try
1038  * reading the dword at 0x100 which must either be 0 or a valid extended
1039  * capability header.
1040  */
1041 static int pci_cfg_space_size_ext(struct pci_dev *dev)
1042 {
1043 	u32 status;
1044 	int pos = PCI_CFG_SPACE_SIZE;
1045 
1046 	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1047 		goto fail;
1048 	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
1049 		goto fail;
1050 
1051 	return PCI_CFG_SPACE_EXP_SIZE;
1052 
1053  fail:
1054 	return PCI_CFG_SPACE_SIZE;
1055 }
1056 
1057 int pci_cfg_space_size(struct pci_dev *dev)
1058 {
1059 	int pos;
1060 	u32 status;
1061 	u16 class;
1062 
1063 	class = dev->class >> 8;
1064 	if (class == PCI_CLASS_BRIDGE_HOST)
1065 		return pci_cfg_space_size_ext(dev);
1066 
1067 	if (!pci_is_pcie(dev)) {
1068 		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1069 		if (!pos)
1070 			goto fail;
1071 
1072 		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1073 		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
1074 			goto fail;
1075 	}
1076 
1077 	return pci_cfg_space_size_ext(dev);
1078 
1079  fail:
1080 	return PCI_CFG_SPACE_SIZE;
1081 }
1082 
1083 #define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1084 
1085 /**
1086  * pci_setup_device - fill in class and map information of a device
1087  * @dev: the device structure to fill
1088  *
1089  * Initialize the device structure with information about the device's
1090  * vendor, class, memory and I/O-space addresses, IRQ lines, etc.
1091  * Called at initialisation of the PCI subsystem and by CardBus services.
1092  * Returns 0 on success and a negative value for an unknown device type (not normal,
1093  * bridge or CardBus).
1094  */
1095 int pci_setup_device(struct pci_dev *dev)
1096 {
1097 	u32 class;
1098 	u8 hdr_type;
1099 	struct pci_slot *slot;
1100 	int pos = 0;
1101 	struct pci_bus_region region;
1102 	struct resource *res;
1103 
1104 	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
1105 		return -EIO;
1106 
1107 	dev->sysdata = dev->bus->sysdata;
1108 	dev->dev.parent = dev->bus->bridge;
1109 	dev->dev.bus = &pci_bus_type;
1110 	dev->hdr_type = hdr_type & 0x7f;
1111 	dev->multifunction = !!(hdr_type & 0x80);
1112 	dev->error_state = pci_channel_io_normal;
1113 	set_pcie_port_type(dev);
1114 
1115 	list_for_each_entry(slot, &dev->bus->slots, list)
1116 		if (PCI_SLOT(dev->devfn) == slot->number)
1117 			dev->slot = slot;
1118 
1119 	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1120 	   set this higher, assuming the system even supports it.  */
1121 	dev->dma_mask = 0xffffffff;
1122 
1123 	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
1124 		     dev->bus->number, PCI_SLOT(dev->devfn),
1125 		     PCI_FUNC(dev->devfn));
1126 
1127 	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
1128 	dev->revision = class & 0xff;
1129 	dev->class = class >> 8;		    /* upper 3 bytes */
1130 
1131 	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1132 		   dev->vendor, dev->device, dev->hdr_type, dev->class);
1133 
1134 	/* need to have dev->class ready */
1135 	dev->cfg_size = pci_cfg_space_size(dev);
1136 
1137 	/* "Unknown power state" */
1138 	dev->current_state = PCI_UNKNOWN;
1139 
1140 	/* Early fixups, before probing the BARs */
1141 	pci_fixup_device(pci_fixup_early, dev);
1142 	/* device class may be changed after fixup */
1143 	class = dev->class >> 8;
1144 
1145 	switch (dev->hdr_type) {		    /* header type */
1146 	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
1147 		if (class == PCI_CLASS_BRIDGE_PCI)
1148 			goto bad;
1149 		pci_read_irq(dev);
1150 		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1151 		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1152 		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
1153 
1154 		/*
1155 		 * Do the ugly legacy mode stuff here rather than in broken chip
1156 		 * quirk code. Legacy mode ATA controllers have fixed
1157 		 * addresses. These are not always echoed in BAR0-3, and
1158 		 * BAR0-3 in a few cases contain junk!
1159 		 */
1160 		if (class == PCI_CLASS_STORAGE_IDE) {
1161 			u8 progif;
1162 			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
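			/*
			 * IDE programming interface bits: bit 0 set = primary
			 * channel in native PCI mode, bit 2 set = secondary
			 * channel in native mode.  A clear bit means that
			 * channel uses the fixed legacy ports claimed below.
			 */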
1163 			if ((progif & 1) == 0) {
1164 				region.start = 0x1F0;
1165 				region.end = 0x1F7;
1166 				res = &dev->resource[0];
1167 				res->flags = LEGACY_IO_RESOURCE;
1168 				pcibios_bus_to_resource(dev->bus, res, &region);
1169 				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
1170 					 res);
1171 				region.start = 0x3F6;
1172 				region.end = 0x3F6;
1173 				res = &dev->resource[1];
1174 				res->flags = LEGACY_IO_RESOURCE;
1175 				pcibios_bus_to_resource(dev->bus, res, &region);
1176 				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
1177 					 res);
1178 			}
1179 			if ((progif & 4) == 0) {
1180 				region.start = 0x170;
1181 				region.end = 0x177;
1182 				res = &dev->resource[2];
1183 				res->flags = LEGACY_IO_RESOURCE;
1184 				pcibios_bus_to_resource(dev->bus, res, &region);
1185 				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
1186 					 res);
1187 				region.start = 0x376;
1188 				region.end = 0x376;
1189 				res = &dev->resource[3];
1190 				res->flags = LEGACY_IO_RESOURCE;
1191 				pcibios_bus_to_resource(dev->bus, res, &region);
1192 				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
1193 					 res);
1194 			}
1195 		}
1196 		break;
1197 
1198 	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
1199 		if (class != PCI_CLASS_BRIDGE_PCI)
1200 			goto bad;
1201 		/* The PCI-to-PCI bridge spec requires that a subtractive
1202 		   decode (i.e. transparent) bridge have a programming
1203 		   interface code of 0x01. */
1204 		pci_read_irq(dev);
1205 		dev->transparent = ((dev->class & 0xff) == 1);
1206 		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1207 		set_pcie_hotplug_bridge(dev);
1208 		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1209 		if (pos) {
1210 			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1211 			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1212 		}
1213 		break;
1214 
1215 	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
1216 		if (class != PCI_CLASS_BRIDGE_CARDBUS)
1217 			goto bad;
1218 		pci_read_irq(dev);
1219 		pci_read_bases(dev, 1, 0);
1220 		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1221 		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1222 		break;
1223 
1224 	default:				    /* unknown header */
1225 		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
1226 			dev->hdr_type);
1227 		return -EIO;
1228 
1229 	bad:
1230 		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
1231 			dev->class, dev->hdr_type);
1232 		dev->class = PCI_CLASS_NOT_DEFINED;
1233 	}
1234 
1235 	/* We found a fine healthy device, go go go... */
1236 	return 0;
1237 }
1238 
1239 static void pci_release_capabilities(struct pci_dev *dev)
1240 {
1241 	pci_vpd_release(dev);
1242 	pci_iov_release(dev);
1243 	pci_free_cap_save_buffers(dev);
1244 }
1245 
1246 /**
1247  * pci_release_dev - free a pci device structure when all users of it are finished.
1248  * @dev: device that's been disconnected
1249  *
1250  * Will be called only by the device core when all users of this pci device are
1251  * done.
1252  */
1253 static void pci_release_dev(struct device *dev)
1254 {
1255 	struct pci_dev *pci_dev;
1256 
1257 	pci_dev = to_pci_dev(dev);
1258 	pci_release_capabilities(pci_dev);
1259 	pci_release_of_node(pci_dev);
1260 	pcibios_release_device(pci_dev);
1261 	pci_bus_put(pci_dev->bus);
1262 	kfree(pci_dev->driver_override);
1263 	kfree(pci_dev);
1264 }
1265 
1266 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1267 {
1268 	struct pci_dev *dev;
1269 
1270 	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1271 	if (!dev)
1272 		return NULL;
1273 
1274 	INIT_LIST_HEAD(&dev->bus_list);
1275 	dev->dev.type = &pci_dev_type;
1276 	dev->bus = pci_bus_get(bus);
1277 
1278 	return dev;
1279 }
1280 EXPORT_SYMBOL(pci_alloc_dev);
1281 
1282 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1283 				int crs_timeout)
1284 {
1285 	int delay = 1;
1286 
1287 	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1288 		return false;
1289 
1290 	/* some broken boards return 0 or ~0 if a slot is empty: */
1291 	if (*l == 0xffffffff || *l == 0x00000000 ||
1292 	    *l == 0x0000ffff || *l == 0xffff0000)
1293 		return false;
1294 
1295 	/* Configuration Request Retry Status (CRS) */
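	/*
	 * With CRS Software Visibility enabled, the Root Complex completes
	 * the read with the reserved Vendor ID 0x0001 and all 1s in the
	 * upper bytes, hence the 0xffff0001 value tested below.
	 */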
1296 	while (*l == 0xffff0001) {
1297 		if (!crs_timeout)
1298 			return false;
1299 
1300 		msleep(delay);
1301 		delay *= 2;
1302 		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1303 			return false;
1304 		/* Card hasn't responded in 60 seconds?  Must be stuck. */
1305 		if (delay > crs_timeout) {
1306 			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
1307 			       pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
1308 			       PCI_FUNC(devfn));
1309 			return false;
1310 		}
1311 	}
1312 
1313 	return true;
1314 }
1315 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1316 
1317 /*
1318  * Read the config data for a PCI device, sanity-check it
1319  * and fill in the dev structure...
1320  */
1321 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1322 {
1323 	struct pci_dev *dev;
1324 	u32 l;
1325 
1326 	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1327 		return NULL;
1328 
1329 	dev = pci_alloc_dev(bus);
1330 	if (!dev)
1331 		return NULL;
1332 
1333 	dev->devfn = devfn;
1334 	dev->vendor = l & 0xffff;
1335 	dev->device = (l >> 16) & 0xffff;
1336 
1337 	pci_set_of_node(dev);
1338 
1339 	if (pci_setup_device(dev)) {
1340 		pci_bus_put(dev->bus);
1341 		kfree(dev);
1342 		return NULL;
1343 	}
1344 
1345 	return dev;
1346 }
1347 
1348 static void pci_init_capabilities(struct pci_dev *dev)
1349 {
1350 	/* MSI/MSI-X list */
1351 	pci_msi_init_pci_dev(dev);
1352 
1353 	/* Buffers for saving PCIe and PCI-X capabilities */
1354 	pci_allocate_cap_save_buffers(dev);
1355 
1356 	/* Power Management */
1357 	pci_pm_init(dev);
1358 
1359 	/* Vital Product Data */
1360 	pci_vpd_pci22_init(dev);
1361 
1362 	/* Alternative Routing-ID Forwarding */
1363 	pci_configure_ari(dev);
1364 
1365 	/* Single Root I/O Virtualization */
1366 	pci_iov_init(dev);
1367 
1368 	/* Enable ACS P2P upstream forwarding */
1369 	pci_enable_acs(dev);
1370 }
1371 
1372 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1373 {
1374 	int ret;
1375 
1376 	device_initialize(&dev->dev);
1377 	dev->dev.release = pci_release_dev;
1378 
1379 	set_dev_node(&dev->dev, pcibus_to_node(bus));
1380 	dev->dev.dma_mask = &dev->dma_mask;
1381 	dev->dev.dma_parms = &dev->dma_parms;
1382 	dev->dev.coherent_dma_mask = 0xffffffffull;
1383 
1384 	pci_set_dma_max_seg_size(dev, 65536);
1385 	pci_set_dma_seg_boundary(dev, 0xffffffff);
1386 
1387 	/* Fix up broken headers */
1388 	pci_fixup_device(pci_fixup_header, dev);
1389 
1390 	/* moved out from quirk header fixup code */
1391 	pci_reassigndev_resource_alignment(dev);
1392 
1393 	/* Clear the state_saved flag. */
1394 	dev->state_saved = false;
1395 
1396 	/* Initialize various capabilities */
1397 	pci_init_capabilities(dev);
1398 
1399 	/*
1400 	 * Add the device to our list of discovered devices
1401 	 * and the bus list for fixup functions, etc.
1402 	 */
1403 	down_write(&pci_bus_sem);
1404 	list_add_tail(&dev->bus_list, &bus->devices);
1405 	up_write(&pci_bus_sem);
1406 
1407 	ret = pcibios_add_device(dev);
1408 	WARN_ON(ret < 0);
1409 
1410 	/* Notifier could use PCI capabilities */
1411 	dev->match_driver = false;
1412 	ret = device_add(&dev->dev);
1413 	WARN_ON(ret < 0);
1414 }
1415 
1416 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
1417 {
1418 	struct pci_dev *dev;
1419 
1420 	dev = pci_get_slot(bus, devfn);
1421 	if (dev) {
1422 		pci_dev_put(dev);
1423 		return dev;
1424 	}
1425 
1426 	dev = pci_scan_device(bus, devfn);
1427 	if (!dev)
1428 		return NULL;
1429 
1430 	pci_device_add(dev, bus);
1431 
1432 	return dev;
1433 }
1434 EXPORT_SYMBOL(pci_scan_single_device);
1435 
1436 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
1437 {
1438 	int pos;
1439 	u16 cap = 0;
1440 	unsigned next_fn;
1441 
1442 	if (pci_ari_enabled(bus)) {
1443 		if (!dev)
1444 			return 0;
1445 		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1446 		if (!pos)
1447 			return 0;
1448 
1449 		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
1450 		next_fn = PCI_ARI_CAP_NFN(cap);
1451 		if (next_fn <= fn)
1452 			return 0;	/* protect against malformed list */
1453 
1454 		return next_fn;
1455 	}
1456 
1457 	/* dev may be NULL for non-contiguous multifunction devices */
1458 	if (!dev || dev->multifunction)
1459 		return (fn + 1) % 8;
1460 
1461 	return 0;
1462 }
1463 
1464 static int only_one_child(struct pci_bus *bus)
1465 {
1466 	struct pci_dev *parent = bus->self;
1467 
1468 	if (!parent || !pci_is_pcie(parent))
1469 		return 0;
1470 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1471 		return 1;
1472 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
1473 	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1474 		return 1;
1475 	return 0;
1476 }
1477 
1478 /**
1479  * pci_scan_slot - scan a PCI slot on a bus for devices.
1480  * @bus: PCI bus to scan
1481  * @devfn: slot number to scan (must have zero function.)
1482  *
1483  * Scan a PCI slot on the specified PCI bus for devices, adding
1484  * discovered devices to the @bus->devices list.  New devices
1485  * will not have is_added set.
1486  *
1487  * Returns the number of new devices found.
1488  */
1489 int pci_scan_slot(struct pci_bus *bus, int devfn)
1490 {
1491 	unsigned fn, nr = 0;
1492 	struct pci_dev *dev;
1493 
1494 	if (only_one_child(bus) && (devfn > 0))
1495 		return 0; /* Already scanned the entire slot */
1496 
1497 	dev = pci_scan_single_device(bus, devfn);
1498 	if (!dev)
1499 		return 0;
1500 	if (!dev->is_added)
1501 		nr++;
1502 
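	/*
	 * Walk the remaining functions: 1..7 for a classic multi-function
	 * device, or the ARI Next Function Number chain when ARI is enabled
	 * (which may be sparse, e.g. function 0 chaining to function 4).
	 */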
1503 	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
1504 		dev = pci_scan_single_device(bus, devfn + fn);
1505 		if (dev) {
1506 			if (!dev->is_added)
1507 				nr++;
1508 			dev->multifunction = 1;
1509 		}
1510 	}
1511 
1512 	/* A PCIe link leads to a single slot; initialize its ASPM link state now */
1513 	if (bus->self && nr)
1514 		pcie_aspm_init_link_state(bus->self);
1515 
1516 	return nr;
1517 }
1518 EXPORT_SYMBOL(pci_scan_slot);
1519 
1520 static int pcie_find_smpss(struct pci_dev *dev, void *data)
1521 {
1522 	u8 *smpss = data;
1523 
1524 	if (!pci_is_pcie(dev))
1525 		return 0;
1526 
1527 	/*
1528 	 * We don't have a way to change MPS settings on devices that have
1529 	 * drivers attached.  A hot-added device might support only the minimum
1530 	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
1531 	 * where devices may be hot-added, we limit the fabric MPS to 128 so
1532 	 * hot-added devices will work correctly.
1533 	 *
1534 	 * However, if we hot-add a device to a slot directly below a Root
1535 	 * Port, it's impossible for there to be other existing devices below
1536 	 * the port.  We don't limit the MPS in this case because we can
1537 	 * reconfigure MPS on both the Root Port and the hot-added device,
1538 	 * and there are no other devices involved.
1539 	 *
1540 	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
1541 	 */
1542 	if (dev->is_hotplug_bridge &&
1543 	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
1544 		*smpss = 0;
1545 
1546 	if (*smpss > dev->pcie_mpss)
1547 		*smpss = dev->pcie_mpss;
1548 
1549 	return 0;
1550 }
1551 
1552 static void pcie_write_mps(struct pci_dev *dev, int mps)
1553 {
1554 	int rc;
1555 
1556 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
1557 		mps = 128 << dev->pcie_mpss;
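		/*
		 * pcie_mpss holds the Device Capabilities "Max Payload Size
		 * Supported" encoding (0 = 128 bytes, 1 = 256, ... 5 = 4096),
		 * so 128 << pcie_mpss converts it to bytes.
		 */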
1558 
1559 		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
1560 		    dev->bus->self)
1561 			/* For "Performance", the assumption is made that
1562 			 * downstream communication will never be larger than
1563 			 * the MRRS.  So, the MPS only needs to be configured
1564 			 * for the upstream communication.  This being the case,
1565 			 * walk from the top down and set the MPS of the child
1566 			 * to that of the parent bus.
1567 			 *
1568 			 * Configure the device MPS with the smaller of the
1569 			 * device MPSS or the bridge MPS (which is assumed to be
1570 			 * properly configured at this point to the largest
1571 			 * allowable MPS based on its parent bus).
1572 			 */
1573 			mps = min(mps, pcie_get_mps(dev->bus->self));
1574 	}
1575 
1576 	rc = pcie_set_mps(dev, mps);
1577 	if (rc)
1578 		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
1579 }
1580 
1581 static void pcie_write_mrrs(struct pci_dev *dev)
1582 {
1583 	int rc, mrrs;
1584 
1585 	/* In the "safe" case, do not configure the MRRS.  There appear to be
1586 	 * issues with setting MRRS to 0 on a number of devices.
1587 	 */
1588 	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1589 		return;
1590 
1591 	/* For Max performance, the MRRS must be set to the largest supported
1592 	 * value.  However, it cannot be configured larger than the MPS the
1593 	 * device or the bus can support.  This should already be properly
1594 	 * configured by a prior call to pcie_write_mps.
1595 	 */
1596 	mrrs = pcie_get_mps(dev);
1597 
1598 	/* MRRS is an R/W register.  Invalid values can be written, but a
1599 	 * subsequent read will verify if the value is acceptable or not.
1600 	 * If the MRRS value provided is not acceptable (e.g., too large),
1601 	 * shrink the value until it is acceptable to the HW.
1602 	 */
1603 	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1604 		rc = pcie_set_readrq(dev, mrrs);
1605 		if (!rc)
1606 			break;
1607 
1608 		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
1609 		mrrs /= 2;
1610 	}
1611 
1612 	if (mrrs < 128)
1613 		dev_err(&dev->dev, "MRRS was unable to be configured with a safe value.  If problems are experienced, try running with pci=pcie_bus_safe\n");
1614 }
1615 
1616 static void pcie_bus_detect_mps(struct pci_dev *dev)
1617 {
1618 	struct pci_dev *bridge = dev->bus->self;
1619 	int mps, p_mps;
1620 
1621 	if (!bridge)
1622 		return;
1623 
1624 	mps = pcie_get_mps(dev);
1625 	p_mps = pcie_get_mps(bridge);
1626 
1627 	if (mps != p_mps)
1628 		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1629 			 mps, pci_name(bridge), p_mps);
1630 }
1631 
1632 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1633 {
1634 	int mps, orig_mps;
1635 
1636 	if (!pci_is_pcie(dev))
1637 		return 0;
1638 
1639 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
1640 		pcie_bus_detect_mps(dev);
1641 		return 0;
1642 	}
1643 
1644 	mps = 128 << *(u8 *)data;
1645 	orig_mps = pcie_get_mps(dev);
1646 
1647 	pcie_write_mps(dev, mps);
1648 	pcie_write_mrrs(dev);
1649 
1650 	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
1651 		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
1652 		 orig_mps, pcie_get_readrq(dev));
1653 
1654 	return 0;
1655 }
1656 
1657 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
1658  * parents then children fashion.  If this changes, then this code will not
1659  * work as designed.
1660  */
1661 void pcie_bus_configure_settings(struct pci_bus *bus)
1662 {
1663 	u8 smpss = 0;
1664 
1665 	if (!bus->self)
1666 		return;
1667 
1668 	if (!pci_is_pcie(bus->self))
1669 		return;
1670 
1671 	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
1672 	 * to be aware of the MPS of the destination.  To work around this,
1673 	 * simply force the MPS of the entire system to the smallest possible.
1674 	 */
1675 	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
1676 		smpss = 0;
1677 
1678 	if (pcie_bus_config == PCIE_BUS_SAFE) {
1679 		smpss = bus->self->pcie_mpss;
1680 
1681 		pcie_find_smpss(bus->self, &smpss);
1682 		pci_walk_bus(bus, pcie_find_smpss, &smpss);
1683 	}
1684 
1685 	pcie_bus_configure_set(bus->self, &smpss);
1686 	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1687 }
1688 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
1689 
1690 unsigned int pci_scan_child_bus(struct pci_bus *bus)
1691 {
1692 	unsigned int devfn, pass, max = bus->busn_res.start;
1693 	struct pci_dev *dev;
1694 
1695 	dev_dbg(&bus->dev, "scanning bus\n");
1696 
1697 	/* Go find them, Rover! */
1698 	for (devfn = 0; devfn < 0x100; devfn += 8)
1699 		pci_scan_slot(bus, devfn);
1700 
1701 	/* Reserve buses for SR-IOV capability. */
1702 	max += pci_iov_bus_range(bus);
1703 
1704 	/*
1705 	 * After performing arch-dependent fixup of the bus, look behind
1706 	 * all PCI-to-PCI bridges on this bus.
1707 	 */
1708 	if (!bus->is_added) {
1709 		dev_dbg(&bus->dev, "fixups for bus\n");
1710 		pcibios_fixup_bus(bus);
1711 		bus->is_added = 1;
1712 	}
1713 
1714 	for (pass = 0; pass < 2; pass++)
1715 		list_for_each_entry(dev, &bus->devices, bus_list) {
1716 			if (pci_is_bridge(dev))
1717 				max = pci_scan_bridge(bus, dev, max, pass);
1718 		}
1719 
1720 	/*
1721 	 * We've scanned the bus and so we know all about what's on
1722 	 * the other side of any bridges that may be on this bus plus
1723 	 * any devices.
1724 	 *
1725 	 * Return how far we've got finding sub-buses.
1726 	 */
1727 	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
1728 	return max;
1729 }
1730 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1731 
1732 /**
1733  * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
1734  * @bridge: Host bridge to set up.
1735  *
1736  * Default empty implementation.  Replace with an architecture-specific setup
1737  * routine, if necessary.
1738  */
1739 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
1740 {
1741 	return 0;
1742 }
1743 
1744 void __weak pcibios_add_bus(struct pci_bus *bus)
1745 {
1746 }
1747 
1748 void __weak pcibios_remove_bus(struct pci_bus *bus)
1749 {
1750 }
1751 
1752 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1753 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
1754 {
1755 	int error;
1756 	struct pci_host_bridge *bridge;
1757 	struct pci_bus *b, *b2;
1758 	struct pci_host_bridge_window *window, *n;
1759 	struct resource *res;
1760 	resource_size_t offset;
1761 	char bus_addr[64];
1762 	char *fmt;
1763 
1764 	b = pci_alloc_bus();
1765 	if (!b)
1766 		return NULL;
1767 
1768 	b->sysdata = sysdata;
1769 	b->ops = ops;
1770 	b->number = b->busn_res.start = bus;
1771 	b2 = pci_find_bus(pci_domain_nr(b), bus);
1772 	if (b2) {
1773 		/* If we already got to this bus through a different bridge, ignore it */
1774 		dev_dbg(&b2->dev, "bus already known\n");
1775 		goto err_out;
1776 	}
1777 
1778 	bridge = pci_alloc_host_bridge(b);
1779 	if (!bridge)
1780 		goto err_out;
1781 
1782 	bridge->dev.parent = parent;
1783 	bridge->dev.release = pci_release_host_bridge_dev;
1784 	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
1785 	error = pcibios_root_bridge_prepare(bridge);
1786 	if (error) {
1787 		kfree(bridge);
1788 		goto err_out;
1789 	}
1790 
1791 	error = device_register(&bridge->dev);
1792 	if (error) {
1793 		put_device(&bridge->dev);
1794 		goto err_out;
1795 	}
1796 	b->bridge = get_device(&bridge->dev);
1797 	device_enable_async_suspend(b->bridge);
1798 	pci_set_bus_of_node(b);
1799 
1800 	if (!parent)
1801 		set_dev_node(b->bridge, pcibus_to_node(b));
1802 
1803 	b->dev.class = &pcibus_class;
1804 	b->dev.parent = b->bridge;
1805 	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
1806 	error = device_register(&b->dev);
1807 	if (error)
1808 		goto class_dev_reg_err;
1809 
1810 	pcibios_add_bus(b);
1811 
1812 	/* Create legacy_io and legacy_mem files for this bus */
1813 	pci_create_legacy_files(b);
1814 
1815 	if (parent)
1816 		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
1817 	else
1818 		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
1819 
1820 	/* Add initial resources to the bus */
1821 	list_for_each_entry_safe(window, n, resources, list) {
1822 		list_move_tail(&window->list, &bridge->windows);
1823 		res = window->res;
1824 		offset = window->offset;
1825 		if (res->flags & IORESOURCE_BUS)
1826 			pci_bus_insert_busn_res(b, bus, res->end);
1827 		else
1828 			pci_bus_add_resource(b, res, 0);
1829 		if (offset) {
1830 			if (resource_type(res) == IORESOURCE_IO)
1831 				fmt = " (bus address [%#06llx-%#06llx])";
1832 			else
1833 				fmt = " (bus address [%#010llx-%#010llx])";
1834 			snprintf(bus_addr, sizeof(bus_addr), fmt,
1835 				 (unsigned long long) (res->start - offset),
1836 				 (unsigned long long) (res->end - offset));
1837 		} else
1838 			bus_addr[0] = '\0';
1839 		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
1840 	}
1841 
1842 	down_write(&pci_bus_sem);
1843 	list_add_tail(&b->node, &pci_root_buses);
1844 	up_write(&pci_bus_sem);
1845 
1846 	return b;
1847 
1848 class_dev_reg_err:
1849 	put_device(&bridge->dev);
1850 	device_unregister(&bridge->dev);
1851 err_out:
1852 	kfree(b);
1853 	return NULL;
1854 }
1855 
1856 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
1857 {
1858 	struct resource *res = &b->busn_res;
1859 	struct resource *parent_res, *conflict;
1860 
1861 	res->start = bus;
1862 	res->end = bus_max;
1863 	res->flags = IORESOURCE_BUS;
1864 
1865 	if (!pci_is_root_bus(b))
1866 		parent_res = &b->parent->busn_res;
1867 	else {
1868 		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
1869 		res->flags |= IORESOURCE_PCI_FIXED;
1870 	}
1871 
1872 	conflict = request_resource_conflict(parent_res, res);
1873 
1874 	if (conflict)
1875 		dev_printk(KERN_DEBUG, &b->dev,
1876 			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
1877 			    res, pci_is_root_bus(b) ? "domain " : "",
1878 			    parent_res, conflict->name, conflict);
1879 
1880 	return conflict == NULL;
1881 }
1882 
1883 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
1884 {
1885 	struct resource *res = &b->busn_res;
1886 	struct resource old_res = *res;
1887 	resource_size_t size;
1888 	int ret;
1889 
1890 	if (res->start > bus_max)
1891 		return -EINVAL;
1892 
1893 	size = bus_max - res->start + 1;
1894 	ret = adjust_resource(res, res->start, size);
1895 	dev_printk(KERN_DEBUG, &b->dev,
1896 			"busn_res: %pR end %s updated to %02x\n",
1897 			&old_res, ret ? "can not be" : "is", bus_max);
1898 
1899 	if (!ret && !res->parent)
1900 		pci_bus_insert_busn_res(b, res->start, res->end);
1901 
1902 	return ret;
1903 }
1904 
1905 void pci_bus_release_busn_res(struct pci_bus *b)
1906 {
1907 	struct resource *res = &b->busn_res;
1908 	int ret;
1909 
1910 	if (!res->flags || !res->parent)
1911 		return;
1912 
1913 	ret = release_resource(res);
1914 	dev_printk(KERN_DEBUG, &b->dev,
1915 			"busn_res: %pR %s released\n",
1916 			res, ret ? "can not be" : "is");
1917 }
1918 
1919 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1920 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
1921 {
1922 	struct pci_host_bridge_window *window;
1923 	bool found = false;
1924 	struct pci_bus *b;
1925 	int max;
1926 
1927 	list_for_each_entry(window, resources, list)
1928 		if (window->res->flags & IORESOURCE_BUS) {
1929 			found = true;
1930 			break;
1931 		}
1932 
1933 	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
1934 	if (!b)
1935 		return NULL;
1936 
1937 	if (!found) {
1938 		dev_info(&b->dev,
1939 		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
1940 			bus);
1941 		pci_bus_insert_busn_res(b, bus, 255);
1942 	}
1943 
1944 	max = pci_scan_child_bus(b);
1945 
1946 	if (!found)
1947 		pci_bus_update_busn_res_end(b, max);
1948 
1949 	pci_bus_add_devices(b);
1950 	return b;
1951 }
1952 EXPORT_SYMBOL(pci_scan_root_bus);
1953 
1954 /* Deprecated; use pci_scan_root_bus() instead */
1955 struct pci_bus *pci_scan_bus_parented(struct device *parent,
1956 		int bus, struct pci_ops *ops, void *sysdata)
1957 {
1958 	LIST_HEAD(resources);
1959 	struct pci_bus *b;
1960 
1961 	pci_add_resource(&resources, &ioport_resource);
1962 	pci_add_resource(&resources, &iomem_resource);
1963 	pci_add_resource(&resources, &busn_resource);
1964 	b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
1965 	if (b)
1966 		pci_scan_child_bus(b);
1967 	else
1968 		pci_free_resource_list(&resources);
1969 	return b;
1970 }
1971 EXPORT_SYMBOL(pci_scan_bus_parented);
1972 
1973 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
1974 					void *sysdata)
1975 {
1976 	LIST_HEAD(resources);
1977 	struct pci_bus *b;
1978 
1979 	pci_add_resource(&resources, &ioport_resource);
1980 	pci_add_resource(&resources, &iomem_resource);
1981 	pci_add_resource(&resources, &busn_resource);
1982 	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
1983 	if (b) {
1984 		pci_scan_child_bus(b);
1985 		pci_bus_add_devices(b);
1986 	} else {
1987 		pci_free_resource_list(&resources);
1988 	}
1989 	return b;
1990 }
1991 EXPORT_SYMBOL(pci_scan_bus);
1992 
1993 /**
1994  * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
1995  * @bridge: PCI bridge for the bus to scan
1996  *
1997  * Scan a PCI bus and child buses for new devices, add them,
1998  * and enable them, resizing bridge mmio/io resources if necessary
1999  * and possible.  The caller must ensure the child devices are already
2000  * removed for resizing to occur.
2001  *
2002  * Returns the max number of subordinate buses discovered.
2003  */
2004 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2005 {
2006 	unsigned int max;
2007 	struct pci_bus *bus = bridge->subordinate;
2008 
2009 	max = pci_scan_child_bus(bus);
2010 
2011 	pci_assign_unassigned_bridge_resources(bridge);
2012 
2013 	pci_bus_add_devices(bus);
2014 
2015 	return max;
2016 }
2017 
2018 /**
2019  * pci_rescan_bus - scan a PCI bus for devices.
2020  * @bus: PCI bus to scan
2021  *
2022  * Scan a PCI bus and child buses for new devices, add them,
2023  * and enable them.
2024  *
2025  * Returns the max number of subordinate buses discovered.
2026  */
2027 unsigned int pci_rescan_bus(struct pci_bus *bus)
2028 {
2029 	unsigned int max;
2030 
2031 	max = pci_scan_child_bus(bus);
2032 	pci_assign_unassigned_bus_resources(bus);
2033 	pci_bus_add_devices(bus);
2034 
2035 	return max;
2036 }
2037 EXPORT_SYMBOL_GPL(pci_rescan_bus);
2038 
2039 /*
2040  * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2041  * routines should always be executed under this mutex.
2042  */
2043 static DEFINE_MUTEX(pci_rescan_remove_lock);
2044 
2045 void pci_lock_rescan_remove(void)
2046 {
2047 	mutex_lock(&pci_rescan_remove_lock);
2048 }
2049 EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2050 
2051 void pci_unlock_rescan_remove(void)
2052 {
2053 	mutex_unlock(&pci_rescan_remove_lock);
2054 }
2055 EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
2056 
2057 static int __init pci_sort_bf_cmp(const struct device *d_a,
2058 				  const struct device *d_b)
2059 {
2060 	const struct pci_dev *a = to_pci_dev(d_a);
2061 	const struct pci_dev *b = to_pci_dev(d_b);
2062 
2063 	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2064 	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
2065 
2066 	if      (a->bus->number < b->bus->number) return -1;
2067 	else if (a->bus->number > b->bus->number) return  1;
2068 
2069 	if      (a->devfn < b->devfn) return -1;
2070 	else if (a->devfn > b->devfn) return  1;
2071 
2072 	return 0;
2073 }
2074 
2075 void __init pci_sort_breadthfirst(void)
2076 {
2077 	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
2078 }
2079