/*
 * probe.c - PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include <asm-generic/pci-bridge.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3

static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static LIST_HEAD(pci_domain_busn_res_list);

struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};

static struct resource *get_pci_domain_busn_res(int domain_nr)
{
	struct pci_domain_busn_res *r;

	list_for_each_entry(r, &pci_domain_busn_res_list, list)
		if (r->domain_nr == domain_nr)
			return &r->res;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->domain_nr = domain_nr;
	r->res.start = 0;
	r->res.end = 0xff;
	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;

	list_add_tail(&r->list, &pci_domain_busn_res_list);

	return &r->res;
}

static int find_anything(struct device *dev, void *data)
{
	return 1;
}

/*
 * Some device drivers need to know if PCI is initialized.
 * Basically, we consider PCI not initialized when there
 * is no device to be found on the pci_bus_type.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent.  */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s.  */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}
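
/*
 * Worked example (illustrative; not part of the original file): for a
 * 4 KB 32-bit memory BAR, sizing reads back maxbase = 0xfffff000 in the
 * size bits (with the memory BAR mask 0xfffffff0).  Then:
 *
 *	size = mask & maxbase            = 0xfffff000
 *	size = (size & ~(size - 1)) - 1  = 0x00001000 - 1 = 0x00000fff
 *
 * i.e. the lowest set bit gives the decode size (4 KB) and pci_size()
 * returns the extent, size - 1.
 */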

static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}

#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * __pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an I/O BAR, bit
	 * 1 must be clear.
	 */
	if (sz == 0xffffffff)
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		res->flags |= (l & IORESOURCE_ROM_ENABLE);
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = (u32)PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (!sz64)
		goto fail;

	sz64 = pci_size(l64, sz64, mask64);
	if (!sz64) {
		dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
			 pos);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		if ((sizeof(dma_addr_t) < 8 || sizeof(resource_size_t) < 8) &&
		    sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
				pos, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(dma_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
				 pos, (unsigned long long)l64);
			goto out;
		}
	}

	region.start = l64;
	region.end = l64 + sz64;

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU).  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long)region.start);
	}

	goto out;

fail:
	res->flags = 0;
out:
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
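
/*
 * Illustrative sketch (not part of the original file): the bare BAR
 * sizing handshake that __pci_read_base() wraps, shown without the
 * decode-disable, 64-bit and bus-to-resource handling above.  Assumes
 * decode is already off and @pos is a valid BAR offset; the name
 * pci_bar_size_probe is hypothetical.
 */
static u32 __maybe_unused pci_bar_size_probe(struct pci_dev *dev, int pos)
{
	u32 orig, sz;

	pci_read_config_dword(dev, pos, &orig);	/* save the BAR */
	pci_write_config_dword(dev, pos, ~0);	/* write all 1s */
	pci_read_config_dword(dev, pos, &sz);	/* read back the size mask */
	pci_write_config_dword(dev, pos, orig);	/* restore the BAR */

	return sz;	/* feed to pci_size() with the appropriate mask */
}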

static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
				IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}

static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
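
/*
 * Worked example (illustrative; not part of the original file): with
 * PCI_MEMORY_BASE = 0xe000 and PCI_MEMORY_LIMIT = 0xe010, the window is
 *
 *	base  = 0xe000 << 16             = 0xe0000000
 *	limit = (0xe010 << 16) + 0xfffff = 0xe01fffff
 *
 * i.e. the registers hold the upper 16 bits of a 1 MB-granular range.
 */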

static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	u64 base64, limit64;
	dma_addr_t base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base64 |= (u64) mem_base_hi << 32;
			limit64 |= (u64) mem_limit_hi << 32;
		}
	}

	base = (dma_addr_t) base64;
	limit = (dma_addr_t) limit64;

	if (base != base64) {
		dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
			(unsigned long long) base64);
		return;
	}

	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
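
/*
 * Worked example (illustrative; not part of the original file): a 64-bit
 * prefetchable window above 4 GB.  With PCI_PREF_MEMORY_BASE = 0xc001
 * (type bits 01 = 64-bit) and PCI_PREF_BASE_UPPER32 = 0x1, the decode is
 *
 *	base64 = ((u64)0x1 << 32) | (0xc000 << 16) = 0x1c0000000
 *
 * which only round-trips through dma_addr_t on 64-bit (or LPAE-style)
 * kernels, hence the base != base64 check above.
 */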

void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res && res->flags) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}

static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return NULL;

	INIT_LIST_HEAD(&b->node);
	INIT_LIST_HEAD(&b->children);
	INIT_LIST_HEAD(&b->devices);
	INIT_LIST_HEAD(&b->slots);
	INIT_LIST_HEAD(&b->resources);
	b->max_bus_speed = PCI_SPEED_UNKNOWN;
	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	if (parent)
		b->domain_nr = parent->domain_nr;
#endif
	return b;
}

static void pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}

static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->bus = b;
	return bridge;
}

static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};

void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);

static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
	int index = 0;

	if (agpstat & 4)
		index = 3;
	else if (agpstat & 2)
		index = 2;
	else if (agpstat & 1)
		index = 1;
	else
		goto out;

	if (agp3) {
		index += 2;
		if (index == 5)
			index = 0;
	}

 out:
	return agp_speeds[index];
}
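
/*
 * Illustrative mapping (not part of the original file): agp_speed()
 * decodes the AGPSTAT rate bits, e.g.
 *
 *	agp3 = 0, agpstat = 0x4  ->  index 3  ->  AGP_4X
 *	agp3 = 1, agpstat = 0x2  ->  index 4  ->  AGP_8X
 *	agp3 = 1, agpstat = 0x4  ->  index 5, clamped to AGP_UNKNOWN
 */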

static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}

static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent.
	 */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}

struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
				int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}
EXPORT_SYMBOL(pci_add_new_bus);

static void pci_enable_crs(struct pci_dev *pdev)
{
	u16 root_cap = 0;

	/* Enable CRS Software Visibility if supported */
	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_CRSSVE);
}

/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	pci_enable_crs(dev);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * The bus might already exist for two reasons: Either we are
		 * rescanning the bus or the bus is reachable through more than
		 * one bridge. The second case can happen with the i450NX
		 * chipset.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > subordinate)
			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
				 subordinate, cmax);
		/* subordinate should equal child->busn_res.end */
		if (subordinate > max)
			max = subordinate;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken || is_cardbus)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus. */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, max+1);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max+1, 0xff);
		}
		max++;
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)     <<  0)
		      | ((unsigned int)(child->busn_res.start)   <<  8)
		      | ((unsigned int)(child->busn_res.end) << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			max = pci_scan_child_bus(child);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
							max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
				&child->busn_res,
				(bus->number > child->busn_res.end &&
				 bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				bus->self->transparent ? " transparent" : "",
				dev_name(&bus->dev),
				&bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}
EXPORT_SYMBOL(pci_scan_bridge);
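
/*
 * Usage sketch (illustrative; the real loop is in pci_scan_child_bus()
 * below): callers run both passes over every bridge on a bus before
 * trusting the returned subordinate number:
 *
 *	for (pass = 0; pass < 2; pass++)
 *		list_for_each_entry(dev, &bus->devices, bus_list)
 *			if (pci_is_bridge(dev))
 *				max = pci_scan_bridge(bus, dev, max, pass);
 */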

/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;
	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
}

void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}

/**
 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
 * @dev: PCI device
 *
 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
 * when forwarding a type1 configuration request the bridge must check that
 * the extended register address field is zero.  If it is not, the bridge is
 * not permitted to forward the transaction and must handle it as an
 * Unsupported Request.  Some bridges do not follow this rule and simply
 * drop the extended register bits, resulting in the standard config space
 * being aliased, every 256 bytes across the entire configuration space.
 * Test for this condition by comparing the first dword of each potential
 * alias to the vendor/device ID.
 * Known offenders:
 *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
 *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
 */
static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_QUIRKS
	int pos;
	u32 header, tmp;

	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);

	for (pos = PCI_CFG_SPACE_SIZE;
	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
		    || header != tmp)
			return false;
	}

	return true;
#else
	return false;
#endif
}

/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2.0 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
static int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		goto fail;
	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
		goto fail;

	return PCI_CFG_SPACE_EXP_SIZE;

 fail:
	return PCI_CFG_SPACE_SIZE;
}

int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

	class = dev->class >> 8;
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	if (!pci_is_pcie(dev)) {
		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
		if (!pos)
			goto fail;

		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
			goto fail;
	}

	return pci_cfg_space_size_ext(dev);

 fail:
	return PCI_CFG_SPACE_SIZE;
}

#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and I/O-space addresses, IRQ lines, etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u8 hdr_type;
	struct pci_slot *slot;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	list_for_each_entry(slot, &dev->bus->slots, list)
		if (PCI_SLOT(dev->devfn) == slot->number)
			dev->slot = slot;

	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		    /* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code. Legacy mode ATA controllers have fixed
		 * addresses. These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
					 res);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
					 res);
			}
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
					 res);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
					 res);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
			dev->hdr_type);
		return -EIO;

	bad:
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
			dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}

static struct hpp_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};

static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
{
	u16 pci_cmd, pci_bctl;

	if (!hpp)
		hpp = &pci_default_type0;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev,
			 "PCI settings rev %d not supported; using defaults\n",
			 hpp->revision);
		hpp = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpp->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpp->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpp->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpp->enable_serr)
			pci_bctl |= PCI_BRIDGE_CTL_SERR;
		if (hpp->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}

static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
{
	if (hpp)
		dev_warn(&dev->dev, "PCI-X settings not supported\n");
}

static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
	int pos;
	u32 reg32;

	if (!hpp)
		return;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
			 hpp->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ;
	hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev))
		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}

static void pci_configure_device(struct pci_dev *dev)
{
	struct hotplug_params hpp;
	int ret;

	memset(&hpp, 0, sizeof(hpp));
	ret = pci_get_hp_params(dev, &hpp);
	if (ret)
		return;

	program_hpp_type2(dev, hpp.t2);
	program_hpp_type1(dev, hpp.t1);
	program_hpp_type0(dev, hpp.t0);
}

static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}

/**
 * pci_release_dev - free a pci device structure when all users of it are finished.
 * @dev: device that's been disconnected
 *
 * Will be called only by the device core when all users of this pci device are
 * done.
 */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);
	pcibios_release_device(pci_dev);
	pci_bus_put(pci_dev->bus);
	kfree(pci_dev->driver_override);
	kfree(pci_dev);
}

struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
{
	struct pci_dev *dev;

	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	INIT_LIST_HEAD(&dev->bus_list);
	dev->dev.type = &pci_dev_type;
	dev->bus = pci_bus_get(bus);

	return dev;
}
EXPORT_SYMBOL(pci_alloc_dev);

bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int crs_timeout)
{
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	/*
	 * Configuration Request Retry Status.  Some root ports return the
	 * actual device ID instead of the synthetic ID (0xFFFF) required
	 * by the PCIe spec.  Ignore the device ID and only check for
	 * (vendor id == 1).
	 */
	while ((*l & 0xffff) == 0x0001) {
		if (!crs_timeout)
			return false;

		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (delay > crs_timeout) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
			       pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
			       PCI_FUNC(devfn));
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
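
/*
 * Worked example (illustrative; not part of the original file): with the
 * usual crs_timeout of 60*1000 ms (as passed by pci_scan_device() below),
 * the retry delay doubles as 1, 2, 4, ... 32768 ms; after that sleep the
 * doubled delay (65536) exceeds the timeout and the loop gives up, i.e.
 * after a bit over 60 seconds of total waiting.
 */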

/*
 * Read the config data for a PCI device, sanity-check it
 * and fill in the dev structure...
 */
static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	u32 l;

	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
		return NULL;

	dev = pci_alloc_dev(bus);
	if (!dev)
		return NULL;

	dev->devfn = devfn;
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;

	pci_set_of_node(dev);

	if (pci_setup_device(dev)) {
		pci_bus_put(dev->bus);
		kfree(dev);
		return NULL;
	}

	return dev;
}

static void pci_init_capabilities(struct pci_dev *dev)
{
	/* MSI/MSI-X list */
	pci_msi_init_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_pci22_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);
}

void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	pci_configure_device(dev);

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);
}

struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;

	dev = pci_get_slot(bus, devfn);
	if (dev) {
		pci_dev_put(dev);
		return dev;
	}

	dev = pci_scan_device(bus, devfn);
	if (!dev)
		return NULL;

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(pci_scan_single_device);

static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
{
	int pos;
	u16 cap = 0;
	unsigned next_fn;

	if (pci_ari_enabled(bus)) {
		if (!dev)
			return 0;
		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
		if (!pos)
			return 0;

		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
		next_fn = PCI_ARI_CAP_NFN(cap);
		if (next_fn <= fn)
			return 0;	/* protect against malformed list */

		return next_fn;
	}

	/* dev may be NULL for non-contiguous multifunction devices */
	if (!dev || dev->multifunction)
		return (fn + 1) % 8;

	return 0;
}
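
/*
 * Illustrative walk (not part of the original file): on an ARI-enabled
 * bus, each function's ARI capability names the next implemented
 * function, so an NFN chain of 0 -> 3 -> 7 -> 0 makes pci_scan_slot()
 * probe only functions 0, 3 and 7.  Without ARI, a multifunction device
 * is probed as functions 0, 1, ... 7 via the (fn + 1) % 8 fallback.
 */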

static int only_one_child(struct pci_bus *bus)
{
	struct pci_dev *parent = bus->self;

	if (!parent || !pci_is_pcie(parent))
		return 0;
	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
		return 1;
	if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
		return 1;
	return 0;
}

/**
 * pci_scan_slot - scan a PCI slot on a bus for devices.
 * @bus: PCI bus to scan
 * @devfn: slot number to scan (must have zero function.)
 *
 * Scan a PCI slot on the specified PCI bus for devices, adding
 * discovered devices to the @bus->devices list.  New devices
 * will not have is_added set.
 *
 * Returns the number of new devices found.
 */
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
	unsigned fn, nr = 0;
	struct pci_dev *dev;

	if (only_one_child(bus) && (devfn > 0))
		return 0; /* Already scanned the entire slot */

	dev = pci_scan_single_device(bus, devfn);
	if (!dev)
		return 0;
	if (!dev->is_added)
		nr++;

	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
		dev = pci_scan_single_device(bus, devfn + fn);
		if (dev) {
			if (!dev->is_added)
				nr++;
			dev->multifunction = 1;
		}
	}

	/* only one slot has pcie device */
	if (bus->self && nr)
		pcie_aspm_init_link_state(bus->self);

	return nr;
}
EXPORT_SYMBOL(pci_scan_slot);

static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/*
	 * We don't have a way to change MPS settings on devices that have
	 * drivers attached.  A hot-added device might support only the minimum
	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
	 * where devices may be hot-added, we limit the fabric MPS to 128 so
	 * hot-added devices will work correctly.
	 *
	 * However, if we hot-add a device to a slot directly below a Root
	 * Port, it's impossible for there to be other existing devices below
	 * the port.  We don't limit the MPS in this case because we can
	 * reconfigure MPS on both the Root Port and the hot-added device,
	 * and there are no other devices involved.
	 *
	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
	 */
	if (dev->is_hotplug_bridge &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		*smpss = 0;

	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}

static void pcie_write_mps(struct pci_dev *dev, int mps)
{
	int rc;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		mps = 128 << dev->pcie_mpss;

		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
		    dev->bus->self)
			/* For "Performance", the assumption is made that
			 * downstream communication will never be larger than
			 * the MRRS.  So, the MPS only needs to be configured
			 * for the upstream communication.  This being the case,
			 * walk from the top down and set the MPS of the child
			 * to that of the parent bus.
			 *
			 * Configure the device MPS with the smaller of the
			 * device MPSS or the bridge MPS (which is assumed to be
			 * properly configured at this point to the largest
			 * allowable MPS based on its parent bus).
			 */
			mps = min(mps, pcie_get_mps(dev->bus->self));
	}

	rc = pcie_set_mps(dev, mps);
	if (rc)
		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
}

static void pcie_write_mrrs(struct pci_dev *dev)
{
	int rc, mrrs;

	/* In the "safe" case, do not configure the MRRS.  There appear to be
	 * issues with setting MRRS to 0 on a number of devices.
	 */
	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
		return;

	/* For Max performance, the MRRS must be set to the largest supported
	 * value.  However, it cannot be configured larger than the MPS the
	 * device or the bus can support.  This should already be properly
	 * configured by a prior call to pcie_write_mps.
	 */
	mrrs = pcie_get_mps(dev);

	/* MRRS is a R/W register.  Invalid values can be written, but a
	 * subsequent read will verify if the value is acceptable or not.
	 * If the MRRS value provided is not acceptable (e.g., too large),
	 * shrink the value until it is acceptable to the HW.
	 */
	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
		rc = pcie_set_readrq(dev, mrrs);
		if (!rc)
			break;

		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
		mrrs /= 2;
	}

	if (mrrs < 128)
		dev_err(&dev->dev, "MRRS was unable to be configured with a safe value.  If problems are experienced, try running with pci=pcie_bus_safe\n");
}

static void pcie_bus_detect_mps(struct pci_dev *dev)
{
	struct pci_dev *bridge = dev->bus->self;
	int mps, p_mps;

	if (!bridge)
		return;

	mps = pcie_get_mps(dev);
	p_mps = pcie_get_mps(bridge);

	if (mps != p_mps)
		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 mps, pci_name(bridge), p_mps);
}
1771 
1772 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1773 {
1774 	int mps, orig_mps;
1775 
1776 	if (!pci_is_pcie(dev))
1777 		return 0;
1778 
1779 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
1780 		pcie_bus_detect_mps(dev);
1781 		return 0;
1782 	}
1783 
1784 	mps = 128 << *(u8 *)data;
1785 	orig_mps = pcie_get_mps(dev);
1786 
1787 	pcie_write_mps(dev, mps);
1788 	pcie_write_mrrs(dev);
1789 
1790 	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
1791 		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
1792 		 orig_mps, pcie_get_readrq(dev));
1793 
1794 	return 0;
1795 }
1796 
1797 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
1798  * parents then children fashion.  If this changes, then this code will not
1799  * work as designed.
1800  */
1801 void pcie_bus_configure_settings(struct pci_bus *bus)
1802 {
1803 	u8 smpss = 0;
1804 
1805 	if (!bus->self)
1806 		return;
1807 
1808 	if (!pci_is_pcie(bus->self))
1809 		return;
1810 
1811 	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
1812 	 * to be aware of the MPS of the destination.  To work around this,
1813 	 * simply force the MPS of the entire system to the smallest possible.
1814 	 */
1815 	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
1816 		smpss = 0;
1817 
1818 	if (pcie_bus_config == PCIE_BUS_SAFE) {
1819 		smpss = bus->self->pcie_mpss;
1820 
1821 		pcie_find_smpss(bus->self, &smpss);
1822 		pci_walk_bus(bus, pcie_find_smpss, &smpss);
1823 	}
1824 
1825 	pcie_bus_configure_set(bus->self, &smpss);
1826 	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1827 }
1828 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
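
/*
 * Example caller (hypothetical host-bridge driver sketch): configure
 * MPS/MRRS once scanning is complete, before the devices are added:
 *
 *	struct pci_bus *child;
 *
 *	pci_scan_child_bus(bus);
 *	list_for_each_entry(child, &bus->children, node)
 *		pcie_bus_configure_settings(child);
 *	pci_bus_add_devices(bus);
 */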
1829 
1830 unsigned int pci_scan_child_bus(struct pci_bus *bus)
1831 {
1832 	unsigned int devfn, pass, max = bus->busn_res.start;
1833 	struct pci_dev *dev;
1834 
1835 	dev_dbg(&bus->dev, "scanning bus\n");
1836 
1837 	/* Go find them, Rover! */
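	/*
	 * devfn packs a 5-bit device number with a 3-bit function number,
	 * so stepping by 8 visits function 0 of each of the 32 possible
	 * slots, e.g. PCI_DEVFN(3, 0) == 0x18; pci_scan_slot() probes the
	 * remaining functions of any multi-function device it finds.
	 */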
1838 	for (devfn = 0; devfn < 0x100; devfn += 8)
1839 		pci_scan_slot(bus, devfn);
1840 
1841 	/* Reserve buses for SR-IOV capability. */
1842 	max += pci_iov_bus_range(bus);
1843 
1844 	/*
1845 	 * After performing arch-dependent fixup of the bus, look behind
1846 	 * all PCI-to-PCI bridges on this bus.
1847 	 */
1848 	if (!bus->is_added) {
1849 		dev_dbg(&bus->dev, "fixups for bus\n");
1850 		pcibios_fixup_bus(bus);
1851 		bus->is_added = 1;
1852 	}
1853 
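	/*
	 * Two passes: pass 0 scans behind bridges that firmware has already
	 * configured; pass 1 configures and scans the remaining ones.
	 */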
1854 	for (pass = 0; pass < 2; pass++)
1855 		list_for_each_entry(dev, &bus->devices, bus_list) {
1856 			if (pci_is_bridge(dev))
1857 				max = pci_scan_bridge(bus, dev, max, pass);
1858 		}
1859 
1860 	/*
1861 	 * We've scanned the bus, so we now know everything about the
1862 	 * devices on it, including what lies on the other side of any
1863 	 * bridges that may be on this bus.
1864 	 *
1865 	 * Return how far we got in finding sub-buses.
1866 	 */
1867 	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
1868 	return max;
1869 }
1870 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1871 
1872 /**
1873  * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
1874  * @bridge: Host bridge to set up.
1875  *
1876  * Default empty implementation.  Replace with an architecture-specific setup
1877  * routine, if necessary.
1878  */
1879 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
1880 {
1881 	return 0;
1882 }
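
/*
 * Example override (a hedged sketch of what an ACPI-based architecture
 * might do; "struct my_sysdata" and its "companion" field are
 * hypothetical):
 *
 *	int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
 *	{
 *		struct my_sysdata *sd = bridge->bus->sysdata;
 *
 *		ACPI_COMPANION_SET(&bridge->dev, sd->companion);
 *		return 0;
 *	}
 */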
1883 
1884 void __weak pcibios_add_bus(struct pci_bus *bus)
1885 {
1886 }
1887 
1888 void __weak pcibios_remove_bus(struct pci_bus *bus)
1889 {
1890 }
1891 
1892 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1893 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
1894 {
1895 	int error;
1896 	struct pci_host_bridge *bridge;
1897 	struct pci_bus *b, *b2;
1898 	struct resource_entry *window, *n;
1899 	struct resource *res;
1900 	resource_size_t offset;
1901 	char bus_addr[64];
1902 	char *fmt;
1903 
1904 	b = pci_alloc_bus(NULL);
1905 	if (!b)
1906 		return NULL;
1907 
1908 	b->sysdata = sysdata;
1909 	b->ops = ops;
1910 	b->number = b->busn_res.start = bus;
1911 	pci_bus_assign_domain_nr(b, parent);
1912 	b2 = pci_find_bus(pci_domain_nr(b), bus);
1913 	if (b2) {
1914 		/* If we already got to this bus through a different bridge, ignore it */
1915 		dev_dbg(&b2->dev, "bus already known\n");
1916 		goto err_out;
1917 	}
1918 
1919 	bridge = pci_alloc_host_bridge(b);
1920 	if (!bridge)
1921 		goto err_out;
1922 
1923 	bridge->dev.parent = parent;
1924 	bridge->dev.release = pci_release_host_bridge_dev;
1925 	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
1926 	error = pcibios_root_bridge_prepare(bridge);
1927 	if (error) {
1928 		kfree(bridge);
1929 		goto err_out;
1930 	}
1931 
1932 	error = device_register(&bridge->dev);
1933 	if (error) {
1934 		put_device(&bridge->dev);
1935 		goto err_out;
1936 	}
1937 	b->bridge = get_device(&bridge->dev);
1938 	device_enable_async_suspend(b->bridge);
1939 	pci_set_bus_of_node(b);
1940 
1941 	if (!parent)
1942 		set_dev_node(b->bridge, pcibus_to_node(b));
1943 
1944 	b->dev.class = &pcibus_class;
1945 	b->dev.parent = b->bridge;
1946 	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
1947 	error = device_register(&b->dev);
1948 	if (error)
1949 		goto class_dev_reg_err;
1950 
1951 	pcibios_add_bus(b);
1952 
1953 	/* Create legacy_io and legacy_mem files for this bus */
1954 	pci_create_legacy_files(b);
1955 
1956 	if (parent)
1957 		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
1958 	else
1959 		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
1960 
1961 	/* Add initial resources to the bus */
1962 	resource_list_for_each_entry_safe(window, n, resources) {
1963 		list_move_tail(&window->node, &bridge->windows);
1964 		res = window->res;
1965 		offset = window->offset;
1966 		if (res->flags & IORESOURCE_BUS)
1967 			pci_bus_insert_busn_res(b, bus, res->end);
1968 		else
1969 			pci_bus_add_resource(b, res, 0);
1970 		if (offset) {
1971 			if (resource_type(res) == IORESOURCE_IO)
1972 				fmt = " (bus address [%#06llx-%#06llx])";
1973 			else
1974 				fmt = " (bus address [%#010llx-%#010llx])";
1975 			snprintf(bus_addr, sizeof(bus_addr), fmt,
1976 				 (unsigned long long) (res->start - offset),
1977 				 (unsigned long long) (res->end - offset));
1978 		} else
1979 			bus_addr[0] = '\0';
1980 		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
1981 	}
1982 
1983 	down_write(&pci_bus_sem);
1984 	list_add_tail(&b->node, &pci_root_buses);
1985 	up_write(&pci_bus_sem);
1986 
1987 	return b;
1988 
1989 class_dev_reg_err:
1990 	put_device(&bridge->dev);
1991 	device_unregister(&bridge->dev);
1992 err_out:
1993 	kfree(b);
1994 	return NULL;
1995 }
1996 
1997 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
1998 {
1999 	struct resource *res = &b->busn_res;
2000 	struct resource *parent_res, *conflict;
2001 
2002 	res->start = bus;
2003 	res->end = bus_max;
2004 	res->flags = IORESOURCE_BUS;
2005 
2006 	if (!pci_is_root_bus(b))
2007 		parent_res = &b->parent->busn_res;
2008 	else {
2009 		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
2010 		res->flags |= IORESOURCE_PCI_FIXED;
2011 	}
2012 
2013 	conflict = request_resource_conflict(parent_res, res);
2014 
2015 	if (conflict)
2016 		dev_printk(KERN_DEBUG, &b->dev,
2017 			   "busn_res: cannot insert %pR under %s%pR (conflicts with %s %pR)\n",
2018 			    res, pci_is_root_bus(b) ? "domain " : "",
2019 			    parent_res, conflict->name, conflict);
2020 
2021 	return conflict == NULL;
2022 }
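
/*
 * For example, a root bus inserted with bus 0x00 and bus_max 0xff ends up
 * with its busn_res [bus 00-ff] parented by the domain-wide resource, and
 * each child bridge's range (say [bus 01-05]) then nests beneath it.
 */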
2023 
2024 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
2025 {
2026 	struct resource *res = &b->busn_res;
2027 	struct resource old_res = *res;
2028 	resource_size_t size;
2029 	int ret;
2030 
2031 	if (res->start > bus_max)
2032 		return -EINVAL;
2033 
2034 	size = bus_max - res->start + 1;
2035 	ret = adjust_resource(res, res->start, size);
2036 	dev_printk(KERN_DEBUG, &b->dev,
2037 			"busn_res: %pR end %s updated to %02x\n",
2038 			&old_res, ret ? "cannot be" : "is", bus_max);
2039 
2040 	if (!ret && !res->parent)
2041 		pci_bus_insert_busn_res(b, res->start, res->end);
2042 
2043 	return ret;
2044 }
2045 
2046 void pci_bus_release_busn_res(struct pci_bus *b)
2047 {
2048 	struct resource *res = &b->busn_res;
2049 	int ret;
2050 
2051 	if (!res->flags || !res->parent)
2052 		return;
2053 
2054 	ret = release_resource(res);
2055 	dev_printk(KERN_DEBUG, &b->dev,
2056 			"busn_res: %pR %s released\n",
2057 			res, ret ? "cannot be" : "is");
2058 }
2059 
2060 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
2061 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
2062 {
2063 	struct resource_entry *window;
2064 	bool found = false;
2065 	struct pci_bus *b;
2066 	int max;
2067 
2068 	resource_list_for_each_entry(window, resources)
2069 		if (window->res->flags & IORESOURCE_BUS) {
2070 			found = true;
2071 			break;
2072 		}
2073 
2074 	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
2075 	if (!b)
2076 		return NULL;
2077 
2078 	if (!found) {
2079 		dev_info(&b->dev,
2080 		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2081 			bus);
2082 		pci_bus_insert_busn_res(b, bus, 255);
2083 	}
2084 
2085 	max = pci_scan_child_bus(b);
2086 
2087 	if (!found)
2088 		pci_bus_update_busn_res_end(b, max);
2089 
2090 	pci_bus_add_devices(b);
2091 	return b;
2092 }
2093 EXPORT_SYMBOL(pci_scan_root_bus);
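
/*
 * Example caller (hypothetical host driver sketch; "my_busn_res", a
 * [bus 00-7f] window, and "my_pci_ops" are placeholders): supplying an
 * explicit bus-number window avoids the [bus xx-ff] fallback above:
 *
 *	LIST_HEAD(resources);
 *
 *	pci_add_resource(&resources, &ioport_resource);
 *	pci_add_resource(&resources, &iomem_resource);
 *	pci_add_resource(&resources, &my_busn_res);
 *	bus = pci_scan_root_bus(parent, 0, &my_pci_ops, sysdata, &resources);
 *	if (!bus)
 *		pci_free_resource_list(&resources);
 */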
2094 
2095 /* Deprecated; use pci_scan_root_bus() instead */
2096 struct pci_bus *pci_scan_bus_parented(struct device *parent,
2097 		int bus, struct pci_ops *ops, void *sysdata)
2098 {
2099 	LIST_HEAD(resources);
2100 	struct pci_bus *b;
2101 
2102 	pci_add_resource(&resources, &ioport_resource);
2103 	pci_add_resource(&resources, &iomem_resource);
2104 	pci_add_resource(&resources, &busn_resource);
2105 	b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
2106 	if (b)
2107 		pci_scan_child_bus(b);
2108 	else
2109 		pci_free_resource_list(&resources);
2110 	return b;
2111 }
2112 EXPORT_SYMBOL(pci_scan_bus_parented);
2113 
2114 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
2115 					void *sysdata)
2116 {
2117 	LIST_HEAD(resources);
2118 	struct pci_bus *b;
2119 
2120 	pci_add_resource(&resources, &ioport_resource);
2121 	pci_add_resource(&resources, &iomem_resource);
2122 	pci_add_resource(&resources, &busn_resource);
2123 	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2124 	if (b) {
2125 		pci_scan_child_bus(b);
2126 		pci_bus_add_devices(b);
2127 	} else {
2128 		pci_free_resource_list(&resources);
2129 	}
2130 	return b;
2131 }
2132 EXPORT_SYMBOL(pci_scan_bus);
2133 
2134 /**
2135  * pci_rescan_bus_bridge_resize - scan a PCI bridge's secondary bus for devices.
2136  * @bridge: PCI bridge for the bus to scan
2137  *
2138  * Scan a PCI bus and child buses for new devices, add them,
2139  * and enable them, resizing the bridge's MMIO/IO resources if necessary
2140  * and possible.  The caller must ensure the child devices are already
2141  * removed for resizing to occur.
2142  *
2143  * Returns the highest subordinate bus number discovered.
2144  */
2145 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2146 {
2147 	unsigned int max;
2148 	struct pci_bus *bus = bridge->subordinate;
2149 
2150 	max = pci_scan_child_bus(bus);
2151 
2152 	pci_assign_unassigned_bridge_resources(bridge);
2153 
2154 	pci_bus_add_devices(bus);
2155 
2156 	return max;
2157 }
2158 
2159 /**
2160  * pci_rescan_bus - scan a PCI bus for devices.
2161  * @bus: PCI bus to scan
2162  *
2163  * Scan a PCI bus and child buses for new devices, add them,
2164  * and enable them.
2165  *
2166  * Returns the highest subordinate bus number discovered.
2167  */
2168 unsigned int pci_rescan_bus(struct pci_bus *bus)
2169 {
2170 	unsigned int max;
2171 
2172 	max = pci_scan_child_bus(bus);
2173 	pci_assign_unassigned_bus_resources(bus);
2174 	pci_bus_add_devices(bus);
2175 
2176 	return max;
2177 }
2178 EXPORT_SYMBOL_GPL(pci_rescan_bus);
2179 
2180 /*
2181  * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2182  * routines should always be executed under this mutex.
2183  */
2184 static DEFINE_MUTEX(pci_rescan_remove_lock);
2185 
2186 void pci_lock_rescan_remove(void)
2187 {
2188 	mutex_lock(&pci_rescan_remove_lock);
2189 }
2190 EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2191 
2192 void pci_unlock_rescan_remove(void)
2193 {
2194 	mutex_unlock(&pci_rescan_remove_lock);
2195 }
2196 EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
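
/*
 * Example (illustrative sketch): a hotplug-style rescan takes this lock
 * around the scan:
 *
 *	pci_lock_rescan_remove();
 *	pci_rescan_bus(bus);
 *	pci_unlock_rescan_remove();
 */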
2197 
2198 static int __init pci_sort_bf_cmp(const struct device *d_a,
2199 				  const struct device *d_b)
2200 {
2201 	const struct pci_dev *a = to_pci_dev(d_a);
2202 	const struct pci_dev *b = to_pci_dev(d_b);
2203 
2204 	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2205 	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
2206 
2207 	if      (a->bus->number < b->bus->number) return -1;
2208 	else if (a->bus->number > b->bus->number) return  1;
2209 
2210 	if      (a->devfn < b->devfn) return -1;
2211 	else if (a->devfn > b->devfn) return  1;
2212 
2213 	return 0;
2214 }
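
/*
 * Example ordering produced by the comparator above: 0000:00:1f.0 sorts
 * before 0000:01:00.0, which sorts before 0001:00:00.0 (domain first,
 * then bus number, then devfn).
 */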
2215 
2216 void __init pci_sort_breadthfirst(void)
2217 {
2218 	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
2219 }
2220