xref: /openbmc/linux/drivers/pci/probe.c (revision 31368ce8)
1 /*
2  * probe.c - PCI detection and setup code
3  */
4 
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/of_device.h>
10 #include <linux/of_pci.h>
11 #include <linux/pci_hotplug.h>
12 #include <linux/slab.h>
13 #include <linux/module.h>
14 #include <linux/cpumask.h>
15 #include <linux/pci-aspm.h>
16 #include <linux/aer.h>
17 #include <linux/acpi.h>
18 #include <linux/irqdomain.h>
19 #include <linux/pm_runtime.h>
20 #include "pci.h"
21 
22 #define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
23 #define CARDBUS_RESERVE_BUSNR	3
24 
/* Default bus number aperture for the root PCI domain: [bus 00-ff] */
static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};
31 
/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

/* List of per-domain bus number resources (struct pci_domain_busn_res) */
static LIST_HEAD(pci_domain_busn_res_list);
37 
/* Bus number aperture of one PCI domain, kept on pci_domain_busn_res_list */
struct pci_domain_busn_res {
	struct list_head list;	/* node on pci_domain_busn_res_list */
	struct resource res;	/* [bus 00-ff] aperture for this domain */
	int domain_nr;		/* domain this entry belongs to */
};
43 
44 static struct resource *get_pci_domain_busn_res(int domain_nr)
45 {
46 	struct pci_domain_busn_res *r;
47 
48 	list_for_each_entry(r, &pci_domain_busn_res_list, list)
49 		if (r->domain_nr == domain_nr)
50 			return &r->res;
51 
52 	r = kzalloc(sizeof(*r), GFP_KERNEL);
53 	if (!r)
54 		return NULL;
55 
56 	r->domain_nr = domain_nr;
57 	r->res.start = 0;
58 	r->res.end = 0xff;
59 	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
60 
61 	list_add_tail(&r->list, &pci_domain_busn_res_list);
62 
63 	return &r->res;
64 }
65 
/* bus_find_device() match callback that accepts any device */
static int find_anything(struct device *dev, void *data)
{
	return 1;	/* match unconditionally */
}
70 
71 /*
72  * Some device drivers need know if pci is initiated.
73  * Basically, we think pci is not initiated when there
74  * is no device to be found on the pci_bus_type.
75  */
76 int no_pci_devices(void)
77 {
78 	struct device *dev;
79 	int no_devices;
80 
81 	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
82 	no_devices = (dev == NULL);
83 	put_device(dev);
84 	return no_devices;
85 }
86 EXPORT_SYMBOL(no_pci_devices);
87 
88 /*
89  * PCI Bus Class
90  */
91 static void release_pcibus_dev(struct device *dev)
92 {
93 	struct pci_bus *pci_bus = to_pci_bus(dev);
94 
95 	put_device(pci_bus->bridge);
96 	pci_bus_remove_resources(pci_bus);
97 	pci_release_bus_of_node(pci_bus);
98 	kfree(pci_bus);
99 }
100 
/* Device class backing /sys/class/pci_bus entries for every pci_bus */
static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};
106 
/* Register the pci_bus class early (postcore), before any bus is scanned */
static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);
112 
/*
 * pci_size - compute the extent a BAR decodes
 * @base:    BAR value as originally read
 * @maxbase: BAR value read back after writing all 1s
 * @mask:    mask of address bits valid for this BAR type
 *
 * Returns the decode extent as (size - 1), or 0 when the BAR is
 * unimplemented or the read-back value is inconsistent.
 */
static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = maxbase & mask;	/* significant address bits */

	if (!size)
		return 0;	/* BAR not implemented */

	/*
	 * The lowest set bit is the decode granularity; the extent is one
	 * less than that.
	 */
	size = (size & ~(size - 1)) - 1;

	/*
	 * base == maxbase can be valid only if the BAR had already been
	 * programmed with all 1s before sizing.
	 */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}
130 
131 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
132 {
133 	u32 mem_type;
134 	unsigned long flags;
135 
136 	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
137 		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
138 		flags |= IORESOURCE_IO;
139 		return flags;
140 	}
141 
142 	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
143 	flags |= IORESOURCE_MEM;
144 	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
145 		flags |= IORESOURCE_PREFETCH;
146 
147 	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
148 	switch (mem_type) {
149 	case PCI_BASE_ADDRESS_MEM_TYPE_32:
150 		break;
151 	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
152 		/* 1M mem BAR treated as 32-bit BAR */
153 		break;
154 	case PCI_BASE_ADDRESS_MEM_TYPE_64:
155 		flags |= IORESOURCE_MEM_64;
156 		break;
157 	default:
158 		/* mem unknown type treated as 32-bit BAR */
159 		break;
160 	}
161 	return flags;
162 }
163 
#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * __pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Sizes the BAR by writing all 1s to it, reading the result back and
 * restoring the original value, then converts the decoded bus-address
 * range into @res.  On failure res->flags is left 0.
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l = 0, sz = 0, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;

	/* ROM BARs carry an enable bit; mask the non-address bits for sizing */
	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	/* Size the BAR: save, write all 1s, read back, restore */
	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (sz == 0xffffffff)
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		if (l & PCI_ROM_ADDRESS_ENABLE)
			res->flags |= IORESOURCE_ROM_ENABLE;
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = PCI_ROM_ADDRESS_MASK;
	}

	/* A 64-bit BAR continues into the next dword of config space */
	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	/* Decoding may be re-enabled now; printing is safe from here on */
	if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (!sz64)
		goto fail;

	/* pci_size() yields the decode extent (size - 1), or 0 if bogus */
	sz64 = pci_size(l64, sz64, mask64);
	if (!sz64) {
		dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
			 pos);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		/*
		 * A >4GB BAR cannot be represented when either the bus
		 * address type or resource_size_t is only 32 bits wide.
		 */
		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
		    && sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
				pos, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(pci_bus_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
				 pos, (unsigned long long)l64);
			goto out;
		}
	}

	region.start = l64;
	region.end = l64 + sz64;

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU.  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long)region.start);
	}

	goto out;


fail:
	res->flags = 0;
out:
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
319 
/*
 * Read @howmany standard BARs of @dev (plus the expansion ROM BAR at
 * config offset @rom, when non-zero) into dev->resource[].
 */
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	/* Quirked devices whose BARs don't follow the spec: leave alone */
	if (dev->non_compliant_bars)
		return;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		/* a 64-bit BAR occupies two slots; skip its upper half */
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}
341 
/*
 * Read the bridge's I/O window (PCI_IO_BASE/PCI_IO_LIMIT, extended by
 * the UPPER16 registers for 32-bit I/O bridges) into child->resource[0].
 */
static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	/* The byte registers hold the upper bits of a 16-bit I/O address */
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	/* 32-bit I/O decoders extend the range via the UPPER16 registers */
	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	/* base > limit means the window is disabled; leave res untouched */
	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
381 
382 static void pci_read_bridge_mmio(struct pci_bus *child)
383 {
384 	struct pci_dev *dev = child->self;
385 	u16 mem_base_lo, mem_limit_lo;
386 	unsigned long base, limit;
387 	struct pci_bus_region region;
388 	struct resource *res;
389 
390 	res = child->resource[1];
391 	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
392 	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
393 	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
394 	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
395 	if (base <= limit) {
396 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
397 		region.start = base;
398 		region.end = limit + 0xfffff;
399 		pcibios_bus_to_resource(dev->bus, res, &region);
400 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
401 	}
402 }
403 
404 static void pci_read_bridge_mmio_pref(struct pci_bus *child)
405 {
406 	struct pci_dev *dev = child->self;
407 	u16 mem_base_lo, mem_limit_lo;
408 	u64 base64, limit64;
409 	pci_bus_addr_t base, limit;
410 	struct pci_bus_region region;
411 	struct resource *res;
412 
413 	res = child->resource[2];
414 	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
415 	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
416 	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
417 	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
418 
419 	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
420 		u32 mem_base_hi, mem_limit_hi;
421 
422 		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
423 		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
424 
425 		/*
426 		 * Some bridges set the base > limit by default, and some
427 		 * (broken) BIOSes do not initialize them.  If we find
428 		 * this, just assume they are not being used.
429 		 */
430 		if (mem_base_hi <= mem_limit_hi) {
431 			base64 |= (u64) mem_base_hi << 32;
432 			limit64 |= (u64) mem_limit_hi << 32;
433 		}
434 	}
435 
436 	base = (pci_bus_addr_t) base64;
437 	limit = (pci_bus_addr_t) limit64;
438 
439 	if (base != base64) {
440 		dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
441 			(unsigned long long) base64);
442 		return;
443 	}
444 
445 	if (base <= limit) {
446 		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
447 					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
448 		if (res->flags & PCI_PREF_RANGE_TYPE_64)
449 			res->flags |= IORESOURCE_MEM_64;
450 		region.start = base;
451 		region.end = limit + 0xfffff;
452 		pcibios_bus_to_resource(dev->bus, res, &region);
453 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
454 	}
455 }
456 
/*
 * Read all three bridge windows (I/O, memory, prefetchable memory) of
 * the bridge leading to @child and attach them as the child bus's
 * resources.  For transparent bridges the parent bus's resources are
 * additionally exposed on the child (subtractive decode).
 */
void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	/* Start over from the bridge's own window resources */
	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res && res->flags) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}
490 
491 static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
492 {
493 	struct pci_bus *b;
494 
495 	b = kzalloc(sizeof(*b), GFP_KERNEL);
496 	if (!b)
497 		return NULL;
498 
499 	INIT_LIST_HEAD(&b->node);
500 	INIT_LIST_HEAD(&b->children);
501 	INIT_LIST_HEAD(&b->devices);
502 	INIT_LIST_HEAD(&b->slots);
503 	INIT_LIST_HEAD(&b->resources);
504 	b->max_bus_speed = PCI_SPEED_UNKNOWN;
505 	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
506 #ifdef CONFIG_PCI_DOMAINS_GENERIC
507 	if (parent)
508 		b->domain_nr = parent->domain_nr;
509 #endif
510 	return b;
511 }
512 
513 static void devm_pci_release_host_bridge_dev(struct device *dev)
514 {
515 	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
516 
517 	if (bridge->release_fn)
518 		bridge->release_fn(bridge);
519 }
520 
/* ->release for kzalloc'ed host bridges: run user callback, then free */
static void pci_release_host_bridge_dev(struct device *dev)
{
	devm_pci_release_host_bridge_dev(dev);
	pci_free_host_bridge(to_pci_host_bridge(dev));
}
526 
527 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
528 {
529 	struct pci_host_bridge *bridge;
530 
531 	bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
532 	if (!bridge)
533 		return NULL;
534 
535 	INIT_LIST_HEAD(&bridge->windows);
536 	bridge->dev.release = pci_release_host_bridge_dev;
537 
538 	return bridge;
539 }
540 EXPORT_SYMBOL(pci_alloc_host_bridge);
541 
542 struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
543 						   size_t priv)
544 {
545 	struct pci_host_bridge *bridge;
546 
547 	bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL);
548 	if (!bridge)
549 		return NULL;
550 
551 	INIT_LIST_HEAD(&bridge->windows);
552 	bridge->dev.release = devm_pci_release_host_bridge_dev;
553 
554 	return bridge;
555 }
556 EXPORT_SYMBOL(devm_pci_alloc_host_bridge);
557 
/* Free a host bridge's window resource list and the bridge itself */
void pci_free_host_bridge(struct pci_host_bridge *bridge)
{
	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}
EXPORT_SYMBOL(pci_free_host_bridge);
565 
/*
 * PCI-X bus speeds, indexed by the 4-bit secondary bus frequency field
 * of the PCI-X bridge secondary status register (see pci_set_bus_speed()).
 */
static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};
584 
/*
 * PCIe link speeds, indexed by the Link Speed fields of the PCIe Link
 * Capabilities (SLS) and Link Status (CLS) registers.
 */
const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};
603 
604 void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
605 {
606 	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
607 }
608 EXPORT_SYMBOL_GPL(pcie_update_link_speed);
609 
/* AGP rates indexed by the value agp_speed() computes from status bits */
static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};
617 
618 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
619 {
620 	int index = 0;
621 
622 	if (agpstat & 4)
623 		index = 3;
624 	else if (agpstat & 2)
625 		index = 2;
626 	else if (agpstat & 1)
627 		index = 1;
628 	else
629 		goto out;
630 
631 	if (agp3) {
632 		index += 2;
633 		if (index == 5)
634 			index = 0;
635 	}
636 
637  out:
638 	return agp_speeds[index];
639 }
640 
/*
 * Derive @bus's max_bus_speed and cur_bus_speed from its upstream
 * bridge's AGP, PCI-X or PCIe capability registers (in that order;
 * PCI-X returns early and thus overrides any AGP result).
 */
static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	/* AGP/AGP3: rate bits live in the status and command registers */
	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	/* PCI-X: mode and frequency come from the secondary status register */
	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			/* a V2 capability maps to the ECC speed variant */
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	/* PCIe: supported speed from LNKCAP, current speed from LNKSTA */
	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}
698 
/*
 * Resolve the MSI irq_domain for root bus @bus: try the OF description
 * first, then ACPI, and finally a direct fwnode-based domain lookup.
 * Returns NULL when no MSI domain is known.
 */
static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}
#endif

	return d;
}
727 
/*
 * Attach an MSI irq_domain to @bus's device: inherit from the nearest
 * upstream bridge that has one, falling back to the host bridge's
 * firmware-provided domain.  The result may be NULL.
 */
static void pci_set_bus_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;
	struct pci_bus *b;

	/*
	 * The bus can be a root bus, a subordinate bus, or a virtual bus
	 * created by an SR-IOV device.  Walk up to the first bridge device
	 * found or derive the domain from the host bridge.
	 */
	for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
		if (b->self)
			d = dev_get_msi_domain(&b->self->dev);
	}

	if (!d)
		d = pci_host_bridge_msi_domain(b);

	dev_set_msi_domain(&bus->dev, d);
}
748 
749 static int pci_register_host_bridge(struct pci_host_bridge *bridge)
750 {
751 	struct device *parent = bridge->dev.parent;
752 	struct resource_entry *window, *n;
753 	struct pci_bus *bus, *b;
754 	resource_size_t offset;
755 	LIST_HEAD(resources);
756 	struct resource *res;
757 	char addr[64], *fmt;
758 	const char *name;
759 	int err;
760 
761 	bus = pci_alloc_bus(NULL);
762 	if (!bus)
763 		return -ENOMEM;
764 
765 	bridge->bus = bus;
766 
767 	/* temporarily move resources off the list */
768 	list_splice_init(&bridge->windows, &resources);
769 	bus->sysdata = bridge->sysdata;
770 	bus->msi = bridge->msi;
771 	bus->ops = bridge->ops;
772 	bus->number = bus->busn_res.start = bridge->busnr;
773 #ifdef CONFIG_PCI_DOMAINS_GENERIC
774 	bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
775 #endif
776 
777 	b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
778 	if (b) {
779 		/* If we already got to this bus through a different bridge, ignore it */
780 		dev_dbg(&b->dev, "bus already known\n");
781 		err = -EEXIST;
782 		goto free;
783 	}
784 
785 	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
786 		     bridge->busnr);
787 
788 	err = pcibios_root_bridge_prepare(bridge);
789 	if (err)
790 		goto free;
791 
792 	err = device_register(&bridge->dev);
793 	if (err)
794 		put_device(&bridge->dev);
795 
796 	bus->bridge = get_device(&bridge->dev);
797 	device_enable_async_suspend(bus->bridge);
798 	pci_set_bus_of_node(bus);
799 	pci_set_bus_msi_domain(bus);
800 
801 	if (!parent)
802 		set_dev_node(bus->bridge, pcibus_to_node(bus));
803 
804 	bus->dev.class = &pcibus_class;
805 	bus->dev.parent = bus->bridge;
806 
807 	dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
808 	name = dev_name(&bus->dev);
809 
810 	err = device_register(&bus->dev);
811 	if (err)
812 		goto unregister;
813 
814 	pcibios_add_bus(bus);
815 
816 	/* Create legacy_io and legacy_mem files for this bus */
817 	pci_create_legacy_files(bus);
818 
819 	if (parent)
820 		dev_info(parent, "PCI host bridge to bus %s\n", name);
821 	else
822 		pr_info("PCI host bridge to bus %s\n", name);
823 
824 	/* Add initial resources to the bus */
825 	resource_list_for_each_entry_safe(window, n, &resources) {
826 		list_move_tail(&window->node, &bridge->windows);
827 		offset = window->offset;
828 		res = window->res;
829 
830 		if (res->flags & IORESOURCE_BUS)
831 			pci_bus_insert_busn_res(bus, bus->number, res->end);
832 		else
833 			pci_bus_add_resource(bus, res, 0);
834 
835 		if (offset) {
836 			if (resource_type(res) == IORESOURCE_IO)
837 				fmt = " (bus address [%#06llx-%#06llx])";
838 			else
839 				fmt = " (bus address [%#010llx-%#010llx])";
840 
841 			snprintf(addr, sizeof(addr), fmt,
842 				 (unsigned long long)(res->start - offset),
843 				 (unsigned long long)(res->end - offset));
844 		} else
845 			addr[0] = '\0';
846 
847 		dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
848 	}
849 
850 	down_write(&pci_bus_sem);
851 	list_add_tail(&bus->node, &pci_root_buses);
852 	up_write(&pci_bus_sem);
853 
854 	return 0;
855 
856 unregister:
857 	put_device(&bridge->dev);
858 	device_unregister(&bridge->dev);
859 
860 free:
861 	kfree(bus);
862 	return err;
863 }
864 
/*
 * Allocate a child pci_bus of @parent behind @bridge with bus number
 * @busnr, register its bus device and create its sysfs/legacy files.
 * The new bus is NOT linked into parent->children here; the caller
 * does that (see pci_add_new_bus()).  Returns NULL on allocation failure.
 */
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.  The subordinate end starts out at 0xff and is
	 * trimmed once scanning knows the real last bus number.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	/* No bridge device: a virtual bus (e.g. created for SR-IOV) */
	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	pci_set_bus_msi_domain(child);
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	/* Give the host bridge driver a chance to set up the new bus */
	if (child->ops->add_bus) {
		ret = child->ops->add_bus(child);
		if (WARN_ON(ret < 0))
			dev_err(&child->dev, "failed to add bus: %d\n", ret);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}
935 
936 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
937 				int busnr)
938 {
939 	struct pci_bus *child;
940 
941 	child = pci_alloc_child_bus(parent, dev, busnr);
942 	if (child) {
943 		down_write(&pci_bus_sem);
944 		list_add_tail(&child->node, &parent->children);
945 		up_write(&pci_bus_sem);
946 	}
947 	return child;
948 }
949 EXPORT_SYMBOL(pci_add_new_bus);
950 
951 static void pci_enable_crs(struct pci_dev *pdev)
952 {
953 	u16 root_cap = 0;
954 
955 	/* Enable CRS Software Visibility if supported */
956 	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
957 	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
958 		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
959 					 PCI_EXP_RTCTL_CRSSVE);
960 }
961 
962 /*
963  * If it's a bridge, configure it and scan the bus behind it.
964  * For CardBus bridges, we don't scan behind as the devices will
965  * be handled by the bridge driver itself.
966  *
967  * We need to process bridges in two passes -- first we scan those
968  * already configured by the BIOS and after we are done with all of
969  * them, we proceed to assigning numbers to the remaining buses in
970  * order to avoid overlaps between old and new bus numbers.
971  */
972 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
973 {
974 	struct pci_bus *child;
975 	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
976 	u32 buses, i, j = 0;
977 	u16 bctl;
978 	u8 primary, secondary, subordinate;
979 	int broken = 0;
980 
981 	/*
982 	 * Make sure the bridge is powered on to be able to access config
983 	 * space of devices below it.
984 	 */
985 	pm_runtime_get_sync(&dev->dev);
986 
987 	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
988 	primary = buses & 0xFF;
989 	secondary = (buses >> 8) & 0xFF;
990 	subordinate = (buses >> 16) & 0xFF;
991 
992 	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
993 		secondary, subordinate, pass);
994 
995 	if (!primary && (primary != bus->number) && secondary && subordinate) {
996 		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
997 		primary = bus->number;
998 	}
999 
1000 	/* Check if setup is sensible at all */
1001 	if (!pass &&
1002 	    (primary != bus->number || secondary <= bus->number ||
1003 	     secondary > subordinate)) {
1004 		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
1005 			 secondary, subordinate);
1006 		broken = 1;
1007 	}
1008 
1009 	/* Disable MasterAbortMode during probing to avoid reporting
1010 	   of bus errors (in some architectures) */
1011 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
1012 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
1013 			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
1014 
1015 	pci_enable_crs(dev);
1016 
1017 	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
1018 	    !is_cardbus && !broken) {
1019 		unsigned int cmax;
1020 		/*
1021 		 * Bus already configured by firmware, process it in the first
1022 		 * pass and just note the configuration.
1023 		 */
1024 		if (pass)
1025 			goto out;
1026 
1027 		/*
1028 		 * The bus might already exist for two reasons: Either we are
1029 		 * rescanning the bus or the bus is reachable through more than
1030 		 * one bridge. The second case can happen with the i450NX
1031 		 * chipset.
1032 		 */
1033 		child = pci_find_bus(pci_domain_nr(bus), secondary);
1034 		if (!child) {
1035 			child = pci_add_new_bus(bus, dev, secondary);
1036 			if (!child)
1037 				goto out;
1038 			child->primary = primary;
1039 			pci_bus_insert_busn_res(child, secondary, subordinate);
1040 			child->bridge_ctl = bctl;
1041 		}
1042 
1043 		cmax = pci_scan_child_bus(child);
1044 		if (cmax > subordinate)
1045 			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
1046 				 subordinate, cmax);
1047 		/* subordinate should equal child->busn_res.end */
1048 		if (subordinate > max)
1049 			max = subordinate;
1050 	} else {
1051 		/*
1052 		 * We need to assign a number to this bus which we always
1053 		 * do in the second pass.
1054 		 */
1055 		if (!pass) {
1056 			if (pcibios_assign_all_busses() || broken || is_cardbus)
1057 				/* Temporarily disable forwarding of the
1058 				   configuration cycles on all bridges in
1059 				   this bus segment to avoid possible
1060 				   conflicts in the second pass between two
1061 				   bridges programmed with overlapping
1062 				   bus ranges. */
1063 				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
1064 						       buses & ~0xffffff);
1065 			goto out;
1066 		}
1067 
1068 		/* Clear errors */
1069 		pci_write_config_word(dev, PCI_STATUS, 0xffff);
1070 
1071 		/* Prevent assigning a bus number that already exists.
1072 		 * This can happen when a bridge is hot-plugged, so in
1073 		 * this case we only re-scan this bus. */
1074 		child = pci_find_bus(pci_domain_nr(bus), max+1);
1075 		if (!child) {
1076 			child = pci_add_new_bus(bus, dev, max+1);
1077 			if (!child)
1078 				goto out;
1079 			pci_bus_insert_busn_res(child, max+1, 0xff);
1080 		}
1081 		max++;
1082 		buses = (buses & 0xff000000)
1083 		      | ((unsigned int)(child->primary)     <<  0)
1084 		      | ((unsigned int)(child->busn_res.start)   <<  8)
1085 		      | ((unsigned int)(child->busn_res.end) << 16);
1086 
1087 		/*
1088 		 * yenta.c forces a secondary latency timer of 176.
1089 		 * Copy that behaviour here.
1090 		 */
1091 		if (is_cardbus) {
1092 			buses &= ~0xff000000;
1093 			buses |= CARDBUS_LATENCY_TIMER << 24;
1094 		}
1095 
1096 		/*
1097 		 * We need to blast all three values with a single write.
1098 		 */
1099 		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
1100 
1101 		if (!is_cardbus) {
1102 			child->bridge_ctl = bctl;
1103 			max = pci_scan_child_bus(child);
1104 		} else {
1105 			/*
1106 			 * For CardBus bridges, we leave 4 bus numbers
1107 			 * as cards with a PCI-to-PCI bridge can be
1108 			 * inserted later.
1109 			 */
1110 			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
1111 				struct pci_bus *parent = bus;
1112 				if (pci_find_bus(pci_domain_nr(bus),
1113 							max+i+1))
1114 					break;
1115 				while (parent->parent) {
1116 					if ((!pcibios_assign_all_busses()) &&
1117 					    (parent->busn_res.end > max) &&
1118 					    (parent->busn_res.end <= max+i)) {
1119 						j = 1;
1120 					}
1121 					parent = parent->parent;
1122 				}
1123 				if (j) {
1124 					/*
1125 					 * Often, there are two cardbus bridges
1126 					 * -- try to leave one valid bus number
1127 					 * for each one.
1128 					 */
1129 					i /= 2;
1130 					break;
1131 				}
1132 			}
1133 			max += i;
1134 		}
1135 		/*
1136 		 * Set the subordinate bus number to its real value.
1137 		 */
1138 		pci_bus_update_busn_res_end(child, max);
1139 		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
1140 	}
1141 
1142 	sprintf(child->name,
1143 		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
1144 		pci_domain_nr(bus), child->number);
1145 
1146 	/* Has only triggered on CardBus, fixup is in yenta_socket */
1147 	while (bus->parent) {
1148 		if ((child->busn_res.end > bus->busn_res.end) ||
1149 		    (child->number > bus->busn_res.end) ||
1150 		    (child->number < bus->number) ||
1151 		    (child->busn_res.end < bus->number)) {
1152 			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
1153 				&child->busn_res,
1154 				(bus->number > child->busn_res.end &&
1155 				 bus->busn_res.end < child->number) ?
1156 					"wholly" : "partially",
1157 				bus->self->transparent ? " transparent" : "",
1158 				dev_name(&bus->dev),
1159 				&bus->busn_res);
1160 		}
1161 		bus = bus->parent;
1162 	}
1163 
1164 out:
1165 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
1166 
1167 	pm_runtime_put(&dev->dev);
1168 
1169 	return max;
1170 }
1171 EXPORT_SYMBOL(pci_scan_bridge);
1172 
1173 /*
1174  * Read interrupt line and base address registers.
1175  * The architecture-dependent code can tweak these, of course.
1176  */
1177 static void pci_read_irq(struct pci_dev *dev)
1178 {
1179 	unsigned char irq;
1180 
1181 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
1182 	dev->pin = irq;
1183 	if (irq)
1184 		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
1185 	dev->irq = irq;
1186 }
1187 
1188 void set_pcie_port_type(struct pci_dev *pdev)
1189 {
1190 	int pos;
1191 	u16 reg16;
1192 	int type;
1193 	struct pci_dev *parent;
1194 
1195 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1196 	if (!pos)
1197 		return;
1198 
1199 	pdev->pcie_cap = pos;
1200 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
1201 	pdev->pcie_flags_reg = reg16;
1202 	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
1203 	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
1204 
1205 	/*
1206 	 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
1207 	 * of a Link.  No PCIe component has two Links.  Two Links are
1208 	 * connected by a Switch that has a Port on each Link and internal
1209 	 * logic to connect the two Ports.
1210 	 */
1211 	type = pci_pcie_type(pdev);
1212 	if (type == PCI_EXP_TYPE_ROOT_PORT ||
1213 	    type == PCI_EXP_TYPE_PCIE_BRIDGE)
1214 		pdev->has_secondary_link = 1;
1215 	else if (type == PCI_EXP_TYPE_UPSTREAM ||
1216 		 type == PCI_EXP_TYPE_DOWNSTREAM) {
1217 		parent = pci_upstream_bridge(pdev);
1218 
1219 		/*
1220 		 * Usually there's an upstream device (Root Port or Switch
1221 		 * Downstream Port), but we can't assume one exists.
1222 		 */
1223 		if (parent && !parent->has_secondary_link)
1224 			pdev->has_secondary_link = 1;
1225 	}
1226 }
1227 
1228 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
1229 {
1230 	u32 reg32;
1231 
1232 	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
1233 	if (reg32 & PCI_EXP_SLTCAP_HPC)
1234 		pdev->is_hotplug_bridge = 1;
1235 }
1236 
1237 static void set_pcie_thunderbolt(struct pci_dev *dev)
1238 {
1239 	int vsec = 0;
1240 	u32 header;
1241 
1242 	while ((vsec = pci_find_next_ext_capability(dev, vsec,
1243 						    PCI_EXT_CAP_ID_VNDR))) {
1244 		pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
1245 
1246 		/* Is the device part of a Thunderbolt controller? */
1247 		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
1248 		    PCI_VNDR_HEADER_ID(header) == PCI_VSEC_ID_INTEL_TBT) {
1249 			dev->is_thunderbolt = 1;
1250 			return;
1251 		}
1252 	}
1253 }
1254 
1255 /**
1256  * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
1257  * @dev: PCI device
1258  *
1259  * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
1260  * when forwarding a type1 configuration request the bridge must check that
1261  * the extended register address field is zero.  The bridge is not permitted
1262  * to forward the transactions and must handle it as an Unsupported Request.
1263  * Some bridges do not follow this rule and simply drop the extended register
1264  * bits, resulting in the standard config space being aliased, every 256
1265  * bytes across the entire configuration space.  Test for this condition by
1266  * comparing the first dword of each potential alias to the vendor/device ID.
1267  * Known offenders:
1268  *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1269  *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
1270  */
1271 static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
1272 {
1273 #ifdef CONFIG_PCI_QUIRKS
1274 	int pos;
1275 	u32 header, tmp;
1276 
1277 	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
1278 
1279 	for (pos = PCI_CFG_SPACE_SIZE;
1280 	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
1281 		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
1282 		    || header != tmp)
1283 			return false;
1284 	}
1285 
1286 	return true;
1287 #else
1288 	return false;
1289 #endif
1290 }
1291 
1292 /**
1293  * pci_cfg_space_size - get the configuration space size of the PCI device.
1294  * @dev: PCI device
1295  *
1296  * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1297  * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
1298  * access it.  Maybe we don't have a way to generate extended config space
1299  * accesses, or the device is behind a reverse Express bridge.  So we try
1300  * reading the dword at 0x100 which must either be 0 or a valid extended
1301  * capability header.
1302  */
1303 static int pci_cfg_space_size_ext(struct pci_dev *dev)
1304 {
1305 	u32 status;
1306 	int pos = PCI_CFG_SPACE_SIZE;
1307 
1308 	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1309 		return PCI_CFG_SPACE_SIZE;
1310 	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
1311 		return PCI_CFG_SPACE_SIZE;
1312 
1313 	return PCI_CFG_SPACE_EXP_SIZE;
1314 }
1315 
1316 int pci_cfg_space_size(struct pci_dev *dev)
1317 {
1318 	int pos;
1319 	u32 status;
1320 	u16 class;
1321 
1322 	class = dev->class >> 8;
1323 	if (class == PCI_CLASS_BRIDGE_HOST)
1324 		return pci_cfg_space_size_ext(dev);
1325 
1326 	if (pci_is_pcie(dev))
1327 		return pci_cfg_space_size_ext(dev);
1328 
1329 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1330 	if (!pos)
1331 		return PCI_CFG_SPACE_SIZE;
1332 
1333 	pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1334 	if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
1335 		return pci_cfg_space_size_ext(dev);
1336 
1337 	return PCI_CFG_SPACE_SIZE;
1338 }
1339 
1340 #define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1341 
1342 static void pci_msi_setup_pci_dev(struct pci_dev *dev)
1343 {
1344 	/*
1345 	 * Disable the MSI hardware to avoid screaming interrupts
1346 	 * during boot.  This is the power on reset default so
1347 	 * usually this should be a noop.
1348 	 */
1349 	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
1350 	if (dev->msi_cap)
1351 		pci_msi_set_enable(dev, 0);
1352 
1353 	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1354 	if (dev->msix_cap)
1355 		pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
1356 }
1357 
1358 /**
1359  * pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability
1360  * @dev: PCI device
1361  *
1362  * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev.  Check this
1363  * at enumeration-time to avoid modifying PCI_COMMAND at run-time.
1364  */
1365 static int pci_intx_mask_broken(struct pci_dev *dev)
1366 {
1367 	u16 orig, toggle, new;
1368 
1369 	pci_read_config_word(dev, PCI_COMMAND, &orig);
1370 	toggle = orig ^ PCI_COMMAND_INTX_DISABLE;
1371 	pci_write_config_word(dev, PCI_COMMAND, toggle);
1372 	pci_read_config_word(dev, PCI_COMMAND, &new);
1373 
1374 	pci_write_config_word(dev, PCI_COMMAND, orig);
1375 
1376 	/*
1377 	 * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
1378 	 * r2.3, so strictly speaking, a device is not *broken* if it's not
1379 	 * writable.  But we'll live with the misnomer for now.
1380 	 */
1381 	if (new != toggle)
1382 		return 1;
1383 	return 0;
1384 }
1385 
/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor,class,memory and IO-space addresses,IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u16 cmd;
	u8 hdr_type;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	/* If even the header type can't be read, give up on this device. */
	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;	/* bits 6:0: header layout */
	dev->multifunction = !!(hdr_type & 0x80);	/* bit 7: multifunction */
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	pci_dev_assign_slot(dev);
	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;

	/* Canonical "domain:bus:slot.func" name for the device. */
	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		    /* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* need to have dev->cfg_size ready */
	set_pcie_thunderbolt(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	/* Stop broken BARs from decoding before we probe them below. */
	if (dev->non_compliant_bars) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
			cmd &= ~PCI_COMMAND_IO;
			cmd &= ~PCI_COMMAND_MEMORY;
			pci_write_config_word(dev, PCI_COMMAND, cmd);
		}
	}

	dev->broken_intx_masking = pci_intx_mask_broken(dev);

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code. Legacy mode ATA controllers have fixed
		 * addresses. These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				/* Primary channel in legacy mode: fixed ports. */
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
					 res);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
					 res);
			}
			if ((progif & 4) == 0) {
				/* Secondary channel in legacy mode: fixed ports. */
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
					 res);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
					 res);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
			dev->hdr_type);
		return -EIO;

	bad:
		/* Class/header mismatch: keep the device but neuter its class. */
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
			dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED << 8;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}
1551 
1552 static void pci_configure_mps(struct pci_dev *dev)
1553 {
1554 	struct pci_dev *bridge = pci_upstream_bridge(dev);
1555 	int mps, p_mps, rc;
1556 
1557 	if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
1558 		return;
1559 
1560 	mps = pcie_get_mps(dev);
1561 	p_mps = pcie_get_mps(bridge);
1562 
1563 	if (mps == p_mps)
1564 		return;
1565 
1566 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
1567 		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1568 			 mps, pci_name(bridge), p_mps);
1569 		return;
1570 	}
1571 
1572 	/*
1573 	 * Fancier MPS configuration is done later by
1574 	 * pcie_bus_configure_settings()
1575 	 */
1576 	if (pcie_bus_config != PCIE_BUS_DEFAULT)
1577 		return;
1578 
1579 	rc = pcie_set_mps(dev, p_mps);
1580 	if (rc) {
1581 		dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1582 			 p_mps);
1583 		return;
1584 	}
1585 
1586 	dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
1587 		 p_mps, mps, 128 << dev->pcie_mpss);
1588 }
1589 
/*
 * Default Type 0 (PCI) settings, applied by program_hpp_type0() when
 * firmware supplies no hot-plug parameters or an unsupported revision.
 */
static struct hpp_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};
1597 
1598 static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
1599 {
1600 	u16 pci_cmd, pci_bctl;
1601 
1602 	if (!hpp)
1603 		hpp = &pci_default_type0;
1604 
1605 	if (hpp->revision > 1) {
1606 		dev_warn(&dev->dev,
1607 			 "PCI settings rev %d not supported; using defaults\n",
1608 			 hpp->revision);
1609 		hpp = &pci_default_type0;
1610 	}
1611 
1612 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
1613 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
1614 	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
1615 	if (hpp->enable_serr)
1616 		pci_cmd |= PCI_COMMAND_SERR;
1617 	if (hpp->enable_perr)
1618 		pci_cmd |= PCI_COMMAND_PARITY;
1619 	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
1620 
1621 	/* Program bridge control value */
1622 	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
1623 		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
1624 				      hpp->latency_timer);
1625 		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
1626 		if (hpp->enable_serr)
1627 			pci_bctl |= PCI_BRIDGE_CTL_SERR;
1628 		if (hpp->enable_perr)
1629 			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
1630 		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
1631 	}
1632 }
1633 
1634 static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1635 {
1636 	int pos;
1637 
1638 	if (!hpp)
1639 		return;
1640 
1641 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1642 	if (!pos)
1643 		return;
1644 
1645 	dev_warn(&dev->dev, "PCI-X settings not supported\n");
1646 }
1647 
1648 static bool pcie_root_rcb_set(struct pci_dev *dev)
1649 {
1650 	struct pci_dev *rp = pcie_find_root_port(dev);
1651 	u16 lnkctl;
1652 
1653 	if (!rp)
1654 		return false;
1655 
1656 	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
1657 	if (lnkctl & PCI_EXP_LNKCTL_RCB)
1658 		return true;
1659 
1660 	return false;
1661 }
1662 
/*
 * Apply firmware-provided Type 2 (PCI Express) hot-plug settings to @dev:
 * Device Control, Link Control and, when present, the AER registers.
 * The MPS and MRRS fields are masked out first because those are managed
 * by the kernel itself (see pci_configure_mps()).
 */
static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
	int pos;
	u32 reg32;

	if (!hpp)
		return;

	if (!pci_is_pcie(dev))
		return;

	/* Only revision 0/1 tables are understood here. */
	if (hpp->revision > 1) {
		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
			 hpp->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ;
	hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {

		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
	/* Don't enable ECRC generation or checking if unsupported */
	if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
		reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
	if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
		reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}
1747 
1748 static void pci_configure_extended_tags(struct pci_dev *dev)
1749 {
1750 	u32 dev_cap;
1751 	int ret;
1752 
1753 	if (!pci_is_pcie(dev))
1754 		return;
1755 
1756 	ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &dev_cap);
1757 	if (ret)
1758 		return;
1759 
1760 	if (dev_cap & PCI_EXP_DEVCAP_EXT_TAG)
1761 		pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
1762 					 PCI_EXP_DEVCTL_EXT_TAG);
1763 }
1764 
1765 static void pci_configure_device(struct pci_dev *dev)
1766 {
1767 	struct hotplug_params hpp;
1768 	int ret;
1769 
1770 	pci_configure_mps(dev);
1771 	pci_configure_extended_tags(dev);
1772 
1773 	memset(&hpp, 0, sizeof(hpp));
1774 	ret = pci_get_hp_params(dev, &hpp);
1775 	if (ret)
1776 		return;
1777 
1778 	program_hpp_type2(dev, hpp.t2);
1779 	program_hpp_type1(dev, hpp.t1);
1780 	program_hpp_type0(dev, hpp.t0);
1781 }
1782 
/*
 * Release capability-related state attached to @dev: VPD, SR-IOV and the
 * saved-capability buffers set up by pci_allocate_cap_save_buffers().
 */
static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}
1789 
1790 /**
1791  * pci_release_dev - free a pci device structure when all users of it are finished.
1792  * @dev: device that's been disconnected
1793  *
1794  * Will be called only by the device core when all users of this pci device are
1795  * done.
1796  */
1797 static void pci_release_dev(struct device *dev)
1798 {
1799 	struct pci_dev *pci_dev;
1800 
1801 	pci_dev = to_pci_dev(dev);
1802 	pci_release_capabilities(pci_dev);
1803 	pci_release_of_node(pci_dev);
1804 	pcibios_release_device(pci_dev);
1805 	pci_bus_put(pci_dev->bus);
1806 	kfree(pci_dev->driver_override);
1807 	kfree(pci_dev->dma_alias_mask);
1808 	kfree(pci_dev);
1809 }
1810 
1811 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1812 {
1813 	struct pci_dev *dev;
1814 
1815 	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1816 	if (!dev)
1817 		return NULL;
1818 
1819 	INIT_LIST_HEAD(&dev->bus_list);
1820 	dev->dev.type = &pci_dev_type;
1821 	dev->bus = pci_bus_get(bus);
1822 
1823 	return dev;
1824 }
1825 EXPORT_SYMBOL(pci_alloc_dev);
1826 
1827 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1828 				int crs_timeout)
1829 {
1830 	int delay = 1;
1831 
1832 	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1833 		return false;
1834 
1835 	/* some broken boards return 0 or ~0 if a slot is empty: */
1836 	if (*l == 0xffffffff || *l == 0x00000000 ||
1837 	    *l == 0x0000ffff || *l == 0xffff0000)
1838 		return false;
1839 
1840 	/*
1841 	 * Configuration Request Retry Status.  Some root ports return the
1842 	 * actual device ID instead of the synthetic ID (0xFFFF) required
1843 	 * by the PCIe spec.  Ignore the device ID and only check for
1844 	 * (vendor id == 1).
1845 	 */
1846 	while ((*l & 0xffff) == 0x0001) {
1847 		if (!crs_timeout)
1848 			return false;
1849 
1850 		msleep(delay);
1851 		delay *= 2;
1852 		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1853 			return false;
1854 		/* Card hasn't responded in 60 seconds?  Must be stuck. */
1855 		if (delay > crs_timeout) {
1856 			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
1857 			       pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
1858 			       PCI_FUNC(devfn));
1859 			return false;
1860 		}
1861 	}
1862 
1863 	return true;
1864 }
1865 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1866 
1867 /*
1868  * Read the config data for a PCI device, sanity-check it
1869  * and fill in the dev structure...
1870  */
1871 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1872 {
1873 	struct pci_dev *dev;
1874 	u32 l;
1875 
1876 	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1877 		return NULL;
1878 
1879 	dev = pci_alloc_dev(bus);
1880 	if (!dev)
1881 		return NULL;
1882 
1883 	dev->devfn = devfn;
1884 	dev->vendor = l & 0xffff;
1885 	dev->device = (l >> 16) & 0xffff;
1886 
1887 	pci_set_of_node(dev);
1888 
1889 	if (pci_setup_device(dev)) {
1890 		pci_bus_put(dev->bus);
1891 		kfree(dev);
1892 		return NULL;
1893 	}
1894 
1895 	return dev;
1896 }
1897 
/*
 * Discover and initialize the capabilities of @dev, in a fixed order
 * (e.g. cap-save buffers before PM init, ARI before SR-IOV).
 */
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* Enhanced Allocation */
	pci_ea_init(dev);

	/* Setup MSI caps & disable MSI/MSI-X interrupts */
	pci_msi_setup_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Address Translation Services */
	pci_ats_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);

	/* Precision Time Measurement */
	pci_ptm_init(dev);

	/* Advanced Error Reporting */
	pci_aer_init(dev);
}
1933 
1934 /*
1935  * This is the equivalent of pci_host_bridge_msi_domain that acts on
1936  * devices. Firmware interfaces that can select the MSI domain on a
1937  * per-device basis should be called from here.
1938  */
1939 static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
1940 {
1941 	struct irq_domain *d;
1942 
1943 	/*
1944 	 * If a domain has been set through the pcibios_add_device
1945 	 * callback, then this is the one (platform code knows best).
1946 	 */
1947 	d = dev_get_msi_domain(&dev->dev);
1948 	if (d)
1949 		return d;
1950 
1951 	/*
1952 	 * Let's see if we have a firmware interface able to provide
1953 	 * the domain.
1954 	 */
1955 	d = pci_msi_get_device_domain(dev);
1956 	if (d)
1957 		return d;
1958 
1959 	return NULL;
1960 }
1961 
1962 static void pci_set_msi_domain(struct pci_dev *dev)
1963 {
1964 	struct irq_domain *d;
1965 
1966 	/*
1967 	 * If the platform or firmware interfaces cannot supply a
1968 	 * device-specific MSI domain, then inherit the default domain
1969 	 * from the host bridge itself.
1970 	 */
1971 	d = pci_dev_msi_domain(dev);
1972 	if (!d)
1973 		d = dev_get_msi_domain(&dev->bus->dev);
1974 
1975 	dev_set_msi_domain(&dev->dev, d);
1976 }
1977 
/*
 * pci_device_add - configure @dev, link it onto @bus->devices and
 * register it with the driver core.  Ordering matters: fixups and
 * capability init run before the device becomes globally visible.
 */
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	pci_configure_device(dev);

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Setup MSI irq domain */
	pci_set_msi_domain(dev);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);
}
2026 
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev = pci_get_slot(bus, devfn);

	if (dev) {
		/* Already enumerated: drop the pci_get_slot() reference. */
		pci_dev_put(dev);
		return dev;
	}

	dev = pci_scan_device(bus, devfn);
	if (dev)
		pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(pci_scan_single_device);
2046 
2047 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
2048 {
2049 	int pos;
2050 	u16 cap = 0;
2051 	unsigned next_fn;
2052 
2053 	if (pci_ari_enabled(bus)) {
2054 		if (!dev)
2055 			return 0;
2056 		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
2057 		if (!pos)
2058 			return 0;
2059 
2060 		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
2061 		next_fn = PCI_ARI_CAP_NFN(cap);
2062 		if (next_fn <= fn)
2063 			return 0;	/* protect against malformed list */
2064 
2065 		return next_fn;
2066 	}
2067 
2068 	/* dev may be NULL for non-contiguous multifunction devices */
2069 	if (!dev || dev->multifunction)
2070 		return (fn + 1) % 8;
2071 
2072 	return 0;
2073 }
2074 
2075 static int only_one_child(struct pci_bus *bus)
2076 {
2077 	struct pci_dev *parent = bus->self;
2078 
2079 	if (!parent || !pci_is_pcie(parent))
2080 		return 0;
2081 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
2082 		return 1;
2083 
2084 	/*
2085 	 * PCIe downstream ports are bridges that normally lead to only a
2086 	 * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all
2087 	 * possible devices, not just device 0.  See PCIe spec r3.0,
2088 	 * sec 7.3.1.
2089 	 */
2090 	if (parent->has_secondary_link &&
2091 	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
2092 		return 1;
2093 	return 0;
2094 }
2095 
2096 /**
2097  * pci_scan_slot - scan a PCI slot on a bus for devices.
2098  * @bus: PCI bus to scan
2099  * @devfn: slot number to scan (must have zero function.)
2100  *
2101  * Scan a PCI slot on the specified PCI bus for devices, adding
2102  * discovered devices to the @bus->devices list.  New devices
2103  * will not have is_added set.
2104  *
2105  * Returns the number of new devices found.
2106  */
2107 int pci_scan_slot(struct pci_bus *bus, int devfn)
2108 {
2109 	unsigned fn, nr = 0;
2110 	struct pci_dev *dev;
2111 
2112 	if (only_one_child(bus) && (devfn > 0))
2113 		return 0; /* Already scanned the entire slot */
2114 
2115 	dev = pci_scan_single_device(bus, devfn);
2116 	if (!dev)
2117 		return 0;
2118 	if (!dev->is_added)
2119 		nr++;
2120 
2121 	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
2122 		dev = pci_scan_single_device(bus, devfn + fn);
2123 		if (dev) {
2124 			if (!dev->is_added)
2125 				nr++;
2126 			dev->multifunction = 1;
2127 		}
2128 	}
2129 
2130 	/* only one slot has pcie device */
2131 	if (bus->self && nr)
2132 		pcie_aspm_init_link_state(bus->self);
2133 
2134 	return nr;
2135 }
2136 EXPORT_SYMBOL(pci_scan_slot);
2137 
/*
 * pcie_find_smpss - pci_walk_bus() callback computing the smallest MPSS
 * @dev: device currently being visited
 * @data: pointer to a u8 holding the running minimum, in encoded MPS
 *        units (i.e. 128 << *data bytes)
 *
 * Always returns 0 so the bus walk visits every device.
 */
static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	/* Non-PCIe devices have no MPS capability to constrain the fabric */
	if (!pci_is_pcie(dev))
		return 0;

	/*
	 * We don't have a way to change MPS settings on devices that have
	 * drivers attached.  A hot-added device might support only the minimum
	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
	 * where devices may be hot-added, we limit the fabric MPS to 128 so
	 * hot-added devices will work correctly.
	 *
	 * However, if we hot-add a device to a slot directly below a Root
	 * Port, it's impossible for there to be other existing devices below
	 * the port.  We don't limit the MPS in this case because we can
	 * reconfigure MPS on both the Root Port and the hot-added device,
	 * and there are no other devices involved.
	 *
	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
	 */
	if (dev->is_hotplug_bridge &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		*smpss = 0;	/* force the minimum, MPS=128 */

	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}
2169 
/*
 * pcie_write_mps - program a device's Max Payload Size
 * @dev: device to configure
 * @mps: requested MPS in bytes
 *
 * In PCIE_BUS_PERFORMANCE mode @mps is ignored; the device's own maximum,
 * capped by its parent bridge's current MPS, is used instead.
 */
static void pcie_write_mps(struct pci_dev *dev, int mps)
{
	int rc;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		/* Start from the largest payload the device supports */
		mps = 128 << dev->pcie_mpss;

		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
		    dev->bus->self)
			/* For "Performance", the assumption is made that
			 * downstream communication will never be larger than
			 * the MRRS.  So, the MPS only needs to be configured
			 * for the upstream communication.  This being the case,
			 * walk from the top down and set the MPS of the child
			 * to that of the parent bus.
			 *
			 * Configure the device MPS with the smaller of the
			 * device MPSS or the bridge MPS (which is assumed to be
			 * properly configured at this point to the largest
			 * allowable MPS based on its parent bus).
			 */
			mps = min(mps, pcie_get_mps(dev->bus->self));
	}

	rc = pcie_set_mps(dev, mps);
	if (rc)
		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
}
2198 
/*
 * pcie_write_mrrs - program a device's Max Read Request Size
 * @dev: device to configure
 *
 * Only acts in PCIE_BUS_PERFORMANCE mode; the MRRS is set as large as the
 * device's (already configured) MPS allows, shrinking on rejection.
 */
static void pcie_write_mrrs(struct pci_dev *dev)
{
	int rc, mrrs;

	/* In the "safe" case, do not configure the MRRS.  There appear to be
	 * issues with setting MRRS to 0 on a number of devices.
	 */
	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
		return;

	/* For Max performance, the MRRS must be set to the largest supported
	 * value.  However, it cannot be configured larger than the MPS the
	 * device or the bus can support.  This should already be properly
	 * configured by a prior call to pcie_write_mps.
	 */
	mrrs = pcie_get_mps(dev);

	/* MRRS is a R/W register.  Invalid values can be written, but a
	 * subsequent read will verify if the value is acceptable or not.
	 * If the MRRS value provided is not acceptable (e.g., too large),
	 * shrink the value until it is acceptable to the HW.
	 */
	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
		rc = pcie_set_readrq(dev, mrrs);
		if (!rc)
			break;

		/* HW rejected the value; halve and retry */
		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
		mrrs /= 2;
	}

	if (mrrs < 128)
		dev_err(&dev->dev, "MRRS was unable to be configured with a safe value.  If problems are experienced, try running with pci=pcie_bus_safe\n");
}
2233 
/*
 * pcie_bus_configure_set - pci_walk_bus() callback applying MPS/MRRS policy
 * @dev: device currently being visited
 * @data: pointer to a u8 with the encoded fabric-wide MPS (128 << *data)
 *
 * Always returns 0 so the bus walk continues over every device.
 */
static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
	int mps, orig_mps;

	if (!pci_is_pcie(dev))
		return 0;

	/* Leave the firmware/default configuration alone in these modes */
	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
	    pcie_bus_config == PCIE_BUS_DEFAULT)
		return 0;

	mps = 128 << *(u8 *)data;
	orig_mps = pcie_get_mps(dev);

	/* Order matters: MRRS is derived from the MPS just written */
	pcie_write_mps(dev, mps);
	pcie_write_mrrs(dev);

	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
		 orig_mps, pcie_get_readrq(dev));

	return 0;
}
2257 
/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
 * parents then children fashion.  If this changes, then this code will not
 * work as designed.
 *
 * Configures the Max Payload Size (and, for the "performance" policy, the
 * Max Read Request Size) of every PCIe device below @bus according to the
 * global pcie_bus_config policy.
 */
void pcie_bus_configure_settings(struct pci_bus *bus)
{
	u8 smpss = 0;

	/* Nothing to configure for a bus without a bridge device */
	if (!bus->self)
		return;

	if (!pci_is_pcie(bus->self))
		return;

	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
	 * to be aware of the MPS of the destination.  To work around this,
	 * simply force the MPS of the entire system to the smallest possible.
	 */
	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
		smpss = 0;

	if (pcie_bus_config == PCIE_BUS_SAFE) {
		/* Find the smallest MPSS over the bridge and its subtree */
		smpss = bus->self->pcie_mpss;

		pcie_find_smpss(bus->self, &smpss);
		pci_walk_bus(bus, pcie_find_smpss, &smpss);
	}

	/* Apply top-down: the bridge first, then every descendant */
	pcie_bus_configure_set(bus->self, &smpss);
	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
}
EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
2290 
/**
 * pci_scan_child_bus - scan devices below a bus and behind its bridges
 * @bus: bus to scan
 *
 * Scans all 32 device slots on @bus, reserves bus numbers needed for
 * SR-IOV, runs arch-specific fixups once per bus, then scans behind every
 * bridge found.
 *
 * Returns the highest subordinate bus number discovered.
 */
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	/* Two passes over the bridges; pci_scan_bridge() interprets "pass" */
	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (pci_is_bridge(dev))
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * Make sure a hotplug bridge has at least the minimum requested
	 * number of buses.
	 */
	if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
		if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
			max = bus->busn_res.start + pci_hotplug_bus_size - 1;
	}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
2341 
/**
 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
 * @bridge: Host bridge to set up.
 *
 * Default empty implementation.  Replace with an architecture-specific setup
 * routine, if necessary.
 *
 * Return: 0 (this default stub always succeeds).
 */
int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	return 0;
}
2353 
/* Default no-op; architectures may override to hook bus addition. */
void __weak pcibios_add_bus(struct pci_bus *bus)
{
}
2357 
/* Default no-op; architectures may override to hook bus removal. */
void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}
2361 
2362 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
2363 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
2364 {
2365 	int error;
2366 	struct pci_host_bridge *bridge;
2367 
2368 	bridge = pci_alloc_host_bridge(0);
2369 	if (!bridge)
2370 		return NULL;
2371 
2372 	bridge->dev.parent = parent;
2373 
2374 	list_splice_init(resources, &bridge->windows);
2375 	bridge->sysdata = sysdata;
2376 	bridge->busnr = bus;
2377 	bridge->ops = ops;
2378 
2379 	error = pci_register_host_bridge(bridge);
2380 	if (error < 0)
2381 		goto err_out;
2382 
2383 	return bridge->bus;
2384 
2385 err_out:
2386 	kfree(bridge);
2387 	return NULL;
2388 }
2389 EXPORT_SYMBOL_GPL(pci_create_root_bus);
2390 
/*
 * pci_bus_insert_busn_res - claim the bus number range for @b
 * @b: bus whose busn_res is being set up
 * @bus: first bus number of the range
 * @bus_max: last bus number of the range
 *
 * Inserts [bus, bus_max] into the parent bus's number resource, or into
 * the domain-wide resource for a root bus.  Returns nonzero on success,
 * 0 if the range conflicts with an existing reservation.
 */
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource *parent_res, *conflict;

	res->start = bus;
	res->end = bus_max;
	res->flags = IORESOURCE_BUS;

	if (!pci_is_root_bus(b))
		parent_res = &b->parent->busn_res;
	else {
		/* Root bus: claim from the per-domain [00-ff] resource */
		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
		res->flags |= IORESOURCE_PCI_FIXED;
	}

	conflict = request_resource_conflict(parent_res, res);

	if (conflict)
		dev_printk(KERN_DEBUG, &b->dev,
			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
			    res, pci_is_root_bus(b) ? "domain " : "",
			    parent_res, conflict->name, conflict);

	return conflict == NULL;
}
2417 
/*
 * pci_bus_update_busn_res_end - shrink or grow the end of @b's bus range
 * @b: bus whose busn_res end is being updated
 * @bus_max: new last bus number
 *
 * Returns 0 on success or a negative errno if the adjustment is invalid
 * (e.g. @bus_max below the range start, or the resource tree rejects it).
 */
int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource old_res = *res;	/* kept only for the log message */
	resource_size_t size;
	int ret;

	if (res->start > bus_max)
		return -EINVAL;

	size = bus_max - res->start + 1;
	ret = adjust_resource(res, res->start, size);
	dev_printk(KERN_DEBUG, &b->dev,
			"busn_res: %pR end %s updated to %02x\n",
			&old_res, ret ? "can not be" : "is", bus_max);

	/* If the resource was never inserted into the tree, insert it now */
	if (!ret && !res->parent)
		pci_bus_insert_busn_res(b, res->start, res->end);

	return ret;
}
2439 
2440 void pci_bus_release_busn_res(struct pci_bus *b)
2441 {
2442 	struct resource *res = &b->busn_res;
2443 	int ret;
2444 
2445 	if (!res->flags || !res->parent)
2446 		return;
2447 
2448 	ret = release_resource(res);
2449 	dev_printk(KERN_DEBUG, &b->dev,
2450 			"busn_res: %pR %s released\n",
2451 			res, ret ? "can not be" : "is");
2452 }
2453 
/*
 * pci_scan_root_bus_bridge - register a host bridge and scan its root bus
 * @bridge: host bridge with windows, busnr, ops and sysdata filled in
 *
 * Registers @bridge and scans the resulting root bus.  If the caller did
 * not supply an IORESOURCE_BUS window, [bus-ff] is assumed and the end is
 * later trimmed to the highest bus number actually found.
 *
 * Returns 0 on success or a negative errno from bridge registration.
 */
int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge)
{
	struct resource_entry *window;
	bool found = false;
	struct pci_bus *b;
	int max, bus, ret;

	if (!bridge)
		return -EINVAL;

	/* Did the caller provide a bus number aperture? */
	resource_list_for_each_entry(window, &bridge->windows)
		if (window->res->flags & IORESOURCE_BUS) {
			found = true;
			break;
		}

	ret = pci_register_host_bridge(bridge);
	if (ret < 0)
		return ret;

	b = bridge->bus;
	bus = bridge->busnr;

	if (!found) {
		dev_info(&b->dev,
		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
			bus);
		pci_bus_insert_busn_res(b, bus, 255);
	}

	max = pci_scan_child_bus(b);

	/* Trim the assumed [bus-ff] range down to what was discovered */
	if (!found)
		pci_bus_update_busn_res_end(b, max);

	return 0;
}
EXPORT_SYMBOL(pci_scan_root_bus_bridge);
2492 
2493 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
2494 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
2495 {
2496 	struct resource_entry *window;
2497 	bool found = false;
2498 	struct pci_bus *b;
2499 	int max;
2500 
2501 	resource_list_for_each_entry(window, resources)
2502 		if (window->res->flags & IORESOURCE_BUS) {
2503 			found = true;
2504 			break;
2505 		}
2506 
2507 	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
2508 	if (!b)
2509 		return NULL;
2510 
2511 	if (!found) {
2512 		dev_info(&b->dev,
2513 		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2514 			bus);
2515 		pci_bus_insert_busn_res(b, bus, 255);
2516 	}
2517 
2518 	max = pci_scan_child_bus(b);
2519 
2520 	if (!found)
2521 		pci_bus_update_busn_res_end(b, max);
2522 
2523 	return b;
2524 }
2525 EXPORT_SYMBOL(pci_scan_root_bus);
2526 
2527 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
2528 					void *sysdata)
2529 {
2530 	LIST_HEAD(resources);
2531 	struct pci_bus *b;
2532 
2533 	pci_add_resource(&resources, &ioport_resource);
2534 	pci_add_resource(&resources, &iomem_resource);
2535 	pci_add_resource(&resources, &busn_resource);
2536 	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2537 	if (b) {
2538 		pci_scan_child_bus(b);
2539 	} else {
2540 		pci_free_resource_list(&resources);
2541 	}
2542 	return b;
2543 }
2544 EXPORT_SYMBOL(pci_scan_bus);
2545 
2546 /**
2547  * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2548  * @bridge: PCI bridge for the bus to scan
2549  *
2550  * Scan a PCI bus and child buses for new devices, add them,
2551  * and enable them, resizing bridge mmio/io resource if necessary
2552  * and possible.  The caller must ensure the child devices are already
2553  * removed for resizing to occur.
2554  *
2555  * Returns the max number of subordinate bus discovered.
2556  */
2557 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2558 {
2559 	unsigned int max;
2560 	struct pci_bus *bus = bridge->subordinate;
2561 
2562 	max = pci_scan_child_bus(bus);
2563 
2564 	pci_assign_unassigned_bridge_resources(bridge);
2565 
2566 	pci_bus_add_devices(bus);
2567 
2568 	return max;
2569 }
2570 
/**
 * pci_rescan_bus - scan a PCI bus for devices.
 * @bus: PCI bus to scan
 *
 * Scan a PCI bus and child buses for new devices, adds them,
 * and enables them.
 *
 * Returns the max number of subordinate bus discovered.
 */
unsigned int pci_rescan_bus(struct pci_bus *bus)
{
	unsigned int max = pci_scan_child_bus(bus);

	/* Resources first, so newly found devices can be enabled */
	pci_assign_unassigned_bus_resources(bus);
	pci_bus_add_devices(bus);

	return max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);
2591 
/*
 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
 * routines should always be executed under this mutex.  Callers take it
 * via pci_lock_rescan_remove()/pci_unlock_rescan_remove().
 */
static DEFINE_MUTEX(pci_rescan_remove_lock);
2597 
/* Serialize PCI rescan/removal; pairs with pci_unlock_rescan_remove(). */
void pci_lock_rescan_remove(void)
{
	mutex_lock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2603 
/* Release the rescan/remove mutex taken by pci_lock_rescan_remove(). */
void pci_unlock_rescan_remove(void)
{
	mutex_unlock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
2609 
2610 static int __init pci_sort_bf_cmp(const struct device *d_a,
2611 				  const struct device *d_b)
2612 {
2613 	const struct pci_dev *a = to_pci_dev(d_a);
2614 	const struct pci_dev *b = to_pci_dev(d_b);
2615 
2616 	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2617 	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
2618 
2619 	if      (a->bus->number < b->bus->number) return -1;
2620 	else if (a->bus->number > b->bus->number) return  1;
2621 
2622 	if      (a->devfn < b->devfn) return -1;
2623 	else if (a->devfn > b->devfn) return  1;
2624 
2625 	return 0;
2626 }
2627 
/*
 * Re-sort all devices on the PCI bus type into breadth-first
 * (domain, bus, devfn) order using pci_sort_bf_cmp().
 */
void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}
2632