xref: /openbmc/linux/drivers/pci/probe.c (revision 1c2dd16a)
1 /*
2  * probe.c - PCI detection and setup code
3  */
4 
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/of_device.h>
10 #include <linux/of_pci.h>
11 #include <linux/pci_hotplug.h>
12 #include <linux/slab.h>
13 #include <linux/module.h>
14 #include <linux/cpumask.h>
15 #include <linux/pci-aspm.h>
16 #include <linux/aer.h>
17 #include <linux/acpi.h>
18 #include <linux/irqdomain.h>
19 #include <linux/pm_runtime.h>
20 #include "pci.h"
21 
22 #define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
23 #define CARDBUS_RESERVE_BUSNR	3
24 
/*
 * Fallback bus number resource: the full 0-255 PCI bus number range,
 * used when no per-domain bus number resource exists.
 */
static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};
31 
/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

/* All pci_domain_busn_res entries ever created, one per domain seen */
static LIST_HEAD(pci_domain_busn_res_list);

/* Per-domain bus number resource, kept on pci_domain_busn_res_list */
struct pci_domain_busn_res {
	struct list_head list;	/* link in pci_domain_busn_res_list */
	struct resource res;	/* IORESOURCE_BUS covering [bus 00-ff] */
	int domain_nr;		/* PCI domain (segment) number */
};
43 
44 static struct resource *get_pci_domain_busn_res(int domain_nr)
45 {
46 	struct pci_domain_busn_res *r;
47 
48 	list_for_each_entry(r, &pci_domain_busn_res_list, list)
49 		if (r->domain_nr == domain_nr)
50 			return &r->res;
51 
52 	r = kzalloc(sizeof(*r), GFP_KERNEL);
53 	if (!r)
54 		return NULL;
55 
56 	r->domain_nr = domain_nr;
57 	r->res.start = 0;
58 	r->res.end = 0xff;
59 	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
60 
61 	list_add_tail(&r->list, &pci_domain_busn_res_list);
62 
63 	return &r->res;
64 }
65 
/* bus_find_device() match callback that accepts any device at all. */
static int find_anything(struct device *unused_dev, void *unused_data)
{
	return 1;
}
70 
/*
 * Some device drivers need to know whether PCI has been initialized.
 * Basically, we consider PCI uninitialized when no device can be
 * found on the pci_bus_type bus.
 */
76 int no_pci_devices(void)
77 {
78 	struct device *dev;
79 	int no_devices;
80 
81 	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
82 	no_devices = (dev == NULL);
83 	put_device(dev);
84 	return no_devices;
85 }
86 EXPORT_SYMBOL(no_pci_devices);
87 
88 /*
89  * PCI Bus Class
90  */
/*
 * Device-model release callback for a struct pci_bus: drops the
 * reference held on the bridge device, frees the bus resource list and
 * OF node reference, then frees the bus structure itself.
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}
100 
/* Class backing /sys/class/pci_bus; release_pcibus_dev() frees each bus */
static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

/* Register the class early (postcore) so buses can be added at probe time */
static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);
112 
113 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
114 {
115 	u64 size = mask & maxbase;	/* Find the significant bits */
116 	if (!size)
117 		return 0;
118 
119 	/* Get the lowest of them to find the decode size, and
120 	   from that the extent.  */
121 	size = (size & ~(size-1)) - 1;
122 
123 	/* base == maxbase can be valid only if the BAR has
124 	   already been programmed with all 1s.  */
125 	if (base == maxbase && ((base | size) & mask) != mask)
126 		return 0;
127 
128 	return size;
129 }
130 
131 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
132 {
133 	u32 mem_type;
134 	unsigned long flags;
135 
136 	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
137 		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
138 		flags |= IORESOURCE_IO;
139 		return flags;
140 	}
141 
142 	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
143 	flags |= IORESOURCE_MEM;
144 	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
145 		flags |= IORESOURCE_PREFETCH;
146 
147 	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
148 	switch (mem_type) {
149 	case PCI_BASE_ADDRESS_MEM_TYPE_32:
150 		break;
151 	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
152 		/* 1M mem BAR treated as 32-bit BAR */
153 		break;
154 	case PCI_BASE_ADDRESS_MEM_TYPE_64:
155 		flags |= IORESOURCE_MEM_64;
156 		break;
157 	default:
158 		/* mem unknown type treated as 32-bit BAR */
159 		break;
160 	}
161 	return flags;
162 }
163 
#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Sizes the BAR by writing all 1s to it and reading back the value,
 * restores the original contents, then converts the bus address range
 * into the CPU resource range in @res.
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;

	/* ROM BARs keep the enable bit (bit 0) out of the address mask */
	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	/* Size the BAR: save, write all 1s, read back, restore */
	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (sz == 0xffffffff)
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		if (l & PCI_ROM_ADDRESS_ENABLE)
			res->flags |= IORESOURCE_ROM_ENABLE;
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = (u32)PCI_ROM_ADDRESS_MASK;
	}

	/* For a 64-bit BAR, size the upper dword the same way */
	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	/* Decoding re-enabled; printks are safe from here on */
	if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (!sz64)
		goto fail;

	sz64 = pci_size(l64, sz64, mask64);
	if (!sz64) {
		dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
			 pos);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
		    && sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
				pos, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(pci_bus_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
				 pos, (unsigned long long)l64);
			goto out;
		}
	}

	region.start = l64;
	region.end = l64 + sz64;	/* sz64 is really size - 1 here */

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU).  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long)region.start);
	}

	goto out;


fail:
	res->flags = 0;
out:
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
319 
320 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
321 {
322 	unsigned int pos, reg;
323 
324 	if (dev->non_compliant_bars)
325 		return;
326 
327 	for (pos = 0; pos < howmany; pos++) {
328 		struct resource *res = &dev->resource[pos];
329 		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
330 		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
331 	}
332 
333 	if (rom) {
334 		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
335 		dev->rom_base_reg = rom;
336 		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
337 				IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
338 		__pci_read_base(dev, pci_bar_mem32, res, rom);
339 	}
340 }
341 
/*
 * Read the I/O window of the bridge above @child from PCI_IO_BASE /
 * PCI_IO_LIMIT (plus the UPPER16 registers for 32-bit I/O windows)
 * and, when the window is enabled (base <= limit), fill in
 * child->resource[0].
 */
static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	/* the byte registers hold address bits 15:8 (15:10 for 1K windows) */
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		/* 32-bit I/O window: bits 31:16 live in the UPPER16 registers */
		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		/* the limit register names a granule; extend to its last byte */
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
381 
/*
 * Read the 32-bit non-prefetchable memory window of the bridge above
 * @child and, when enabled (base <= limit), fill in child->resource[1].
 */
static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	/* the word registers hold address bits 31:20 */
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		/* window runs to the end of the 1MB granule named by limit */
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
403 
404 static void pci_read_bridge_mmio_pref(struct pci_bus *child)
405 {
406 	struct pci_dev *dev = child->self;
407 	u16 mem_base_lo, mem_limit_lo;
408 	u64 base64, limit64;
409 	pci_bus_addr_t base, limit;
410 	struct pci_bus_region region;
411 	struct resource *res;
412 
413 	res = child->resource[2];
414 	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
415 	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
416 	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
417 	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
418 
419 	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
420 		u32 mem_base_hi, mem_limit_hi;
421 
422 		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
423 		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
424 
425 		/*
426 		 * Some bridges set the base > limit by default, and some
427 		 * (broken) BIOSes do not initialize them.  If we find
428 		 * this, just assume they are not being used.
429 		 */
430 		if (mem_base_hi <= mem_limit_hi) {
431 			base64 |= (u64) mem_base_hi << 32;
432 			limit64 |= (u64) mem_limit_hi << 32;
433 		}
434 	}
435 
436 	base = (pci_bus_addr_t) base64;
437 	limit = (pci_bus_addr_t) limit64;
438 
439 	if (base != base64) {
440 		dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
441 			(unsigned long long) base64);
442 		return;
443 	}
444 
445 	if (base <= limit) {
446 		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
447 					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
448 		if (res->flags & PCI_PREF_RANGE_TYPE_64)
449 			res->flags |= IORESOURCE_MEM_64;
450 		region.start = base;
451 		region.end = limit + 0xfffff;
452 		pcibios_bus_to_resource(dev->bus, res, &region);
453 		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
454 	}
455 }
456 
/*
 * Read all three bridge windows (I/O, MMIO, prefetchable MMIO) of the
 * bridge leading to @child and install them as the child's bridge
 * resources.  For transparent bridges the parent's resources are also
 * added with subtractive decode.
 */
void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	/* point the child's windows at the bridge device's own resources */
	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res && res->flags) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}
490 
491 static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
492 {
493 	struct pci_bus *b;
494 
495 	b = kzalloc(sizeof(*b), GFP_KERNEL);
496 	if (!b)
497 		return NULL;
498 
499 	INIT_LIST_HEAD(&b->node);
500 	INIT_LIST_HEAD(&b->children);
501 	INIT_LIST_HEAD(&b->devices);
502 	INIT_LIST_HEAD(&b->slots);
503 	INIT_LIST_HEAD(&b->resources);
504 	b->max_bus_speed = PCI_SPEED_UNKNOWN;
505 	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
506 #ifdef CONFIG_PCI_DOMAINS_GENERIC
507 	if (parent)
508 		b->domain_nr = parent->domain_nr;
509 #endif
510 	return b;
511 }
512 
/*
 * Device-model release callback for a struct pci_host_bridge: invokes
 * the driver's release_fn (if any), frees the window list, then frees
 * the bridge structure itself.
 */
static void pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}
524 
525 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
526 {
527 	struct pci_host_bridge *bridge;
528 
529 	bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
530 	if (!bridge)
531 		return NULL;
532 
533 	INIT_LIST_HEAD(&bridge->windows);
534 
535 	return bridge;
536 }
537 EXPORT_SYMBOL(pci_alloc_host_bridge);
538 
/* PCI-X secondary bus speeds, indexed by the PCI_X_SSTATUS_FREQ field */
static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

/* PCIe speeds, indexed by the Link Capabilities/Status speed field */
const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};
576 
/*
 * Update @bus's current speed from the Current Link Speed field of a
 * PCIe Link Status register value @linksta.
 */
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
582 
/* AGP rates, indexed by the value computed in agp_speed() */
static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};
590 
591 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
592 {
593 	int index = 0;
594 
595 	if (agpstat & 4)
596 		index = 3;
597 	else if (agpstat & 2)
598 		index = 2;
599 	else if (agpstat & 1)
600 		index = 1;
601 	else
602 		goto out;
603 
604 	if (agp3) {
605 		index += 2;
606 		if (index == 5)
607 			index = 0;
608 	}
609 
610  out:
611 	return agp_speeds[index];
612 }
613 
/*
 * Derive @bus's max and current speeds from its bridge's AGP, PCI-X or
 * PCIe capability (checked in that order; PCI-X returns early).
 */
static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		/* status advertises capability; bit 3 selects AGP3 encoding */
		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		/* the command register holds the rate actually negotiated */
		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		/* secondary bus frequency field starts at bit 6 */
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}
671 
/*
 * Resolve the MSI IRQ domain for root bus @bus via firmware (OF first,
 * then ACPI), with a fwnode lookup as a last resort when MSI IRQ
 * domains are enabled.  May return NULL if nothing is found.
 */
static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}
#endif

	return d;
}
700 
/*
 * Set the MSI IRQ domain of @bus: inherit it from the nearest bridge
 * device above it, or derive it from the host bridge when @bus hangs
 * directly off a root bus.
 */
static void pci_set_bus_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;
	struct pci_bus *b;

	/*
	 * The bus can be a root bus, a subordinate bus, or a virtual bus
	 * created by an SR-IOV device.  Walk up to the first bridge device
	 * found or derive the domain from the host bridge.
	 */
	for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
		if (b->self)
			d = dev_get_msi_domain(&b->self->dev);
	}

	if (!d)
		d = pci_host_bridge_msi_domain(b);

	dev_set_msi_domain(&bus->dev, d);
}
721 
722 int pci_register_host_bridge(struct pci_host_bridge *bridge)
723 {
724 	struct device *parent = bridge->dev.parent;
725 	struct resource_entry *window, *n;
726 	struct pci_bus *bus, *b;
727 	resource_size_t offset;
728 	LIST_HEAD(resources);
729 	struct resource *res;
730 	char addr[64], *fmt;
731 	const char *name;
732 	int err;
733 
734 	bus = pci_alloc_bus(NULL);
735 	if (!bus)
736 		return -ENOMEM;
737 
738 	bridge->bus = bus;
739 
740 	/* temporarily move resources off the list */
741 	list_splice_init(&bridge->windows, &resources);
742 	bus->sysdata = bridge->sysdata;
743 	bus->msi = bridge->msi;
744 	bus->ops = bridge->ops;
745 	bus->number = bus->busn_res.start = bridge->busnr;
746 #ifdef CONFIG_PCI_DOMAINS_GENERIC
747 	bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
748 #endif
749 
750 	b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
751 	if (b) {
752 		/* If we already got to this bus through a different bridge, ignore it */
753 		dev_dbg(&b->dev, "bus already known\n");
754 		err = -EEXIST;
755 		goto free;
756 	}
757 
758 	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
759 		     bridge->busnr);
760 
761 	err = pcibios_root_bridge_prepare(bridge);
762 	if (err)
763 		goto free;
764 
765 	err = device_register(&bridge->dev);
766 	if (err)
767 		put_device(&bridge->dev);
768 
769 	bus->bridge = get_device(&bridge->dev);
770 	device_enable_async_suspend(bus->bridge);
771 	pci_set_bus_of_node(bus);
772 	pci_set_bus_msi_domain(bus);
773 
774 	if (!parent)
775 		set_dev_node(bus->bridge, pcibus_to_node(bus));
776 
777 	bus->dev.class = &pcibus_class;
778 	bus->dev.parent = bus->bridge;
779 
780 	dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
781 	name = dev_name(&bus->dev);
782 
783 	err = device_register(&bus->dev);
784 	if (err)
785 		goto unregister;
786 
787 	pcibios_add_bus(bus);
788 
789 	/* Create legacy_io and legacy_mem files for this bus */
790 	pci_create_legacy_files(bus);
791 
792 	if (parent)
793 		dev_info(parent, "PCI host bridge to bus %s\n", name);
794 	else
795 		pr_info("PCI host bridge to bus %s\n", name);
796 
797 	/* Add initial resources to the bus */
798 	resource_list_for_each_entry_safe(window, n, &resources) {
799 		list_move_tail(&window->node, &bridge->windows);
800 		offset = window->offset;
801 		res = window->res;
802 
803 		if (res->flags & IORESOURCE_BUS)
804 			pci_bus_insert_busn_res(bus, bus->number, res->end);
805 		else
806 			pci_bus_add_resource(bus, res, 0);
807 
808 		if (offset) {
809 			if (resource_type(res) == IORESOURCE_IO)
810 				fmt = " (bus address [%#06llx-%#06llx])";
811 			else
812 				fmt = " (bus address [%#010llx-%#010llx])";
813 
814 			snprintf(addr, sizeof(addr), fmt,
815 				 (unsigned long long)(res->start - offset),
816 				 (unsigned long long)(res->end - offset));
817 		} else
818 			addr[0] = '\0';
819 
820 		dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
821 	}
822 
823 	down_write(&pci_bus_sem);
824 	list_add_tail(&bus->node, &pci_root_buses);
825 	up_write(&pci_bus_sem);
826 
827 	return 0;
828 
829 unregister:
830 	put_device(&bridge->dev);
831 	device_unregister(&bridge->dev);
832 
833 free:
834 	kfree(bus);
835 	return err;
836 }
837 EXPORT_SYMBOL(pci_register_host_bridge);
838 
/*
 * Allocate and register a child bus of @parent with bus number @busnr,
 * behind bridge device @bridge (NULL for a virtual bus, e.g. one
 * created by an SR-IOV device).  Returns the new bus, or NULL on
 * allocation failure.
 */
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.  The subordinate is provisionally 0xff until the
	 * bus behind the bridge has actually been scanned.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		/* virtual bus: parent the device at the upstream bridge */
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	pci_set_bus_msi_domain(child);
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	if (child->ops->add_bus) {
		ret = child->ops->add_bus(child);
		if (WARN_ON(ret < 0))
			dev_err(&child->dev, "failed to add bus: %d\n", ret);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}
909 
910 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
911 				int busnr)
912 {
913 	struct pci_bus *child;
914 
915 	child = pci_alloc_child_bus(parent, dev, busnr);
916 	if (child) {
917 		down_write(&pci_bus_sem);
918 		list_add_tail(&child->node, &parent->children);
919 		up_write(&pci_bus_sem);
920 	}
921 	return child;
922 }
923 EXPORT_SYMBOL(pci_add_new_bus);
924 
925 static void pci_enable_crs(struct pci_dev *pdev)
926 {
927 	u16 root_cap = 0;
928 
929 	/* Enable CRS Software Visibility if supported */
930 	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
931 	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
932 		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
933 					 PCI_EXP_RTCTL_CRSSVE);
934 }
935 
936 /*
937  * If it's a bridge, configure it and scan the bus behind it.
938  * For CardBus bridges, we don't scan behind as the devices will
939  * be handled by the bridge driver itself.
940  *
941  * We need to process bridges in two passes -- first we scan those
942  * already configured by the BIOS and after we are done with all of
943  * them, we proceed to assigning numbers to the remaining buses in
944  * order to avoid overlaps between old and new bus numbers.
945  */
946 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
947 {
948 	struct pci_bus *child;
949 	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
950 	u32 buses, i, j = 0;
951 	u16 bctl;
952 	u8 primary, secondary, subordinate;
953 	int broken = 0;
954 
955 	/*
956 	 * Make sure the bridge is powered on to be able to access config
957 	 * space of devices below it.
958 	 */
959 	pm_runtime_get_sync(&dev->dev);
960 
961 	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
962 	primary = buses & 0xFF;
963 	secondary = (buses >> 8) & 0xFF;
964 	subordinate = (buses >> 16) & 0xFF;
965 
966 	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
967 		secondary, subordinate, pass);
968 
969 	if (!primary && (primary != bus->number) && secondary && subordinate) {
970 		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
971 		primary = bus->number;
972 	}
973 
974 	/* Check if setup is sensible at all */
975 	if (!pass &&
976 	    (primary != bus->number || secondary <= bus->number ||
977 	     secondary > subordinate)) {
978 		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
979 			 secondary, subordinate);
980 		broken = 1;
981 	}
982 
983 	/* Disable MasterAbortMode during probing to avoid reporting
984 	   of bus errors (in some architectures) */
985 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
986 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
987 			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
988 
989 	pci_enable_crs(dev);
990 
991 	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
992 	    !is_cardbus && !broken) {
993 		unsigned int cmax;
994 		/*
995 		 * Bus already configured by firmware, process it in the first
996 		 * pass and just note the configuration.
997 		 */
998 		if (pass)
999 			goto out;
1000 
1001 		/*
1002 		 * The bus might already exist for two reasons: Either we are
1003 		 * rescanning the bus or the bus is reachable through more than
1004 		 * one bridge. The second case can happen with the i450NX
1005 		 * chipset.
1006 		 */
1007 		child = pci_find_bus(pci_domain_nr(bus), secondary);
1008 		if (!child) {
1009 			child = pci_add_new_bus(bus, dev, secondary);
1010 			if (!child)
1011 				goto out;
1012 			child->primary = primary;
1013 			pci_bus_insert_busn_res(child, secondary, subordinate);
1014 			child->bridge_ctl = bctl;
1015 		}
1016 
1017 		cmax = pci_scan_child_bus(child);
1018 		if (cmax > subordinate)
1019 			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
1020 				 subordinate, cmax);
1021 		/* subordinate should equal child->busn_res.end */
1022 		if (subordinate > max)
1023 			max = subordinate;
1024 	} else {
1025 		/*
1026 		 * We need to assign a number to this bus which we always
1027 		 * do in the second pass.
1028 		 */
1029 		if (!pass) {
1030 			if (pcibios_assign_all_busses() || broken || is_cardbus)
1031 				/* Temporarily disable forwarding of the
1032 				   configuration cycles on all bridges in
1033 				   this bus segment to avoid possible
1034 				   conflicts in the second pass between two
1035 				   bridges programmed with overlapping
1036 				   bus ranges. */
1037 				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
1038 						       buses & ~0xffffff);
1039 			goto out;
1040 		}
1041 
1042 		/* Clear errors */
1043 		pci_write_config_word(dev, PCI_STATUS, 0xffff);
1044 
1045 		/* Prevent assigning a bus number that already exists.
1046 		 * This can happen when a bridge is hot-plugged, so in
1047 		 * this case we only re-scan this bus. */
1048 		child = pci_find_bus(pci_domain_nr(bus), max+1);
1049 		if (!child) {
1050 			child = pci_add_new_bus(bus, dev, max+1);
1051 			if (!child)
1052 				goto out;
1053 			pci_bus_insert_busn_res(child, max+1, 0xff);
1054 		}
1055 		max++;
1056 		buses = (buses & 0xff000000)
1057 		      | ((unsigned int)(child->primary)     <<  0)
1058 		      | ((unsigned int)(child->busn_res.start)   <<  8)
1059 		      | ((unsigned int)(child->busn_res.end) << 16);
1060 
1061 		/*
1062 		 * yenta.c forces a secondary latency timer of 176.
1063 		 * Copy that behaviour here.
1064 		 */
1065 		if (is_cardbus) {
1066 			buses &= ~0xff000000;
1067 			buses |= CARDBUS_LATENCY_TIMER << 24;
1068 		}
1069 
1070 		/*
1071 		 * We need to blast all three values with a single write.
1072 		 */
1073 		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
1074 
1075 		if (!is_cardbus) {
1076 			child->bridge_ctl = bctl;
1077 			max = pci_scan_child_bus(child);
1078 		} else {
1079 			/*
1080 			 * For CardBus bridges, we leave 4 bus numbers
1081 			 * as cards with a PCI-to-PCI bridge can be
1082 			 * inserted later.
1083 			 */
1084 			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
1085 				struct pci_bus *parent = bus;
1086 				if (pci_find_bus(pci_domain_nr(bus),
1087 							max+i+1))
1088 					break;
1089 				while (parent->parent) {
1090 					if ((!pcibios_assign_all_busses()) &&
1091 					    (parent->busn_res.end > max) &&
1092 					    (parent->busn_res.end <= max+i)) {
1093 						j = 1;
1094 					}
1095 					parent = parent->parent;
1096 				}
1097 				if (j) {
1098 					/*
1099 					 * Often, there are two cardbus bridges
1100 					 * -- try to leave one valid bus number
1101 					 * for each one.
1102 					 */
1103 					i /= 2;
1104 					break;
1105 				}
1106 			}
1107 			max += i;
1108 		}
1109 		/*
1110 		 * Set the subordinate bus number to its real value.
1111 		 */
1112 		pci_bus_update_busn_res_end(child, max);
1113 		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
1114 	}
1115 
1116 	sprintf(child->name,
1117 		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
1118 		pci_domain_nr(bus), child->number);
1119 
1120 	/* Has only triggered on CardBus, fixup is in yenta_socket */
1121 	while (bus->parent) {
1122 		if ((child->busn_res.end > bus->busn_res.end) ||
1123 		    (child->number > bus->busn_res.end) ||
1124 		    (child->number < bus->number) ||
1125 		    (child->busn_res.end < bus->number)) {
1126 			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
1127 				&child->busn_res,
1128 				(bus->number > child->busn_res.end &&
1129 				 bus->busn_res.end < child->number) ?
1130 					"wholly" : "partially",
1131 				bus->self->transparent ? " transparent" : "",
1132 				dev_name(&bus->dev),
1133 				&bus->busn_res);
1134 		}
1135 		bus = bus->parent;
1136 	}
1137 
1138 out:
1139 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
1140 
1141 	pm_runtime_put(&dev->dev);
1142 
1143 	return max;
1144 }
1145 EXPORT_SYMBOL(pci_scan_bridge);
1146 
1147 /*
1148  * Read interrupt line and base address registers.
1149  * The architecture-dependent code can tweak these, of course.
1150  */
1151 static void pci_read_irq(struct pci_dev *dev)
1152 {
1153 	unsigned char irq;
1154 
1155 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
1156 	dev->pin = irq;
1157 	if (irq)
1158 		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
1159 	dev->irq = irq;
1160 }
1161 
1162 void set_pcie_port_type(struct pci_dev *pdev)
1163 {
1164 	int pos;
1165 	u16 reg16;
1166 	int type;
1167 	struct pci_dev *parent;
1168 
1169 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1170 	if (!pos)
1171 		return;
1172 
1173 	pdev->pcie_cap = pos;
1174 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
1175 	pdev->pcie_flags_reg = reg16;
1176 	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
1177 	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
1178 
1179 	/*
1180 	 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
1181 	 * of a Link.  No PCIe component has two Links.  Two Links are
1182 	 * connected by a Switch that has a Port on each Link and internal
1183 	 * logic to connect the two Ports.
1184 	 */
1185 	type = pci_pcie_type(pdev);
1186 	if (type == PCI_EXP_TYPE_ROOT_PORT ||
1187 	    type == PCI_EXP_TYPE_PCIE_BRIDGE)
1188 		pdev->has_secondary_link = 1;
1189 	else if (type == PCI_EXP_TYPE_UPSTREAM ||
1190 		 type == PCI_EXP_TYPE_DOWNSTREAM) {
1191 		parent = pci_upstream_bridge(pdev);
1192 
1193 		/*
1194 		 * Usually there's an upstream device (Root Port or Switch
1195 		 * Downstream Port), but we can't assume one exists.
1196 		 */
1197 		if (parent && !parent->has_secondary_link)
1198 			pdev->has_secondary_link = 1;
1199 	}
1200 }
1201 
1202 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
1203 {
1204 	u32 reg32;
1205 
1206 	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
1207 	if (reg32 & PCI_EXP_SLTCAP_HPC)
1208 		pdev->is_hotplug_bridge = 1;
1209 }
1210 
1211 /**
1212  * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
1213  * @dev: PCI device
1214  *
1215  * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
1216  * when forwarding a type1 configuration request the bridge must check that
1217  * the extended register address field is zero.  The bridge is not permitted
1218  * to forward the transactions and must handle it as an Unsupported Request.
1219  * Some bridges do not follow this rule and simply drop the extended register
1220  * bits, resulting in the standard config space being aliased, every 256
1221  * bytes across the entire configuration space.  Test for this condition by
1222  * comparing the first dword of each potential alias to the vendor/device ID.
1223  * Known offenders:
1224  *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1225  *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
1226  */
1227 static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
1228 {
1229 #ifdef CONFIG_PCI_QUIRKS
1230 	int pos;
1231 	u32 header, tmp;
1232 
1233 	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
1234 
1235 	for (pos = PCI_CFG_SPACE_SIZE;
1236 	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
1237 		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
1238 		    || header != tmp)
1239 			return false;
1240 	}
1241 
1242 	return true;
1243 #else
1244 	return false;
1245 #endif
1246 }
1247 
1248 /**
1249  * pci_cfg_space_size - get the configuration space size of the PCI device.
1250  * @dev: PCI device
1251  *
1252  * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1253  * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
1254  * access it.  Maybe we don't have a way to generate extended config space
1255  * accesses, or the device is behind a reverse Express bridge.  So we try
1256  * reading the dword at 0x100 which must either be 0 or a valid extended
1257  * capability header.
1258  */
1259 static int pci_cfg_space_size_ext(struct pci_dev *dev)
1260 {
1261 	u32 status;
1262 	int pos = PCI_CFG_SPACE_SIZE;
1263 
1264 	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1265 		return PCI_CFG_SPACE_SIZE;
1266 	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
1267 		return PCI_CFG_SPACE_SIZE;
1268 
1269 	return PCI_CFG_SPACE_EXP_SIZE;
1270 }
1271 
1272 int pci_cfg_space_size(struct pci_dev *dev)
1273 {
1274 	int pos;
1275 	u32 status;
1276 	u16 class;
1277 
1278 	class = dev->class >> 8;
1279 	if (class == PCI_CLASS_BRIDGE_HOST)
1280 		return pci_cfg_space_size_ext(dev);
1281 
1282 	if (pci_is_pcie(dev))
1283 		return pci_cfg_space_size_ext(dev);
1284 
1285 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1286 	if (!pos)
1287 		return PCI_CFG_SPACE_SIZE;
1288 
1289 	pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1290 	if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
1291 		return pci_cfg_space_size_ext(dev);
1292 
1293 	return PCI_CFG_SPACE_SIZE;
1294 }
1295 
1296 #define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1297 
/*
 * Locate the MSI and MSI-X capabilities of @dev (caching their offsets
 * in msi_cap/msix_cap) and make sure both mechanisms start disabled.
 */
static void pci_msi_setup_pci_dev(struct pci_dev *dev)
{
	/*
	 * Disable the MSI hardware to avoid screaming interrupts
	 * during boot.  This is the power on reset default so
	 * usually this should be a noop.
	 */
	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (dev->msi_cap)
		pci_msi_set_enable(dev, 0);

	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (dev->msix_cap)
		pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
}
1313 
1314 /**
1315  * pci_setup_device - fill in class and map information of a device
1316  * @dev: the device structure to fill
1317  *
1318  * Initialize the device structure with information about the device's
1319  * vendor,class,memory and IO-space addresses,IRQ lines etc.
1320  * Called at initialisation of the PCI subsystem and by CardBus services.
1321  * Returns 0 on success and negative if unknown type of device (not normal,
1322  * bridge or CardBus).
1323  */
1324 int pci_setup_device(struct pci_dev *dev)
1325 {
1326 	u32 class;
1327 	u16 cmd;
1328 	u8 hdr_type;
1329 	int pos = 0;
1330 	struct pci_bus_region region;
1331 	struct resource *res;
1332 
1333 	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
1334 		return -EIO;
1335 
1336 	dev->sysdata = dev->bus->sysdata;
1337 	dev->dev.parent = dev->bus->bridge;
1338 	dev->dev.bus = &pci_bus_type;
1339 	dev->hdr_type = hdr_type & 0x7f;
1340 	dev->multifunction = !!(hdr_type & 0x80);
1341 	dev->error_state = pci_channel_io_normal;
1342 	set_pcie_port_type(dev);
1343 
1344 	pci_dev_assign_slot(dev);
1345 	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1346 	   set this higher, assuming the system even supports it.  */
1347 	dev->dma_mask = 0xffffffff;
1348 
1349 	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
1350 		     dev->bus->number, PCI_SLOT(dev->devfn),
1351 		     PCI_FUNC(dev->devfn));
1352 
1353 	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
1354 	dev->revision = class & 0xff;
1355 	dev->class = class >> 8;		    /* upper 3 bytes */
1356 
1357 	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1358 		   dev->vendor, dev->device, dev->hdr_type, dev->class);
1359 
1360 	/* need to have dev->class ready */
1361 	dev->cfg_size = pci_cfg_space_size(dev);
1362 
1363 	/* "Unknown power state" */
1364 	dev->current_state = PCI_UNKNOWN;
1365 
1366 	/* Early fixups, before probing the BARs */
1367 	pci_fixup_device(pci_fixup_early, dev);
1368 	/* device class may be changed after fixup */
1369 	class = dev->class >> 8;
1370 
1371 	if (dev->non_compliant_bars) {
1372 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
1373 		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
1374 			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
1375 			cmd &= ~PCI_COMMAND_IO;
1376 			cmd &= ~PCI_COMMAND_MEMORY;
1377 			pci_write_config_word(dev, PCI_COMMAND, cmd);
1378 		}
1379 	}
1380 
1381 	switch (dev->hdr_type) {		    /* header type */
1382 	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
1383 		if (class == PCI_CLASS_BRIDGE_PCI)
1384 			goto bad;
1385 		pci_read_irq(dev);
1386 		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1387 		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1388 		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
1389 
1390 		/*
1391 		 * Do the ugly legacy mode stuff here rather than broken chip
1392 		 * quirk code. Legacy mode ATA controllers have fixed
1393 		 * addresses. These are not always echoed in BAR0-3, and
1394 		 * BAR0-3 in a few cases contain junk!
1395 		 */
1396 		if (class == PCI_CLASS_STORAGE_IDE) {
1397 			u8 progif;
1398 			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1399 			if ((progif & 1) == 0) {
1400 				region.start = 0x1F0;
1401 				region.end = 0x1F7;
1402 				res = &dev->resource[0];
1403 				res->flags = LEGACY_IO_RESOURCE;
1404 				pcibios_bus_to_resource(dev->bus, res, &region);
1405 				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
1406 					 res);
1407 				region.start = 0x3F6;
1408 				region.end = 0x3F6;
1409 				res = &dev->resource[1];
1410 				res->flags = LEGACY_IO_RESOURCE;
1411 				pcibios_bus_to_resource(dev->bus, res, &region);
1412 				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
1413 					 res);
1414 			}
1415 			if ((progif & 4) == 0) {
1416 				region.start = 0x170;
1417 				region.end = 0x177;
1418 				res = &dev->resource[2];
1419 				res->flags = LEGACY_IO_RESOURCE;
1420 				pcibios_bus_to_resource(dev->bus, res, &region);
1421 				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
1422 					 res);
1423 				region.start = 0x376;
1424 				region.end = 0x376;
1425 				res = &dev->resource[3];
1426 				res->flags = LEGACY_IO_RESOURCE;
1427 				pcibios_bus_to_resource(dev->bus, res, &region);
1428 				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
1429 					 res);
1430 			}
1431 		}
1432 		break;
1433 
1434 	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
1435 		if (class != PCI_CLASS_BRIDGE_PCI)
1436 			goto bad;
1437 		/* The PCI-to-PCI bridge spec requires that subtractive
1438 		   decoding (i.e. transparent) bridge must have programming
1439 		   interface code of 0x01. */
1440 		pci_read_irq(dev);
1441 		dev->transparent = ((dev->class & 0xff) == 1);
1442 		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1443 		set_pcie_hotplug_bridge(dev);
1444 		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1445 		if (pos) {
1446 			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1447 			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1448 		}
1449 		break;
1450 
1451 	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
1452 		if (class != PCI_CLASS_BRIDGE_CARDBUS)
1453 			goto bad;
1454 		pci_read_irq(dev);
1455 		pci_read_bases(dev, 1, 0);
1456 		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1457 		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1458 		break;
1459 
1460 	default:				    /* unknown header */
1461 		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
1462 			dev->hdr_type);
1463 		return -EIO;
1464 
1465 	bad:
1466 		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
1467 			dev->class, dev->hdr_type);
1468 		dev->class = PCI_CLASS_NOT_DEFINED << 8;
1469 	}
1470 
1471 	/* We found a fine healthy device, go go go... */
1472 	return 0;
1473 }
1474 
1475 static void pci_configure_mps(struct pci_dev *dev)
1476 {
1477 	struct pci_dev *bridge = pci_upstream_bridge(dev);
1478 	int mps, p_mps, rc;
1479 
1480 	if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
1481 		return;
1482 
1483 	mps = pcie_get_mps(dev);
1484 	p_mps = pcie_get_mps(bridge);
1485 
1486 	if (mps == p_mps)
1487 		return;
1488 
1489 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
1490 		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1491 			 mps, pci_name(bridge), p_mps);
1492 		return;
1493 	}
1494 
1495 	/*
1496 	 * Fancier MPS configuration is done later by
1497 	 * pcie_bus_configure_settings()
1498 	 */
1499 	if (pcie_bus_config != PCIE_BUS_DEFAULT)
1500 		return;
1501 
1502 	rc = pcie_set_mps(dev, p_mps);
1503 	if (rc) {
1504 		dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1505 			 p_mps);
1506 		return;
1507 	}
1508 
1509 	dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
1510 		 p_mps, mps, 128 << dev->pcie_mpss);
1511 }
1512 
/* Default type 0 hotplug parameters, used when firmware supplies none */
static struct hpp_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};
1520 
1521 static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
1522 {
1523 	u16 pci_cmd, pci_bctl;
1524 
1525 	if (!hpp)
1526 		hpp = &pci_default_type0;
1527 
1528 	if (hpp->revision > 1) {
1529 		dev_warn(&dev->dev,
1530 			 "PCI settings rev %d not supported; using defaults\n",
1531 			 hpp->revision);
1532 		hpp = &pci_default_type0;
1533 	}
1534 
1535 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
1536 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
1537 	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
1538 	if (hpp->enable_serr)
1539 		pci_cmd |= PCI_COMMAND_SERR;
1540 	if (hpp->enable_perr)
1541 		pci_cmd |= PCI_COMMAND_PARITY;
1542 	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
1543 
1544 	/* Program bridge control value */
1545 	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
1546 		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
1547 				      hpp->latency_timer);
1548 		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
1549 		if (hpp->enable_serr)
1550 			pci_bctl |= PCI_BRIDGE_CTL_SERR;
1551 		if (hpp->enable_perr)
1552 			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
1553 		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
1554 	}
1555 }
1556 
1557 static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1558 {
1559 	int pos;
1560 
1561 	if (!hpp)
1562 		return;
1563 
1564 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1565 	if (!pos)
1566 		return;
1567 
1568 	dev_warn(&dev->dev, "PCI-X settings not supported\n");
1569 }
1570 
1571 static bool pcie_root_rcb_set(struct pci_dev *dev)
1572 {
1573 	struct pci_dev *rp = pcie_find_root_port(dev);
1574 	u16 lnkctl;
1575 
1576 	if (!rp)
1577 		return false;
1578 
1579 	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
1580 	if (lnkctl & PCI_EXP_LNKCTL_RCB)
1581 		return true;
1582 
1583 	return false;
1584 }
1585 
/*
 * Apply type 2 (PCIe) hotplug parameters to @dev: Device Control, Link
 * Control and, if the device has an AER capability, the AER mask/severity/
 * capability registers, each via firmware-supplied and/or masks.  MPS and
 * MRRS bits are stripped from the masks first because the kernel manages
 * those itself (see pci_configure_mps()).
 */
static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
	int pos;
	u32 reg32;

	if (!hpp)
		return;

	if (!pci_is_pcie(dev))
		return;

	/* Only revision 1 tables are understood */
	if (hpp->revision > 1) {
		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
			 hpp->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ;
	hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {

		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}
1665 
1666 static void pci_configure_extended_tags(struct pci_dev *dev)
1667 {
1668 	u32 dev_cap;
1669 	int ret;
1670 
1671 	if (!pci_is_pcie(dev))
1672 		return;
1673 
1674 	ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &dev_cap);
1675 	if (ret)
1676 		return;
1677 
1678 	if (dev_cap & PCI_EXP_DEVCAP_EXT_TAG)
1679 		pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
1680 					 PCI_EXP_DEVCTL_EXT_TAG);
1681 }
1682 
1683 static void pci_configure_device(struct pci_dev *dev)
1684 {
1685 	struct hotplug_params hpp;
1686 	int ret;
1687 
1688 	pci_configure_mps(dev);
1689 	pci_configure_extended_tags(dev);
1690 
1691 	memset(&hpp, 0, sizeof(hpp));
1692 	ret = pci_get_hp_params(dev, &hpp);
1693 	if (ret)
1694 		return;
1695 
1696 	program_hpp_type2(dev, hpp.t2);
1697 	program_hpp_type1(dev, hpp.t1);
1698 	program_hpp_type0(dev, hpp.t0);
1699 }
1700 
/* Release per-device capability state (VPD, SR-IOV, cap save buffers). */
static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}
1707 
1708 /**
1709  * pci_release_dev - free a pci device structure when all users of it are finished.
1710  * @dev: device that's been disconnected
1711  *
1712  * Will be called only by the device core when all users of this pci device are
1713  * done.
1714  */
1715 static void pci_release_dev(struct device *dev)
1716 {
1717 	struct pci_dev *pci_dev;
1718 
1719 	pci_dev = to_pci_dev(dev);
1720 	pci_release_capabilities(pci_dev);
1721 	pci_release_of_node(pci_dev);
1722 	pcibios_release_device(pci_dev);
1723 	pci_bus_put(pci_dev->bus);
1724 	kfree(pci_dev->driver_override);
1725 	kfree(pci_dev->dma_alias_mask);
1726 	kfree(pci_dev);
1727 }
1728 
1729 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1730 {
1731 	struct pci_dev *dev;
1732 
1733 	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1734 	if (!dev)
1735 		return NULL;
1736 
1737 	INIT_LIST_HEAD(&dev->bus_list);
1738 	dev->dev.type = &pci_dev_type;
1739 	dev->bus = pci_bus_get(bus);
1740 
1741 	return dev;
1742 }
1743 EXPORT_SYMBOL(pci_alloc_dev);
1744 
/**
 * pci_bus_read_dev_vendor_id - read a device's vendor/device ID, waiting out CRS
 * @bus: bus the device sits on
 * @devfn: encoded device and function number
 * @l: filled with the raw dword at config offset 0 (vendor ID in the low
 *	16 bits, device ID in the high 16 bits)
 * @crs_timeout: how long (ms) to retry while the device reports
 *	Configuration Request Retry Status; 0 means give up immediately
 *
 * Returns true and fills @l when a device responds; false when the slot
 * looks empty, a config read fails, or CRS persists past @crs_timeout.
 */
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int crs_timeout)
{
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	/*
	 * Configuration Request Retry Status.  Some root ports return the
	 * actual device ID instead of the synthetic ID (0xFFFF) required
	 * by the PCIe spec.  Ignore the device ID and only check for
	 * (vendor id == 1).
	 */
	while ((*l & 0xffff) == 0x0001) {
		if (!crs_timeout)
			return false;

		/* exponential backoff: 1ms, 2ms, 4ms, ... */
		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (delay > crs_timeout) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
			       pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
			       PCI_FUNC(devfn));
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1784 
1785 /*
1786  * Read the config data for a PCI device, sanity-check it
1787  * and fill in the dev structure...
1788  */
1789 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1790 {
1791 	struct pci_dev *dev;
1792 	u32 l;
1793 
1794 	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1795 		return NULL;
1796 
1797 	dev = pci_alloc_dev(bus);
1798 	if (!dev)
1799 		return NULL;
1800 
1801 	dev->devfn = devfn;
1802 	dev->vendor = l & 0xffff;
1803 	dev->device = (l >> 16) & 0xffff;
1804 
1805 	pci_set_of_node(dev);
1806 
1807 	if (pci_setup_device(dev)) {
1808 		pci_bus_put(dev->bus);
1809 		kfree(dev);
1810 		return NULL;
1811 	}
1812 
1813 	return dev;
1814 }
1815 
/*
 * Discover and initialize this device's capabilities.  Called once per
 * device from pci_device_add(), before the device is registered with the
 * driver core.
 */
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* Enhanced Allocation */
	pci_ea_init(dev);

	/* Setup MSI caps & disable MSI/MSI-X interrupts */
	pci_msi_setup_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Address Translation Services */
	pci_ats_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);

	/* Precision Time Measurement */
	pci_ptm_init(dev);

	/* Advanced Error Reporting */
	pci_aer_init(dev);
}
1851 
1852 /*
1853  * This is the equivalent of pci_host_bridge_msi_domain that acts on
1854  * devices. Firmware interfaces that can select the MSI domain on a
1855  * per-device basis should be called from here.
1856  */
1857 static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
1858 {
1859 	struct irq_domain *d;
1860 
1861 	/*
1862 	 * If a domain has been set through the pcibios_add_device
1863 	 * callback, then this is the one (platform code knows best).
1864 	 */
1865 	d = dev_get_msi_domain(&dev->dev);
1866 	if (d)
1867 		return d;
1868 
1869 	/*
1870 	 * Let's see if we have a firmware interface able to provide
1871 	 * the domain.
1872 	 */
1873 	d = pci_msi_get_device_domain(dev);
1874 	if (d)
1875 		return d;
1876 
1877 	return NULL;
1878 }
1879 
1880 static void pci_set_msi_domain(struct pci_dev *dev)
1881 {
1882 	struct irq_domain *d;
1883 
1884 	/*
1885 	 * If the platform or firmware interfaces cannot supply a
1886 	 * device-specific MSI domain, then inherit the default domain
1887 	 * from the host bridge itself.
1888 	 */
1889 	d = pci_dev_msi_domain(dev);
1890 	if (!d)
1891 		d = dev_get_msi_domain(&dev->bus->dev);
1892 
1893 	dev_set_msi_domain(&dev->dev, d);
1894 }
1895 
1896 /**
1897  * pci_dma_configure - Setup DMA configuration
1898  * @dev: ptr to pci_dev struct of the PCI device
1899  *
1900  * Function to update PCI devices's DMA configuration using the same
1901  * info from the OF node or ACPI node of host bridge's parent (if any).
1902  */
1903 static void pci_dma_configure(struct pci_dev *dev)
1904 {
1905 	struct device *bridge = pci_get_host_bridge_device(dev);
1906 
1907 	if (IS_ENABLED(CONFIG_OF) &&
1908 		bridge->parent && bridge->parent->of_node) {
1909 			of_dma_configure(&dev->dev, bridge->parent->of_node);
1910 	} else if (has_acpi_companion(bridge)) {
1911 		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
1912 		enum dev_dma_attr attr = acpi_get_dma_attr(adev);
1913 
1914 		if (attr == DEV_DMA_NOT_SUPPORTED)
1915 			dev_warn(&dev->dev, "DMA not supported.\n");
1916 		else
1917 			acpi_dma_configure(&dev->dev, attr);
1918 	}
1919 
1920 	pci_put_host_bridge_device(bridge);
1921 }
1922 
/**
 * pci_device_add - add a newly scanned device to the bus and device core
 * @dev: device set up by pci_setup_device()/pci_scan_device()
 * @bus: bus to attach the device to
 *
 * Applies configuration and header fixups, initializes capabilities,
 * links the device into @bus->devices, and registers it with the driver
 * core.  Driver matching is deliberately deferred (match_driver = false);
 * binding happens later.
 */
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	pci_configure_device(dev);

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;
	pci_dma_configure(dev);

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Setup MSI irq domain */
	pci_set_msi_domain(dev);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);
}
1972 
1973 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
1974 {
1975 	struct pci_dev *dev;
1976 
1977 	dev = pci_get_slot(bus, devfn);
1978 	if (dev) {
1979 		pci_dev_put(dev);
1980 		return dev;
1981 	}
1982 
1983 	dev = pci_scan_device(bus, devfn);
1984 	if (!dev)
1985 		return NULL;
1986 
1987 	pci_device_add(dev, bus);
1988 
1989 	return dev;
1990 }
1991 EXPORT_SYMBOL(pci_scan_single_device);
1992 
1993 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
1994 {
1995 	int pos;
1996 	u16 cap = 0;
1997 	unsigned next_fn;
1998 
1999 	if (pci_ari_enabled(bus)) {
2000 		if (!dev)
2001 			return 0;
2002 		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
2003 		if (!pos)
2004 			return 0;
2005 
2006 		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
2007 		next_fn = PCI_ARI_CAP_NFN(cap);
2008 		if (next_fn <= fn)
2009 			return 0;	/* protect against malformed list */
2010 
2011 		return next_fn;
2012 	}
2013 
2014 	/* dev may be NULL for non-contiguous multifunction devices */
2015 	if (!dev || dev->multifunction)
2016 		return (fn + 1) % 8;
2017 
2018 	return 0;
2019 }
2020 
2021 static int only_one_child(struct pci_bus *bus)
2022 {
2023 	struct pci_dev *parent = bus->self;
2024 
2025 	if (!parent || !pci_is_pcie(parent))
2026 		return 0;
2027 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
2028 		return 1;
2029 
2030 	/*
2031 	 * PCIe downstream ports are bridges that normally lead to only a
2032 	 * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all
2033 	 * possible devices, not just device 0.  See PCIe spec r3.0,
2034 	 * sec 7.3.1.
2035 	 */
2036 	if (parent->has_secondary_link &&
2037 	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
2038 		return 1;
2039 	return 0;
2040 }
2041 
2042 /**
2043  * pci_scan_slot - scan a PCI slot on a bus for devices.
2044  * @bus: PCI bus to scan
2045  * @devfn: slot number to scan (must have zero function.)
2046  *
2047  * Scan a PCI slot on the specified PCI bus for devices, adding
2048  * discovered devices to the @bus->devices list.  New devices
2049  * will not have is_added set.
2050  *
2051  * Returns the number of new devices found.
2052  */
2053 int pci_scan_slot(struct pci_bus *bus, int devfn)
2054 {
2055 	unsigned fn, nr = 0;
2056 	struct pci_dev *dev;
2057 
2058 	if (only_one_child(bus) && (devfn > 0))
2059 		return 0; /* Already scanned the entire slot */
2060 
2061 	dev = pci_scan_single_device(bus, devfn);
2062 	if (!dev)
2063 		return 0;
2064 	if (!dev->is_added)
2065 		nr++;
2066 
2067 	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
2068 		dev = pci_scan_single_device(bus, devfn + fn);
2069 		if (dev) {
2070 			if (!dev->is_added)
2071 				nr++;
2072 			dev->multifunction = 1;
2073 		}
2074 	}
2075 
2076 	/* only one slot has pcie device */
2077 	if (bus->self && nr)
2078 		pcie_aspm_init_link_state(bus->self);
2079 
2080 	return nr;
2081 }
2082 EXPORT_SYMBOL(pci_scan_slot);
2083 
2084 static int pcie_find_smpss(struct pci_dev *dev, void *data)
2085 {
2086 	u8 *smpss = data;
2087 
2088 	if (!pci_is_pcie(dev))
2089 		return 0;
2090 
2091 	/*
2092 	 * We don't have a way to change MPS settings on devices that have
2093 	 * drivers attached.  A hot-added device might support only the minimum
2094 	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
2095 	 * where devices may be hot-added, we limit the fabric MPS to 128 so
2096 	 * hot-added devices will work correctly.
2097 	 *
2098 	 * However, if we hot-add a device to a slot directly below a Root
2099 	 * Port, it's impossible for there to be other existing devices below
2100 	 * the port.  We don't limit the MPS in this case because we can
2101 	 * reconfigure MPS on both the Root Port and the hot-added device,
2102 	 * and there are no other devices involved.
2103 	 *
2104 	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
2105 	 */
2106 	if (dev->is_hotplug_bridge &&
2107 	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
2108 		*smpss = 0;
2109 
2110 	if (*smpss > dev->pcie_mpss)
2111 		*smpss = dev->pcie_mpss;
2112 
2113 	return 0;
2114 }
2115 
2116 static void pcie_write_mps(struct pci_dev *dev, int mps)
2117 {
2118 	int rc;
2119 
2120 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
2121 		mps = 128 << dev->pcie_mpss;
2122 
2123 		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
2124 		    dev->bus->self)
2125 			/* For "Performance", the assumption is made that
2126 			 * downstream communication will never be larger than
2127 			 * the MRRS.  So, the MPS only needs to be configured
2128 			 * for the upstream communication.  This being the case,
2129 			 * walk from the top down and set the MPS of the child
2130 			 * to that of the parent bus.
2131 			 *
2132 			 * Configure the device MPS with the smaller of the
2133 			 * device MPSS or the bridge MPS (which is assumed to be
2134 			 * properly configured at this point to the largest
2135 			 * allowable MPS based on its parent bus).
2136 			 */
2137 			mps = min(mps, pcie_get_mps(dev->bus->self));
2138 	}
2139 
2140 	rc = pcie_set_mps(dev, mps);
2141 	if (rc)
2142 		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
2143 }
2144 
2145 static void pcie_write_mrrs(struct pci_dev *dev)
2146 {
2147 	int rc, mrrs;
2148 
2149 	/* In the "safe" case, do not configure the MRRS.  There appear to be
2150 	 * issues with setting MRRS to 0 on a number of devices.
2151 	 */
2152 	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
2153 		return;
2154 
2155 	/* For Max performance, the MRRS must be set to the largest supported
2156 	 * value.  However, it cannot be configured larger than the MPS the
2157 	 * device or the bus can support.  This should already be properly
2158 	 * configured by a prior call to pcie_write_mps.
2159 	 */
2160 	mrrs = pcie_get_mps(dev);
2161 
2162 	/* MRRS is a R/W register.  Invalid values can be written, but a
2163 	 * subsequent read will verify if the value is acceptable or not.
2164 	 * If the MRRS value provided is not acceptable (e.g., too large),
2165 	 * shrink the value until it is acceptable to the HW.
2166 	 */
2167 	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
2168 		rc = pcie_set_readrq(dev, mrrs);
2169 		if (!rc)
2170 			break;
2171 
2172 		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
2173 		mrrs /= 2;
2174 	}
2175 
2176 	if (mrrs < 128)
2177 		dev_err(&dev->dev, "MRRS was unable to be configured with a safe value.  If problems are experienced, try running with pci=pcie_bus_safe\n");
2178 }
2179 
2180 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
2181 {
2182 	int mps, orig_mps;
2183 
2184 	if (!pci_is_pcie(dev))
2185 		return 0;
2186 
2187 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
2188 	    pcie_bus_config == PCIE_BUS_DEFAULT)
2189 		return 0;
2190 
2191 	mps = 128 << *(u8 *)data;
2192 	orig_mps = pcie_get_mps(dev);
2193 
2194 	pcie_write_mps(dev, mps);
2195 	pcie_write_mrrs(dev);
2196 
2197 	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
2198 		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
2199 		 orig_mps, pcie_get_readrq(dev));
2200 
2201 	return 0;
2202 }
2203 
2204 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
2205  * parents then children fashion.  If this changes, then this code will not
2206  * work as designed.
2207  */
2208 void pcie_bus_configure_settings(struct pci_bus *bus)
2209 {
2210 	u8 smpss = 0;
2211 
2212 	if (!bus->self)
2213 		return;
2214 
2215 	if (!pci_is_pcie(bus->self))
2216 		return;
2217 
2218 	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
2219 	 * to be aware of the MPS of the destination.  To work around this,
2220 	 * simply force the MPS of the entire system to the smallest possible.
2221 	 */
2222 	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
2223 		smpss = 0;
2224 
2225 	if (pcie_bus_config == PCIE_BUS_SAFE) {
2226 		smpss = bus->self->pcie_mpss;
2227 
2228 		pcie_find_smpss(bus->self, &smpss);
2229 		pci_walk_bus(bus, pcie_find_smpss, &smpss);
2230 	}
2231 
2232 	pcie_bus_configure_set(bus->self, &smpss);
2233 	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
2234 }
2235 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
2236 
/**
 * pci_scan_child_bus - scan devices below a bus and recurse into bridges
 * @bus: bus to scan
 *
 * Probes every slot on @bus, reserves bus numbers for SR-IOV, runs the
 * arch fixups once per bus, then scans bridges in two passes (pass 0 for
 * already-configured bridges, pass 1 to assign the rest — see
 * pci_scan_bridge()).
 *
 * Returns the highest subordinate bus number found below @bus.
 */
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	/* Two passes so firmware-configured bridges keep their numbers
	 * before any unconfigured bridges are assigned new ranges.
	 */
	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (pci_is_bridge(dev))
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * Make sure a hotplug bridge has at least the minimum requested
	 * number of buses.
	 */
	if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
		if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
			max = bus->busn_res.start + pci_hotplug_bus_size - 1;
	}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
2287 
2288 /**
2289  * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
2290  * @bridge: Host bridge to set up.
2291  *
2292  * Default empty implementation.  Replace with an architecture-specific setup
2293  * routine, if necessary.
2294  */
int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	return 0;	/* nothing to do by default; arch code may override */
}
2299 
/* Default empty hook called when a bus is added; arch code may override. */
void __weak pcibios_add_bus(struct pci_bus *bus)
{
}
2303 
/* Default empty hook called when a bus is removed; arch code may override. */
void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}
2307 
/*
 * Allocate a host bridge, take ownership of the caller's @resources list
 * (spliced into bridge->windows, leaving the caller's list empty on
 * success), register the bridge and return its root bus.
 *
 * NOTE(review): on registration failure the windows already spliced into
 * bridge->windows are dropped by the bare kfree() below rather than being
 * returned to @resources or freed via the release callback — this looks
 * like a leak of the caller's resource entries; confirm against
 * pci_release_host_bridge_dev() and pci_register_host_bridge() cleanup.
 */
static struct pci_bus *pci_create_root_bus_msi(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata,
		struct list_head *resources, struct msi_controller *msi)
{
	int error;
	struct pci_host_bridge *bridge;

	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return NULL;

	bridge->dev.parent = parent;
	bridge->dev.release = pci_release_host_bridge_dev;

	/* Ownership of the resource entries moves to the bridge here */
	list_splice_init(resources, &bridge->windows);
	bridge->sysdata = sysdata;
	bridge->busnr = bus;
	bridge->ops = ops;
	bridge->msi = msi;

	error = pci_register_host_bridge(bridge);
	if (error < 0)
		goto err_out;

	return bridge->bus;

err_out:
	kfree(bridge);
	return NULL;
}
2338 
2339 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
2340 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
2341 {
2342 	return pci_create_root_bus_msi(parent, bus, ops, sysdata, resources,
2343 				       NULL);
2344 }
2345 EXPORT_SYMBOL_GPL(pci_create_root_bus);
2346 
2347 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
2348 {
2349 	struct resource *res = &b->busn_res;
2350 	struct resource *parent_res, *conflict;
2351 
2352 	res->start = bus;
2353 	res->end = bus_max;
2354 	res->flags = IORESOURCE_BUS;
2355 
2356 	if (!pci_is_root_bus(b))
2357 		parent_res = &b->parent->busn_res;
2358 	else {
2359 		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
2360 		res->flags |= IORESOURCE_PCI_FIXED;
2361 	}
2362 
2363 	conflict = request_resource_conflict(parent_res, res);
2364 
2365 	if (conflict)
2366 		dev_printk(KERN_DEBUG, &b->dev,
2367 			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
2368 			    res, pci_is_root_bus(b) ? "domain " : "",
2369 			    parent_res, conflict->name, conflict);
2370 
2371 	return conflict == NULL;
2372 }
2373 
2374 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
2375 {
2376 	struct resource *res = &b->busn_res;
2377 	struct resource old_res = *res;
2378 	resource_size_t size;
2379 	int ret;
2380 
2381 	if (res->start > bus_max)
2382 		return -EINVAL;
2383 
2384 	size = bus_max - res->start + 1;
2385 	ret = adjust_resource(res, res->start, size);
2386 	dev_printk(KERN_DEBUG, &b->dev,
2387 			"busn_res: %pR end %s updated to %02x\n",
2388 			&old_res, ret ? "can not be" : "is", bus_max);
2389 
2390 	if (!ret && !res->parent)
2391 		pci_bus_insert_busn_res(b, res->start, res->end);
2392 
2393 	return ret;
2394 }
2395 
2396 void pci_bus_release_busn_res(struct pci_bus *b)
2397 {
2398 	struct resource *res = &b->busn_res;
2399 	int ret;
2400 
2401 	if (!res->flags || !res->parent)
2402 		return;
2403 
2404 	ret = release_resource(res);
2405 	dev_printk(KERN_DEBUG, &b->dev,
2406 			"busn_res: %pR %s released\n",
2407 			res, ret ? "can not be" : "is");
2408 }
2409 
2410 struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
2411 		struct pci_ops *ops, void *sysdata,
2412 		struct list_head *resources, struct msi_controller *msi)
2413 {
2414 	struct resource_entry *window;
2415 	bool found = false;
2416 	struct pci_bus *b;
2417 	int max;
2418 
2419 	resource_list_for_each_entry(window, resources)
2420 		if (window->res->flags & IORESOURCE_BUS) {
2421 			found = true;
2422 			break;
2423 		}
2424 
2425 	b = pci_create_root_bus_msi(parent, bus, ops, sysdata, resources, msi);
2426 	if (!b)
2427 		return NULL;
2428 
2429 	if (!found) {
2430 		dev_info(&b->dev,
2431 		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2432 			bus);
2433 		pci_bus_insert_busn_res(b, bus, 255);
2434 	}
2435 
2436 	max = pci_scan_child_bus(b);
2437 
2438 	if (!found)
2439 		pci_bus_update_busn_res_end(b, max);
2440 
2441 	return b;
2442 }
2443 
2444 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
2445 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
2446 {
2447 	return pci_scan_root_bus_msi(parent, bus, ops, sysdata, resources,
2448 				     NULL);
2449 }
2450 EXPORT_SYMBOL(pci_scan_root_bus);
2451 
2452 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
2453 					void *sysdata)
2454 {
2455 	LIST_HEAD(resources);
2456 	struct pci_bus *b;
2457 
2458 	pci_add_resource(&resources, &ioport_resource);
2459 	pci_add_resource(&resources, &iomem_resource);
2460 	pci_add_resource(&resources, &busn_resource);
2461 	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2462 	if (b) {
2463 		pci_scan_child_bus(b);
2464 	} else {
2465 		pci_free_resource_list(&resources);
2466 	}
2467 	return b;
2468 }
2469 EXPORT_SYMBOL(pci_scan_bus);
2470 
2471 /**
2472  * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2473  * @bridge: PCI bridge for the bus to scan
2474  *
2475  * Scan a PCI bus and child buses for new devices, add them,
2476  * and enable them, resizing bridge mmio/io resource if necessary
2477  * and possible.  The caller must ensure the child devices are already
2478  * removed for resizing to occur.
2479  *
2480  * Returns the max number of subordinate bus discovered.
2481  */
2482 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2483 {
2484 	unsigned int max;
2485 	struct pci_bus *bus = bridge->subordinate;
2486 
2487 	max = pci_scan_child_bus(bus);
2488 
2489 	pci_assign_unassigned_bridge_resources(bridge);
2490 
2491 	pci_bus_add_devices(bus);
2492 
2493 	return max;
2494 }
2495 
2496 /**
2497  * pci_rescan_bus - scan a PCI bus for devices.
2498  * @bus: PCI bus to scan
2499  *
2500  * Scan a PCI bus and child buses for new devices, adds them,
2501  * and enables them.
2502  *
2503  * Returns the max number of subordinate bus discovered.
2504  */
2505 unsigned int pci_rescan_bus(struct pci_bus *bus)
2506 {
2507 	unsigned int max;
2508 
2509 	max = pci_scan_child_bus(bus);
2510 	pci_assign_unassigned_bus_resources(bus);
2511 	pci_bus_add_devices(bus);
2512 
2513 	return max;
2514 }
2515 EXPORT_SYMBOL_GPL(pci_rescan_bus);
2516 
2517 /*
2518  * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2519  * routines should always be executed under this mutex.
2520  */
2521 static DEFINE_MUTEX(pci_rescan_remove_lock);
2522 
/* Serialize PCI rescan/remove operations; pairs with
 * pci_unlock_rescan_remove().
 */
void pci_lock_rescan_remove(void)
{
	mutex_lock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2528 
/* Release the rescan/remove lock taken by pci_lock_rescan_remove(). */
void pci_unlock_rescan_remove(void)
{
	mutex_unlock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
2534 
2535 static int __init pci_sort_bf_cmp(const struct device *d_a,
2536 				  const struct device *d_b)
2537 {
2538 	const struct pci_dev *a = to_pci_dev(d_a);
2539 	const struct pci_dev *b = to_pci_dev(d_b);
2540 
2541 	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2542 	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
2543 
2544 	if      (a->bus->number < b->bus->number) return -1;
2545 	else if (a->bus->number > b->bus->number) return  1;
2546 
2547 	if      (a->devfn < b->devfn) return -1;
2548 	else if (a->devfn > b->devfn) return  1;
2549 
2550 	return 0;
2551 }
2552 
/* Reorder all PCI devices on the bus type in breadth-first (domain, bus,
 * devfn) order using pci_sort_bf_cmp() above.
 */
void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}
2557