xref: /openbmc/linux/drivers/pci/probe.c (revision dea54fba)
1 /*
2  * probe.c - PCI detection and setup code
3  */
4 
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/of_device.h>
10 #include <linux/of_pci.h>
11 #include <linux/pci_hotplug.h>
12 #include <linux/slab.h>
13 #include <linux/module.h>
14 #include <linux/cpumask.h>
15 #include <linux/pci-aspm.h>
16 #include <linux/aer.h>
17 #include <linux/acpi.h>
18 #include <linux/irqdomain.h>
19 #include <linux/pm_runtime.h>
20 #include "pci.h"
21 
#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3

/* Default bus-number aperture (0-255) for the non-domain case */
static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

/* Per-domain bus-number apertures, allocated lazily on first lookup */
static LIST_HEAD(pci_domain_busn_res_list);

/* Bus-number resource for one PCI domain */
struct pci_domain_busn_res {
	struct list_head list;	/* link in pci_domain_busn_res_list */
	struct resource res;	/* bus numbers usable in this domain */
	int domain_nr;		/* domain the aperture belongs to */
};
43 
44 static struct resource *get_pci_domain_busn_res(int domain_nr)
45 {
46 	struct pci_domain_busn_res *r;
47 
48 	list_for_each_entry(r, &pci_domain_busn_res_list, list)
49 		if (r->domain_nr == domain_nr)
50 			return &r->res;
51 
52 	r = kzalloc(sizeof(*r), GFP_KERNEL);
53 	if (!r)
54 		return NULL;
55 
56 	r->domain_nr = domain_nr;
57 	r->res.start = 0;
58 	r->res.end = 0xff;
59 	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
60 
61 	list_add_tail(&r->list, &pci_domain_busn_res_list);
62 
63 	return &r->res;
64 }
65 
/* bus_find_device() match callback that accepts every device. */
static int find_anything(struct device *dev, void *data)
{
	return 1;
}
70 
71 /*
72  * Some device drivers need know if pci is initiated.
73  * Basically, we think pci is not initiated when there
74  * is no device to be found on the pci_bus_type.
75  */
76 int no_pci_devices(void)
77 {
78 	struct device *dev;
79 	int no_devices;
80 
81 	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
82 	no_devices = (dev == NULL);
83 	put_device(dev);
84 	return no_devices;
85 }
86 EXPORT_SYMBOL(no_pci_devices);
87 
/*
 * PCI Bus Class
 */
/* Final teardown of a pci_bus, invoked when its device refcount drops to 0 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	/* drop the bridge reference taken when the bus was set up */
	put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}
100 
/* Device class backing /sys/class/pci_bus */
static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

/* Register the class early (postcore) so it exists before buses are scanned */
static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);
112 
/*
 * Compute a BAR's extent (size - 1) from the original value @base, the
 * value @maxbase read back after writing all ones, and the address-bit
 * @mask.  Returns 0 if the BAR is unimplemented or sizing failed.
 */
static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	/* Keep only the significant (writable address) bits */
	u64 bits = mask & maxbase;

	if (!bits)
		return 0;

	/*
	 * The lowest set bit is the decode granularity; subtracting one
	 * turns it into the extent.
	 */
	bits = (bits & ~(bits - 1)) - 1;

	/*
	 * base == maxbase can be valid only if the BAR had already been
	 * programmed with all 1s before we sized it.
	 */
	if (base == maxbase && ((base | bits) & mask) != mask)
		return 0;

	return bits;
}
130 
131 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
132 {
133 	u32 mem_type;
134 	unsigned long flags;
135 
136 	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
137 		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
138 		flags |= IORESOURCE_IO;
139 		return flags;
140 	}
141 
142 	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
143 	flags |= IORESOURCE_MEM;
144 	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
145 		flags |= IORESOURCE_PREFETCH;
146 
147 	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
148 	switch (mem_type) {
149 	case PCI_BASE_ADDRESS_MEM_TYPE_32:
150 		break;
151 	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
152 		/* 1M mem BAR treated as 32-bit BAR */
153 		break;
154 	case PCI_BASE_ADDRESS_MEM_TYPE_64:
155 		flags |= IORESOURCE_MEM_64;
156 		break;
157 	default:
158 		/* mem unknown type treated as 32-bit BAR */
159 		break;
160 	}
161 	return flags;
162 }
163 
#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Sizes the BAR by writing all ones to it, reading back the result and
 * restoring the original value, then converts the bus address range to
 * a CPU resource range in @res.
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l = 0, sz = 0, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	/* Size the BAR: save, write all ones, read back, restore */
	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (sz == 0xffffffff)
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		/* ROM BAR: bit 0 is the enable bit, not an attribute */
		if (l & PCI_ROM_ADDRESS_ENABLE)
			res->flags |= IORESOURCE_ROM_ENABLE;
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = PCI_ROM_ADDRESS_MASK;
	}

	/* A 64-bit BAR continues into the next dword; size that half too */
	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	/* Re-enable decoding; printks are safe again from here on */
	if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (!sz64)
		goto fail;

	sz64 = pci_size(l64, sz64, mask64);
	if (!sz64) {
		dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
			 pos);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
		    && sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
				pos, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(pci_bus_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
				 pos, (unsigned long long)l64);
			goto out;
		}
	}

	region.start = l64;
	region.end = l64 + sz64;

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU.  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long)region.start);
	}

	goto out;


fail:
	res->flags = 0;
out:
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
319 
/*
 * Read @howmany 32-bit BAR slots (and, if @rom is non-zero, the
 * expansion ROM BAR at config offset @rom) into dev->resource[].
 */
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	/* quirk: this device's BARs can't be sized; leave them untouched */
	if (dev->non_compliant_bars)
		return;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		/* a 64-bit BAR occupies two slots; skip the upper half */
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}
341 
/*
 * Read the bridge's I/O window (base/limit registers, plus the upper
 * 16 bits when the bridge decodes 32-bit I/O) into child->resource[0].
 */
static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	/* base > limit means the window is unused/disabled */
	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
381 
/*
 * Read the bridge's non-prefetchable memory window into
 * child->resource[1].
 */
static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	/* base > limit means the window is unused/disabled */
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;	/* limit covers a whole 1MB block */
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
403 
/*
 * Read the bridge's prefetchable memory window (base/limit plus the
 * optional upper-32-bit registers for 64-bit windows) into
 * child->resource[2].
 */
static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	u64 base64, limit64;
	pci_bus_addr_t base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	/* a 64-bit window also carries upper-32-bit base/limit registers */
	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base64 |= (u64) mem_base_hi << 32;
			limit64 |= (u64) mem_limit_hi << 32;
		}
	}

	base = (pci_bus_addr_t) base64;
	limit = (pci_bus_addr_t) limit64;

	/* bail out if pci_bus_addr_t is too narrow to hold the base */
	if (base != base64) {
		dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
			(unsigned long long) base64);
		return;
	}

	/* base > limit means the window is unused/disabled */
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;	/* limit covers a whole 1MB block */
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
456 
/*
 * Populate @child's window resources from the config space of the
 * bridge leading to it.  No-op on the root bus, which has no bridge.
 */
void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	/* point the child's resources at the bridge's window resources */
	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	/* transparent bridges also pass through the parent bus's resources */
	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res && res->flags) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}
490 
491 static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
492 {
493 	struct pci_bus *b;
494 
495 	b = kzalloc(sizeof(*b), GFP_KERNEL);
496 	if (!b)
497 		return NULL;
498 
499 	INIT_LIST_HEAD(&b->node);
500 	INIT_LIST_HEAD(&b->children);
501 	INIT_LIST_HEAD(&b->devices);
502 	INIT_LIST_HEAD(&b->slots);
503 	INIT_LIST_HEAD(&b->resources);
504 	b->max_bus_speed = PCI_SPEED_UNKNOWN;
505 	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
506 #ifdef CONFIG_PCI_DOMAINS_GENERIC
507 	if (parent)
508 		b->domain_nr = parent->domain_nr;
509 #endif
510 	return b;
511 }
512 
/* ->release for devm-allocated host bridges; devm owns the memory itself */
static void devm_pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);
}

/* ->release for kzalloc'ed host bridges; also frees the allocation */
static void pci_release_host_bridge_dev(struct device *dev)
{
	devm_pci_release_host_bridge_dev(dev);
	pci_free_host_bridge(to_pci_host_bridge(dev));
}
526 
527 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
528 {
529 	struct pci_host_bridge *bridge;
530 
531 	bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
532 	if (!bridge)
533 		return NULL;
534 
535 	INIT_LIST_HEAD(&bridge->windows);
536 	bridge->dev.release = pci_release_host_bridge_dev;
537 
538 	return bridge;
539 }
540 EXPORT_SYMBOL(pci_alloc_host_bridge);
541 
542 struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
543 						   size_t priv)
544 {
545 	struct pci_host_bridge *bridge;
546 
547 	bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL);
548 	if (!bridge)
549 		return NULL;
550 
551 	INIT_LIST_HEAD(&bridge->windows);
552 	bridge->dev.release = devm_pci_release_host_bridge_dev;
553 
554 	return bridge;
555 }
556 EXPORT_SYMBOL(devm_pci_alloc_host_bridge);
557 
/*
 * Free a host bridge allocated with pci_alloc_host_bridge(), including
 * any resource entries still on its windows list.
 */
void pci_free_host_bridge(struct pci_host_bridge *bridge)
{
	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}
EXPORT_SYMBOL(pci_free_host_bridge);
565 
/* Secondary bus speed, indexed by the PCI-X SSTATUS frequency field */
static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

/* Link speed, indexed by the PCIe link capability/status speed field */
const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};
603 
/* Cache the current link speed from a Link Status register value */
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
609 
/* AGP rates, indexed by the value agp_speed() computes */
static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};
617 
618 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
619 {
620 	int index = 0;
621 
622 	if (agpstat & 4)
623 		index = 3;
624 	else if (agpstat & 2)
625 		index = 2;
626 	else if (agpstat & 1)
627 		index = 1;
628 	else
629 		goto out;
630 
631 	if (agp3) {
632 		index += 2;
633 		if (index == 5)
634 			index = 0;
635 	}
636 
637  out:
638 	return agp_speeds[index];
639 }
640 
/*
 * Derive max_bus_speed and cur_bus_speed for @bus from its upstream
 * bridge, checking the AGP, PCI-X and PCIe capabilities in turn.
 */
static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		/* status bit 3 selects AGP3 mode; bits 0-2 encode the rate */
		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		/* current speed: command rate, interpreted in status's mode */
		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		/* current speed from the secondary status frequency field */
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}
698 
/*
 * Resolve the MSI irqdomain for a root @bus via firmware (OF, then
 * ACPI, then a direct fwnode lookup).  Returns NULL if none is found.
 */
static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}
#endif

	return d;
}
727 
/* Resolve and cache the MSI irqdomain for @bus on its struct device. */
static void pci_set_bus_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;
	struct pci_bus *b;

	/*
	 * The bus can be a root bus, a subordinate bus, or a virtual bus
	 * created by an SR-IOV device.  Walk up to the first bridge device
	 * found or derive the domain from the host bridge.
	 */
	for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
		if (b->self)
			d = dev_get_msi_domain(&b->self->dev);
	}

	/* no bridge carried a domain: fall back to the host bridge's */
	if (!d)
		d = pci_host_bridge_msi_domain(b);

	dev_set_msi_domain(&bus->dev, d);
}
748 
749 static int pci_register_host_bridge(struct pci_host_bridge *bridge)
750 {
751 	struct device *parent = bridge->dev.parent;
752 	struct resource_entry *window, *n;
753 	struct pci_bus *bus, *b;
754 	resource_size_t offset;
755 	LIST_HEAD(resources);
756 	struct resource *res;
757 	char addr[64], *fmt;
758 	const char *name;
759 	int err;
760 
761 	bus = pci_alloc_bus(NULL);
762 	if (!bus)
763 		return -ENOMEM;
764 
765 	bridge->bus = bus;
766 
767 	/* temporarily move resources off the list */
768 	list_splice_init(&bridge->windows, &resources);
769 	bus->sysdata = bridge->sysdata;
770 	bus->msi = bridge->msi;
771 	bus->ops = bridge->ops;
772 	bus->number = bus->busn_res.start = bridge->busnr;
773 #ifdef CONFIG_PCI_DOMAINS_GENERIC
774 	bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
775 #endif
776 
777 	b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
778 	if (b) {
779 		/* If we already got to this bus through a different bridge, ignore it */
780 		dev_dbg(&b->dev, "bus already known\n");
781 		err = -EEXIST;
782 		goto free;
783 	}
784 
785 	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
786 		     bridge->busnr);
787 
788 	err = pcibios_root_bridge_prepare(bridge);
789 	if (err)
790 		goto free;
791 
792 	err = device_register(&bridge->dev);
793 	if (err)
794 		put_device(&bridge->dev);
795 
796 	bus->bridge = get_device(&bridge->dev);
797 	device_enable_async_suspend(bus->bridge);
798 	pci_set_bus_of_node(bus);
799 	pci_set_bus_msi_domain(bus);
800 
801 	if (!parent)
802 		set_dev_node(bus->bridge, pcibus_to_node(bus));
803 
804 	bus->dev.class = &pcibus_class;
805 	bus->dev.parent = bus->bridge;
806 
807 	dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
808 	name = dev_name(&bus->dev);
809 
810 	err = device_register(&bus->dev);
811 	if (err)
812 		goto unregister;
813 
814 	pcibios_add_bus(bus);
815 
816 	/* Create legacy_io and legacy_mem files for this bus */
817 	pci_create_legacy_files(bus);
818 
819 	if (parent)
820 		dev_info(parent, "PCI host bridge to bus %s\n", name);
821 	else
822 		pr_info("PCI host bridge to bus %s\n", name);
823 
824 	/* Add initial resources to the bus */
825 	resource_list_for_each_entry_safe(window, n, &resources) {
826 		list_move_tail(&window->node, &bridge->windows);
827 		offset = window->offset;
828 		res = window->res;
829 
830 		if (res->flags & IORESOURCE_BUS)
831 			pci_bus_insert_busn_res(bus, bus->number, res->end);
832 		else
833 			pci_bus_add_resource(bus, res, 0);
834 
835 		if (offset) {
836 			if (resource_type(res) == IORESOURCE_IO)
837 				fmt = " (bus address [%#06llx-%#06llx])";
838 			else
839 				fmt = " (bus address [%#010llx-%#010llx])";
840 
841 			snprintf(addr, sizeof(addr), fmt,
842 				 (unsigned long long)(res->start - offset),
843 				 (unsigned long long)(res->end - offset));
844 		} else
845 			addr[0] = '\0';
846 
847 		dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
848 	}
849 
850 	down_write(&pci_bus_sem);
851 	list_add_tail(&bus->node, &pci_root_buses);
852 	up_write(&pci_bus_sem);
853 
854 	return 0;
855 
856 unregister:
857 	put_device(&bridge->dev);
858 	device_unregister(&bridge->dev);
859 
860 free:
861 	kfree(bus);
862 	return err;
863 }
864 
/*
 * Allocate, initialize and register a child bus with number @busnr
 * under @parent.  @bridge is the bridge device leading to it, or NULL
 * for a virtual bus (e.g. one created by an SR-IOV device).
 */
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.  The subordinate starts at the maximum (0xff);
	 * callers narrow it once scanning determines the real end.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		/* virtual bus: hang it directly off the parent's bridge */
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	pci_set_bus_msi_domain(child);
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	/* give the host controller driver a chance to act on the new bus */
	if (child->ops->add_bus) {
		ret = child->ops->add_bus(child);
		if (WARN_ON(ret < 0))
			dev_err(&child->dev, "failed to add bus: %d\n", ret);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}
935 
936 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
937 				int busnr)
938 {
939 	struct pci_bus *child;
940 
941 	child = pci_alloc_child_bus(parent, dev, busnr);
942 	if (child) {
943 		down_write(&pci_bus_sem);
944 		list_add_tail(&child->node, &parent->children);
945 		up_write(&pci_bus_sem);
946 	}
947 	return child;
948 }
949 EXPORT_SYMBOL(pci_add_new_bus);
950 
/*
 * Enable CRS (Configuration Request Retry Status) Software Visibility
 * on @pdev if its root capability register advertises support.
 */
static void pci_enable_crs(struct pci_dev *pdev)
{
	u16 root_cap = 0;

	/* Enable CRS Software Visibility if supported */
	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_CRSSVE);
}
961 
962 /*
963  * If it's a bridge, configure it and scan the bus behind it.
964  * For CardBus bridges, we don't scan behind as the devices will
965  * be handled by the bridge driver itself.
966  *
967  * We need to process bridges in two passes -- first we scan those
968  * already configured by the BIOS and after we are done with all of
969  * them, we proceed to assigning numbers to the remaining buses in
970  * order to avoid overlaps between old and new bus numbers.
971  */
972 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
973 {
974 	struct pci_bus *child;
975 	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
976 	u32 buses, i, j = 0;
977 	u16 bctl;
978 	u8 primary, secondary, subordinate;
979 	int broken = 0;
980 
981 	/*
982 	 * Make sure the bridge is powered on to be able to access config
983 	 * space of devices below it.
984 	 */
985 	pm_runtime_get_sync(&dev->dev);
986 
987 	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
988 	primary = buses & 0xFF;
989 	secondary = (buses >> 8) & 0xFF;
990 	subordinate = (buses >> 16) & 0xFF;
991 
992 	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
993 		secondary, subordinate, pass);
994 
995 	if (!primary && (primary != bus->number) && secondary && subordinate) {
996 		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
997 		primary = bus->number;
998 	}
999 
1000 	/* Check if setup is sensible at all */
1001 	if (!pass &&
1002 	    (primary != bus->number || secondary <= bus->number ||
1003 	     secondary > subordinate)) {
1004 		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
1005 			 secondary, subordinate);
1006 		broken = 1;
1007 	}
1008 
1009 	/* Disable MasterAbortMode during probing to avoid reporting
1010 	   of bus errors (in some architectures) */
1011 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
1012 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
1013 			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
1014 
1015 	pci_enable_crs(dev);
1016 
1017 	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
1018 	    !is_cardbus && !broken) {
1019 		unsigned int cmax;
1020 		/*
1021 		 * Bus already configured by firmware, process it in the first
1022 		 * pass and just note the configuration.
1023 		 */
1024 		if (pass)
1025 			goto out;
1026 
1027 		/*
1028 		 * The bus might already exist for two reasons: Either we are
1029 		 * rescanning the bus or the bus is reachable through more than
1030 		 * one bridge. The second case can happen with the i450NX
1031 		 * chipset.
1032 		 */
1033 		child = pci_find_bus(pci_domain_nr(bus), secondary);
1034 		if (!child) {
1035 			child = pci_add_new_bus(bus, dev, secondary);
1036 			if (!child)
1037 				goto out;
1038 			child->primary = primary;
1039 			pci_bus_insert_busn_res(child, secondary, subordinate);
1040 			child->bridge_ctl = bctl;
1041 		}
1042 
1043 		cmax = pci_scan_child_bus(child);
1044 		if (cmax > subordinate)
1045 			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
1046 				 subordinate, cmax);
1047 		/* subordinate should equal child->busn_res.end */
1048 		if (subordinate > max)
1049 			max = subordinate;
1050 	} else {
1051 		/*
1052 		 * We need to assign a number to this bus which we always
1053 		 * do in the second pass.
1054 		 */
1055 		if (!pass) {
1056 			if (pcibios_assign_all_busses() || broken || is_cardbus)
1057 				/* Temporarily disable forwarding of the
1058 				   configuration cycles on all bridges in
1059 				   this bus segment to avoid possible
1060 				   conflicts in the second pass between two
1061 				   bridges programmed with overlapping
1062 				   bus ranges. */
1063 				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
1064 						       buses & ~0xffffff);
1065 			goto out;
1066 		}
1067 
1068 		/* Clear errors */
1069 		pci_write_config_word(dev, PCI_STATUS, 0xffff);
1070 
1071 		/* Prevent assigning a bus number that already exists.
1072 		 * This can happen when a bridge is hot-plugged, so in
1073 		 * this case we only re-scan this bus. */
1074 		child = pci_find_bus(pci_domain_nr(bus), max+1);
1075 		if (!child) {
1076 			child = pci_add_new_bus(bus, dev, max+1);
1077 			if (!child)
1078 				goto out;
1079 			pci_bus_insert_busn_res(child, max+1, 0xff);
1080 		}
1081 		max++;
1082 		buses = (buses & 0xff000000)
1083 		      | ((unsigned int)(child->primary)     <<  0)
1084 		      | ((unsigned int)(child->busn_res.start)   <<  8)
1085 		      | ((unsigned int)(child->busn_res.end) << 16);
1086 
1087 		/*
1088 		 * yenta.c forces a secondary latency timer of 176.
1089 		 * Copy that behaviour here.
1090 		 */
1091 		if (is_cardbus) {
1092 			buses &= ~0xff000000;
1093 			buses |= CARDBUS_LATENCY_TIMER << 24;
1094 		}
1095 
1096 		/*
1097 		 * We need to blast all three values with a single write.
1098 		 */
1099 		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
1100 
1101 		if (!is_cardbus) {
1102 			child->bridge_ctl = bctl;
1103 			max = pci_scan_child_bus(child);
1104 		} else {
1105 			/*
1106 			 * For CardBus bridges, we leave 4 bus numbers
1107 			 * as cards with a PCI-to-PCI bridge can be
1108 			 * inserted later.
1109 			 */
1110 			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
1111 				struct pci_bus *parent = bus;
1112 				if (pci_find_bus(pci_domain_nr(bus),
1113 							max+i+1))
1114 					break;
1115 				while (parent->parent) {
1116 					if ((!pcibios_assign_all_busses()) &&
1117 					    (parent->busn_res.end > max) &&
1118 					    (parent->busn_res.end <= max+i)) {
1119 						j = 1;
1120 					}
1121 					parent = parent->parent;
1122 				}
1123 				if (j) {
1124 					/*
1125 					 * Often, there are two cardbus bridges
1126 					 * -- try to leave one valid bus number
1127 					 * for each one.
1128 					 */
1129 					i /= 2;
1130 					break;
1131 				}
1132 			}
1133 			max += i;
1134 		}
1135 		/*
1136 		 * Set the subordinate bus number to its real value.
1137 		 */
1138 		pci_bus_update_busn_res_end(child, max);
1139 		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
1140 	}
1141 
1142 	sprintf(child->name,
1143 		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
1144 		pci_domain_nr(bus), child->number);
1145 
1146 	/* Has only triggered on CardBus, fixup is in yenta_socket */
1147 	while (bus->parent) {
1148 		if ((child->busn_res.end > bus->busn_res.end) ||
1149 		    (child->number > bus->busn_res.end) ||
1150 		    (child->number < bus->number) ||
1151 		    (child->busn_res.end < bus->number)) {
1152 			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
1153 				&child->busn_res,
1154 				(bus->number > child->busn_res.end &&
1155 				 bus->busn_res.end < child->number) ?
1156 					"wholly" : "partially",
1157 				bus->self->transparent ? " transparent" : "",
1158 				dev_name(&bus->dev),
1159 				&bus->busn_res);
1160 		}
1161 		bus = bus->parent;
1162 	}
1163 
1164 out:
1165 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
1166 
1167 	pm_runtime_put(&dev->dev);
1168 
1169 	return max;
1170 }
1171 EXPORT_SYMBOL(pci_scan_bridge);
1172 
1173 /*
1174  * Read interrupt line and base address registers.
1175  * The architecture-dependent code can tweak these, of course.
1176  */
1177 static void pci_read_irq(struct pci_dev *dev)
1178 {
1179 	unsigned char irq;
1180 
1181 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
1182 	dev->pin = irq;
1183 	if (irq)
1184 		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
1185 	dev->irq = irq;
1186 }
1187 
1188 void set_pcie_port_type(struct pci_dev *pdev)
1189 {
1190 	int pos;
1191 	u16 reg16;
1192 	int type;
1193 	struct pci_dev *parent;
1194 
1195 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1196 	if (!pos)
1197 		return;
1198 
1199 	pdev->pcie_cap = pos;
1200 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
1201 	pdev->pcie_flags_reg = reg16;
1202 	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
1203 	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
1204 
1205 	/*
1206 	 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
1207 	 * of a Link.  No PCIe component has two Links.  Two Links are
1208 	 * connected by a Switch that has a Port on each Link and internal
1209 	 * logic to connect the two Ports.
1210 	 */
1211 	type = pci_pcie_type(pdev);
1212 	if (type == PCI_EXP_TYPE_ROOT_PORT ||
1213 	    type == PCI_EXP_TYPE_PCIE_BRIDGE)
1214 		pdev->has_secondary_link = 1;
1215 	else if (type == PCI_EXP_TYPE_UPSTREAM ||
1216 		 type == PCI_EXP_TYPE_DOWNSTREAM) {
1217 		parent = pci_upstream_bridge(pdev);
1218 
1219 		/*
1220 		 * Usually there's an upstream device (Root Port or Switch
1221 		 * Downstream Port), but we can't assume one exists.
1222 		 */
1223 		if (parent && !parent->has_secondary_link)
1224 			pdev->has_secondary_link = 1;
1225 	}
1226 }
1227 
1228 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
1229 {
1230 	u32 reg32;
1231 
1232 	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
1233 	if (reg32 & PCI_EXP_SLTCAP_HPC)
1234 		pdev->is_hotplug_bridge = 1;
1235 }
1236 
1237 static void set_pcie_thunderbolt(struct pci_dev *dev)
1238 {
1239 	int vsec = 0;
1240 	u32 header;
1241 
1242 	while ((vsec = pci_find_next_ext_capability(dev, vsec,
1243 						    PCI_EXT_CAP_ID_VNDR))) {
1244 		pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
1245 
1246 		/* Is the device part of a Thunderbolt controller? */
1247 		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
1248 		    PCI_VNDR_HEADER_ID(header) == PCI_VSEC_ID_INTEL_TBT) {
1249 			dev->is_thunderbolt = 1;
1250 			return;
1251 		}
1252 	}
1253 }
1254 
1255 /**
1256  * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
1257  * @dev: PCI device
1258  *
1259  * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
1260  * when forwarding a type1 configuration request the bridge must check that
1261  * the extended register address field is zero.  The bridge is not permitted
1262  * to forward the transactions and must handle it as an Unsupported Request.
1263  * Some bridges do not follow this rule and simply drop the extended register
1264  * bits, resulting in the standard config space being aliased, every 256
1265  * bytes across the entire configuration space.  Test for this condition by
1266  * comparing the first dword of each potential alias to the vendor/device ID.
1267  * Known offenders:
1268  *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1269  *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
1270  */
1271 static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
1272 {
1273 #ifdef CONFIG_PCI_QUIRKS
1274 	int pos;
1275 	u32 header, tmp;
1276 
1277 	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
1278 
1279 	for (pos = PCI_CFG_SPACE_SIZE;
1280 	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
1281 		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
1282 		    || header != tmp)
1283 			return false;
1284 	}
1285 
1286 	return true;
1287 #else
1288 	return false;
1289 #endif
1290 }
1291 
1292 /**
1293  * pci_cfg_space_size - get the configuration space size of the PCI device.
1294  * @dev: PCI device
1295  *
1296  * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1297  * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
1298  * access it.  Maybe we don't have a way to generate extended config space
1299  * accesses, or the device is behind a reverse Express bridge.  So we try
1300  * reading the dword at 0x100 which must either be 0 or a valid extended
1301  * capability header.
1302  */
1303 static int pci_cfg_space_size_ext(struct pci_dev *dev)
1304 {
1305 	u32 status;
1306 	int pos = PCI_CFG_SPACE_SIZE;
1307 
1308 	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1309 		return PCI_CFG_SPACE_SIZE;
1310 	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
1311 		return PCI_CFG_SPACE_SIZE;
1312 
1313 	return PCI_CFG_SPACE_EXP_SIZE;
1314 }
1315 
1316 int pci_cfg_space_size(struct pci_dev *dev)
1317 {
1318 	int pos;
1319 	u32 status;
1320 	u16 class;
1321 
1322 	class = dev->class >> 8;
1323 	if (class == PCI_CLASS_BRIDGE_HOST)
1324 		return pci_cfg_space_size_ext(dev);
1325 
1326 	if (pci_is_pcie(dev))
1327 		return pci_cfg_space_size_ext(dev);
1328 
1329 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1330 	if (!pos)
1331 		return PCI_CFG_SPACE_SIZE;
1332 
1333 	pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1334 	if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
1335 		return pci_cfg_space_size_ext(dev);
1336 
1337 	return PCI_CFG_SPACE_SIZE;
1338 }
1339 
1340 #define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1341 
1342 static void pci_msi_setup_pci_dev(struct pci_dev *dev)
1343 {
1344 	/*
1345 	 * Disable the MSI hardware to avoid screaming interrupts
1346 	 * during boot.  This is the power on reset default so
1347 	 * usually this should be a noop.
1348 	 */
1349 	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
1350 	if (dev->msi_cap)
1351 		pci_msi_set_enable(dev, 0);
1352 
1353 	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1354 	if (dev->msix_cap)
1355 		pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
1356 }
1357 
1358 /**
1359  * pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability
1360  * @dev: PCI device
1361  *
1362  * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev.  Check this
1363  * at enumeration-time to avoid modifying PCI_COMMAND at run-time.
1364  */
1365 static int pci_intx_mask_broken(struct pci_dev *dev)
1366 {
1367 	u16 orig, toggle, new;
1368 
1369 	pci_read_config_word(dev, PCI_COMMAND, &orig);
1370 	toggle = orig ^ PCI_COMMAND_INTX_DISABLE;
1371 	pci_write_config_word(dev, PCI_COMMAND, toggle);
1372 	pci_read_config_word(dev, PCI_COMMAND, &new);
1373 
1374 	pci_write_config_word(dev, PCI_COMMAND, orig);
1375 
1376 	/*
1377 	 * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
1378 	 * r2.3, so strictly speaking, a device is not *broken* if it's not
1379 	 * writable.  But we'll live with the misnomer for now.
1380 	 */
1381 	if (new != toggle)
1382 		return 1;
1383 	return 0;
1384 }
1385 
/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor,class,memory and IO-space addresses,IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u16 cmd;
	u8 hdr_type;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	/* If even the header type cannot be read, give up on the device */
	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;	/* low 7 bits: header layout */
	dev->multifunction = !!(hdr_type & 0x80);	/* bit 7: multifunction flag */
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	pci_dev_assign_slot(dev);
	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;

	/* Canonical device name: domain:bus:slot.function */
	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	/* Class/revision dword: revision in the low byte, class code above */
	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		    /* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* need to have dev->cfg_size ready */
	set_pcie_thunderbolt(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	/* Quirked devices with bogus BARs must not decode IO/MEM cycles */
	if (dev->non_compliant_bars) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
			cmd &= ~PCI_COMMAND_IO;
			cmd &= ~PCI_COMMAND_MEMORY;
			pci_write_config_word(dev, PCI_COMMAND, cmd);
		}
	}

	/* Probe once whether the INTx-disable bit is writable */
	dev->broken_intx_masking = pci_intx_mask_broken(dev);

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code. Legacy mode ATA controllers have fixed
		 * addresses. These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				/* Primary channel in legacy mode */
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
					 res);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
					 res);
			}
			if ((progif & 4) == 0) {
				/* Secondary channel in legacy mode */
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
					 res);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
					 res);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		/* Bridges carry their subsystem IDs in the SSVID capability */
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
			dev->hdr_type);
		return -EIO;

	bad:
		/* Class/header mismatch: keep the device, neutralize its class */
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
			dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED << 8;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}
1551 
1552 static void pci_configure_mps(struct pci_dev *dev)
1553 {
1554 	struct pci_dev *bridge = pci_upstream_bridge(dev);
1555 	int mps, p_mps, rc;
1556 
1557 	if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
1558 		return;
1559 
1560 	mps = pcie_get_mps(dev);
1561 	p_mps = pcie_get_mps(bridge);
1562 
1563 	if (mps == p_mps)
1564 		return;
1565 
1566 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
1567 		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1568 			 mps, pci_name(bridge), p_mps);
1569 		return;
1570 	}
1571 
1572 	/*
1573 	 * Fancier MPS configuration is done later by
1574 	 * pcie_bus_configure_settings()
1575 	 */
1576 	if (pcie_bus_config != PCIE_BUS_DEFAULT)
1577 		return;
1578 
1579 	rc = pcie_set_mps(dev, p_mps);
1580 	if (rc) {
1581 		dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1582 			 p_mps);
1583 		return;
1584 	}
1585 
1586 	dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
1587 		 p_mps, mps, 128 << dev->pcie_mpss);
1588 }
1589 
/*
 * Default Type 0 hotplug parameters, applied by program_hpp_type0() when
 * the platform supplies none (or an unsupported revision).
 */
static struct hpp_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,		/* written to PCI_CACHE_LINE_SIZE */
	.latency_timer = 0x40,		/* written to PCI_LATENCY_TIMER */
	.enable_serr = 0,
	.enable_perr = 0,
};
1597 
1598 static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
1599 {
1600 	u16 pci_cmd, pci_bctl;
1601 
1602 	if (!hpp)
1603 		hpp = &pci_default_type0;
1604 
1605 	if (hpp->revision > 1) {
1606 		dev_warn(&dev->dev,
1607 			 "PCI settings rev %d not supported; using defaults\n",
1608 			 hpp->revision);
1609 		hpp = &pci_default_type0;
1610 	}
1611 
1612 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
1613 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
1614 	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
1615 	if (hpp->enable_serr)
1616 		pci_cmd |= PCI_COMMAND_SERR;
1617 	if (hpp->enable_perr)
1618 		pci_cmd |= PCI_COMMAND_PARITY;
1619 	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
1620 
1621 	/* Program bridge control value */
1622 	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
1623 		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
1624 				      hpp->latency_timer);
1625 		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
1626 		if (hpp->enable_serr)
1627 			pci_bctl |= PCI_BRIDGE_CTL_SERR;
1628 		if (hpp->enable_perr)
1629 			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
1630 		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
1631 	}
1632 }
1633 
1634 static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1635 {
1636 	int pos;
1637 
1638 	if (!hpp)
1639 		return;
1640 
1641 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1642 	if (!pos)
1643 		return;
1644 
1645 	dev_warn(&dev->dev, "PCI-X settings not supported\n");
1646 }
1647 
1648 static bool pcie_root_rcb_set(struct pci_dev *dev)
1649 {
1650 	struct pci_dev *rp = pcie_find_root_port(dev);
1651 	u16 lnkctl;
1652 
1653 	if (!rp)
1654 		return false;
1655 
1656 	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
1657 	if (lnkctl & PCI_EXP_LNKCTL_RCB)
1658 		return true;
1659 
1660 	return false;
1661 }
1662 
static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
	int pos;
	u32 reg32;

	if (!hpp)
		return;

	/* Type 2 records only apply to PCI Express functions */
	if (!pci_is_pcie(dev))
		return;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
			 hpp->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ;
	hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {

		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/*
	 * The remaining registers are applied read-modify-write using the
	 * and/or masks from the firmware-supplied record.
	 */

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
	/* Don't enable ECRC generation or checking if unsupported */
	if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
		reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
	if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
		reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}
1747 
1748 static void pci_configure_extended_tags(struct pci_dev *dev)
1749 {
1750 	u32 dev_cap;
1751 	int ret;
1752 
1753 	if (!pci_is_pcie(dev))
1754 		return;
1755 
1756 	ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &dev_cap);
1757 	if (ret)
1758 		return;
1759 
1760 	if (dev_cap & PCI_EXP_DEVCAP_EXT_TAG)
1761 		pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
1762 					 PCI_EXP_DEVCTL_EXT_TAG);
1763 }
1764 
1765 /**
1766  * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
1767  * @dev: PCI device to query
1768  *
1769  * Returns true if the device has enabled relaxed ordering attribute.
1770  */
1771 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
1772 {
1773 	u16 v;
1774 
1775 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v);
1776 
1777 	return !!(v & PCI_EXP_DEVCTL_RELAX_EN);
1778 }
1779 EXPORT_SYMBOL(pcie_relaxed_ordering_enabled);
1780 
1781 static void pci_configure_relaxed_ordering(struct pci_dev *dev)
1782 {
1783 	struct pci_dev *root;
1784 
1785 	/* PCI_EXP_DEVICE_RELAX_EN is RsvdP in VFs */
1786 	if (dev->is_virtfn)
1787 		return;
1788 
1789 	if (!pcie_relaxed_ordering_enabled(dev))
1790 		return;
1791 
1792 	/*
1793 	 * For now, we only deal with Relaxed Ordering issues with Root
1794 	 * Ports. Peer-to-Peer DMA is another can of worms.
1795 	 */
1796 	root = pci_find_pcie_root_port(dev);
1797 	if (!root)
1798 		return;
1799 
1800 	if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
1801 		pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
1802 					   PCI_EXP_DEVCTL_RELAX_EN);
1803 		dev_info(&dev->dev, "Disable Relaxed Ordering because the Root Port didn't support it\n");
1804 	}
1805 }
1806 
1807 static void pci_configure_device(struct pci_dev *dev)
1808 {
1809 	struct hotplug_params hpp;
1810 	int ret;
1811 
1812 	pci_configure_mps(dev);
1813 	pci_configure_extended_tags(dev);
1814 	pci_configure_relaxed_ordering(dev);
1815 
1816 	memset(&hpp, 0, sizeof(hpp));
1817 	ret = pci_get_hp_params(dev, &hpp);
1818 	if (ret)
1819 		return;
1820 
1821 	program_hpp_type2(dev, hpp.t2);
1822 	program_hpp_type1(dev, hpp.t1);
1823 	program_hpp_type0(dev, hpp.t0);
1824 }
1825 
/*
 * Tear down the per-capability state that pci_init_capabilities() set up
 * (VPD, SR-IOV, and the PCIe/PCI-X save buffers).
 */
static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}
1832 
/**
 * pci_release_dev - free a pci device structure when all users of it are finished.
 * @dev: device that's been disconnected
 *
 * Will be called only by the device core when all users of this pci device are
 * done.
 */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);	/* drop OF node set by pci_set_of_node() */
	pcibios_release_device(pci_dev);
	pci_bus_put(pci_dev->bus);	/* ref taken in pci_alloc_dev() */
	kfree(pci_dev->driver_override);
	kfree(pci_dev->dma_alias_mask);
	kfree(pci_dev);
}
1853 
1854 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1855 {
1856 	struct pci_dev *dev;
1857 
1858 	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1859 	if (!dev)
1860 		return NULL;
1861 
1862 	INIT_LIST_HEAD(&dev->bus_list);
1863 	dev->dev.type = &pci_dev_type;
1864 	dev->bus = pci_bus_get(bus);
1865 
1866 	return dev;
1867 }
1868 EXPORT_SYMBOL(pci_alloc_dev);
1869 
1870 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1871 				int crs_timeout)
1872 {
1873 	int delay = 1;
1874 
1875 	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1876 		return false;
1877 
1878 	/* some broken boards return 0 or ~0 if a slot is empty: */
1879 	if (*l == 0xffffffff || *l == 0x00000000 ||
1880 	    *l == 0x0000ffff || *l == 0xffff0000)
1881 		return false;
1882 
1883 	/*
1884 	 * Configuration Request Retry Status.  Some root ports return the
1885 	 * actual device ID instead of the synthetic ID (0xFFFF) required
1886 	 * by the PCIe spec.  Ignore the device ID and only check for
1887 	 * (vendor id == 1).
1888 	 */
1889 	while ((*l & 0xffff) == 0x0001) {
1890 		if (!crs_timeout)
1891 			return false;
1892 
1893 		msleep(delay);
1894 		delay *= 2;
1895 		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1896 			return false;
1897 		/* Card hasn't responded in 60 seconds?  Must be stuck. */
1898 		if (delay > crs_timeout) {
1899 			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
1900 			       pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
1901 			       PCI_FUNC(devfn));
1902 			return false;
1903 		}
1904 	}
1905 
1906 	return true;
1907 }
1908 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1909 
1910 /*
1911  * Read the config data for a PCI device, sanity-check it
1912  * and fill in the dev structure...
1913  */
1914 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1915 {
1916 	struct pci_dev *dev;
1917 	u32 l;
1918 
1919 	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1920 		return NULL;
1921 
1922 	dev = pci_alloc_dev(bus);
1923 	if (!dev)
1924 		return NULL;
1925 
1926 	dev->devfn = devfn;
1927 	dev->vendor = l & 0xffff;
1928 	dev->device = (l >> 16) & 0xffff;
1929 
1930 	pci_set_of_node(dev);
1931 
1932 	if (pci_setup_device(dev)) {
1933 		pci_bus_put(dev->bus);
1934 		kfree(dev);
1935 		return NULL;
1936 	}
1937 
1938 	return dev;
1939 }
1940 
/*
 * Discover and initialize the optional capabilities of @dev.  The state
 * that needs teardown (VPD, IOV, save buffers) is undone by
 * pci_release_capabilities().
 */
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* Enhanced Allocation */
	pci_ea_init(dev);

	/* Setup MSI caps & disable MSI/MSI-X interrupts */
	pci_msi_setup_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Address Translation Services */
	pci_ats_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);

	/* Precision Time Measurement */
	pci_ptm_init(dev);

	/* Advanced Error Reporting */
	pci_aer_init(dev);
}
1976 
1977 /*
1978  * This is the equivalent of pci_host_bridge_msi_domain that acts on
1979  * devices. Firmware interfaces that can select the MSI domain on a
1980  * per-device basis should be called from here.
1981  */
1982 static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
1983 {
1984 	struct irq_domain *d;
1985 
1986 	/*
1987 	 * If a domain has been set through the pcibios_add_device
1988 	 * callback, then this is the one (platform code knows best).
1989 	 */
1990 	d = dev_get_msi_domain(&dev->dev);
1991 	if (d)
1992 		return d;
1993 
1994 	/*
1995 	 * Let's see if we have a firmware interface able to provide
1996 	 * the domain.
1997 	 */
1998 	d = pci_msi_get_device_domain(dev);
1999 	if (d)
2000 		return d;
2001 
2002 	return NULL;
2003 }
2004 
2005 static void pci_set_msi_domain(struct pci_dev *dev)
2006 {
2007 	struct irq_domain *d;
2008 
2009 	/*
2010 	 * If the platform or firmware interfaces cannot supply a
2011 	 * device-specific MSI domain, then inherit the default domain
2012 	 * from the host bridge itself.
2013 	 */
2014 	d = pci_dev_msi_domain(dev);
2015 	if (!d)
2016 		d = dev_get_msi_domain(&dev->bus->dev);
2017 
2018 	dev_set_msi_domain(&dev->dev, d);
2019 }
2020 
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	/* MPS, extended tags, relaxed ordering, platform _HPP/_HPX */
	pci_configure_device(dev);

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Setup MSI irq domain */
	pci_set_msi_domain(dev);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);
}
2069 
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev = pci_get_slot(bus, devfn);

	if (dev) {
		/* Already scanned; drop the reference pci_get_slot() took */
		pci_dev_put(dev);
		return dev;
	}

	dev = pci_scan_device(bus, devfn);
	if (dev)
		pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(pci_scan_single_device);
2089 
2090 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
2091 {
2092 	int pos;
2093 	u16 cap = 0;
2094 	unsigned next_fn;
2095 
2096 	if (pci_ari_enabled(bus)) {
2097 		if (!dev)
2098 			return 0;
2099 		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
2100 		if (!pos)
2101 			return 0;
2102 
2103 		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
2104 		next_fn = PCI_ARI_CAP_NFN(cap);
2105 		if (next_fn <= fn)
2106 			return 0;	/* protect against malformed list */
2107 
2108 		return next_fn;
2109 	}
2110 
2111 	/* dev may be NULL for non-contiguous multifunction devices */
2112 	if (!dev || dev->multifunction)
2113 		return (fn + 1) % 8;
2114 
2115 	return 0;
2116 }
2117 
2118 static int only_one_child(struct pci_bus *bus)
2119 {
2120 	struct pci_dev *parent = bus->self;
2121 
2122 	if (!parent || !pci_is_pcie(parent))
2123 		return 0;
2124 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
2125 		return 1;
2126 
2127 	/*
2128 	 * PCIe downstream ports are bridges that normally lead to only a
2129 	 * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all
2130 	 * possible devices, not just device 0.  See PCIe spec r3.0,
2131 	 * sec 7.3.1.
2132 	 */
2133 	if (parent->has_secondary_link &&
2134 	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
2135 		return 1;
2136 	return 0;
2137 }
2138 
2139 /**
2140  * pci_scan_slot - scan a PCI slot on a bus for devices.
2141  * @bus: PCI bus to scan
2142  * @devfn: slot number to scan (must have zero function.)
2143  *
2144  * Scan a PCI slot on the specified PCI bus for devices, adding
2145  * discovered devices to the @bus->devices list.  New devices
2146  * will not have is_added set.
2147  *
2148  * Returns the number of new devices found.
2149  */
2150 int pci_scan_slot(struct pci_bus *bus, int devfn)
2151 {
2152 	unsigned fn, nr = 0;
2153 	struct pci_dev *dev;
2154 
2155 	if (only_one_child(bus) && (devfn > 0))
2156 		return 0; /* Already scanned the entire slot */
2157 
2158 	dev = pci_scan_single_device(bus, devfn);
2159 	if (!dev)
2160 		return 0;
2161 	if (!dev->is_added)
2162 		nr++;
2163 
2164 	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
2165 		dev = pci_scan_single_device(bus, devfn + fn);
2166 		if (dev) {
2167 			if (!dev->is_added)
2168 				nr++;
2169 			dev->multifunction = 1;
2170 		}
2171 	}
2172 
2173 	/* only one slot has pcie device */
2174 	if (bus->self && nr)
2175 		pcie_aspm_init_link_state(bus->self);
2176 
2177 	return nr;
2178 }
2179 EXPORT_SYMBOL(pci_scan_slot);
2180 
2181 static int pcie_find_smpss(struct pci_dev *dev, void *data)
2182 {
2183 	u8 *smpss = data;
2184 
2185 	if (!pci_is_pcie(dev))
2186 		return 0;
2187 
2188 	/*
2189 	 * We don't have a way to change MPS settings on devices that have
2190 	 * drivers attached.  A hot-added device might support only the minimum
2191 	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
2192 	 * where devices may be hot-added, we limit the fabric MPS to 128 so
2193 	 * hot-added devices will work correctly.
2194 	 *
2195 	 * However, if we hot-add a device to a slot directly below a Root
2196 	 * Port, it's impossible for there to be other existing devices below
2197 	 * the port.  We don't limit the MPS in this case because we can
2198 	 * reconfigure MPS on both the Root Port and the hot-added device,
2199 	 * and there are no other devices involved.
2200 	 *
2201 	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
2202 	 */
2203 	if (dev->is_hotplug_bridge &&
2204 	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
2205 		*smpss = 0;
2206 
2207 	if (*smpss > dev->pcie_mpss)
2208 		*smpss = dev->pcie_mpss;
2209 
2210 	return 0;
2211 }
2212 
2213 static void pcie_write_mps(struct pci_dev *dev, int mps)
2214 {
2215 	int rc;
2216 
2217 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
2218 		mps = 128 << dev->pcie_mpss;
2219 
2220 		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
2221 		    dev->bus->self)
2222 			/* For "Performance", the assumption is made that
2223 			 * downstream communication will never be larger than
2224 			 * the MRRS.  So, the MPS only needs to be configured
2225 			 * for the upstream communication.  This being the case,
2226 			 * walk from the top down and set the MPS of the child
2227 			 * to that of the parent bus.
2228 			 *
2229 			 * Configure the device MPS with the smaller of the
2230 			 * device MPSS or the bridge MPS (which is assumed to be
2231 			 * properly configured at this point to the largest
2232 			 * allowable MPS based on its parent bus).
2233 			 */
2234 			mps = min(mps, pcie_get_mps(dev->bus->self));
2235 	}
2236 
2237 	rc = pcie_set_mps(dev, mps);
2238 	if (rc)
2239 		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
2240 }
2241 
2242 static void pcie_write_mrrs(struct pci_dev *dev)
2243 {
2244 	int rc, mrrs;
2245 
2246 	/* In the "safe" case, do not configure the MRRS.  There appear to be
2247 	 * issues with setting MRRS to 0 on a number of devices.
2248 	 */
2249 	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
2250 		return;
2251 
2252 	/* For Max performance, the MRRS must be set to the largest supported
2253 	 * value.  However, it cannot be configured larger than the MPS the
2254 	 * device or the bus can support.  This should already be properly
2255 	 * configured by a prior call to pcie_write_mps.
2256 	 */
2257 	mrrs = pcie_get_mps(dev);
2258 
2259 	/* MRRS is a R/W register.  Invalid values can be written, but a
2260 	 * subsequent read will verify if the value is acceptable or not.
2261 	 * If the MRRS value provided is not acceptable (e.g., too large),
2262 	 * shrink the value until it is acceptable to the HW.
2263 	 */
2264 	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
2265 		rc = pcie_set_readrq(dev, mrrs);
2266 		if (!rc)
2267 			break;
2268 
2269 		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
2270 		mrrs /= 2;
2271 	}
2272 
2273 	if (mrrs < 128)
2274 		dev_err(&dev->dev, "MRRS was unable to be configured with a safe value.  If problems are experienced, try running with pci=pcie_bus_safe\n");
2275 }
2276 
2277 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
2278 {
2279 	int mps, orig_mps;
2280 
2281 	if (!pci_is_pcie(dev))
2282 		return 0;
2283 
2284 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
2285 	    pcie_bus_config == PCIE_BUS_DEFAULT)
2286 		return 0;
2287 
2288 	mps = 128 << *(u8 *)data;
2289 	orig_mps = pcie_get_mps(dev);
2290 
2291 	pcie_write_mps(dev, mps);
2292 	pcie_write_mrrs(dev);
2293 
2294 	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
2295 		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
2296 		 orig_mps, pcie_get_readrq(dev));
2297 
2298 	return 0;
2299 }
2300 
2301 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
2302  * parents then children fashion.  If this changes, then this code will not
2303  * work as designed.
2304  */
2305 void pcie_bus_configure_settings(struct pci_bus *bus)
2306 {
2307 	u8 smpss = 0;
2308 
2309 	if (!bus->self)
2310 		return;
2311 
2312 	if (!pci_is_pcie(bus->self))
2313 		return;
2314 
2315 	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
2316 	 * to be aware of the MPS of the destination.  To work around this,
2317 	 * simply force the MPS of the entire system to the smallest possible.
2318 	 */
2319 	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
2320 		smpss = 0;
2321 
2322 	if (pcie_bus_config == PCIE_BUS_SAFE) {
2323 		smpss = bus->self->pcie_mpss;
2324 
2325 		pcie_find_smpss(bus->self, &smpss);
2326 		pci_walk_bus(bus, pcie_find_smpss, &smpss);
2327 	}
2328 
2329 	pcie_bus_configure_set(bus->self, &smpss);
2330 	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
2331 }
2332 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
2333 
/**
 * pci_scan_child_bus - scan for devices below a PCI bus
 * @bus: bus to scan
 *
 * Probes all 32 device slots on @bus, reserves bus numbers for SR-IOV,
 * runs arch bus fixups once, then walks the discovered devices in two
 * passes to scan behind any bridges.  Hotplug bridges are guaranteed at
 * least pci_hotplug_bus_size buses.
 *
 * Returns the highest subordinate bus number found below @bus.
 */
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	/* Two bridge passes; pass semantics are handled by pci_scan_bridge(). */
	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (pci_is_bridge(dev))
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * Make sure a hotplug bridge has at least the minimum requested
	 * number of buses.
	 */
	if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
		if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
			max = bus->busn_res.start + pci_hotplug_bus_size - 1;
	}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
2384 
2385 /**
2386  * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
2387  * @bridge: Host bridge to set up.
2388  *
2389  * Default empty implementation.  Replace with an architecture-specific setup
2390  * routine, if necessary.
2391  */
2392 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
2393 {
2394 	return 0;
2395 }
2396 
/* Weak no-op default; architectures may override to hook bus addition. */
void __weak pcibios_add_bus(struct pci_bus *bus)
{
}
2400 
/* Weak no-op default; architectures may override to hook bus removal. */
void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}
2404 
/**
 * pci_create_root_bus - allocate and register a PCI host bridge / root bus
 * @parent: parent device for the host bridge (may be NULL)
 * @bus: root bus number
 * @ops: config-space access operations for the bus
 * @sysdata: opaque per-arch data stored in the bridge
 * @resources: bus resources; spliced (moved) into the bridge's window list
 *
 * Return: the new root bus, or NULL on failure.
 *
 * NOTE(review): on the error path only the bridge struct itself is
 * kfree()d; the resource entries already spliced into bridge->windows
 * appear to be leaked unless pci_register_host_bridge() releases them
 * on failure — TODO confirm.
 */
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;

	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return NULL;

	bridge->dev.parent = parent;

	list_splice_init(resources, &bridge->windows);
	bridge->sysdata = sysdata;
	bridge->busnr = bus;
	bridge->ops = ops;

	error = pci_register_host_bridge(bridge);
	if (error < 0)
		goto err_out;

	return bridge->bus;

err_out:
	kfree(bridge);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_create_root_bus);
2433 
2434 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
2435 {
2436 	struct resource *res = &b->busn_res;
2437 	struct resource *parent_res, *conflict;
2438 
2439 	res->start = bus;
2440 	res->end = bus_max;
2441 	res->flags = IORESOURCE_BUS;
2442 
2443 	if (!pci_is_root_bus(b))
2444 		parent_res = &b->parent->busn_res;
2445 	else {
2446 		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
2447 		res->flags |= IORESOURCE_PCI_FIXED;
2448 	}
2449 
2450 	conflict = request_resource_conflict(parent_res, res);
2451 
2452 	if (conflict)
2453 		dev_printk(KERN_DEBUG, &b->dev,
2454 			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
2455 			    res, pci_is_root_bus(b) ? "domain " : "",
2456 			    parent_res, conflict->name, conflict);
2457 
2458 	return conflict == NULL;
2459 }
2460 
2461 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
2462 {
2463 	struct resource *res = &b->busn_res;
2464 	struct resource old_res = *res;
2465 	resource_size_t size;
2466 	int ret;
2467 
2468 	if (res->start > bus_max)
2469 		return -EINVAL;
2470 
2471 	size = bus_max - res->start + 1;
2472 	ret = adjust_resource(res, res->start, size);
2473 	dev_printk(KERN_DEBUG, &b->dev,
2474 			"busn_res: %pR end %s updated to %02x\n",
2475 			&old_res, ret ? "can not be" : "is", bus_max);
2476 
2477 	if (!ret && !res->parent)
2478 		pci_bus_insert_busn_res(b, res->start, res->end);
2479 
2480 	return ret;
2481 }
2482 
2483 void pci_bus_release_busn_res(struct pci_bus *b)
2484 {
2485 	struct resource *res = &b->busn_res;
2486 	int ret;
2487 
2488 	if (!res->flags || !res->parent)
2489 		return;
2490 
2491 	ret = release_resource(res);
2492 	dev_printk(KERN_DEBUG, &b->dev,
2493 			"busn_res: %pR %s released\n",
2494 			res, ret ? "can not be" : "is");
2495 }
2496 
2497 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge)
2498 {
2499 	struct resource_entry *window;
2500 	bool found = false;
2501 	struct pci_bus *b;
2502 	int max, bus, ret;
2503 
2504 	if (!bridge)
2505 		return -EINVAL;
2506 
2507 	resource_list_for_each_entry(window, &bridge->windows)
2508 		if (window->res->flags & IORESOURCE_BUS) {
2509 			found = true;
2510 			break;
2511 		}
2512 
2513 	ret = pci_register_host_bridge(bridge);
2514 	if (ret < 0)
2515 		return ret;
2516 
2517 	b = bridge->bus;
2518 	bus = bridge->busnr;
2519 
2520 	if (!found) {
2521 		dev_info(&b->dev,
2522 		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2523 			bus);
2524 		pci_bus_insert_busn_res(b, bus, 255);
2525 	}
2526 
2527 	max = pci_scan_child_bus(b);
2528 
2529 	if (!found)
2530 		pci_bus_update_busn_res_end(b, max);
2531 
2532 	return 0;
2533 }
2534 EXPORT_SYMBOL(pci_scan_root_bus_bridge);
2535 
2536 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
2537 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
2538 {
2539 	struct resource_entry *window;
2540 	bool found = false;
2541 	struct pci_bus *b;
2542 	int max;
2543 
2544 	resource_list_for_each_entry(window, resources)
2545 		if (window->res->flags & IORESOURCE_BUS) {
2546 			found = true;
2547 			break;
2548 		}
2549 
2550 	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
2551 	if (!b)
2552 		return NULL;
2553 
2554 	if (!found) {
2555 		dev_info(&b->dev,
2556 		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2557 			bus);
2558 		pci_bus_insert_busn_res(b, bus, 255);
2559 	}
2560 
2561 	max = pci_scan_child_bus(b);
2562 
2563 	if (!found)
2564 		pci_bus_update_busn_res_end(b, max);
2565 
2566 	return b;
2567 }
2568 EXPORT_SYMBOL(pci_scan_root_bus);
2569 
2570 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
2571 					void *sysdata)
2572 {
2573 	LIST_HEAD(resources);
2574 	struct pci_bus *b;
2575 
2576 	pci_add_resource(&resources, &ioport_resource);
2577 	pci_add_resource(&resources, &iomem_resource);
2578 	pci_add_resource(&resources, &busn_resource);
2579 	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2580 	if (b) {
2581 		pci_scan_child_bus(b);
2582 	} else {
2583 		pci_free_resource_list(&resources);
2584 	}
2585 	return b;
2586 }
2587 EXPORT_SYMBOL(pci_scan_bus);
2588 
2589 /**
2590  * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2591  * @bridge: PCI bridge for the bus to scan
2592  *
2593  * Scan a PCI bus and child buses for new devices, add them,
2594  * and enable them, resizing bridge mmio/io resource if necessary
2595  * and possible.  The caller must ensure the child devices are already
2596  * removed for resizing to occur.
2597  *
2598  * Returns the max number of subordinate bus discovered.
2599  */
2600 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2601 {
2602 	unsigned int max;
2603 	struct pci_bus *bus = bridge->subordinate;
2604 
2605 	max = pci_scan_child_bus(bus);
2606 
2607 	pci_assign_unassigned_bridge_resources(bridge);
2608 
2609 	pci_bus_add_devices(bus);
2610 
2611 	return max;
2612 }
2613 
2614 /**
2615  * pci_rescan_bus - scan a PCI bus for devices.
2616  * @bus: PCI bus to scan
2617  *
2618  * Scan a PCI bus and child buses for new devices, adds them,
2619  * and enables them.
2620  *
2621  * Returns the max number of subordinate bus discovered.
2622  */
2623 unsigned int pci_rescan_bus(struct pci_bus *bus)
2624 {
2625 	unsigned int max;
2626 
2627 	max = pci_scan_child_bus(bus);
2628 	pci_assign_unassigned_bus_resources(bus);
2629 	pci_bus_add_devices(bus);
2630 
2631 	return max;
2632 }
2633 EXPORT_SYMBOL_GPL(pci_rescan_bus);
2634 
2635 /*
2636  * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2637  * routines should always be executed under this mutex.
2638  */
2639 static DEFINE_MUTEX(pci_rescan_remove_lock);
2640 
/* Take the global lock serializing PCI rescan and removal paths. */
void pci_lock_rescan_remove(void)
{
	mutex_lock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2646 
/* Release the lock taken by pci_lock_rescan_remove(). */
void pci_unlock_rescan_remove(void)
{
	mutex_unlock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
2652 
2653 static int __init pci_sort_bf_cmp(const struct device *d_a,
2654 				  const struct device *d_b)
2655 {
2656 	const struct pci_dev *a = to_pci_dev(d_a);
2657 	const struct pci_dev *b = to_pci_dev(d_b);
2658 
2659 	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2660 	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
2661 
2662 	if      (a->bus->number < b->bus->number) return -1;
2663 	else if (a->bus->number > b->bus->number) return  1;
2664 
2665 	if      (a->devfn < b->devfn) return -1;
2666 	else if (a->devfn > b->devfn) return  1;
2667 
2668 	return 0;
2669 }
2670 
/* Re-sort all PCI devices on the bus type's list into breadth-first order. */
void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}
2675