xref: /openbmc/linux/drivers/pci/probe.c (revision 6aa7de05)
1 /*
2  * probe.c - PCI detection and setup code
3  */
4 
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/of_device.h>
10 #include <linux/of_pci.h>
11 #include <linux/pci_hotplug.h>
12 #include <linux/slab.h>
13 #include <linux/module.h>
14 #include <linux/cpumask.h>
15 #include <linux/pci-aspm.h>
16 #include <linux/aer.h>
17 #include <linux/acpi.h>
18 #include <linux/irqdomain.h>
19 #include <linux/pm_runtime.h>
20 #include "pci.h"
21 
22 #define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
23 #define CARDBUS_RESERVE_BUSNR	3
24 
/* Default bus number range [bus 00-ff] used for the primary domain. */
static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

/* One pci_domain_busn_res per PCI domain seen so far; see
 * get_pci_domain_busn_res() below. */
static LIST_HEAD(pci_domain_busn_res_list);

/* Per-domain bus number resource. */
struct pci_domain_busn_res {
	struct list_head list;		/* node in pci_domain_busn_res_list */
	struct resource res;		/* bus number range for this domain */
	int domain_nr;			/* PCI domain (segment) number */
};
43 
44 static struct resource *get_pci_domain_busn_res(int domain_nr)
45 {
46 	struct pci_domain_busn_res *r;
47 
48 	list_for_each_entry(r, &pci_domain_busn_res_list, list)
49 		if (r->domain_nr == domain_nr)
50 			return &r->res;
51 
52 	r = kzalloc(sizeof(*r), GFP_KERNEL);
53 	if (!r)
54 		return NULL;
55 
56 	r->domain_nr = domain_nr;
57 	r->res.start = 0;
58 	r->res.end = 0xff;
59 	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
60 
61 	list_add_tail(&r->list, &pci_domain_busn_res_list);
62 
63 	return &r->res;
64 }
65 
/* bus_find_device() match callback that accepts every device. */
static int find_anything(struct device *dev, void *data)
{
	return 1;
}
70 
71 /*
72  * Some device drivers need know if pci is initiated.
73  * Basically, we think pci is not initiated when there
74  * is no device to be found on the pci_bus_type.
75  */
76 int no_pci_devices(void)
77 {
78 	struct device *dev;
79 	int no_devices;
80 
81 	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
82 	no_devices = (dev == NULL);
83 	put_device(dev);
84 	return no_devices;
85 }
86 EXPORT_SYMBOL(no_pci_devices);
87 
88 /*
89  * PCI Bus Class
90  */
91 static void release_pcibus_dev(struct device *dev)
92 {
93 	struct pci_bus *pci_bus = to_pci_bus(dev);
94 
95 	put_device(pci_bus->bridge);
96 	pci_bus_remove_resources(pci_bus);
97 	pci_release_bus_of_node(pci_bus);
98 	kfree(pci_bus);
99 }
100 
/* sysfs class for PCI buses (/sys/class/pci_bus) */
static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

/* Register the class early, before any PCI bus can be created. */
static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);
112 
113 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
114 {
115 	u64 size = mask & maxbase;	/* Find the significant bits */
116 	if (!size)
117 		return 0;
118 
119 	/* Get the lowest of them to find the decode size, and
120 	   from that the extent.  */
121 	size = (size & ~(size-1)) - 1;
122 
123 	/* base == maxbase can be valid only if the BAR has
124 	   already been programmed with all 1s.  */
125 	if (base == maxbase && ((base | size) & mask) != mask)
126 		return 0;
127 
128 	return size;
129 }
130 
131 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
132 {
133 	u32 mem_type;
134 	unsigned long flags;
135 
136 	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
137 		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
138 		flags |= IORESOURCE_IO;
139 		return flags;
140 	}
141 
142 	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
143 	flags |= IORESOURCE_MEM;
144 	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
145 		flags |= IORESOURCE_PREFETCH;
146 
147 	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
148 	switch (mem_type) {
149 	case PCI_BASE_ADDRESS_MEM_TYPE_32:
150 		break;
151 	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
152 		/* 1M mem BAR treated as 32-bit BAR */
153 		break;
154 	case PCI_BASE_ADDRESS_MEM_TYPE_64:
155 		flags |= IORESOURCE_MEM_64;
156 		break;
157 	default:
158 		/* mem unknown type treated as 32-bit BAR */
159 		break;
160 	}
161 	return flags;
162 }
163 
164 #define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
165 
/**
 * pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Sizes the BAR with the standard write-all-1s/read-back sequence and
 * converts the decoded bus address range into @res.
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l = 0, sz = 0, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;

	/* ROM BARs keep their enable bit in bit 0; mask it out for sizing */
	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	/* Sizing sequence: save, write all 1s, read back, restore */
	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (sz == 0xffffffff)
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		/* Expansion ROM BAR */
		if (l & PCI_ROM_ADDRESS_ENABLE)
			res->flags |= IORESOURCE_ROM_ENABLE;
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = PCI_ROM_ADDRESS_MASK;
	}

	/* A 64-bit BAR consumes the next dword as the upper 32 bits */
	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	/* Re-enable decoding only if we disabled it above */
	if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (!sz64)
		goto fail;

	/* sz64 now holds the extent (size - 1) */
	sz64 = pci_size(l64, sz64, mask64);
	if (!sz64) {
		dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
			 pos);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
		    && sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
				pos, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(pci_bus_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
				 pos, (unsigned long long)l64);
			goto out;
		}
	}

	region.start = l64;
	region.end = l64 + sz64;

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU.  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long)region.start);
	}

	goto out;


fail:
	res->flags = 0;
out:
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
319 
/*
 * Read and size the standard BARs of @dev, and optionally its expansion
 * ROM BAR.  @howmany is the number of 32-bit BAR slots in the header;
 * @rom is the config space offset of the ROM BAR, or 0 if none.
 */
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	if (dev->non_compliant_bars)
		return;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		/*
		 * __pci_read_base() returns 1 for a 64-bit BAR, which
		 * occupies two slots; skip the upper half in that case.
		 */
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}
341 
/*
 * Read the bridge's I/O window registers into child->resource[0].
 * Handles 16-bit and 32-bit I/O windows and the optional 1K-granularity
 * mode some bridges implement.
 */
static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	/* The register bytes supply address bits 15:8 after masking */
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	/* base > limit means the window is disabled */
	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		/* the limit register names the last granule; extend to its end */
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
381 
/*
 * Read the bridge's non-prefetchable memory window registers into
 * child->resource[1].  The window has 1MB granularity.
 */
static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	/* Registers hold address bits 31:20 */
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	/* base > limit means the window is disabled */
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;	/* limit names the last 1MB block */
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
403 
/*
 * Read the bridge's prefetchable memory window registers into
 * child->resource[2].  The window has 1MB granularity and may be
 * extended to 64 bits via the UPPER32 registers.
 */
static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	u64 base64, limit64;
	pci_bus_addr_t base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	/* Registers hold address bits 31:20 */
	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base64 |= (u64) mem_base_hi << 32;
			limit64 |= (u64) mem_limit_hi << 32;
		}
	}

	base = (pci_bus_addr_t) base64;
	limit = (pci_bus_addr_t) limit64;

	/* pci_bus_addr_t may be 32-bit; bail out if the base was truncated */
	if (base != base64) {
		dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
			(unsigned long long) base64);
		return;
	}

	/* base > limit means the window is disabled */
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
		/* the range-type bit carried into flags marks a 64-bit window */
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;	/* limit names the last 1MB block */
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
456 
/*
 * Read all three bridge windows (I/O, memory, prefetchable memory) of
 * the bridge leading to @child and attach them as the child bus's
 * resources.  For transparent bridges, additionally forward the parent
 * bus's resources as subtractive-decode resources.
 */
void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	/* Point the child's window resources at the bridge's resources */
	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res && res->flags) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}
490 
491 static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
492 {
493 	struct pci_bus *b;
494 
495 	b = kzalloc(sizeof(*b), GFP_KERNEL);
496 	if (!b)
497 		return NULL;
498 
499 	INIT_LIST_HEAD(&b->node);
500 	INIT_LIST_HEAD(&b->children);
501 	INIT_LIST_HEAD(&b->devices);
502 	INIT_LIST_HEAD(&b->slots);
503 	INIT_LIST_HEAD(&b->resources);
504 	b->max_bus_speed = PCI_SPEED_UNKNOWN;
505 	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
506 #ifdef CONFIG_PCI_DOMAINS_GENERIC
507 	if (parent)
508 		b->domain_nr = parent->domain_nr;
509 #endif
510 	return b;
511 }
512 
/*
 * Release hook for devm-allocated host bridges: run only the driver's
 * release_fn; the devres core frees the bridge memory itself.
 */
static void devm_pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);
}
520 
/*
 * Release hook for bridges from pci_alloc_host_bridge(): run the
 * driver's release_fn, then free the bridge and its window list.
 */
static void pci_release_host_bridge_dev(struct device *dev)
{
	devm_pci_release_host_bridge_dev(dev);
	pci_free_host_bridge(to_pci_host_bridge(dev));
}
526 
/*
 * Allocate a host bridge with @priv bytes of driver-private data
 * appended.  Freed via the device release hook once registered, or by
 * pci_free_host_bridge() if registration never happens.
 */
struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->dev.release = pci_release_host_bridge_dev;

	return bridge;
}
EXPORT_SYMBOL(pci_alloc_host_bridge);
541 
/*
 * Managed variant of pci_alloc_host_bridge(): the allocation is owned
 * by @dev's devres list, so the release hook must not free it.
 */
struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
						   size_t priv)
{
	struct pci_host_bridge *bridge;

	bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->dev.release = devm_pci_release_host_bridge_dev;

	return bridge;
}
EXPORT_SYMBOL(devm_pci_alloc_host_bridge);
557 
/* Free a host bridge allocated by pci_alloc_host_bridge(). */
void pci_free_host_bridge(struct pci_host_bridge *bridge)
{
	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}
EXPORT_SYMBOL(pci_free_host_bridge);
565 
/* Bus speeds indexed by the PCI-X SSTATUS frequency field (see
 * pci_set_bus_speed()). */
static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};
584 
/* Link speeds indexed by the PCIe LNKCAP SLS / LNKSTA CLS field. */
const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};
603 
/* Cache the current link speed on @bus from a PCI_EXP_LNKSTA value. */
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
609 
/* AGP rate values indexed by the result of agp_speed() below. */
static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};
617 
618 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
619 {
620 	int index = 0;
621 
622 	if (agpstat & 4)
623 		index = 3;
624 	else if (agpstat & 2)
625 		index = 2;
626 	else if (agpstat & 1)
627 		index = 1;
628 	else
629 		goto out;
630 
631 	if (agp3) {
632 		index += 2;
633 		if (index == 5)
634 			index = 0;
635 	}
636 
637  out:
638 	return agp_speeds[index];
639 }
640 
/*
 * Determine max_bus_speed/cur_bus_speed for @bus from its upstream
 * bridge's capabilities: AGP, then PCI-X, then PCIe.
 */
static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		/* low 3 bits: supported rates; bit 3: AGP 3.0 mode */
		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		/* command register holds the currently enabled rate */
		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		/* current speed comes from the SSTATUS frequency field */
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}
698 
/*
 * Resolve the MSI irq_domain for a root bus, trying OF, then ACPI,
 * then a direct fwnode lookup.  Returns NULL if none is found.
 */
static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}
#endif

	return d;
}
727 
/*
 * Set @bus's MSI domain, inheriting from the nearest bridge up the
 * hierarchy that has one, or falling back to the host bridge.
 */
static void pci_set_bus_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;
	struct pci_bus *b;

	/*
	 * The bus can be a root bus, a subordinate bus, or a virtual bus
	 * created by an SR-IOV device.  Walk up to the first bridge device
	 * found or derive the domain from the host bridge.
	 */
	for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
		if (b->self)
			d = dev_get_msi_domain(&b->self->dev);
	}

	if (!d)
		d = pci_host_bridge_msi_domain(b);

	dev_set_msi_domain(&bus->dev, d);
}
748 
749 static int pci_register_host_bridge(struct pci_host_bridge *bridge)
750 {
751 	struct device *parent = bridge->dev.parent;
752 	struct resource_entry *window, *n;
753 	struct pci_bus *bus, *b;
754 	resource_size_t offset;
755 	LIST_HEAD(resources);
756 	struct resource *res;
757 	char addr[64], *fmt;
758 	const char *name;
759 	int err;
760 
761 	bus = pci_alloc_bus(NULL);
762 	if (!bus)
763 		return -ENOMEM;
764 
765 	bridge->bus = bus;
766 
767 	/* temporarily move resources off the list */
768 	list_splice_init(&bridge->windows, &resources);
769 	bus->sysdata = bridge->sysdata;
770 	bus->msi = bridge->msi;
771 	bus->ops = bridge->ops;
772 	bus->number = bus->busn_res.start = bridge->busnr;
773 #ifdef CONFIG_PCI_DOMAINS_GENERIC
774 	bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
775 #endif
776 
777 	b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
778 	if (b) {
779 		/* If we already got to this bus through a different bridge, ignore it */
780 		dev_dbg(&b->dev, "bus already known\n");
781 		err = -EEXIST;
782 		goto free;
783 	}
784 
785 	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
786 		     bridge->busnr);
787 
788 	err = pcibios_root_bridge_prepare(bridge);
789 	if (err)
790 		goto free;
791 
792 	err = device_register(&bridge->dev);
793 	if (err)
794 		put_device(&bridge->dev);
795 
796 	bus->bridge = get_device(&bridge->dev);
797 	device_enable_async_suspend(bus->bridge);
798 	pci_set_bus_of_node(bus);
799 	pci_set_bus_msi_domain(bus);
800 
801 	if (!parent)
802 		set_dev_node(bus->bridge, pcibus_to_node(bus));
803 
804 	bus->dev.class = &pcibus_class;
805 	bus->dev.parent = bus->bridge;
806 
807 	dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
808 	name = dev_name(&bus->dev);
809 
810 	err = device_register(&bus->dev);
811 	if (err)
812 		goto unregister;
813 
814 	pcibios_add_bus(bus);
815 
816 	/* Create legacy_io and legacy_mem files for this bus */
817 	pci_create_legacy_files(bus);
818 
819 	if (parent)
820 		dev_info(parent, "PCI host bridge to bus %s\n", name);
821 	else
822 		pr_info("PCI host bridge to bus %s\n", name);
823 
824 	/* Add initial resources to the bus */
825 	resource_list_for_each_entry_safe(window, n, &resources) {
826 		list_move_tail(&window->node, &bridge->windows);
827 		offset = window->offset;
828 		res = window->res;
829 
830 		if (res->flags & IORESOURCE_BUS)
831 			pci_bus_insert_busn_res(bus, bus->number, res->end);
832 		else
833 			pci_bus_add_resource(bus, res, 0);
834 
835 		if (offset) {
836 			if (resource_type(res) == IORESOURCE_IO)
837 				fmt = " (bus address [%#06llx-%#06llx])";
838 			else
839 				fmt = " (bus address [%#010llx-%#010llx])";
840 
841 			snprintf(addr, sizeof(addr), fmt,
842 				 (unsigned long long)(res->start - offset),
843 				 (unsigned long long)(res->end - offset));
844 		} else
845 			addr[0] = '\0';
846 
847 		dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
848 	}
849 
850 	down_write(&pci_bus_sem);
851 	list_add_tail(&bus->node, &pci_root_buses);
852 	up_write(&pci_bus_sem);
853 
854 	return 0;
855 
856 unregister:
857 	put_device(&bridge->dev);
858 	device_unregister(&bridge->dev);
859 
860 free:
861 	kfree(bus);
862 	return err;
863 }
864 
/*
 * Allocate and register a child bus under @parent with secondary bus
 * number @busnr.  @bridge is the bridge device leading to the child, or
 * NULL for a virtual bus (e.g. one created for SR-IOV).
 */
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	/* Subordinate starts at the maximum; narrowed later by the caller */
	child->busn_res.end = 0xff;

	if (!bridge) {
		/* Virtual bus: parent directly under the host bridge device */
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	pci_set_bus_msi_domain(child);
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	if (child->ops->add_bus) {
		ret = child->ops->add_bus(child);
		if (WARN_ON(ret < 0))
			dev_err(&child->dev, "failed to add bus: %d\n", ret);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}
935 
936 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
937 				int busnr)
938 {
939 	struct pci_bus *child;
940 
941 	child = pci_alloc_child_bus(parent, dev, busnr);
942 	if (child) {
943 		down_write(&pci_bus_sem);
944 		list_add_tail(&child->node, &parent->children);
945 		up_write(&pci_bus_sem);
946 	}
947 	return child;
948 }
949 EXPORT_SYMBOL(pci_add_new_bus);
950 
/*
 * Enable CRS Software Visibility on root port @pdev if the root
 * capability register advertises support for it.
 */
static void pci_enable_crs(struct pci_dev *pdev)
{
	u16 root_cap = 0;

	/* Enable CRS Software Visibility if supported */
	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_CRSSVE);
}
961 
962 /*
963  * If it's a bridge, configure it and scan the bus behind it.
964  * For CardBus bridges, we don't scan behind as the devices will
965  * be handled by the bridge driver itself.
966  *
967  * We need to process bridges in two passes -- first we scan those
968  * already configured by the BIOS and after we are done with all of
969  * them, we proceed to assigning numbers to the remaining buses in
970  * order to avoid overlaps between old and new bus numbers.
971  */
972 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
973 {
974 	struct pci_bus *child;
975 	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
976 	u32 buses, i, j = 0;
977 	u16 bctl;
978 	u8 primary, secondary, subordinate;
979 	int broken = 0;
980 
981 	/*
982 	 * Make sure the bridge is powered on to be able to access config
983 	 * space of devices below it.
984 	 */
985 	pm_runtime_get_sync(&dev->dev);
986 
987 	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
988 	primary = buses & 0xFF;
989 	secondary = (buses >> 8) & 0xFF;
990 	subordinate = (buses >> 16) & 0xFF;
991 
992 	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
993 		secondary, subordinate, pass);
994 
995 	if (!primary && (primary != bus->number) && secondary && subordinate) {
996 		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
997 		primary = bus->number;
998 	}
999 
1000 	/* Check if setup is sensible at all */
1001 	if (!pass &&
1002 	    (primary != bus->number || secondary <= bus->number ||
1003 	     secondary > subordinate)) {
1004 		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
1005 			 secondary, subordinate);
1006 		broken = 1;
1007 	}
1008 
1009 	/* Disable MasterAbortMode during probing to avoid reporting
1010 	   of bus errors (in some architectures) */
1011 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
1012 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
1013 			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
1014 
1015 	pci_enable_crs(dev);
1016 
1017 	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
1018 	    !is_cardbus && !broken) {
1019 		unsigned int cmax;
1020 		/*
1021 		 * Bus already configured by firmware, process it in the first
1022 		 * pass and just note the configuration.
1023 		 */
1024 		if (pass)
1025 			goto out;
1026 
1027 		/*
1028 		 * The bus might already exist for two reasons: Either we are
1029 		 * rescanning the bus or the bus is reachable through more than
1030 		 * one bridge. The second case can happen with the i450NX
1031 		 * chipset.
1032 		 */
1033 		child = pci_find_bus(pci_domain_nr(bus), secondary);
1034 		if (!child) {
1035 			child = pci_add_new_bus(bus, dev, secondary);
1036 			if (!child)
1037 				goto out;
1038 			child->primary = primary;
1039 			pci_bus_insert_busn_res(child, secondary, subordinate);
1040 			child->bridge_ctl = bctl;
1041 		}
1042 
1043 		cmax = pci_scan_child_bus(child);
1044 		if (cmax > subordinate)
1045 			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
1046 				 subordinate, cmax);
1047 		/* subordinate should equal child->busn_res.end */
1048 		if (subordinate > max)
1049 			max = subordinate;
1050 	} else {
1051 		/*
1052 		 * We need to assign a number to this bus which we always
1053 		 * do in the second pass.
1054 		 */
1055 		if (!pass) {
1056 			if (pcibios_assign_all_busses() || broken || is_cardbus)
1057 				/* Temporarily disable forwarding of the
1058 				   configuration cycles on all bridges in
1059 				   this bus segment to avoid possible
1060 				   conflicts in the second pass between two
1061 				   bridges programmed with overlapping
1062 				   bus ranges. */
1063 				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
1064 						       buses & ~0xffffff);
1065 			goto out;
1066 		}
1067 
1068 		/* Clear errors */
1069 		pci_write_config_word(dev, PCI_STATUS, 0xffff);
1070 
1071 		/* Prevent assigning a bus number that already exists.
1072 		 * This can happen when a bridge is hot-plugged, so in
1073 		 * this case we only re-scan this bus. */
1074 		child = pci_find_bus(pci_domain_nr(bus), max+1);
1075 		if (!child) {
1076 			child = pci_add_new_bus(bus, dev, max+1);
1077 			if (!child)
1078 				goto out;
1079 			pci_bus_insert_busn_res(child, max+1, 0xff);
1080 		}
1081 		max++;
1082 		buses = (buses & 0xff000000)
1083 		      | ((unsigned int)(child->primary)     <<  0)
1084 		      | ((unsigned int)(child->busn_res.start)   <<  8)
1085 		      | ((unsigned int)(child->busn_res.end) << 16);
1086 
1087 		/*
1088 		 * yenta.c forces a secondary latency timer of 176.
1089 		 * Copy that behaviour here.
1090 		 */
1091 		if (is_cardbus) {
1092 			buses &= ~0xff000000;
1093 			buses |= CARDBUS_LATENCY_TIMER << 24;
1094 		}
1095 
1096 		/*
1097 		 * We need to blast all three values with a single write.
1098 		 */
1099 		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
1100 
1101 		if (!is_cardbus) {
1102 			child->bridge_ctl = bctl;
1103 			max = pci_scan_child_bus(child);
1104 		} else {
1105 			/*
1106 			 * For CardBus bridges, we leave 4 bus numbers
1107 			 * as cards with a PCI-to-PCI bridge can be
1108 			 * inserted later.
1109 			 */
1110 			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
1111 				struct pci_bus *parent = bus;
1112 				if (pci_find_bus(pci_domain_nr(bus),
1113 							max+i+1))
1114 					break;
1115 				while (parent->parent) {
1116 					if ((!pcibios_assign_all_busses()) &&
1117 					    (parent->busn_res.end > max) &&
1118 					    (parent->busn_res.end <= max+i)) {
1119 						j = 1;
1120 					}
1121 					parent = parent->parent;
1122 				}
1123 				if (j) {
1124 					/*
1125 					 * Often, there are two cardbus bridges
1126 					 * -- try to leave one valid bus number
1127 					 * for each one.
1128 					 */
1129 					i /= 2;
1130 					break;
1131 				}
1132 			}
1133 			max += i;
1134 		}
1135 		/*
1136 		 * Set the subordinate bus number to its real value.
1137 		 */
1138 		pci_bus_update_busn_res_end(child, max);
1139 		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
1140 	}
1141 
1142 	sprintf(child->name,
1143 		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
1144 		pci_domain_nr(bus), child->number);
1145 
1146 	/* Has only triggered on CardBus, fixup is in yenta_socket */
1147 	while (bus->parent) {
1148 		if ((child->busn_res.end > bus->busn_res.end) ||
1149 		    (child->number > bus->busn_res.end) ||
1150 		    (child->number < bus->number) ||
1151 		    (child->busn_res.end < bus->number)) {
1152 			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
1153 				&child->busn_res,
1154 				(bus->number > child->busn_res.end &&
1155 				 bus->busn_res.end < child->number) ?
1156 					"wholly" : "partially",
1157 				bus->self->transparent ? " transparent" : "",
1158 				dev_name(&bus->dev),
1159 				&bus->busn_res);
1160 		}
1161 		bus = bus->parent;
1162 	}
1163 
1164 out:
1165 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
1166 
1167 	pm_runtime_put(&dev->dev);
1168 
1169 	return max;
1170 }
1171 EXPORT_SYMBOL(pci_scan_bridge);
1172 
1173 /*
1174  * Read interrupt line and base address registers.
1175  * The architecture-dependent code can tweak these, of course.
1176  */
1177 static void pci_read_irq(struct pci_dev *dev)
1178 {
1179 	unsigned char irq;
1180 
1181 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
1182 	dev->pin = irq;
1183 	if (irq)
1184 		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
1185 	dev->irq = irq;
1186 }
1187 
1188 void set_pcie_port_type(struct pci_dev *pdev)
1189 {
1190 	int pos;
1191 	u16 reg16;
1192 	int type;
1193 	struct pci_dev *parent;
1194 
1195 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1196 	if (!pos)
1197 		return;
1198 
1199 	pdev->pcie_cap = pos;
1200 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
1201 	pdev->pcie_flags_reg = reg16;
1202 	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
1203 	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
1204 
1205 	/*
1206 	 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
1207 	 * of a Link.  No PCIe component has two Links.  Two Links are
1208 	 * connected by a Switch that has a Port on each Link and internal
1209 	 * logic to connect the two Ports.
1210 	 */
1211 	type = pci_pcie_type(pdev);
1212 	if (type == PCI_EXP_TYPE_ROOT_PORT ||
1213 	    type == PCI_EXP_TYPE_PCIE_BRIDGE)
1214 		pdev->has_secondary_link = 1;
1215 	else if (type == PCI_EXP_TYPE_UPSTREAM ||
1216 		 type == PCI_EXP_TYPE_DOWNSTREAM) {
1217 		parent = pci_upstream_bridge(pdev);
1218 
1219 		/*
1220 		 * Usually there's an upstream device (Root Port or Switch
1221 		 * Downstream Port), but we can't assume one exists.
1222 		 */
1223 		if (parent && !parent->has_secondary_link)
1224 			pdev->has_secondary_link = 1;
1225 	}
1226 }
1227 
1228 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
1229 {
1230 	u32 reg32;
1231 
1232 	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
1233 	if (reg32 & PCI_EXP_SLTCAP_HPC)
1234 		pdev->is_hotplug_bridge = 1;
1235 }
1236 
1237 static void set_pcie_thunderbolt(struct pci_dev *dev)
1238 {
1239 	int vsec = 0;
1240 	u32 header;
1241 
1242 	while ((vsec = pci_find_next_ext_capability(dev, vsec,
1243 						    PCI_EXT_CAP_ID_VNDR))) {
1244 		pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
1245 
1246 		/* Is the device part of a Thunderbolt controller? */
1247 		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
1248 		    PCI_VNDR_HEADER_ID(header) == PCI_VSEC_ID_INTEL_TBT) {
1249 			dev->is_thunderbolt = 1;
1250 			return;
1251 		}
1252 	}
1253 }
1254 
1255 /**
1256  * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
1257  * @dev: PCI device
1258  *
1259  * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
1260  * when forwarding a type1 configuration request the bridge must check that
1261  * the extended register address field is zero.  The bridge is not permitted
1262  * to forward the transactions and must handle it as an Unsupported Request.
1263  * Some bridges do not follow this rule and simply drop the extended register
1264  * bits, resulting in the standard config space being aliased, every 256
1265  * bytes across the entire configuration space.  Test for this condition by
1266  * comparing the first dword of each potential alias to the vendor/device ID.
1267  * Known offenders:
1268  *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1269  *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
1270  */
1271 static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
1272 {
1273 #ifdef CONFIG_PCI_QUIRKS
1274 	int pos;
1275 	u32 header, tmp;
1276 
1277 	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
1278 
1279 	for (pos = PCI_CFG_SPACE_SIZE;
1280 	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
1281 		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
1282 		    || header != tmp)
1283 			return false;
1284 	}
1285 
1286 	return true;
1287 #else
1288 	return false;
1289 #endif
1290 }
1291 
1292 /**
1293  * pci_cfg_space_size - get the configuration space size of the PCI device.
1294  * @dev: PCI device
1295  *
1296  * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1297  * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
1298  * access it.  Maybe we don't have a way to generate extended config space
1299  * accesses, or the device is behind a reverse Express bridge.  So we try
1300  * reading the dword at 0x100 which must either be 0 or a valid extended
1301  * capability header.
1302  */
1303 static int pci_cfg_space_size_ext(struct pci_dev *dev)
1304 {
1305 	u32 status;
1306 	int pos = PCI_CFG_SPACE_SIZE;
1307 
1308 	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1309 		return PCI_CFG_SPACE_SIZE;
1310 	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
1311 		return PCI_CFG_SPACE_SIZE;
1312 
1313 	return PCI_CFG_SPACE_EXP_SIZE;
1314 }
1315 
1316 int pci_cfg_space_size(struct pci_dev *dev)
1317 {
1318 	int pos;
1319 	u32 status;
1320 	u16 class;
1321 
1322 	class = dev->class >> 8;
1323 	if (class == PCI_CLASS_BRIDGE_HOST)
1324 		return pci_cfg_space_size_ext(dev);
1325 
1326 	if (pci_is_pcie(dev))
1327 		return pci_cfg_space_size_ext(dev);
1328 
1329 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1330 	if (!pos)
1331 		return PCI_CFG_SPACE_SIZE;
1332 
1333 	pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1334 	if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
1335 		return pci_cfg_space_size_ext(dev);
1336 
1337 	return PCI_CFG_SPACE_SIZE;
1338 }
1339 
1340 #define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1341 
/* Cache the MSI/MSI-X capability offsets and force both mechanisms off. */
static void pci_msi_setup_pci_dev(struct pci_dev *dev)
{
	/*
	 * Disable the MSI hardware to avoid screaming interrupts
	 * during boot.  This is the power on reset default so
	 * usually this should be a noop.
	 */
	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (dev->msi_cap)
		pci_msi_set_enable(dev, 0);

	/* Same for MSI-X: clear the enable bit in its control word. */
	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (dev->msix_cap)
		pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
}
1357 
1358 /**
1359  * pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability
1360  * @dev: PCI device
1361  *
1362  * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev.  Check this
1363  * at enumeration-time to avoid modifying PCI_COMMAND at run-time.
1364  */
1365 static int pci_intx_mask_broken(struct pci_dev *dev)
1366 {
1367 	u16 orig, toggle, new;
1368 
1369 	pci_read_config_word(dev, PCI_COMMAND, &orig);
1370 	toggle = orig ^ PCI_COMMAND_INTX_DISABLE;
1371 	pci_write_config_word(dev, PCI_COMMAND, toggle);
1372 	pci_read_config_word(dev, PCI_COMMAND, &new);
1373 
1374 	pci_write_config_word(dev, PCI_COMMAND, orig);
1375 
1376 	/*
1377 	 * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
1378 	 * r2.3, so strictly speaking, a device is not *broken* if it's not
1379 	 * writable.  But we'll live with the misnomer for now.
1380 	 */
1381 	if (new != toggle)
1382 		return 1;
1383 	return 0;
1384 }
1385 
1386 /**
1387  * pci_setup_device - fill in class and map information of a device
1388  * @dev: the device structure to fill
1389  *
1390  * Initialize the device structure with information about the device's
1391  * vendor,class,memory and IO-space addresses,IRQ lines etc.
1392  * Called at initialisation of the PCI subsystem and by CardBus services.
1393  * Returns 0 on success and negative if unknown type of device (not normal,
1394  * bridge or CardBus).
1395  */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u16 cmd;
	u8 hdr_type;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	/* If even the header type is unreadable, give up on the device. */
	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	/* Low 7 bits select the header layout; bit 7 flags multi-function. */
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	pci_dev_assign_slot(dev);
	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	/* One dword carries the revision (byte 0) and class code (3 bytes). */
	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		    /* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* need to have dev->cfg_size ready */
	set_pcie_thunderbolt(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	/* Devices with bogus BARs must not decode, or they may conflict. */
	if (dev->non_compliant_bars) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
			cmd &= ~PCI_COMMAND_IO;
			cmd &= ~PCI_COMMAND_MEMORY;
			pci_write_config_word(dev, PCI_COMMAND, cmd);
		}
	}

	dev->broken_intx_masking = pci_intx_mask_broken(dev);

	/* Header type decides which registers (BARs, subsystem IDs) exist. */
	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code. Legacy mode ATA controllers have fixed
		 * addresses. These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			/* Primary channel in legacy (compatibility) mode */
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
					 res);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
					 res);
			}
			/* Secondary channel in legacy (compatibility) mode */
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
					 res);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
					 res);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		/* Bridges expose subsystem IDs via the SSVID capability. */
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
			dev->hdr_type);
		return -EIO;

	bad:
		/* Class/header mismatch: keep the device but neuter its class. */
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
			dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED << 8;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}
1551 
1552 static void pci_configure_mps(struct pci_dev *dev)
1553 {
1554 	struct pci_dev *bridge = pci_upstream_bridge(dev);
1555 	int mps, p_mps, rc;
1556 
1557 	if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
1558 		return;
1559 
1560 	mps = pcie_get_mps(dev);
1561 	p_mps = pcie_get_mps(bridge);
1562 
1563 	if (mps == p_mps)
1564 		return;
1565 
1566 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
1567 		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1568 			 mps, pci_name(bridge), p_mps);
1569 		return;
1570 	}
1571 
1572 	/*
1573 	 * Fancier MPS configuration is done later by
1574 	 * pcie_bus_configure_settings()
1575 	 */
1576 	if (pcie_bus_config != PCIE_BUS_DEFAULT)
1577 		return;
1578 
1579 	rc = pcie_set_mps(dev, p_mps);
1580 	if (rc) {
1581 		dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1582 			 p_mps);
1583 		return;
1584 	}
1585 
1586 	dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
1587 		 p_mps, mps, 128 << dev->pcie_mpss);
1588 }
1589 
/*
 * Fallback Type 0 hot-plug parameters applied by program_hpp_type0()
 * when the platform supplies none (or an unsupported revision):
 * SERR#/PERR# reporting stays disabled.
 */
static struct hpp_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,		/* written to PCI_CACHE_LINE_SIZE */
	.latency_timer = 0x40,		/* written to PCI_LATENCY_TIMER */
	.enable_serr = 0,
	.enable_perr = 0,
};
1597 
/*
 * Apply Type 0 (conventional PCI) hot-plug parameters to @dev, falling
 * back to pci_default_type0 when @hpp is NULL or its revision is newer
 * than we understand.
 */
static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
{
	u16 pci_cmd, pci_bctl;

	if (!hpp)
		hpp = &pci_default_type0;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev,
			 "PCI settings rev %d not supported; using defaults\n",
			 hpp->revision);
		hpp = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
	/* Only set SERR/PERR bits; never clear ones already enabled. */
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpp->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpp->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpp->latency_timer);
		/* Mirror SERR/PERR enables onto the secondary side. */
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpp->enable_serr)
			pci_bctl |= PCI_BRIDGE_CTL_SERR;
		if (hpp->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}
1633 
1634 static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1635 {
1636 	int pos;
1637 
1638 	if (!hpp)
1639 		return;
1640 
1641 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1642 	if (!pos)
1643 		return;
1644 
1645 	dev_warn(&dev->dev, "PCI-X settings not supported\n");
1646 }
1647 
1648 static bool pcie_root_rcb_set(struct pci_dev *dev)
1649 {
1650 	struct pci_dev *rp = pcie_find_root_port(dev);
1651 	u16 lnkctl;
1652 
1653 	if (!rp)
1654 		return false;
1655 
1656 	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
1657 	if (lnkctl & PCI_EXP_LNKCTL_RCB)
1658 		return true;
1659 
1660 	return false;
1661 }
1662 
/*
 * Apply Type 2 (PCI Express) hot-plug parameters to @dev: Device
 * Control, Link Control, and — when the device has AER — the error
 * mask/severity/capability registers.
 */
static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
	int pos;
	u32 reg32;

	if (!hpp)
		return;

	if (!pci_is_pcie(dev))
		return;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
			 hpp->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ;
	hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {

		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
	/* Don't enable ECRC generation or checking if unsupported */
	if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
		reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
	if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
		reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}
1747 
1748 int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
1749 {
1750 	struct pci_host_bridge *host;
1751 	u32 cap;
1752 	u16 ctl;
1753 	int ret;
1754 
1755 	if (!pci_is_pcie(dev))
1756 		return 0;
1757 
1758 	ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
1759 	if (ret)
1760 		return 0;
1761 
1762 	if (!(cap & PCI_EXP_DEVCAP_EXT_TAG))
1763 		return 0;
1764 
1765 	ret = pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
1766 	if (ret)
1767 		return 0;
1768 
1769 	host = pci_find_host_bridge(dev->bus);
1770 	if (!host)
1771 		return 0;
1772 
1773 	/*
1774 	 * If some device in the hierarchy doesn't handle Extended Tags
1775 	 * correctly, make sure they're disabled.
1776 	 */
1777 	if (host->no_ext_tags) {
1778 		if (ctl & PCI_EXP_DEVCTL_EXT_TAG) {
1779 			dev_info(&dev->dev, "disabling Extended Tags\n");
1780 			pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
1781 						   PCI_EXP_DEVCTL_EXT_TAG);
1782 		}
1783 		return 0;
1784 	}
1785 
1786 	if (!(ctl & PCI_EXP_DEVCTL_EXT_TAG)) {
1787 		dev_info(&dev->dev, "enabling Extended Tags\n");
1788 		pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
1789 					 PCI_EXP_DEVCTL_EXT_TAG);
1790 	}
1791 	return 0;
1792 }
1793 
1794 /**
1795  * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
1796  * @dev: PCI device to query
1797  *
1798  * Returns true if the device has enabled relaxed ordering attribute.
1799  */
1800 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
1801 {
1802 	u16 v;
1803 
1804 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v);
1805 
1806 	return !!(v & PCI_EXP_DEVCTL_RELAX_EN);
1807 }
1808 EXPORT_SYMBOL(pcie_relaxed_ordering_enabled);
1809 
1810 static void pci_configure_relaxed_ordering(struct pci_dev *dev)
1811 {
1812 	struct pci_dev *root;
1813 
1814 	/* PCI_EXP_DEVICE_RELAX_EN is RsvdP in VFs */
1815 	if (dev->is_virtfn)
1816 		return;
1817 
1818 	if (!pcie_relaxed_ordering_enabled(dev))
1819 		return;
1820 
1821 	/*
1822 	 * For now, we only deal with Relaxed Ordering issues with Root
1823 	 * Ports. Peer-to-Peer DMA is another can of worms.
1824 	 */
1825 	root = pci_find_pcie_root_port(dev);
1826 	if (!root)
1827 		return;
1828 
1829 	if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
1830 		pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
1831 					   PCI_EXP_DEVCTL_RELAX_EN);
1832 		dev_info(&dev->dev, "Disable Relaxed Ordering because the Root Port didn't support it\n");
1833 	}
1834 }
1835 
1836 static void pci_configure_device(struct pci_dev *dev)
1837 {
1838 	struct hotplug_params hpp;
1839 	int ret;
1840 
1841 	pci_configure_mps(dev);
1842 	pci_configure_extended_tags(dev, NULL);
1843 	pci_configure_relaxed_ordering(dev);
1844 
1845 	memset(&hpp, 0, sizeof(hpp));
1846 	ret = pci_get_hp_params(dev, &hpp);
1847 	if (ret)
1848 		return;
1849 
1850 	program_hpp_type2(dev, hpp.t2);
1851 	program_hpp_type1(dev, hpp.t1);
1852 	program_hpp_type0(dev, hpp.t0);
1853 }
1854 
/*
 * Release capability-related state attached to @dev during enumeration:
 * VPD, SR-IOV, and the saved-capability buffers.
 */
static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}
1861 
1862 /**
1863  * pci_release_dev - free a pci device structure when all users of it are finished.
1864  * @dev: device that's been disconnected
1865  *
1866  * Will be called only by the device core when all users of this pci device are
1867  * done.
1868  */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);
	pcibios_release_device(pci_dev);
	/* Drop the bus reference taken in pci_alloc_dev(). */
	pci_bus_put(pci_dev->bus);
	kfree(pci_dev->driver_override);
	kfree(pci_dev->dma_alias_mask);
	/* Free the pci_dev itself last; nothing may touch it afterwards. */
	kfree(pci_dev);
}
1882 
1883 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1884 {
1885 	struct pci_dev *dev;
1886 
1887 	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1888 	if (!dev)
1889 		return NULL;
1890 
1891 	INIT_LIST_HEAD(&dev->bus_list);
1892 	dev->dev.type = &pci_dev_type;
1893 	dev->bus = pci_bus_get(bus);
1894 
1895 	return dev;
1896 }
1897 EXPORT_SYMBOL(pci_alloc_dev);
1898 
1899 static bool pci_bus_crs_vendor_id(u32 l)
1900 {
1901 	return (l & 0xffff) == 0x0001;
1902 }
1903 
/*
 * Poll a function that answered its Vendor ID read with a CRS completion
 * until it produces a real Vendor ID or roughly @timeout ms have elapsed.
 * On success *l holds the valid Vendor ID dword and true is returned.
 */
static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
			     int timeout)
{
	int delay = 1;

	if (!pci_bus_crs_vendor_id(*l))
		return true;	/* not a CRS completion */

	if (!timeout)
		return false;	/* CRS, but caller doesn't want to wait */

	/*
	 * We got the reserved Vendor ID that indicates a completion with
	 * Configuration Request Retry Status (CRS).  Retry until we get a
	 * valid Vendor ID or we time out.
	 */
	while (pci_bus_crs_vendor_id(*l)) {
		if (delay > timeout) {
			pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
				pci_domain_nr(bus), bus->number,
				PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);

			return false;
		}
		/* After a full second of waiting, start logging progress. */
		if (delay >= 1000)
			pr_info("pci %04x:%02x:%02x.%d: not ready after %dms; waiting\n",
				pci_domain_nr(bus), bus->number,
				PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);

		/* Exponential backoff: total time waited so far is delay-1 ms. */
		msleep(delay);
		delay *= 2;

		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
	}

	if (delay >= 1000)
		pr_info("pci %04x:%02x:%02x.%d: ready after %dms\n",
			pci_domain_nr(bus), bus->number,
			PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);

	return true;
}
1947 
1948 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1949 				int timeout)
1950 {
1951 	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1952 		return false;
1953 
1954 	/* some broken boards return 0 or ~0 if a slot is empty: */
1955 	if (*l == 0xffffffff || *l == 0x00000000 ||
1956 	    *l == 0x0000ffff || *l == 0xffff0000)
1957 		return false;
1958 
1959 	if (pci_bus_crs_vendor_id(*l))
1960 		return pci_bus_wait_crs(bus, devfn, l, timeout);
1961 
1962 	return true;
1963 }
1964 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1965 
1966 /*
1967  * Read the config data for a PCI device, sanity-check it
1968  * and fill in the dev structure...
1969  */
1970 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1971 {
1972 	struct pci_dev *dev;
1973 	u32 l;
1974 
1975 	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1976 		return NULL;
1977 
1978 	dev = pci_alloc_dev(bus);
1979 	if (!dev)
1980 		return NULL;
1981 
1982 	dev->devfn = devfn;
1983 	dev->vendor = l & 0xffff;
1984 	dev->device = (l >> 16) & 0xffff;
1985 
1986 	pci_set_of_node(dev);
1987 
1988 	if (pci_setup_device(dev)) {
1989 		pci_bus_put(dev->bus);
1990 		kfree(dev);
1991 		return NULL;
1992 	}
1993 
1994 	return dev;
1995 }
1996 
/*
 * Discover and initialize the capabilities of @dev.  MSI and MSI-X are
 * explicitly disabled here (see pci_msi_setup_pci_dev()).
 */
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* Enhanced Allocation */
	pci_ea_init(dev);

	/* Setup MSI caps & disable MSI/MSI-X interrupts */
	pci_msi_setup_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Address Translation Services */
	pci_ats_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);

	/* Precision Time Measurement */
	pci_ptm_init(dev);

	/* Advanced Error Reporting */
	pci_aer_init(dev);
}
2032 
2033 /*
2034  * This is the equivalent of pci_host_bridge_msi_domain that acts on
2035  * devices. Firmware interfaces that can select the MSI domain on a
2036  * per-device basis should be called from here.
2037  */
2038 static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
2039 {
2040 	struct irq_domain *d;
2041 
2042 	/*
2043 	 * If a domain has been set through the pcibios_add_device
2044 	 * callback, then this is the one (platform code knows best).
2045 	 */
2046 	d = dev_get_msi_domain(&dev->dev);
2047 	if (d)
2048 		return d;
2049 
2050 	/*
2051 	 * Let's see if we have a firmware interface able to provide
2052 	 * the domain.
2053 	 */
2054 	d = pci_msi_get_device_domain(dev);
2055 	if (d)
2056 		return d;
2057 
2058 	return NULL;
2059 }
2060 
2061 static void pci_set_msi_domain(struct pci_dev *dev)
2062 {
2063 	struct irq_domain *d;
2064 
2065 	/*
2066 	 * If the platform or firmware interfaces cannot supply a
2067 	 * device-specific MSI domain, then inherit the default domain
2068 	 * from the host bridge itself.
2069 	 */
2070 	d = pci_dev_msi_domain(dev);
2071 	if (!d)
2072 		d = dev_get_msi_domain(&dev->bus->dev);
2073 
2074 	dev_set_msi_domain(&dev->dev, d);
2075 }
2076 
/*
 * Finish configuring @dev and register it with the driver core on @bus.
 * match_driver is left false just before device_add(), so a driver is
 * not bound here; binding happens later.
 */
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	pci_configure_device(dev);

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	/* DMA setup: 32-bit default masks, 64 KiB max segment. */
	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Setup MSI irq domain */
	pci_set_msi_domain(dev);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);
}
2125 
2126 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
2127 {
2128 	struct pci_dev *dev;
2129 
2130 	dev = pci_get_slot(bus, devfn);
2131 	if (dev) {
2132 		pci_dev_put(dev);
2133 		return dev;
2134 	}
2135 
2136 	dev = pci_scan_device(bus, devfn);
2137 	if (!dev)
2138 		return NULL;
2139 
2140 	pci_device_add(dev, bus);
2141 
2142 	return dev;
2143 }
2144 EXPORT_SYMBOL(pci_scan_single_device);
2145 
2146 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
2147 {
2148 	int pos;
2149 	u16 cap = 0;
2150 	unsigned next_fn;
2151 
2152 	if (pci_ari_enabled(bus)) {
2153 		if (!dev)
2154 			return 0;
2155 		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
2156 		if (!pos)
2157 			return 0;
2158 
2159 		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
2160 		next_fn = PCI_ARI_CAP_NFN(cap);
2161 		if (next_fn <= fn)
2162 			return 0;	/* protect against malformed list */
2163 
2164 		return next_fn;
2165 	}
2166 
2167 	/* dev may be NULL for non-contiguous multifunction devices */
2168 	if (!dev || dev->multifunction)
2169 		return (fn + 1) % 8;
2170 
2171 	return 0;
2172 }
2173 
2174 static int only_one_child(struct pci_bus *bus)
2175 {
2176 	struct pci_dev *parent = bus->self;
2177 
2178 	if (!parent || !pci_is_pcie(parent))
2179 		return 0;
2180 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
2181 		return 1;
2182 
2183 	/*
2184 	 * PCIe downstream ports are bridges that normally lead to only a
2185 	 * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all
2186 	 * possible devices, not just device 0.  See PCIe spec r3.0,
2187 	 * sec 7.3.1.
2188 	 */
2189 	if (parent->has_secondary_link &&
2190 	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
2191 		return 1;
2192 	return 0;
2193 }
2194 
2195 /**
2196  * pci_scan_slot - scan a PCI slot on a bus for devices.
2197  * @bus: PCI bus to scan
2198  * @devfn: slot number to scan (must have zero function.)
2199  *
2200  * Scan a PCI slot on the specified PCI bus for devices, adding
2201  * discovered devices to the @bus->devices list.  New devices
2202  * will not have is_added set.
2203  *
2204  * Returns the number of new devices found.
2205  */
2206 int pci_scan_slot(struct pci_bus *bus, int devfn)
2207 {
2208 	unsigned fn, nr = 0;
2209 	struct pci_dev *dev;
2210 
2211 	if (only_one_child(bus) && (devfn > 0))
2212 		return 0; /* Already scanned the entire slot */
2213 
2214 	dev = pci_scan_single_device(bus, devfn);
2215 	if (!dev)
2216 		return 0;
2217 	if (!dev->is_added)
2218 		nr++;
2219 
2220 	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
2221 		dev = pci_scan_single_device(bus, devfn + fn);
2222 		if (dev) {
2223 			if (!dev->is_added)
2224 				nr++;
2225 			dev->multifunction = 1;
2226 		}
2227 	}
2228 
2229 	/* only one slot has pcie device */
2230 	if (bus->self && nr)
2231 		pcie_aspm_init_link_state(bus->self);
2232 
2233 	return nr;
2234 }
2235 EXPORT_SYMBOL(pci_scan_slot);
2236 
2237 static int pcie_find_smpss(struct pci_dev *dev, void *data)
2238 {
2239 	u8 *smpss = data;
2240 
2241 	if (!pci_is_pcie(dev))
2242 		return 0;
2243 
2244 	/*
2245 	 * We don't have a way to change MPS settings on devices that have
2246 	 * drivers attached.  A hot-added device might support only the minimum
2247 	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
2248 	 * where devices may be hot-added, we limit the fabric MPS to 128 so
2249 	 * hot-added devices will work correctly.
2250 	 *
2251 	 * However, if we hot-add a device to a slot directly below a Root
2252 	 * Port, it's impossible for there to be other existing devices below
2253 	 * the port.  We don't limit the MPS in this case because we can
2254 	 * reconfigure MPS on both the Root Port and the hot-added device,
2255 	 * and there are no other devices involved.
2256 	 *
2257 	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
2258 	 */
2259 	if (dev->is_hotplug_bridge &&
2260 	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
2261 		*smpss = 0;
2262 
2263 	if (*smpss > dev->pcie_mpss)
2264 		*smpss = dev->pcie_mpss;
2265 
2266 	return 0;
2267 }
2268 
2269 static void pcie_write_mps(struct pci_dev *dev, int mps)
2270 {
2271 	int rc;
2272 
2273 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
2274 		mps = 128 << dev->pcie_mpss;
2275 
2276 		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
2277 		    dev->bus->self)
2278 			/* For "Performance", the assumption is made that
2279 			 * downstream communication will never be larger than
2280 			 * the MRRS.  So, the MPS only needs to be configured
2281 			 * for the upstream communication.  This being the case,
2282 			 * walk from the top down and set the MPS of the child
2283 			 * to that of the parent bus.
2284 			 *
2285 			 * Configure the device MPS with the smaller of the
2286 			 * device MPSS or the bridge MPS (which is assumed to be
2287 			 * properly configured at this point to the largest
2288 			 * allowable MPS based on its parent bus).
2289 			 */
2290 			mps = min(mps, pcie_get_mps(dev->bus->self));
2291 	}
2292 
2293 	rc = pcie_set_mps(dev, mps);
2294 	if (rc)
2295 		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
2296 }
2297 
2298 static void pcie_write_mrrs(struct pci_dev *dev)
2299 {
2300 	int rc, mrrs;
2301 
2302 	/* In the "safe" case, do not configure the MRRS.  There appear to be
2303 	 * issues with setting MRRS to 0 on a number of devices.
2304 	 */
2305 	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
2306 		return;
2307 
2308 	/* For Max performance, the MRRS must be set to the largest supported
2309 	 * value.  However, it cannot be configured larger than the MPS the
2310 	 * device or the bus can support.  This should already be properly
2311 	 * configured by a prior call to pcie_write_mps.
2312 	 */
2313 	mrrs = pcie_get_mps(dev);
2314 
2315 	/* MRRS is a R/W register.  Invalid values can be written, but a
2316 	 * subsequent read will verify if the value is acceptable or not.
2317 	 * If the MRRS value provided is not acceptable (e.g., too large),
2318 	 * shrink the value until it is acceptable to the HW.
2319 	 */
2320 	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
2321 		rc = pcie_set_readrq(dev, mrrs);
2322 		if (!rc)
2323 			break;
2324 
2325 		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
2326 		mrrs /= 2;
2327 	}
2328 
2329 	if (mrrs < 128)
2330 		dev_err(&dev->dev, "MRRS was unable to be configured with a safe value.  If problems are experienced, try running with pci=pcie_bus_safe\n");
2331 }
2332 
2333 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
2334 {
2335 	int mps, orig_mps;
2336 
2337 	if (!pci_is_pcie(dev))
2338 		return 0;
2339 
2340 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
2341 	    pcie_bus_config == PCIE_BUS_DEFAULT)
2342 		return 0;
2343 
2344 	mps = 128 << *(u8 *)data;
2345 	orig_mps = pcie_get_mps(dev);
2346 
2347 	pcie_write_mps(dev, mps);
2348 	pcie_write_mrrs(dev);
2349 
2350 	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
2351 		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
2352 		 orig_mps, pcie_get_readrq(dev));
2353 
2354 	return 0;
2355 }
2356 
2357 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
2358  * parents then children fashion.  If this changes, then this code will not
2359  * work as designed.
2360  */
2361 void pcie_bus_configure_settings(struct pci_bus *bus)
2362 {
2363 	u8 smpss = 0;
2364 
2365 	if (!bus->self)
2366 		return;
2367 
2368 	if (!pci_is_pcie(bus->self))
2369 		return;
2370 
2371 	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
2372 	 * to be aware of the MPS of the destination.  To work around this,
2373 	 * simply force the MPS of the entire system to the smallest possible.
2374 	 */
2375 	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
2376 		smpss = 0;
2377 
2378 	if (pcie_bus_config == PCIE_BUS_SAFE) {
2379 		smpss = bus->self->pcie_mpss;
2380 
2381 		pcie_find_smpss(bus->self, &smpss);
2382 		pci_walk_bus(bus, pcie_find_smpss, &smpss);
2383 	}
2384 
2385 	pcie_bus_configure_set(bus->self, &smpss);
2386 	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
2387 }
2388 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
2389 
2390 /*
2391  * Called after each bus is probed, but before its children are examined.  This
2392  * is marked as __weak because multiple architectures define it.
2393  */
2394 void __weak pcibios_fixup_bus(struct pci_bus *bus)
2395 {
2396        /* nothing to do, expected to be removed in the future */
2397 }
2398 
/**
 * pci_scan_child_bus - scan devices below a PCI bus
 * @bus: bus to scan
 *
 * Scans every slot on @bus, reserves bus numbers for SR-IOV, runs the
 * arch-specific bus fixups once, then recurses into bridges in two
 * passes via pci_scan_bridge().
 *
 * Returns the highest subordinate bus number discovered.
 */
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	/* Two passes over the bridges on this bus */
	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (pci_is_bridge(dev))
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * Make sure a hotplug bridge has at least the minimum requested
	 * number of buses.
	 */
	if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
		if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
			max = bus->busn_res.start + pci_hotplug_bus_size - 1;
	}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
2449 
2450 /**
2451  * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
2452  * @bridge: Host bridge to set up.
2453  *
2454  * Default empty implementation.  Replace with an architecture-specific setup
2455  * routine, if necessary.
2456  */
2457 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
2458 {
2459 	return 0;
2460 }
2461 
/* Default empty implementation; architectures may override to hook bus addition. */
void __weak pcibios_add_bus(struct pci_bus *bus)
{
}
2465 
/* Default empty implementation; architectures may override to hook bus removal. */
void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}
2469 
2470 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
2471 		struct pci_ops *ops, void *sysdata, struct list_head *resources)
2472 {
2473 	int error;
2474 	struct pci_host_bridge *bridge;
2475 
2476 	bridge = pci_alloc_host_bridge(0);
2477 	if (!bridge)
2478 		return NULL;
2479 
2480 	bridge->dev.parent = parent;
2481 
2482 	list_splice_init(resources, &bridge->windows);
2483 	bridge->sysdata = sysdata;
2484 	bridge->busnr = bus;
2485 	bridge->ops = ops;
2486 
2487 	error = pci_register_host_bridge(bridge);
2488 	if (error < 0)
2489 		goto err_out;
2490 
2491 	return bridge->bus;
2492 
2493 err_out:
2494 	kfree(bridge);
2495 	return NULL;
2496 }
2497 EXPORT_SYMBOL_GPL(pci_create_root_bus);
2498 
/*
 * pci_bus_insert_busn_res - claim the bus-number range [bus, bus_max] for @b
 *
 * Registers @b's busn_res under the parent bridge's bus-number aperture,
 * or, for a root bus, under the per-domain bus-number resource (in which
 * case the range is also marked IORESOURCE_PCI_FIXED).
 *
 * Returns 1 if the range was inserted, 0 on conflict.
 */
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource *parent_res, *conflict;

	res->start = bus;
	res->end = bus_max;
	res->flags = IORESOURCE_BUS;

	if (!pci_is_root_bus(b))
		parent_res = &b->parent->busn_res;
	else {
		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
		res->flags |= IORESOURCE_PCI_FIXED;
	}

	conflict = request_resource_conflict(parent_res, res);

	if (conflict)
		dev_printk(KERN_DEBUG, &b->dev,
			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
			    res, pci_is_root_bus(b) ? "domain " : "",
			    parent_res, conflict->name, conflict);

	return conflict == NULL;
}
2525 
/*
 * pci_bus_update_busn_res_end - move the end of @b's bus-number range to @bus_max
 *
 * Resizes the existing busn_res in place via adjust_resource().  If the
 * resource was not yet inserted into the resource tree and the adjustment
 * succeeded, insert it now.
 *
 * Returns 0 on success; -EINVAL if @bus_max is below the current start,
 * or a nonzero value from adjust_resource() on failure.
 */
int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource old_res = *res;
	resource_size_t size;
	int ret;

	if (res->start > bus_max)
		return -EINVAL;

	size = bus_max - res->start + 1;
	ret = adjust_resource(res, res->start, size);
	dev_printk(KERN_DEBUG, &b->dev,
			"busn_res: %pR end %s updated to %02x\n",
			&old_res, ret ? "can not be" : "is", bus_max);

	/* Not yet in the tree (e.g. default range never inserted): add it */
	if (!ret && !res->parent)
		pci_bus_insert_busn_res(b, res->start, res->end);

	return ret;
}
2547 
2548 void pci_bus_release_busn_res(struct pci_bus *b)
2549 {
2550 	struct resource *res = &b->busn_res;
2551 	int ret;
2552 
2553 	if (!res->flags || !res->parent)
2554 		return;
2555 
2556 	ret = release_resource(res);
2557 	dev_printk(KERN_DEBUG, &b->dev,
2558 			"busn_res: %pR %s released\n",
2559 			res, ret ? "can not be" : "is");
2560 }
2561 
/**
 * pci_scan_root_bus_bridge - register a host bridge and scan below it
 * @bridge: host bridge describing the root bus
 *
 * Registers @bridge and scans for devices below its root bus.  If the
 * bridge's window list supplies no IORESOURCE_BUS resource, a default
 * [bus xx-ff] range is inserted and afterwards trimmed to the highest
 * bus number actually found.
 *
 * Return: 0 on success, -EINVAL if @bridge is NULL, or the error from
 * pci_register_host_bridge().
 */
int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge)
{
	struct resource_entry *window;
	bool found = false;
	struct pci_bus *b;
	int max, bus, ret;

	if (!bridge)
		return -EINVAL;

	/* Did the caller provide a bus-number aperture? */
	resource_list_for_each_entry(window, &bridge->windows)
		if (window->res->flags & IORESOURCE_BUS) {
			found = true;
			break;
		}

	ret = pci_register_host_bridge(bridge);
	if (ret < 0)
		return ret;

	b = bridge->bus;
	bus = bridge->busnr;

	if (!found) {
		dev_info(&b->dev,
		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
			bus);
		pci_bus_insert_busn_res(b, bus, 255);
	}

	max = pci_scan_child_bus(b);

	/* Shrink the default range to what the scan actually used */
	if (!found)
		pci_bus_update_busn_res_end(b, max);

	return 0;
}
EXPORT_SYMBOL(pci_scan_root_bus_bridge);
2600 
/**
 * pci_scan_root_bus - create a root bus and scan below it
 * @parent: parent device of the host bridge (may be NULL)
 * @bus: root bus number
 * @ops: config space access operations
 * @sysdata: host-specific data for the bus
 * @resources: bridge windows; consumed by pci_create_root_bus()
 *
 * Like pci_scan_root_bus_bridge() but creates the root bus from the
 * individual pieces.  If @resources contains no IORESOURCE_BUS entry, a
 * default [bus xx-ff] range is inserted and afterwards trimmed to the
 * highest bus number actually found.
 *
 * Return: the new root bus, or NULL on failure.
 */
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	struct resource_entry *window;
	bool found = false;
	struct pci_bus *b;
	int max;

	/* Did the caller provide a bus-number aperture? */
	resource_list_for_each_entry(window, resources)
		if (window->res->flags & IORESOURCE_BUS) {
			found = true;
			break;
		}

	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
	if (!b)
		return NULL;

	if (!found) {
		dev_info(&b->dev,
		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
			bus);
		pci_bus_insert_busn_res(b, bus, 255);
	}

	max = pci_scan_child_bus(b);

	/* Shrink the default range to what the scan actually used */
	if (!found)
		pci_bus_update_busn_res_end(b, max);

	return b;
}
EXPORT_SYMBOL(pci_scan_root_bus);
2634 
2635 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
2636 					void *sysdata)
2637 {
2638 	LIST_HEAD(resources);
2639 	struct pci_bus *b;
2640 
2641 	pci_add_resource(&resources, &ioport_resource);
2642 	pci_add_resource(&resources, &iomem_resource);
2643 	pci_add_resource(&resources, &busn_resource);
2644 	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2645 	if (b) {
2646 		pci_scan_child_bus(b);
2647 	} else {
2648 		pci_free_resource_list(&resources);
2649 	}
2650 	return b;
2651 }
2652 EXPORT_SYMBOL(pci_scan_bus);
2653 
2654 /**
2655  * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2656  * @bridge: PCI bridge for the bus to scan
2657  *
2658  * Scan a PCI bus and child buses for new devices, add them,
2659  * and enable them, resizing bridge mmio/io resource if necessary
2660  * and possible.  The caller must ensure the child devices are already
2661  * removed for resizing to occur.
2662  *
2663  * Returns the max number of subordinate bus discovered.
2664  */
2665 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2666 {
2667 	unsigned int max;
2668 	struct pci_bus *bus = bridge->subordinate;
2669 
2670 	max = pci_scan_child_bus(bus);
2671 
2672 	pci_assign_unassigned_bridge_resources(bridge);
2673 
2674 	pci_bus_add_devices(bus);
2675 
2676 	return max;
2677 }
2678 
2679 /**
2680  * pci_rescan_bus - scan a PCI bus for devices.
2681  * @bus: PCI bus to scan
2682  *
2683  * Scan a PCI bus and child buses for new devices, adds them,
2684  * and enables them.
2685  *
2686  * Returns the max number of subordinate bus discovered.
2687  */
2688 unsigned int pci_rescan_bus(struct pci_bus *bus)
2689 {
2690 	unsigned int max;
2691 
2692 	max = pci_scan_child_bus(bus);
2693 	pci_assign_unassigned_bus_resources(bus);
2694 	pci_bus_add_devices(bus);
2695 
2696 	return max;
2697 }
2698 EXPORT_SYMBOL_GPL(pci_rescan_bus);
2699 
2700 /*
2701  * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2702  * routines should always be executed under this mutex.
2703  */
2704 static DEFINE_MUTEX(pci_rescan_remove_lock);
2705 
/* Acquire the global lock serializing PCI rescan and removal operations. */
void pci_lock_rescan_remove(void)
{
	mutex_lock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2711 
/* Release the global lock serializing PCI rescan and removal operations. */
void pci_unlock_rescan_remove(void)
{
	mutex_unlock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
2717 
2718 static int __init pci_sort_bf_cmp(const struct device *d_a,
2719 				  const struct device *d_b)
2720 {
2721 	const struct pci_dev *a = to_pci_dev(d_a);
2722 	const struct pci_dev *b = to_pci_dev(d_b);
2723 
2724 	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2725 	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
2726 
2727 	if      (a->bus->number < b->bus->number) return -1;
2728 	else if (a->bus->number > b->bus->number) return  1;
2729 
2730 	if      (a->devfn < b->devfn) return -1;
2731 	else if (a->devfn > b->devfn) return  1;
2732 
2733 	return 0;
2734 }
2735 
/* Re-sort all PCI devices in breadth-first (domain, bus, devfn) order. */
void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}
2740