/*
 *	drivers/pci/setup-bus.c
 *
 * Extruded from code written by
 *      Dave Rusling (david.rusling@reo.mts.dec.com)
 *      David Mosberger (davidm@cs.arizona.edu)
 *	David Miller (davem@redhat.com)
 *
 * Support routines for initializing a PCI subsystem.
 */

/*
 * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     PCI-PCI bridges cleanup, sorted resource allocation.
 * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     Converted to allocation in 3 passes, which gives
 *	     tighter packing. Prefetchable range support.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include "pci.h"

struct resource_list_x {
	struct resource_list_x *next;
	struct resource *res;
	struct pci_dev *dev;
	resource_size_t start;
	resource_size_t end;
	unsigned long flags;
};

static void add_to_failed_list(struct resource_list_x *head,
				 struct pci_dev *dev, struct resource *res)
{
	struct resource_list_x *list = head;
	struct resource_list_x *ln = list->next;
	struct resource_list_x *tmp;

	tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp) {
		pr_warning("add_to_failed_list: kmalloc() failed!\n");
		return;
	}

	tmp->next = ln;
	tmp->res = res;
	tmp->dev = dev;
	tmp->start = res->start;
	tmp->end = res->end;
	tmp->flags = res->flags;
	list->next = tmp;
}

static void free_failed_list(struct resource_list_x *head)
{
	struct resource_list_x *list, *tmp;

	for (list = head->next; list;) {
		tmp = list;
		list = list->next;
		kfree(tmp);
	}

	head->next = NULL;
}
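/*
 * Illustrative usage of the failed-resource list, mirroring what
 * pci_assign_unassigned_resources() below does (a sketch, not extra API):
 *
 *	struct resource_list_x head = { .next = NULL };
 *
 *	__pci_bus_assign_resources(bus, &head);
 *	for (list = head.next; list; list = list->next)
 *		;	// inspect list->dev, list->res, saved start/end/flags
 *	free_failed_list(&head);
 */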

static void __dev_sort_resources(struct pci_dev *dev,
				 struct resource_list *head)
{
	u16 class = dev->class >> 8;

	/* Don't touch classless devices or host bridges or ioapics.  */
	if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
		return;

	/* Don't touch ioapic devices already enabled by firmware */
	if (class == PCI_CLASS_SYSTEM_PIC) {
		u16 command;
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
			return;
	}

	pdev_sort_resources(dev, head);
}

static void __assign_resources_sorted(struct resource_list *head,
				 struct resource_list_x *fail_head)
{
	struct resource *res;
	struct resource_list *list, *tmp;
	int idx;

	for (list = head->next; list;) {
		res = list->res;
		idx = res - &list->dev->resource[0];
		if (pci_assign_resource(list->dev, idx)) {
			if (fail_head && !pci_is_root_bus(list->dev->bus))
				add_to_failed_list(fail_head, list->dev, res);
			res->start = 0;
			res->end = 0;
			res->flags = 0;
		}
		tmp = list;
		list = list->next;
		kfree(tmp);
	}
}

static void pdev_assign_resources_sorted(struct pci_dev *dev,
				 struct resource_list_x *fail_head)
{
	struct resource_list head;

	head.next = NULL;
	__dev_sort_resources(dev, &head);
	__assign_resources_sorted(&head, fail_head);
}

static void pbus_assign_resources_sorted(const struct pci_bus *bus,
					 struct resource_list_x *fail_head)
{
	struct pci_dev *dev;
	struct resource_list head;

	head.next = NULL;
	list_for_each_entry(dev, &bus->devices, bus_list)
		__dev_sort_resources(dev, &head);

	__assign_resources_sorted(&head, fail_head);
}

void pci_setup_cardbus(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;

	dev_info(&bridge->dev, "CardBus bridge to [bus %02x-%02x]\n",
		 bus->secondary, bus->subordinate);

	res = bus->resource[0];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		/*
		 * The IO resource is allocated a range twice as large as it
		 * would normally need.  This allows us to set both IO regs.
		 */
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
					region.end);
	}

	res = bus->resource[1];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
					region.end);
	}

	res = bus->resource[2];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_MEM) {
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
					region.end);
	}

	res = bus->resource[3];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_MEM) {
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
					region.end);
	}
}
EXPORT_SYMBOL(pci_setup_cardbus);

/* Initialize bridges with base/limit values we have collected.
   PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998)
   requires that if there are no I/O ports or memory behind the
   bridge, the corresponding range must be turned off by writing a
   base value greater than the limit to the bridge's base/limit
   registers.

   Note: care must be taken when updating I/O base/limit registers
   of bridges which support 32-bit I/O. This update requires two
   config space writes, so it's quite possible that an I/O window of
   the bridge will have some undesirable address (e.g. 0) after the
   first write. Ditto 64-bit prefetchable MMIO.  */
static void pci_setup_bridge_io(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l, io_upper16;

	/* Set up the top and bottom of the PCI I/O segment for this bus. */
	res = bus->resource[0];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		pci_read_config_dword(bridge, PCI_IO_BASE, &l);
		l &= 0xffff0000;
		l |= (region.start >> 8) & 0x00f0;
		l |= region.end & 0xf000;
		/* Set up upper 16 bits of I/O base/limit. */
		io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
	} else {
		/* Clear upper 16 bits of I/O base/limit. */
		io_upper16 = 0;
		l = 0x00f0;
		dev_info(&bridge->dev, "  bridge window [io  disabled]\n");
	}
	/* Temporarily disable the I/O range before updating PCI_IO_BASE. */
	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
	/* Update lower 16 bits of I/O base/limit. */
	pci_write_config_dword(bridge, PCI_IO_BASE, l);
	/* Update upper 16 bits of I/O base/limit. */
	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}
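/*
 * Worked example (illustrative only): for an I/O window at
 * [io 0x1000-0x1fff], region.start = 0x1000 and region.end = 0x1fff, so
 *
 *	low 16 bits of l: ((0x1000 >> 8) & 0x00f0) | (0x1fff & 0xf000) = 0x1010
 *	io_upper16:       (0x1fff & 0xffff0000) | (0x1000 >> 16)       = 0
 *
 * i.e. the upper nibbles of the I/O Base and I/O Limit registers both
 * become 0x1, encoding address bits 15:12 (the remaining base bits are
 * implied 0, the remaining limit bits implied 0xfff), and the upper
 * 16-bit base/limit registers stay 0.
 */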

static void pci_setup_bridge_mmio(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l;

	/* Set up the top and bottom of the PCI Memory segment for this bus. */
	res = bus->resource[1];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_MEM) {
		l = (region.start >> 16) & 0xfff0;
		l |= region.end & 0xfff00000;
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
	} else {
		l = 0x0000fff0;
		dev_info(&bridge->dev, "  bridge window [mem disabled]\n");
	}
	pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
}
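/*
 * Worked example (illustrative only): for a non-prefetchable window at
 * [mem 0xa0000000-0xa00fffff], region.start = 0xa0000000 and
 * region.end = 0xa00fffff, so
 *
 *	l = ((0xa0000000 >> 16) & 0xfff0) | (0xa00fffff & 0xfff00000)
 *	  = 0x0000a000 | 0xa0000000 = 0xa000a000
 *
 * i.e. Memory Base = 0xa000 and Memory Limit = 0xa000, each encoding
 * address bits 31:20 of the window (the base's low 20 bits are implied 0,
 * the limit's implied 0xfffff).
 */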

static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l, bu, lu;

	/* Clear out the upper 32 bits of PREF limit.
	   If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
	   disables PREF range, which is ok. */
	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);

	/* Set up PREF base/limit. */
	bu = lu = 0;
	res = bus->resource[2];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_PREFETCH) {
		l = (region.start >> 16) & 0xfff0;
		l |= region.end & 0xfff00000;
		if (res->flags & IORESOURCE_MEM_64) {
			bu = upper_32_bits(region.start);
			lu = upper_32_bits(region.end);
		}
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
	} else {
		l = 0x0000fff0;
		dev_info(&bridge->dev, "  bridge window [mem pref disabled]\n");
	}
	pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);

	/* Set the upper 32 bits of PREF base & limit. */
	pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
}
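/*
 * Worked example (illustrative only): for a 64-bit prefetchable window at
 * [mem 0x200000000-0x2000fffff pref], the low 32 bits of start and end
 * contribute nothing above bit 20, so
 *
 *	l  = ((0x00000000 >> 16) & 0xfff0) | (0x000fffff & 0xfff00000) = 0
 *	bu = upper_32_bits(0x200000000) = 0x2
 *	lu = upper_32_bits(0x2000fffff) = 0x2
 *
 * The upper base/limit registers carry the meaningful bits here, and the
 * window is only re-enabled by the final PCI_PREF_LIMIT_UPPER32 write,
 * which is why that register is cleared first and written last.
 */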

static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
{
	struct pci_dev *bridge = bus->self;

	dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
		 bus->secondary, bus->subordinate);

	if (type & IORESOURCE_IO)
		pci_setup_bridge_io(bus);

	if (type & IORESOURCE_MEM)
		pci_setup_bridge_mmio(bus);

	if (type & IORESOURCE_PREFETCH)
		pci_setup_bridge_mmio_pref(bus);

	pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}

static void pci_setup_bridge(struct pci_bus *bus)
{
	unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	__pci_setup_bridge(bus, type);
}

/* Check whether the bridge supports optional I/O and
   prefetchable memory ranges. If not, the respective
   base/limit registers must be read-only and read as 0. */
static void pci_bridge_check_ranges(struct pci_bus *bus)
{
	u16 io;
	u32 pmem;
	struct pci_dev *bridge = bus->self;
	struct resource *b_res;

	b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
	b_res[1].flags |= IORESOURCE_MEM;

	pci_read_config_word(bridge, PCI_IO_BASE, &io);
	if (!io) {
		pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0);
		pci_read_config_word(bridge, PCI_IO_BASE, &io);
		pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
	}
	if (io)
		b_res[0].flags |= IORESOURCE_IO;
	/*  DECchip 21050 pass 2 errata: the bridge may miss an address
	    disconnect boundary by one PCI data phase.
	    Workaround: do not use prefetching on this device. */
	if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
		return;
	pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
	if (!pmem) {
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
					       0xfff0fff0);
		pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
	}
	if (pmem) {
		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if ((pmem & PCI_PREF_RANGE_TYPE_MASK) ==
		    PCI_PREF_RANGE_TYPE_64) {
			b_res[2].flags |= IORESOURCE_MEM_64;
			b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
		}
	}

	/* Double-check that the bridge really supports a 64-bit
	   prefetchable window. */
	if (b_res[2].flags & IORESOURCE_MEM_64) {
		u32 mem_base_hi, tmp;
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
					 &mem_base_hi);
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
					       0xffffffff);
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
		if (!tmp)
			b_res[2].flags &= ~IORESOURCE_MEM_64;
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
				       mem_base_hi);
	}
}

/* Helper function for sizing routines: find first available
   bus resource of a given type. Note: we intentionally skip
   the bus resources which have already been assigned (that is,
   have non-NULL parent resource). */
static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned long type)
{
	int i;
	struct resource *r;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		r = bus->resource[i];
		if (r == &ioport_resource || r == &iomem_resource)
			continue;
		if (r && (r->flags & type_mask) == type && !r->parent)
			return r;
	}
	return NULL;
}

/* Sizing the IO windows of the PCI-PCI bridge is trivial,
   since these windows have 4K granularity and the IO ranges
   of non-bridge PCI devices are limited to 256 bytes.
   We must be careful with the ISA aliasing though. */
static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
{
	struct pci_dev *dev;
	struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
	unsigned long size = 0, size1 = 0, old_size;

	if (!b_res)
		return;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];
			unsigned long r_size;

			if (r->parent || !(r->flags & IORESOURCE_IO))
				continue;
			r_size = resource_size(r);

			if (r_size < 0x400)
				/* Might be re-aligned for ISA */
				size += r_size;
			else
				size1 += r_size;
		}
	}
	if (size < min_size)
		size = min_size;
	old_size = resource_size(b_res);
	if (old_size == 1)
		old_size = 0;
/* To be fixed in 2.5: we should have some sort of HAVE_ISA
   flag in the struct pci_bus. */
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
	size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
	size = ALIGN(size + size1, 4096);
	if (size < old_size)
		size = old_size;
	if (!size) {
		if (b_res->start || b_res->end)
			dev_info(&bus->self->dev, "disabling bridge window "
				 "%pR to [bus %02x-%02x] (unused)\n", b_res,
				 bus->secondary, bus->subordinate);
		b_res->flags = 0;
		return;
	}
	/* Alignment of the IO window is always 4K */
	b_res->start = 4096;
	b_res->end = b_res->start + size - 1;
	b_res->flags |= IORESOURCE_STARTALIGN;
}
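/*
 * Worked example of the ISA-alias adjustment above (illustrative only):
 * legacy ISA cards decode only 10 address bits, so of every 1K block of
 * I/O space only the first 256 bytes are safely usable.  With 0x300 bytes
 * of small (< 0x400) I/O BARs accumulated in 'size':
 *
 *	size = (0x300 & 0xff) + ((0x300 & ~0xffUL) << 2) = 0 + 0xc00 = 0xc00
 *
 * i.e. 3K of bus space is requested to fit 768 usable bytes.  Larger BARs
 * accumulated in 'size1' are not inflated, and the sum is then rounded up
 * to the 4K bridge window granularity.
 */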

/* Calculate the size of the bus and minimal alignment which
   guarantees that all child resources fit in this size. */
static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
			 unsigned long type, resource_size_t min_size)
{
	struct pci_dev *dev;
	resource_size_t min_align, align, size, old_size;
	resource_size_t aligns[12];	/* Alignments from 1Mb to 2Gb */
	int order, max_order;
	struct resource *b_res = find_free_bus_resource(bus, type);
	unsigned int mem64_mask = 0;

	if (!b_res)
		return 0;

	memset(aligns, 0, sizeof(aligns));
	max_order = 0;
	size = 0;

	mem64_mask = b_res->flags & IORESOURCE_MEM_64;
	b_res->flags &= ~IORESOURCE_MEM_64;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];
			resource_size_t r_size;

			if (r->parent || (r->flags & mask) != type)
				continue;
			r_size = resource_size(r);
			/* For bridges size != alignment */
			align = pci_resource_alignment(dev, r);
			order = __ffs(align) - 20;
			if (order > 11) {
				dev_warn(&dev->dev, "disabling BAR %d: %pR "
					 "(bad alignment %#llx)\n", i, r,
					 (unsigned long long) align);
				r->flags = 0;
				continue;
			}
			size += r_size;
			if (order < 0)
				order = 0;
			/* Exclude ranges with size > align from
			   calculation of the alignment. */
			if (r_size == align)
				aligns[order] += align;
			if (order > max_order)
				max_order = order;
			mem64_mask &= r->flags & IORESOURCE_MEM_64;
		}
	}
	if (size < min_size)
		size = min_size;
	old_size = resource_size(b_res);
	if (old_size == 1)
		old_size = 0;
	if (size < old_size)
		size = old_size;

	align = 0;
	min_align = 0;
	for (order = 0; order <= max_order; order++) {
		resource_size_t align1 = 1;

		align1 <<= (order + 20);

		if (!align)
			min_align = align1;
		else if (ALIGN(align + min_align, min_align) < align1)
			min_align = align1 >> 1;
		align += aligns[order];
	}
	size = ALIGN(size, min_align);
	if (!size) {
		if (b_res->start || b_res->end)
			dev_info(&bus->self->dev, "disabling bridge window "
				 "%pR to [bus %02x-%02x] (unused)\n", b_res,
				 bus->secondary, bus->subordinate);
		b_res->flags = 0;
		return 1;
	}
	b_res->start = min_align;
	b_res->end = size + min_align - 1;
	b_res->flags |= IORESOURCE_STARTALIGN;
	b_res->flags |= mem64_mask;
	return 1;
}
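/*
 * Worked example (illustrative only): three naturally aligned 1MB BARs
 * behind the bridge, all matching 'type':
 *
 *	size = 3MB, aligns[0] = 3MB, max_order = 0
 *	loop: order 0 -> align1 = 1MB, min_align = 1MB
 *	size = ALIGN(3MB, 1MB) = 3MB
 *	b_res->start = 1MB, b_res->end = 4MB - 1
 *
 * With IORESOURCE_STARTALIGN set, b_res->start temporarily encodes the
 * required alignment and b_res->end the required length; the real bus
 * addresses are assigned later, during the assignment pass.
 */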

static void pci_bus_size_cardbus(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
	u16 ctrl;

	/*
	 * Reserve some resources for CardBus.  We reserve
	 * a fixed amount of bus space for CardBus bridges.
	 */
	b_res[0].start = 0;
	b_res[0].end = pci_cardbus_io_size - 1;
	b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;

	b_res[1].start = 0;
	b_res[1].end = pci_cardbus_io_size - 1;
	b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;

	/*
	 * Check whether prefetchable memory is supported
	 * by this bridge.
	 */
	pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
		ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
		pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
		pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	}

	/*
	 * If we have prefetchable memory support, allocate
	 * two regions.  Otherwise, allocate one region of
	 * twice the size.
	 */
	if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
		b_res[2].start = 0;
		b_res[2].end = pci_cardbus_mem_size - 1;
		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;

		b_res[3].start = 0;
		b_res[3].end = pci_cardbus_mem_size - 1;
		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
	} else {
		b_res[3].start = 0;
		b_res[3].end = pci_cardbus_mem_size * 2 - 1;
		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
	}
}

void __ref pci_bus_size_bridges(struct pci_bus *bus)
{
	struct pci_dev *dev;
	unsigned long mask, prefmask;
	resource_size_t min_mem_size = 0, min_io_size = 0;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		switch (dev->class >> 8) {
		case PCI_CLASS_BRIDGE_CARDBUS:
			pci_bus_size_cardbus(b);
			break;

		case PCI_CLASS_BRIDGE_PCI:
		default:
			pci_bus_size_bridges(b);
			break;
		}
	}

	/* The root bus? */
	if (!bus->self)
		return;

	switch (bus->self->class >> 8) {
	case PCI_CLASS_BRIDGE_CARDBUS:
		/* don't size cardbuses yet. */
		break;

	case PCI_CLASS_BRIDGE_PCI:
		pci_bridge_check_ranges(bus);
		if (bus->self->is_hotplug_bridge) {
			min_io_size  = pci_hotplug_io_size;
			min_mem_size = pci_hotplug_mem_size;
		}
		/* Fall through: size the windows like any other bridge. */
	default:
		pbus_size_io(bus, min_io_size);
		/* If the bridge supports prefetchable range, size it
		   separately. If it doesn't, or its prefetchable window
		   has already been allocated by arch code, try
		   non-prefetchable range for both types of PCI memory
		   resources. */
		mask = IORESOURCE_MEM;
		prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (pbus_size_mem(bus, prefmask, prefmask, min_mem_size))
			mask = prefmask; /* Success, size non-prefetch only. */
		else
			min_mem_size += min_mem_size;
		pbus_size_mem(bus, mask, IORESOURCE_MEM, min_mem_size);
		break;
	}
}
EXPORT_SYMBOL(pci_bus_size_bridges);
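/*
 * Sketch of the mask/type logic above (illustrative only): pbus_size_mem()
 * collects child resources whose (flags & mask) == type.  If the first call
 * finds a free prefetchable window, the second call uses mask = prefmask
 * with type = IORESOURCE_MEM, so it only picks up non-prefetchable BARs.
 * If there is no prefetchable window, mask stays IORESOURCE_MEM and the
 * second call sweeps both prefetchable and non-prefetchable BARs into the
 * ordinary memory window, with min_mem_size doubled to leave room for both.
 */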

static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
					 struct resource_list_x *fail_head)
{
	struct pci_bus *b;
	struct pci_dev *dev;

	pbus_assign_resources_sorted(bus, fail_head);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		b = dev->subordinate;
		if (!b)
			continue;

		__pci_bus_assign_resources(b, fail_head);

		switch (dev->class >> 8) {
		case PCI_CLASS_BRIDGE_PCI:
			if (!pci_is_enabled(dev))
				pci_setup_bridge(b);
			break;

		case PCI_CLASS_BRIDGE_CARDBUS:
			pci_setup_cardbus(b);
			break;

		default:
			dev_info(&dev->dev, "not setting up bridge for bus "
				 "%04x:%02x\n", pci_domain_nr(b), b->number);
			break;
		}
	}
}

void __ref pci_bus_assign_resources(const struct pci_bus *bus)
{
	__pci_bus_assign_resources(bus, NULL);
}
EXPORT_SYMBOL(pci_bus_assign_resources);

static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
					 struct resource_list_x *fail_head)
{
	struct pci_bus *b;

	pdev_assign_resources_sorted((struct pci_dev *)bridge, fail_head);

	b = bridge->subordinate;
	if (!b)
		return;

	__pci_bus_assign_resources(b, fail_head);

	switch (bridge->class >> 8) {
	case PCI_CLASS_BRIDGE_PCI:
		pci_setup_bridge(b);
		break;

	case PCI_CLASS_BRIDGE_CARDBUS:
		pci_setup_cardbus(b);
		break;

	default:
		dev_info(&bridge->dev, "not setting up bridge for bus "
			 "%04x:%02x\n", pci_domain_nr(b), b->number);
		break;
	}
}

static void pci_bridge_release_resources(struct pci_bus *bus,
					  unsigned long type)
{
	int idx;
	bool changed = false;
	struct pci_dev *dev;
	struct resource *r;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	dev = bus->self;
	for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END;
	     idx++) {
		r = &dev->resource[idx];
		if ((r->flags & type_mask) != type)
			continue;
		if (!r->parent)
			continue;
		/*
		 * If there are child resources under this window,
		 * release them all.
		 */
		release_child_resources(r);
		if (!release_resource(r)) {
			dev_printk(KERN_DEBUG, &dev->dev,
				 "resource %d %pR released\n", idx, r);
			/* keep the old size */
			r->end = resource_size(r) - 1;
			r->start = 0;
			r->flags = 0;
			changed = true;
		}
	}

	if (changed) {
		/* If the prefetchable window was released, rewrite only
		   that one; don't touch the non-prefetchable window. */
		if (type & IORESOURCE_PREFETCH)
			type = IORESOURCE_PREFETCH;
		__pci_setup_bridge(bus, type);
	}
}

enum release_type {
	leaf_only,
	whole_subtree,
};

/*
 * Try to release PCI bridge resources starting from leaf bridges,
 * so that bigger windows can be allocated later.
 */
static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus,
						   unsigned long type,
						   enum release_type rel_type)
{
	struct pci_dev *dev;
	bool is_leaf_bridge = true;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		is_leaf_bridge = false;

		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			continue;

		if (rel_type == whole_subtree)
			pci_bus_release_bridge_resources(b, type,
						 whole_subtree);
	}

	if (pci_is_root_bus(bus))
		return;

	if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
		return;

	if ((rel_type == whole_subtree) || is_leaf_bridge)
		pci_bridge_release_resources(bus, type);
}

static void pci_bus_dump_res(struct pci_bus *bus)
{
	int i;

	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		struct resource *res = bus->resource[i];

		if (!res || !res->end || !res->flags)
			continue;

		dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
	}
}

static void pci_bus_dump_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	struct pci_dev *dev;

	pci_bus_dump_res(bus);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		b = dev->subordinate;
		if (!b)
			continue;

		pci_bus_dump_resources(b);
	}
}

static int __init pci_bus_get_depth(struct pci_bus *bus)
{
	int depth = 0;
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int ret;
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		ret = pci_bus_get_depth(b);
		if (ret + 1 > depth)
			depth = ret + 1;
	}

	return depth;
}

static int __init pci_get_max_depth(void)
{
	int depth = 0;
	struct pci_bus *bus;

	list_for_each_entry(bus, &pci_root_buses, node) {
		int ret;

		ret = pci_bus_get_depth(bus);
		if (ret > depth)
			depth = ret;
	}

	return depth;
}
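/*
 * Example (illustrative only): a root bus whose only bridge is a PCIe
 * switch (one upstream port, one downstream port, then an endpoint bus)
 * has depth 2, so pci_assign_unassigned_resources() below will retry the
 * assignment up to max_depth + 1 = 3 times before giving up.
 */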

/*
 * The first pass does not touch PCI bridge resources.  The second and
 * later passes release the resources of small leaf bridges so they can
 * be reassigned.  We keep retrying, up to the maximum bus depth, until
 * a good assignment is found or we run out of tries.
 */
void __init
pci_assign_unassigned_resources(void)
{
	struct pci_bus *bus;
	int tried_times = 0;
	enum release_type rel_type = leaf_only;
	struct resource_list_x head, *list;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;
	unsigned long failed_type;
	int max_depth = pci_get_max_depth();
	int pci_try_num;

	head.next = NULL;

	pci_try_num = max_depth + 1;
	printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
		 max_depth, pci_try_num);

again:
	/* Depth first, calculate sizes and alignments of all
	   subordinate buses. */
	list_for_each_entry(bus, &pci_root_buses, node) {
		pci_bus_size_bridges(bus);
	}
	/* Depth last, allocate resources and update the hardware. */
	list_for_each_entry(bus, &pci_root_buses, node) {
		__pci_bus_assign_resources(bus, &head);
	}
	tried_times++;

	/* Did any device fail to get its resources? */
	if (!head.next)
		goto enable_and_dump;
	failed_type = 0;
	for (list = head.next; list;) {
		failed_type |= list->flags;
		list = list->next;
	}
	/*
	 * I/O ports are tight; retrying won't help there.  Also stop once
	 * we have reached the retry limit.
	 */
	failed_type &= type_mask;
	if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) {
		free_failed_list(&head);
		goto enable_and_dump;
	}

	printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
			 tried_times + 1);

	/* From the third try onwards, don't require the bridge to be a leaf. */
	if ((tried_times + 1) > 2)
		rel_type = whole_subtree;

	/*
	 * Try to release the bridge resources that could not accommodate
	 * the resources of the child devices below them.
	 */
	for (list = head.next; list;) {
		bus = list->dev->bus;
		pci_bus_release_bridge_resources(bus, list->flags & type_mask,
						  rel_type);
		list = list->next;
	}
	/* restore size and flags */
	for (list = head.next; list;) {
		struct resource *res = list->res;

		res->start = list->start;
		res->end = list->end;
		res->flags = list->flags;
		if (list->dev->subordinate)
			res->flags = 0;

		list = list->next;
	}
	free_failed_list(&head);

	goto again;

enable_and_dump:
	/* Depth last, update the hardware. */
	list_for_each_entry(bus, &pci_root_buses, node)
		pci_enable_bridges(bus);

	/* Dump the resources on all buses. */
	list_for_each_entry(bus, &pci_root_buses, node) {
		pci_bus_dump_resources(bus);
	}
}

void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
{
	struct pci_bus *parent = bridge->subordinate;
	int tried_times = 0;
	struct resource_list_x head, *list;
	int retval;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	head.next = NULL;

again:
	pci_bus_size_bridges(parent);
	__pci_bridge_assign_resources(bridge, &head);
	retval = pci_reenable_device(bridge);
	pci_set_master(bridge);
	pci_enable_bridges(parent);

	tried_times++;

	if (!head.next)
		return;

	if (tried_times >= 2) {
		/* Still failing; no point in trying any further. */
		free_failed_list(&head);
		return;
	}

	printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
			 tried_times + 1);

	/*
	 * Try to release the bridge resources that could not accommodate
	 * the resources of the child devices below them.
	 */
	for (list = head.next; list;) {
		struct pci_bus *bus = list->dev->bus;
		unsigned long flags = list->flags;

		pci_bus_release_bridge_resources(bus, flags & type_mask,
						 whole_subtree);
		list = list->next;
	}
	/* restore size and flags */
	for (list = head.next; list;) {
		struct resource *res = list->res;

		res->start = list->start;
		res->end = list->end;
		res->flags = list->flags;
		if (list->dev->subordinate)
			res->flags = 0;

		list = list->next;
	}
	free_failed_list(&head);

	goto again;
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);