/*
 *	drivers/pci/setup-bus.c
 *
 * Extruded from code written by
 *      Dave Rusling (david.rusling@reo.mts.dec.com)
 *      David Mosberger (davidm@cs.arizona.edu)
 *	David Miller (davem@redhat.com)
 *
 * Support routines for initializing a PCI subsystem.
 */

/*
 * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     PCI-PCI bridges cleanup, sorted resource allocation.
 * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     Converted to allocation in 3 passes, which gives
 *	     tighter packing. Prefetchable range support.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include "pci.h"

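/*
 * Record of a resource that failed to be assigned: the original
 * start/end/flags are kept here so they can be restored before the
 * assignment is retried (see pci_assign_unassigned_bridge_resources()).
 */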
struct resource_list_x {
	struct resource_list_x *next;
	struct resource *res;
	struct pci_dev *dev;
	resource_size_t start;
	resource_size_t end;
	unsigned long flags;
};

static void add_to_failed_list(struct resource_list_x *head,
				 struct pci_dev *dev, struct resource *res)
{
	struct resource_list_x *list = head;
	struct resource_list_x *ln = list->next;
	struct resource_list_x *tmp;

	tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp) {
		pr_warning("add_to_failed_list: kmalloc() failed!\n");
		return;
	}

	tmp->next = ln;
	tmp->res = res;
	tmp->dev = dev;
	tmp->start = res->start;
	tmp->end = res->end;
	tmp->flags = res->flags;
	list->next = tmp;
}

static void free_failed_list(struct resource_list_x *head)
{
	struct resource_list_x *list, *tmp;

	for (list = head->next; list;) {
		tmp = list;
		list = list->next;
		kfree(tmp);
	}

	head->next = NULL;
}

static void __dev_sort_resources(struct pci_dev *dev,
				 struct resource_list *head)
{
	u16 class = dev->class >> 8;

	/* Don't touch classless devices or host bridges or ioapics.  */
	if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
		return;

	/* Don't touch ioapic devices already enabled by firmware */
	if (class == PCI_CLASS_SYSTEM_PIC) {
		u16 command;
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
			return;
	}

	pdev_sort_resources(dev, head);
}

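/*
 * Assign the resources queued by __dev_sort_resources(), which sorts them
 * so that the resources needing the largest alignment are assigned first.
 * On failure the resource is cleared and, if a fail list was supplied, its
 * original values are recorded there for a later retry.
 */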
static void __assign_resources_sorted(struct resource_list *head,
				 struct resource_list_x *fail_head)
{
	struct resource *res;
	struct resource_list *list, *tmp;
	int idx;

	for (list = head->next; list;) {
		res = list->res;
		idx = res - &list->dev->resource[0];

		if (pci_assign_resource(list->dev, idx)) {
			if (fail_head && !pci_is_root_bus(list->dev->bus)) {
				/*
				 * If the failed resource is a ROM BAR and
				 * it will be enabled later, don't add it
				 * to the list.
				 */
				if (!((idx == PCI_ROM_RESOURCE) &&
				      (!(res->flags & IORESOURCE_ROM_ENABLE))))
					add_to_failed_list(fail_head, list->dev, res);
			}
			res->start = 0;
			res->end = 0;
			res->flags = 0;
		}
		tmp = list;
		list = list->next;
		kfree(tmp);
	}
}

static void pdev_assign_resources_sorted(struct pci_dev *dev,
				 struct resource_list_x *fail_head)
{
	struct resource_list head;

	head.next = NULL;
	__dev_sort_resources(dev, &head);
	__assign_resources_sorted(&head, fail_head);
}

static void pbus_assign_resources_sorted(const struct pci_bus *bus,
					 struct resource_list_x *fail_head)
{
	struct pci_dev *dev;
	struct resource_list head;

	head.next = NULL;
	list_for_each_entry(dev, &bus->devices, bus_list)
		__dev_sort_resources(dev, &head);

	__assign_resources_sorted(&head, fail_head);
}

void pci_setup_cardbus(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;

	dev_info(&bridge->dev, "CardBus bridge to [bus %02x-%02x]\n",
		 bus->secondary, bus->subordinate);

	res = bus->resource[0];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		/*
		 * The IO resource is allocated a range twice as large as it
		 * would normally need.  This allows us to set both IO regs.
		 */
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
					region.end);
	}

	res = bus->resource[1];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
					region.end);
	}

	res = bus->resource[2];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_MEM) {
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
					region.end);
	}

	res = bus->resource[3];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_MEM) {
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
					region.end);
	}
}
EXPORT_SYMBOL(pci_setup_cardbus);

/* Initialize bridges with base/limit values we have collected.
   PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998)
   requires that if there are no I/O ports or memory behind the
   bridge, the corresponding range must be turned off by writing a
   base value greater than the limit to the bridge's base/limit
   registers.

   Note: care must be taken when updating I/O base/limit registers
   of bridges which support 32-bit I/O.  This update requires two
   config space writes, so it's quite possible that an I/O window of
   the bridge will have some undesirable address (e.g. 0) after the
   first write.  Ditto 64-bit prefetchable MMIO.  */
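/*
 * For illustration: programming an I/O window of [io 0x1000-0x1fff] below
 * sets the low 16 bits of l to
 *	((0x1000 >> 8) & 0x00f0) | (0x1fff & 0xf000) = 0x1010,
 * i.e. I/O Base = bits 15:12 of the start and I/O Limit = bits 15:12 of
 * the end, while io_upper16 = (0x1fff & 0xffff0000) | (0x1000 >> 16) = 0
 * since the window lies below 64K.
 */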
static void pci_setup_bridge_io(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l, io_upper16;

	/* Set up the top and bottom of the PCI I/O segment for this bus. */
	res = bus->resource[0];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		pci_read_config_dword(bridge, PCI_IO_BASE, &l);
		l &= 0xffff0000;
		l |= (region.start >> 8) & 0x00f0;
		l |= region.end & 0xf000;
		/* Set up upper 16 bits of I/O base/limit. */
		io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
	} else {
		/* Clear upper 16 bits of I/O base/limit. */
		io_upper16 = 0;
		l = 0x00f0;
		dev_info(&bridge->dev, "  bridge window [io  disabled]\n");
	}
	/* Temporarily disable the I/O range before updating PCI_IO_BASE. */
	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
	/* Update lower 16 bits of I/O base/limit. */
	pci_write_config_dword(bridge, PCI_IO_BASE, l);
	/* Update upper 16 bits of I/O base/limit. */
	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}

static void pci_setup_bridge_mmio(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l;

	/* Set up the top and bottom of the PCI Memory segment for this bus. */
	res = bus->resource[1];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_MEM) {
		l = (region.start >> 16) & 0xfff0;
		l |= region.end & 0xfff00000;
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
	} else {
		l = 0x0000fff0;
		dev_info(&bridge->dev, "  bridge window [mem disabled]\n");
	}
	pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
}

static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l, bu, lu;

	/* Clear out the upper 32 bits of PREF limit.
	   If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
	   disables PREF range, which is ok. */
	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);

	/* Set up PREF base/limit. */
	bu = lu = 0;
	res = bus->resource[2];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_PREFETCH) {
		l = (region.start >> 16) & 0xfff0;
		l |= region.end & 0xfff00000;
		if (res->flags & IORESOURCE_MEM_64) {
			bu = upper_32_bits(region.start);
			lu = upper_32_bits(region.end);
		}
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
	} else {
		l = 0x0000fff0;
		dev_info(&bridge->dev, "  bridge window [mem pref disabled]\n");
	}
	pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);

	/* Set the upper 32 bits of PREF base & limit. */
	pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
}

static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
{
	struct pci_dev *bridge = bus->self;

	dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
		 bus->secondary, bus->subordinate);

	if (type & IORESOURCE_IO)
		pci_setup_bridge_io(bus);

	if (type & IORESOURCE_MEM)
		pci_setup_bridge_mmio(bus);

	if (type & IORESOURCE_PREFETCH)
		pci_setup_bridge_mmio_pref(bus);

	pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}

static void pci_setup_bridge(struct pci_bus *bus)
{
	unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	__pci_setup_bridge(bus, type);
}

/* Check whether the bridge supports optional I/O and
   prefetchable memory ranges.  If not, the respective
   base/limit registers must be read-only and read as 0. */
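/*
 * The probes below rely on unimplemented base/limit registers being
 * hardwired to zero: write a non-zero pattern, read it back, and restore
 * the original value; a zero read-back means the window is not supported.
 */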
static void pci_bridge_check_ranges(struct pci_bus *bus)
{
	u16 io;
	u32 pmem;
	struct pci_dev *bridge = bus->self;
	struct resource *b_res;

	b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
	b_res[1].flags |= IORESOURCE_MEM;

	pci_read_config_word(bridge, PCI_IO_BASE, &io);
	if (!io) {
		pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0);
		pci_read_config_word(bridge, PCI_IO_BASE, &io);
		pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
	}
	if (io)
		b_res[0].flags |= IORESOURCE_IO;
	/*  DECchip 21050 pass 2 errata: the bridge may miss an address
	    disconnect boundary by one PCI data phase.
	    Workaround: do not use prefetching on this device. */
	if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
		return;
	pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
	if (!pmem) {
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
					       0xfff0fff0);
		pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
	}
	if (pmem) {
		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if ((pmem & PCI_PREF_RANGE_TYPE_MASK) ==
		    PCI_PREF_RANGE_TYPE_64) {
			b_res[2].flags |= IORESOURCE_MEM_64;
			b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
		}
	}

	/* Double-check whether the bridge really supports a 64-bit
	   prefetchable window. */
	if (b_res[2].flags & IORESOURCE_MEM_64) {
		u32 mem_base_hi, tmp;
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
					 &mem_base_hi);
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
					       0xffffffff);
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
		if (!tmp)
			b_res[2].flags &= ~IORESOURCE_MEM_64;
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
				       mem_base_hi);
	}
}

/* Helper function for sizing routines: find the first available
   bus resource of a given type.  Note: we intentionally skip
   the bus resources which have already been assigned (that is,
   have a non-NULL parent resource). */
static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned long type)
{
	int i;
	struct resource *r;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	pci_bus_for_each_resource(bus, r, i) {
		if (r == &ioport_resource || r == &iomem_resource)
			continue;
		if (r && (r->flags & type_mask) == type && !r->parent)
			return r;
	}
	return NULL;
}

/* Sizing the IO windows of the PCI-PCI bridge is trivial,
   since these windows have 4K granularity and the IO ranges
   of non-bridge PCI devices are limited to 256 bytes.
   We must be careful with the ISA aliasing though. */
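/*
 * Note on the ISA correction applied below: with ISA aliasing only the
 * first 256 bytes of each 1K block of I/O space are usable, so the sum of
 * the small BARs is expanded accordingly, e.g. a 0x300-byte total becomes
 *	(0x300 & 0xff) + ((0x300 & ~0xffUL) << 2) = 0xc00.
 */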
static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
{
	struct pci_dev *dev;
	struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
	unsigned long size = 0, size1 = 0, old_size;

	if (!b_res)
		return;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];
			unsigned long r_size;

			if (r->parent || !(r->flags & IORESOURCE_IO))
				continue;
			r_size = resource_size(r);

			if (r_size < 0x400)
				/* Might be re-aligned for ISA */
				size += r_size;
			else
				size1 += r_size;
		}
	}
	if (size < min_size)
		size = min_size;
	old_size = resource_size(b_res);
	if (old_size == 1)
		old_size = 0;
/* To be fixed in 2.5: we should have some sort of HAVE_ISA
   flag in the struct pci_bus. */
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
	size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
	size = ALIGN(size + size1, 4096);
	if (size < old_size)
		size = old_size;
	if (!size) {
		if (b_res->start || b_res->end)
			dev_info(&bus->self->dev, "disabling bridge window "
				 "%pR to [bus %02x-%02x] (unused)\n", b_res,
				 bus->secondary, bus->subordinate);
		b_res->flags = 0;
		return;
	}
	/* Alignment of the IO window is always 4K */
	b_res->start = 4096;
	b_res->end = b_res->start + size - 1;
	b_res->flags |= IORESOURCE_STARTALIGN;
}

/* Calculate the size of the bus and the minimal alignment which
   guarantees that all child resources fit in this size. */
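/*
 * In pbus_size_mem(), aligns[n] sums the sizes of those child resources
 * whose size equals their alignment of (1M << n): order = __ffs(align) - 20,
 * so a 1 MB alignment lands in aligns[0], 2 MB in aligns[1], ... up to
 * 2 GB in aligns[11].
 */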
static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
			 unsigned long type, resource_size_t min_size)
{
	struct pci_dev *dev;
	resource_size_t min_align, align, size, old_size;
	resource_size_t aligns[12];	/* Alignments from 1Mb to 2Gb */
	int order, max_order;
	struct resource *b_res = find_free_bus_resource(bus, type);
	unsigned int mem64_mask = 0;

	if (!b_res)
		return 0;

	memset(aligns, 0, sizeof(aligns));
	max_order = 0;
	size = 0;

	mem64_mask = b_res->flags & IORESOURCE_MEM_64;
	b_res->flags &= ~IORESOURCE_MEM_64;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];
			resource_size_t r_size;

			if (r->parent || (r->flags & mask) != type)
				continue;
			r_size = resource_size(r);
			/* For bridges size != alignment */
			align = pci_resource_alignment(dev, r);
			order = __ffs(align) - 20;
			if (order > 11) {
				dev_warn(&dev->dev, "disabling BAR %d: %pR "
					 "(bad alignment %#llx)\n", i, r,
					 (unsigned long long) align);
				r->flags = 0;
				continue;
			}
			size += r_size;
			if (order < 0)
				order = 0;
			/* Exclude ranges with size > align from
			   calculation of the alignment. */
			if (r_size == align)
				aligns[order] += align;
			if (order > max_order)
				max_order = order;
			mem64_mask &= r->flags & IORESOURCE_MEM_64;
		}
	}
	if (size < min_size)
		size = min_size;
	old_size = resource_size(b_res);
	if (old_size == 1)
		old_size = 0;
	if (size < old_size)
		size = old_size;

	align = 0;
	min_align = 0;
	for (order = 0; order <= max_order; order++) {
		resource_size_t align1 = 1;

		align1 <<= (order + 20);

		if (!align)
			min_align = align1;
		else if (ALIGN(align + min_align, min_align) < align1)
			min_align = align1 >> 1;
		align += aligns[order];
	}
	size = ALIGN(size, min_align);
	if (!size) {
		if (b_res->start || b_res->end)
			dev_info(&bus->self->dev, "disabling bridge window "
				 "%pR to [bus %02x-%02x] (unused)\n", b_res,
				 bus->secondary, bus->subordinate);
		b_res->flags = 0;
		return 1;
	}
	b_res->start = min_align;
	b_res->end = size + min_align - 1;
	b_res->flags |= IORESOURCE_STARTALIGN;
	b_res->flags |= mem64_mask;
	return 1;
}

static void pci_bus_size_cardbus(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
	u16 ctrl;

	/*
	 * Reserve some resources for CardBus.  We reserve
	 * a fixed amount of bus space for CardBus bridges.
	 */
	b_res[0].start = 0;
	b_res[0].end = pci_cardbus_io_size - 1;
	b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;

	b_res[1].start = 0;
	b_res[1].end = pci_cardbus_io_size - 1;
	b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;

	/*
	 * Check whether prefetchable memory is supported
	 * by this bridge.
	 */
	pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
		ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
		pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
		pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	}

	/*
	 * If we have prefetchable memory support, allocate
	 * two regions.  Otherwise, allocate one region of
	 * twice the size.
	 */
	if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
		b_res[2].start = 0;
		b_res[2].end = pci_cardbus_mem_size - 1;
		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;

		b_res[3].start = 0;
		b_res[3].end = pci_cardbus_mem_size - 1;
		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
	} else {
		b_res[3].start = 0;
		b_res[3].end = pci_cardbus_mem_size * 2 - 1;
		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
	}
}

void __ref pci_bus_size_bridges(struct pci_bus *bus)
{
	struct pci_dev *dev;
	unsigned long mask, prefmask;
	resource_size_t min_mem_size = 0, min_io_size = 0;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		switch (dev->class >> 8) {
		case PCI_CLASS_BRIDGE_CARDBUS:
			pci_bus_size_cardbus(b);
			break;

		case PCI_CLASS_BRIDGE_PCI:
		default:
			pci_bus_size_bridges(b);
			break;
		}
	}

	/* The root bus? */
	if (!bus->self)
		return;

	switch (bus->self->class >> 8) {
	case PCI_CLASS_BRIDGE_CARDBUS:
		/* don't size cardbuses yet. */
		break;

	case PCI_CLASS_BRIDGE_PCI:
		pci_bridge_check_ranges(bus);
		if (bus->self->is_hotplug_bridge) {
			min_io_size  = pci_hotplug_io_size;
			min_mem_size = pci_hotplug_mem_size;
		}
	default:
		pbus_size_io(bus, min_io_size);
		/* If the bridge supports prefetchable range, size it
		   separately.  If it doesn't, or its prefetchable window
		   has already been allocated by arch code, try
		   non-prefetchable range for both types of PCI memory
		   resources. */
		mask = IORESOURCE_MEM;
		prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (pbus_size_mem(bus, prefmask, prefmask, min_mem_size))
			mask = prefmask; /* Success, size non-prefetch only. */
		else
			min_mem_size += min_mem_size;
		pbus_size_mem(bus, mask, IORESOURCE_MEM, min_mem_size);
		break;
	}
}
EXPORT_SYMBOL(pci_bus_size_bridges);

static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
					 struct resource_list_x *fail_head)
{
	struct pci_bus *b;
	struct pci_dev *dev;

	pbus_assign_resources_sorted(bus, fail_head);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		b = dev->subordinate;
		if (!b)
			continue;

		__pci_bus_assign_resources(b, fail_head);

		switch (dev->class >> 8) {
		case PCI_CLASS_BRIDGE_PCI:
			if (!pci_is_enabled(dev))
				pci_setup_bridge(b);
			break;

		case PCI_CLASS_BRIDGE_CARDBUS:
			pci_setup_cardbus(b);
			break;

		default:
			dev_info(&dev->dev, "not setting up bridge for bus "
				 "%04x:%02x\n", pci_domain_nr(b), b->number);
			break;
		}
	}
}

void __ref pci_bus_assign_resources(const struct pci_bus *bus)
{
	__pci_bus_assign_resources(bus, NULL);
}
EXPORT_SYMBOL(pci_bus_assign_resources);

static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
					 struct resource_list_x *fail_head)
{
	struct pci_bus *b;

	pdev_assign_resources_sorted((struct pci_dev *)bridge, fail_head);

	b = bridge->subordinate;
	if (!b)
		return;

	__pci_bus_assign_resources(b, fail_head);

	switch (bridge->class >> 8) {
	case PCI_CLASS_BRIDGE_PCI:
		pci_setup_bridge(b);
		break;

	case PCI_CLASS_BRIDGE_CARDBUS:
		pci_setup_cardbus(b);
		break;

	default:
		dev_info(&bridge->dev, "not setting up bridge for bus "
			 "%04x:%02x\n", pci_domain_nr(b), b->number);
		break;
	}
}

static void pci_bridge_release_resources(struct pci_bus *bus,
					  unsigned long type)
{
	int idx;
	bool changed = false;
	struct pci_dev *dev;
	struct resource *r;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	dev = bus->self;
	for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END;
	     idx++) {
		r = &dev->resource[idx];
		if ((r->flags & type_mask) != type)
			continue;
		if (!r->parent)
			continue;
		/*
		 * If there are child resources under this one, release
		 * them all.
		 */
		release_child_resources(r);
		if (!release_resource(r)) {
			dev_printk(KERN_DEBUG, &dev->dev,
				 "resource %d %pR released\n", idx, r);
			/* keep the old size */
			r->end = resource_size(r) - 1;
			r->start = 0;
			r->flags = 0;
			changed = true;
		}
	}

	if (changed) {
		/* avoid touching the window without PREF */
		if (type & IORESOURCE_PREFETCH)
			type = IORESOURCE_PREFETCH;
		__pci_setup_bridge(bus, type);
	}
}

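/*
 * With leaf_only, a bridge releases its windows only when no child buses
 * hang off it; with whole_subtree, every PCI-PCI bridge window of the
 * given type in the subtree is released.
 */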
enum release_type {
	leaf_only,
	whole_subtree,
};

/*
 * Try to release PCI bridge resources starting from the leaf bridges, so
 * that a bigger window can be allocated later.
 */
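/*
 * This is used by pci_assign_unassigned_bridge_resources() below: after a
 * failed assignment pass the windows of the failing resource type are
 * released for the whole subtree, the original child resource values are
 * restored, and the assignment is retried once.
 */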
static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus,
						   unsigned long type,
						   enum release_type rel_type)
{
	struct pci_dev *dev;
	bool is_leaf_bridge = true;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		is_leaf_bridge = false;

		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			continue;

		if (rel_type == whole_subtree)
			pci_bus_release_bridge_resources(b, type,
						 whole_subtree);
	}

	if (pci_is_root_bus(bus))
		return;

	if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
		return;

	if ((rel_type == whole_subtree) || is_leaf_bridge)
		pci_bridge_release_resources(bus, type);
}

static void pci_bus_dump_res(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->end || !res->flags)
			continue;

		dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
	}
}

static void pci_bus_dump_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	struct pci_dev *dev;

	pci_bus_dump_res(bus);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		b = dev->subordinate;
		if (!b)
			continue;

		pci_bus_dump_resources(b);
	}
}

void __init
pci_assign_unassigned_resources(void)
{
	struct pci_bus *bus;

	/* Depth first, calculate sizes and alignments of all
	   subordinate buses. */
	list_for_each_entry(bus, &pci_root_buses, node) {
		pci_bus_size_bridges(bus);
	}
	/* Depth last, allocate resources and update the hardware. */
	list_for_each_entry(bus, &pci_root_buses, node) {
		pci_bus_assign_resources(bus);
		pci_enable_bridges(bus);
	}

	/* Dump the resources on the buses. */
	list_for_each_entry(bus, &pci_root_buses, node) {
		pci_bus_dump_resources(bus);
	}
}

void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
{
	struct pci_bus *parent = bridge->subordinate;
	int tried_times = 0;
	struct resource_list_x head, *list;
	int retval;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	head.next = NULL;

again:
	pci_bus_size_bridges(parent);
	__pci_bridge_assign_resources(bridge, &head);

	tried_times++;

	if (!head.next)
		goto enable_all;

	if (tried_times >= 2) {
		/* Still failing; no need to try any more. */
		free_failed_list(&head);
		goto enable_all;
	}

	printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
			 tried_times + 1);

	/*
	 * Try to release leaf bridge resources that don't fit the
	 * resources of the child devices under that bridge.
	 */
	for (list = head.next; list;) {
		struct pci_bus *bus = list->dev->bus;
		unsigned long flags = list->flags;

		pci_bus_release_bridge_resources(bus, flags & type_mask,
						 whole_subtree);
		list = list->next;
	}
	/* Restore the original size and flags. */
	for (list = head.next; list;) {
		struct resource *res = list->res;

		res->start = list->start;
		res->end = list->end;
		res->flags = list->flags;
		if (list->dev->subordinate)
			res->flags = 0;

		list = list->next;
	}
	free_failed_list(&head);

	goto again;

enable_all:
	retval = pci_reenable_device(bridge);
	pci_set_master(bridge);
	pci_enable_bridges(parent);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
926