xref: /openbmc/linux/arch/powerpc/kernel/pci_64.c (revision c21b37f6)
1 /*
2  * Port for PPC64 David Engebretsen, IBM Corp.
3  * Contains common PCI routines for the ppc64 platform, pSeries and iSeries brands.
4  *
5  * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
6  *   Rework, based on alpha PCI code.
7  *
8  *      This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  */
13 
14 #undef DEBUG
15 
16 #include <linux/kernel.h>
17 #include <linux/pci.h>
18 #include <linux/string.h>
19 #include <linux/init.h>
20 #include <linux/bootmem.h>
21 #include <linux/mm.h>
22 #include <linux/list.h>
23 #include <linux/syscalls.h>
24 #include <linux/irq.h>
25 #include <linux/vmalloc.h>
26 
27 #include <asm/processor.h>
28 #include <asm/io.h>
29 #include <asm/prom.h>
30 #include <asm/pci-bridge.h>
31 #include <asm/byteorder.h>
32 #include <asm/machdep.h>
33 #include <asm/ppc-pci.h>
34 #include <asm/firmware.h>
35 
36 #ifdef DEBUG
37 #include <asm/udbg.h>
38 #define DBG(fmt...) printk(fmt)
39 #else
40 #define DBG(fmt...)
41 #endif
42 
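/* If pci_probe_only is set we trust the resource assignments found in the
 * device tree and merely claim them (see pcibios_init()); otherwise the
 * kernel assigns whatever was left unassigned by firmware.
 */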
43 unsigned long pci_probe_only = 1;
44 int pci_assign_all_buses = 0;
45 
46 static void fixup_resource(struct resource *res, struct pci_dev *dev);
47 static void do_bus_setup(struct pci_bus *bus);
48 
49 /* pci_io_base -- the base address from which IO BARs are offsets.
50  * This is the lowest I/O base address (so BAR values are always positive),
51  * and it *must* be the start of ISA space if an ISA bus exists, because
52  * ISA drivers use hard-coded offsets.  If no ISA bus exists, nothing
53  * is mapped in the first 64K of IO space.
54  */
55 unsigned long pci_io_base = ISA_IO_BASE;
56 EXPORT_SYMBOL(pci_io_base);
57 
58 LIST_HEAD(hose_list);
59 
60 static struct dma_mapping_ops *pci_dma_ops;
61 
62 void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
63 {
64 	pci_dma_ops = dma_ops;
65 }
66 
67 struct dma_mapping_ops *get_pci_dma_ops(void)
68 {
69 	return pci_dma_ops;
70 }
71 EXPORT_SYMBOL(get_pci_dma_ops);
72 
73 static void fixup_broken_pcnet32(struct pci_dev* dev)
74 {
75 	if ((dev->class >> 8) == PCI_CLASS_NETWORK_ETHERNET) {
76 		dev->vendor = PCI_VENDOR_ID_AMD;
77 		pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
78 	}
79 }
80 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
81 
82 void  pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
83 			      struct resource *res)
84 {
85 	unsigned long offset = 0;
86 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
87 
88 	if (!hose)
89 		return;
90 
91 	if (res->flags & IORESOURCE_IO)
92 	        offset = (unsigned long)hose->io_base_virt - _IO_BASE;
93 
94 	if (res->flags & IORESOURCE_MEM)
95 		offset = hose->pci_mem_offset;
96 
97 	region->start = res->start - offset;
98 	region->end = res->end - offset;
99 }
100 
101 void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
102 			      struct pci_bus_region *region)
103 {
104 	unsigned long offset = 0;
105 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
106 
107 	if (!hose)
108 		return;
109 
110 	if (res->flags & IORESOURCE_IO)
111 	        offset = (unsigned long)hose->io_base_virt - _IO_BASE;
112 
113 	if (res->flags & IORESOURCE_MEM)
114 		offset = hose->pci_mem_offset;
115 
116 	res->start = region->start + offset;
117 	res->end = region->end + offset;
118 }
119 
120 #ifdef CONFIG_HOTPLUG
121 EXPORT_SYMBOL(pcibios_resource_to_bus);
122 EXPORT_SYMBOL(pcibios_bus_to_resource);
123 #endif
124 
125 /*
126  * We need to avoid collisions with `mirrored' VGA ports
127  * and other strange ISA hardware, so we always want the
128  * addresses to be allocated in the 0x000-0x0ff region
129  * modulo 0x400.
130  *
131  * Why? Because some silly external IO cards only decode
132  * the low 10 bits of the IO address. The 0x00-0xff region
133  * is reserved for motherboard devices that decode all 16
134  * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
135  * but we want to try to avoid allocating at 0x2900-0x2bff
136  * which might be mirrored at 0x0100-0x03ff.
137  */
138 void pcibios_align_resource(void *data, struct resource *res,
139 			    resource_size_t size, resource_size_t align)
140 {
141 	struct pci_dev *dev = data;
142 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
143 	resource_size_t start = res->start;
144 	unsigned long alignto;
145 
146 	if (res->flags & IORESOURCE_IO) {
147 	        unsigned long offset = (unsigned long)hose->io_base_virt -
148 					_IO_BASE;
149 		/* Make sure we start at our min on all hoses */
150 		if (start - offset < PCIBIOS_MIN_IO)
151 			start = PCIBIOS_MIN_IO + offset;
152 
153 		/*
154 		 * Put everything into 0x00-0xff region modulo 0x400
155 		 */
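		/* e.g. a start address of 0x2900 is rounded up to 0x2c00 */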
156 		if (start & 0x300)
157 			start = (start + 0x3ff) & ~0x3ff;
158 
159 	} else if (res->flags & IORESOURCE_MEM) {
160 		/* Make sure we start at our min on all hoses */
161 		if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM)
162 			start = PCIBIOS_MIN_MEM + hose->pci_mem_offset;
163 
164 		/* Align to multiple of size of minimum base.  */
165 		alignto = max(0x1000UL, align);
166 		start = ALIGN(start, alignto);
167 	}
168 
169 	res->start = start;
170 }
171 
172 void __devinit pcibios_claim_one_bus(struct pci_bus *b)
173 {
174 	struct pci_dev *dev;
175 	struct pci_bus *child_bus;
176 
177 	list_for_each_entry(dev, &b->devices, bus_list) {
178 		int i;
179 
180 		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
181 			struct resource *r = &dev->resource[i];
182 
183 			if (r->parent || !r->start || !r->flags)
184 				continue;
185 			pci_claim_resource(dev, i);
186 		}
187 	}
188 
189 	list_for_each_entry(child_bus, &b->children, node)
190 		pcibios_claim_one_bus(child_bus);
191 }
192 #ifdef CONFIG_HOTPLUG
193 EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
194 #endif
195 
196 static void __init pcibios_claim_of_setup(void)
197 {
198 	struct pci_bus *b;
199 
200 	if (firmware_has_feature(FW_FEATURE_ISERIES))
201 		return;
202 
203 	list_for_each_entry(b, &pci_root_buses, node)
204 		pcibios_claim_one_bus(b);
205 }
206 
207 static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
208 {
209 	const u32 *prop;
210 	int len;
211 
212 	prop = of_get_property(np, name, &len);
213 	if (prop && len >= 4)
214 		return *prop;
215 	return def;
216 }
217 
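/* Decode the space/type bits from the first cell (phys.hi) of an OF PCI
 * address.  Per the PCI binding to IEEE 1275, phys.hi is laid out as
 * npt000ss bbbbbbbb dddddfff rrrrrrrr: "ss" selects the address space
 * (01: I/O, 10: 32-bit memory, 11: 64-bit memory) and "p" marks a
 * prefetchable range.  For example, 0x82000010 describes a non-prefetchable
 * 32-bit memory BAR at config offset 0x10.
 */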
218 static unsigned int pci_parse_of_flags(u32 addr0)
219 {
220 	unsigned int flags = 0;
221 
222 	if (addr0 & 0x02000000) {
223 		flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
224 		flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
225 		flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
226 		if (addr0 & 0x40000000)
227 			flags |= IORESOURCE_PREFETCH
228 				 | PCI_BASE_ADDRESS_MEM_PREFETCH;
229 	} else if (addr0 & 0x01000000)
230 		flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
231 	return flags;
232 }
233 
234 
235 static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
236 {
237 	u64 base, size;
238 	unsigned int flags;
239 	struct resource *res;
240 	const u32 *addrs;
241 	u32 i;
242 	int proplen;
243 
244 	addrs = of_get_property(node, "assigned-addresses", &proplen);
245 	if (!addrs)
246 		return;
247 	DBG("    parse addresses (%d bytes) @ %p\n", proplen, addrs);
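	/* Each "assigned-addresses" entry is 5 cells (20 bytes):
	 * phys.hi, phys.mid and phys.lo, followed by a 2-cell size.
	 */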
248 	for (; proplen >= 20; proplen -= 20, addrs += 5) {
249 		flags = pci_parse_of_flags(addrs[0]);
250 		if (!flags)
251 			continue;
252 		base = of_read_number(&addrs[1], 2);
253 		size = of_read_number(&addrs[3], 2);
254 		if (!size)
255 			continue;
256 		i = addrs[0] & 0xff;
257 		DBG("  base: %llx, size: %llx, i: %x\n",
258 		    (unsigned long long)base, (unsigned long long)size, i);
259 
260 		if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
261 			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
262 		} else if (i == dev->rom_base_reg) {
263 			res = &dev->resource[PCI_ROM_RESOURCE];
264 			flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
265 		} else {
266 			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
267 			continue;
268 		}
269 		res->start = base;
270 		res->end = base + size - 1;
271 		res->flags = flags;
272 		res->name = pci_name(dev);
273 		fixup_resource(res, dev);
274 	}
275 }
276 
277 struct pci_dev *of_create_pci_dev(struct device_node *node,
278 				 struct pci_bus *bus, int devfn)
279 {
280 	struct pci_dev *dev;
281 	const char *type;
282 
283 	dev = alloc_pci_dev();
284 	if (!dev)
285 		return NULL;
286 	type = of_get_property(node, "device_type", NULL);
287 	if (type == NULL)
288 		type = "";
289 
290 	DBG("    create device, devfn: %x, type: %s\n", devfn, type);
291 
292 	dev->bus = bus;
293 	dev->sysdata = node;
294 	dev->dev.parent = bus->bridge;
295 	dev->dev.bus = &pci_bus_type;
296 	dev->devfn = devfn;
297 	dev->multifunction = 0;		/* maybe a lie? */
298 
299 	dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
300 	dev->device = get_int_prop(node, "device-id", 0xffff);
301 	dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
302 	dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);
303 
304 	dev->cfg_size = pci_cfg_space_size(dev);
305 
306 	sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
307 		dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
308 	dev->class = get_int_prop(node, "class-code", 0);
309 	dev->revision = get_int_prop(node, "revision-id", 0);
310 
311 	DBG("    class: 0x%x\n", dev->class);
312 	DBG("    revision: 0x%x\n", dev->revision);
313 
314 	dev->current_state = 4;		/* unknown power state */
315 	dev->error_state = pci_channel_io_normal;
316 
317 	if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
318 		/* a PCI-PCI bridge */
319 		dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
320 		dev->rom_base_reg = PCI_ROM_ADDRESS1;
321 	} else if (!strcmp(type, "cardbus")) {
322 		dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
323 	} else {
324 		dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
325 		dev->rom_base_reg = PCI_ROM_ADDRESS;
326 		/* Maybe do a default OF mapping here */
327 		dev->irq = NO_IRQ;
328 	}
329 
330 	pci_parse_of_addrs(node, dev);
331 
332 	DBG("    adding to system ...\n");
333 
334 	pci_device_add(dev, bus);
335 
336 	return dev;
337 }
338 EXPORT_SYMBOL(of_create_pci_dev);
339 
340 void __devinit of_scan_bus(struct device_node *node,
341 				  struct pci_bus *bus)
342 {
343 	struct device_node *child = NULL;
344 	const u32 *reg;
345 	int reglen, devfn;
346 	struct pci_dev *dev;
347 
348 	DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number);
349 
350 	while ((child = of_get_next_child(node, child)) != NULL) {
351 		DBG("  * %s\n", child->full_name);
352 		reg = of_get_property(child, "reg", &reglen);
353 		if (reg == NULL || reglen < 20)
354 			continue;
355 		devfn = (reg[0] >> 8) & 0xff;
356 
357 		/* create a new pci_dev for this device */
358 		dev = of_create_pci_dev(child, bus, devfn);
359 		if (!dev)
360 			continue;
361 		DBG("dev header type: %x\n", dev->hdr_type);
362 
363 		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
364 		    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
365 			of_scan_pci_bridge(child, dev);
366 	}
367 
368 	do_bus_setup(bus);
369 }
370 EXPORT_SYMBOL(of_scan_bus);
371 
372 void __devinit of_scan_pci_bridge(struct device_node *node,
373 			 	struct pci_dev *dev)
374 {
375 	struct pci_bus *bus;
376 	const u32 *busrange, *ranges;
377 	int len, i, mode;
378 	struct resource *res;
379 	unsigned int flags;
380 	u64 size;
381 
382 	DBG("of_scan_pci_bridge(%s)\n", node->full_name);
383 
384 	/* parse bus-range property */
385 	busrange = of_get_property(node, "bus-range", &len);
386 	if (busrange == NULL || len != 8) {
387 		printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
388 		       node->full_name);
389 		return;
390 	}
391 	ranges = of_get_property(node, "ranges", &len);
392 	if (ranges == NULL) {
393 		printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n",
394 		       node->full_name);
395 		return;
396 	}
397 
398 	bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
399 	if (!bus) {
400 		printk(KERN_ERR "Failed to create pci bus for %s\n",
401 		       node->full_name);
402 		return;
403 	}
404 
405 	bus->primary = dev->bus->number;
406 	bus->subordinate = busrange[1];
407 	bus->bridge_ctl = 0;
408 	bus->sysdata = node;
409 
410 	/* parse ranges property */
411 	/* PCI #address-cells == 3 and #size-cells == 2 always */
412 	res = &dev->resource[PCI_BRIDGE_RESOURCES];
413 	for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
414 		res->flags = 0;
415 		bus->resource[i] = res;
416 		++res;
417 	}
418 	i = 1;
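	/* Each bridge "ranges" entry is 8 cells (32 bytes): a 3-cell child
	 * (PCI) address, a 3-cell parent (PCI) address and a 2-cell size.
	 */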
419 	for (; len >= 32; len -= 32, ranges += 8) {
420 		flags = pci_parse_of_flags(ranges[0]);
421 		size = of_read_number(&ranges[6], 2);
422 		if (flags == 0 || size == 0)
423 			continue;
424 		if (flags & IORESOURCE_IO) {
425 			res = bus->resource[0];
426 			if (res->flags) {
427 				printk(KERN_ERR "PCI: ignoring extra I/O range"
428 				       " for bridge %s\n", node->full_name);
429 				continue;
430 			}
431 		} else {
432 			if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
433 				printk(KERN_ERR "PCI: too many memory ranges"
434 				       " for bridge %s\n", node->full_name);
435 				continue;
436 			}
437 			res = bus->resource[i];
438 			++i;
439 		}
440 		res->start = of_read_number(&ranges[1], 2);
441 		res->end = res->start + size - 1;
442 		res->flags = flags;
443 		fixup_resource(res, dev);
444 	}
445 	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
446 		bus->number);
447 	DBG("    bus name: %s\n", bus->name);
448 
449 	mode = PCI_PROBE_NORMAL;
450 	if (ppc_md.pci_probe_mode)
451 		mode = ppc_md.pci_probe_mode(bus);
452 	DBG("    probe mode: %d\n", mode);
453 
454 	if (mode == PCI_PROBE_DEVTREE)
455 		of_scan_bus(node, bus);
456 	else if (mode == PCI_PROBE_NORMAL)
457 		pci_scan_child_bus(bus);
458 }
459 EXPORT_SYMBOL(of_scan_pci_bridge);
460 
461 void __devinit scan_phb(struct pci_controller *hose)
462 {
463 	struct pci_bus *bus;
464 	struct device_node *node = hose->arch_data;
465 	int i, mode;
466 	struct resource *res;
467 
468 	DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");
469 
470 	bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
471 	if (bus == NULL) {
472 		printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
473 		       hose->global_number);
474 		return;
475 	}
476 	bus->secondary = hose->first_busno;
477 	hose->bus = bus;
478 
479 	if (!firmware_has_feature(FW_FEATURE_ISERIES))
480 		pcibios_map_io_space(bus);
481 
482 	bus->resource[0] = res = &hose->io_resource;
483 	if (res->flags && request_resource(&ioport_resource, res)) {
484 		printk(KERN_ERR "Failed to request PCI IO region "
485 		       "on PCI domain %04x\n", hose->global_number);
486 		DBG("res->start = 0x%016lx, res->end = 0x%016lx\n",
487 		    res->start, res->end);
488 	}
489 
490 	for (i = 0; i < 3; ++i) {
491 		res = &hose->mem_resources[i];
492 		bus->resource[i+1] = res;
493 		if (res->flags && request_resource(&iomem_resource, res))
494 			printk(KERN_ERR "Failed to request PCI memory region "
495 			       "on PCI domain %04x\n", hose->global_number);
496 	}
497 
498 	mode = PCI_PROBE_NORMAL;
499 
500 	if (node && ppc_md.pci_probe_mode)
501 		mode = ppc_md.pci_probe_mode(bus);
502 	DBG("    probe mode: %d\n", mode);
503 	if (mode == PCI_PROBE_DEVTREE) {
504 		bus->subordinate = hose->last_busno;
505 		of_scan_bus(node, bus);
506 	}
507 
508 	if (mode == PCI_PROBE_NORMAL)
509 		hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
510 }
511 
512 static int __init pcibios_init(void)
513 {
514 	struct pci_controller *hose, *tmp;
515 
516 	/* For now, override phys_mem_access_prot. If we need it later,
517 	 * we may move that initialization to each ppc_md.
518 	 */
519 	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
520 
521 	if (firmware_has_feature(FW_FEATURE_ISERIES))
522 		iSeries_pcibios_init();
523 
524 	printk(KERN_DEBUG "PCI: Probing PCI hardware\n");
525 
526 	/* Scan all of the recorded PCI controllers.  */
527 	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
528 		scan_phb(hose);
529 		pci_bus_add_devices(hose->bus);
530 	}
531 
532 	if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
533 		if (pci_probe_only)
534 			pcibios_claim_of_setup();
535 		else
536 			/* FIXME: the `else' branch will be removed once
537 			   pci_assign_unassigned_resources() is able to work
538 			   correctly with a [partially] allocated PCI tree. */
539 			pci_assign_unassigned_resources();
540 	}
541 
542 	/* Call machine dependent final fixup */
543 	if (ppc_md.pcibios_fixup)
544 		ppc_md.pcibios_fixup();
545 
546 	printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
547 
548 	return 0;
549 }
550 
551 subsys_initcall(pcibios_init);
552 
553 int pcibios_enable_device(struct pci_dev *dev, int mask)
554 {
555 	u16 cmd, oldcmd;
556 	int i;
557 
558 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
559 	oldcmd = cmd;
560 
561 	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
562 		struct resource *res = &dev->resource[i];
563 
564 		/* Only set up the requested stuff */
565 		if (!(mask & (1<<i)))
566 			continue;
567 
568 		if (res->flags & IORESOURCE_IO)
569 			cmd |= PCI_COMMAND_IO;
570 		if (res->flags & IORESOURCE_MEM)
571 			cmd |= PCI_COMMAND_MEMORY;
572 	}
573 
574 	if (cmd != oldcmd) {
575 		printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
576 		       pci_name(dev), cmd);
577 		/* Enable the appropriate bits in the PCI command register.  */
578 		pci_write_config_word(dev, PCI_COMMAND, cmd);
579 	}
580 	return 0;
581 }
582 
583 /* Decide whether to display the domain number in /proc */
584 int pci_proc_domain(struct pci_bus *bus)
585 {
586 	if (firmware_has_feature(FW_FEATURE_ISERIES))
587 		return 0;
588 	else {
589 		struct pci_controller *hose = pci_bus_to_host(bus);
590 		return hose->buid;
591 	}
592 }
593 
594 void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
595 					    struct device_node *dev, int prim)
596 {
597 	const unsigned int *ranges;
598 	unsigned int pci_space;
599 	unsigned long size;
600 	int rlen = 0;
601 	int memno = 0;
602 	struct resource *res;
603 	int np, na = of_n_addr_cells(dev);
604 	unsigned long pci_addr, cpu_phys_addr;
605 
606 	np = na + 5;
607 
608 	/* From "PCI Binding to 1275"
609 	 * The ranges property is laid out as an array of elements,
610 	 * each of which comprises:
611 	 *   cells 0 - 2:	a PCI address
612 	 *   cells 3 or 3+4:	a CPU physical address
613 	 *			(size depending on dev->n_addr_cells)
614 	 *   cells 4+5 or 5+6:	the size of the range
615 	 */
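	/* A hypothetical example with na == 2 (so np == 7): the entry
	 *   <0x02000000 0x0 0x80000000  0x0 0xc0000000  0x0 0x10000000>
	 * would describe 256MB of 32-bit PCI memory space at PCI address
	 * 0x80000000, mapped at CPU physical address 0xc0000000.
	 */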
616 	ranges = of_get_property(dev, "ranges", &rlen);
617 	if (ranges == NULL)
618 		return;
619 	hose->io_base_phys = 0;
620 	while ((rlen -= np * sizeof(unsigned int)) >= 0) {
621 		res = NULL;
622 		pci_space = ranges[0];
623 		pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
624 		cpu_phys_addr = of_translate_address(dev, &ranges[3]);
625 		size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
626 		ranges += np;
627 		if (size == 0)
628 			continue;
629 
630 		/* Now consume following elements while they are contiguous */
631 		while (rlen >= np * sizeof(unsigned int)) {
632 			unsigned long addr, phys;
633 
634 			if (ranges[0] != pci_space)
635 				break;
636 			addr = ((unsigned long)ranges[1] << 32) | ranges[2];
637 			phys = ranges[3];
638 			if (na >= 2)
639 				phys = (phys << 32) | ranges[4];
640 			if (addr != pci_addr + size ||
641 			    phys != cpu_phys_addr + size)
642 				break;
643 
644 			size += ((unsigned long)ranges[na+3] << 32)
645 				| ranges[na+4];
646 			ranges += np;
647 			rlen -= np * sizeof(unsigned int);
648 		}
649 
650 		switch ((pci_space >> 24) & 0x3) {
651 		case 1:		/* I/O space */
652 			hose->io_base_phys = cpu_phys_addr - pci_addr;
653 			/* handle from 0 to top of I/O window */
654 			hose->pci_io_size = pci_addr + size;
655 
656 			res = &hose->io_resource;
657 			res->flags = IORESOURCE_IO;
658 			res->start = pci_addr;
659 			DBG("phb%d: IO 0x%lx -> 0x%lx\n", hose->global_number,
660 				    res->start, res->start + size - 1);
661 			break;
662 		case 2:		/* memory space */
663 			memno = 0;
664 			while (memno < 3 && hose->mem_resources[memno].flags)
665 				++memno;
666 
667 			if (memno == 0)
668 				hose->pci_mem_offset = cpu_phys_addr - pci_addr;
669 			if (memno < 3) {
670 				res = &hose->mem_resources[memno];
671 				res->flags = IORESOURCE_MEM;
672 				res->start = cpu_phys_addr;
673 				DBG("phb%d: MEM 0x%lx -> 0x%lx\n", hose->global_number,
674 					    res->start, res->start + size - 1);
675 			}
676 			break;
677 		}
678 		if (res != NULL) {
679 			res->name = dev->full_name;
680 			res->end = res->start + size - 1;
681 			res->parent = NULL;
682 			res->sibling = NULL;
683 			res->child = NULL;
684 		}
685 	}
686 }
687 
688 #ifdef CONFIG_HOTPLUG
689 
690 int pcibios_unmap_io_space(struct pci_bus *bus)
691 {
692 	struct pci_controller *hose;
693 
694 	WARN_ON(bus == NULL);
695 
696 	/* If this is not a PHB, we only flush the hash table over
697 	 * the area mapped by this bridge. We don't play with the PTE
698 	 * mappings since we might have to deal with sub-page alignments,
699 	 * so flushing the hash table is the only sane way to make sure
700 	 * that no hash entries are covering that removed bridge area
701 	 * while still allowing other busses to overlap those pages.
702 	 */
703 	if (bus->self) {
704 		struct resource *res = bus->resource[0];
705 
706 		DBG("IO unmapping for PCI-PCI bridge %s\n",
707 		    pci_name(bus->self));
708 
709 		__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
710 					 res->end - res->start + 1);
711 		return 0;
712 	}
713 
714 	/* Get the host bridge */
715 	hose = pci_bus_to_host(bus);
716 
717 	/* Check if we have IOs allocated */
718 	if (hose->io_base_alloc == 0)
719 		return 0;
720 
721 	DBG("IO unmapping for PHB %s\n",
722 	    ((struct device_node *)hose->arch_data)->full_name);
723 	DBG("  alloc=0x%p\n", hose->io_base_alloc);
724 
725 	/* This is a PHB, we fully unmap the IO area */
726 	vunmap(hose->io_base_alloc);
727 
728 	return 0;
729 }
730 EXPORT_SYMBOL_GPL(pcibios_unmap_io_space);
731 
732 #endif /* CONFIG_HOTPLUG */
733 
734 int __devinit pcibios_map_io_space(struct pci_bus *bus)
735 {
736 	struct vm_struct *area;
737 	unsigned long phys_page;
738 	unsigned long size_page;
739 	unsigned long io_virt_offset;
740 	struct pci_controller *hose;
741 
742 	WARN_ON(bus == NULL);
743 
744 	/* If this is not a PHB, there is nothing to do: page tables still
745 	 * exist and thus HPTEs will be faulted in when needed.
746 	 */
747 	if (bus->self) {
748 		DBG("IO mapping for PCI-PCI bridge %s\n",
749 		    pci_name(bus->self));
750 		DBG("  virt=0x%016lx...0x%016lx\n",
751 		    bus->resource[0]->start + _IO_BASE,
752 		    bus->resource[0]->end + _IO_BASE);
753 		return 0;
754 	}
755 
756 	/* Get the host bridge */
757 	hose = pci_bus_to_host(bus);
758 	phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
759 	size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE);
760 
761 	/* Make sure IO area address is clear */
762 	hose->io_base_alloc = NULL;
763 
764 	/* If there's no IO to map on that bus, get away too */
765 	if (hose->pci_io_size == 0 || hose->io_base_phys == 0)
766 		return 0;
767 
768 	/* Let's allocate some IO space for that guy. We don't pass
769 	 * VM_IOREMAP because we don't care about alignment tricks that
770 	 * the core does in that case. Maybe we should, due to stupid cards
771 	 * with incomplete address decoding, but I'd rather not deal with
772 	 * those outside of the reserved 64K legacy region.
773 	 */
774 	area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END);
775 	if (area == NULL)
776 		return -ENOMEM;
777 	hose->io_base_alloc = area->addr;
778 	hose->io_base_virt = (void __iomem *)(area->addr +
779 					      hose->io_base_phys - phys_page);
780 
781 	DBG("IO mapping for PHB %s\n",
782 	    ((struct device_node *)hose->arch_data)->full_name);
783 	DBG("  phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
784 	    hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
785 	DBG("  size=0x%016lx (alloc=0x%016lx)\n",
786 	    hose->pci_io_size, size_page);
787 
788 	/* Establish the mapping */
789 	if (__ioremap_at(phys_page, area->addr, size_page,
790 			 _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL)
791 		return -ENOMEM;
792 
793 	/* Fixup hose IO resource */
794 	io_virt_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
795 	hose->io_resource.start += io_virt_offset;
796 	hose->io_resource.end += io_virt_offset;
797 
798 	DBG("  hose->io_resource=0x%016lx...0x%016lx\n",
799 	    hose->io_resource.start, hose->io_resource.end);
800 
801 	return 0;
802 }
803 EXPORT_SYMBOL_GPL(pcibios_map_io_space);
804 
805 static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
806 {
807 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
808 	unsigned long offset;
809 
810 	if (res->flags & IORESOURCE_IO) {
811 		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
812 		res->start += offset;
813 		res->end += offset;
814 	} else if (res->flags & IORESOURCE_MEM) {
815 		res->start += hose->pci_mem_offset;
816 		res->end += hose->pci_mem_offset;
817 	}
818 }
819 
820 void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
821 					      struct pci_bus *bus)
822 {
823 	/* Update device resources.  */
824 	int i;
825 
826 	DBG("%s: Fixup resources:\n", pci_name(dev));
827 	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
828 		struct resource *res = &dev->resource[i];
829 		if (!res->flags)
830 			continue;
831 
832 		DBG("  0x%02x < %08lx:0x%016lx...0x%016lx\n",
833 		    i, res->flags, res->start, res->end);
834 
835 		fixup_resource(res, dev);
836 
837 		DBG("       > %08lx:0x%016lx...0x%016lx\n",
838 		    res->flags, res->start, res->end);
839 	}
840 }
841 EXPORT_SYMBOL(pcibios_fixup_device_resources);
842 
843 void __devinit pcibios_setup_new_device(struct pci_dev *dev)
844 {
845 	struct dev_archdata *sd = &dev->dev.archdata;
846 
847 	sd->of_node = pci_device_to_OF_node(dev);
848 
849 	DBG("PCI device %s OF node: %s\n", pci_name(dev),
850 	    sd->of_node ? sd->of_node->full_name : "<none>");
851 
852 	sd->dma_ops = pci_dma_ops;
853 #ifdef CONFIG_NUMA
854 	sd->numa_node = pcibus_to_node(dev->bus);
855 #else
856 	sd->numa_node = -1;
857 #endif
858 	if (ppc_md.pci_dma_dev_setup)
859 		ppc_md.pci_dma_dev_setup(dev);
860 }
861 EXPORT_SYMBOL(pcibios_setup_new_device);
862 
863 static void __devinit do_bus_setup(struct pci_bus *bus)
864 {
865 	struct pci_dev *dev;
866 
867 	if (ppc_md.pci_dma_bus_setup)
868 		ppc_md.pci_dma_bus_setup(bus);
869 
870 	list_for_each_entry(dev, &bus->devices, bus_list)
871 		pcibios_setup_new_device(dev);
872 
873 	/* Read default IRQs and fixup if necessary */
874 	list_for_each_entry(dev, &bus->devices, bus_list) {
875 		pci_read_irq_line(dev);
876 		if (ppc_md.pci_irq_fixup)
877 			ppc_md.pci_irq_fixup(dev);
878 	}
879 }
880 
881 void __devinit pcibios_fixup_bus(struct pci_bus *bus)
882 {
883 	struct pci_dev *dev = bus->self;
884 	struct device_node *np;
885 
886 	np = pci_bus_to_OF_node(bus);
887 
888 	DBG("pcibios_fixup_bus(%s)\n", np ? np->full_name : "<???>");
889 
890 	if (dev && pci_probe_only &&
891 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
892 		/* This is a subordinate bridge */
893 
894 		pci_read_bridge_bases(bus);
895 		pcibios_fixup_device_resources(dev, bus);
896 	}
897 
898 	do_bus_setup(bus);
899 
900 	if (!pci_probe_only)
901 		return;
902 
903 	list_for_each_entry(dev, &bus->devices, bus_list)
904 		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
905 			pcibios_fixup_device_resources(dev, bus);
906 }
907 EXPORT_SYMBOL(pcibios_fixup_bus);
908 
909 unsigned long pci_address_to_pio(phys_addr_t address)
910 {
911 	struct pci_controller *hose, *tmp;
912 
913 	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
914 		if (address >= hose->io_base_phys &&
915 		    address < (hose->io_base_phys + hose->pci_io_size)) {
916 			unsigned long base =
917 				(unsigned long)hose->io_base_virt - _IO_BASE;
918 			return base + (address - hose->io_base_phys);
919 		}
920 	}
921 	return (unsigned int)-1;
922 }
923 EXPORT_SYMBOL_GPL(pci_address_to_pio);
924 
925 
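/* Selector values for the 'which' argument of sys_pciconfig_iobase();
 * they are part of the user-visible syscall ABI (used e.g. by X servers),
 * so they must not be renumbered.
 */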
926 #define IOBASE_BRIDGE_NUMBER	0
927 #define IOBASE_MEMORY		1
928 #define IOBASE_IO		2
929 #define IOBASE_ISA_IO		3
930 #define IOBASE_ISA_MEM		4
931 
932 long sys_pciconfig_iobase(long which, unsigned long in_bus,
933 			  unsigned long in_devfn)
934 {
935 	struct pci_controller* hose;
936 	struct list_head *ln;
937 	struct pci_bus *bus = NULL;
938 	struct device_node *hose_node;
939 
940 	/* Argh! Please forgive me for that hack, but it's the
941 	 * simplest way to keep existing XFree from locking up on some
942 	 * G5 machines... So when something asks for the bus 0 io base
943 	 * (bus 0 is the HT root), we return the AGP one instead.
944 	 */
945 	if (machine_is_compatible("MacRISC4"))
946 		if (in_bus == 0)
947 			in_bus = 0xf0;
948 
949 	/* This syscall isn't quite compatible with PCI domains, but it's
950 	 * used on pre-domain setups. We return the first match.
951 	 */
952 
953 	for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
954 		bus = pci_bus_b(ln);
955 		if (in_bus >= bus->number && in_bus <= bus->subordinate)
956 			break;
957 		bus = NULL;
958 	}
959 	if (bus == NULL || bus->sysdata == NULL)
960 		return -ENODEV;
961 
962 	hose_node = (struct device_node *)bus->sysdata;
963 	hose = PCI_DN(hose_node)->phb;
964 
965 	switch (which) {
966 	case IOBASE_BRIDGE_NUMBER:
967 		return (long)hose->first_busno;
968 	case IOBASE_MEMORY:
969 		return (long)hose->pci_mem_offset;
970 	case IOBASE_IO:
971 		return (long)hose->io_base_phys;
972 	case IOBASE_ISA_IO:
973 		return (long)isa_io_base;
974 	case IOBASE_ISA_MEM:
975 		return -EINVAL;
976 	}
977 
978 	return -EOPNOTSUPP;
979 }
980 
981 #ifdef CONFIG_NUMA
982 int pcibus_to_node(struct pci_bus *bus)
983 {
984 	struct pci_controller *phb = pci_bus_to_host(bus);
985 	return phb->node;
986 }
987 EXPORT_SYMBOL(pcibus_to_node);
988 #endif
989