/*
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *   Rework, based on alpha PCI code.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

unsigned long pci_probe_only = 1;

/* pci_io_base -- the base address from which io bars are offsets.
 * This is the lowest I/O base address (so bar values are always positive),
 * and it *must* be the start of ISA space if an ISA bus exists because
 * ISA drivers use hard coded offsets.  If no ISA bus exists nothing
 * is mapped on the first 64K of IO space
 */
unsigned long pci_io_base = ISA_IO_BASE;
EXPORT_SYMBOL(pci_io_base);

LIST_HEAD(hose_list);

static struct dma_mapping_ops *pci_dma_ops;

void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}

struct dma_mapping_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);


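/*
 * DMA mask helpers: ppc64 simply forwards to the generic dma_set_mask(),
 * and the consistent (coherent) mask is kept in sync with the streaming
 * DMA mask.
 */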
int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	return dma_set_mask(&dev->dev, mask);
}

int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	int rc;

	rc = dma_set_mask(&dev->dev, mask);
	dev->dev.coherent_dma_mask = dev->dma_mask;

	return rc;
}

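/*
 * Some pcnet32 adapters apparently show up with a bogus Trident vendor ID
 * in config space; force the vendor ID back to AMD so the pcnet32 driver
 * will bind to them.
 */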
static void fixup_broken_pcnet32(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_NETWORK_ETHERNET) {
		dev->vendor = PCI_VENDOR_ID_AMD;
		pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);


static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
{
	const u32 *prop;
	int len;

	prop = of_get_property(np, name, &len);
	if (prop && len >= 4)
		return *prop;
	return def;
}

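/*
 * Decode the first cell (phys.hi) of an OF PCI address into resource flags.
 * Per the OF PCI bus binding, phys.hi is laid out as npt000ss bbbbbbbb
 * dddddfff rrrrrrrr: "ss" selects the address space (01 = I/O, 10 = 32-bit
 * memory, 11 = 64-bit memory), "p" marks a prefetchable range, "t" the
 * below-1MB alias, and the low byte holds the config register number.
 */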
static unsigned int pci_parse_of_flags(u32 addr0)
{
	unsigned int flags = 0;

	if (addr0 & 0x02000000) {
		flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
		flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
		flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
		if (addr0 & 0x40000000)
			flags |= IORESOURCE_PREFETCH
				 | PCI_BASE_ADDRESS_MEM_PREFETCH;
	} else if (addr0 & 0x01000000)
		flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
	return flags;
}


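/*
 * Fill in dev->resource[] from the "assigned-addresses" property.  Each
 * entry is five cells (20 bytes): phys.hi/mid/lo followed by a 64-bit size.
 * The register number encoded in phys.hi tells us whether an entry
 * describes one of the six BARs or the expansion ROM.
 */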
static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
{
	u64 base, size;
	unsigned int flags;
	struct resource *res;
	const u32 *addrs;
	u32 i;
	int proplen;

	addrs = of_get_property(node, "assigned-addresses", &proplen);
	if (!addrs)
		return;
	DBG("    parse addresses (%d bytes) @ %p\n", proplen, addrs);
	for (; proplen >= 20; proplen -= 20, addrs += 5) {
		flags = pci_parse_of_flags(addrs[0]);
		if (!flags)
			continue;
		base = of_read_number(&addrs[1], 2);
		size = of_read_number(&addrs[3], 2);
		if (!size)
			continue;
		i = addrs[0] & 0xff;
		DBG("  base: %llx, size: %llx, i: %x\n",
		    (unsigned long long)base, (unsigned long long)size, i);

		if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
		} else if (i == dev->rom_base_reg) {
			res = &dev->resource[PCI_ROM_RESOURCE];
			flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
		} else {
			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
			continue;
		}
		res->start = base;
		res->end = base + size - 1;
		res->flags = flags;
		res->name = pci_name(dev);
	}
}

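/*
 * Build a pci_dev purely from device-tree properties, without touching
 * config space.  This is what device-tree based probing (PCI_PROBE_DEVTREE)
 * relies on when firmware has already assigned all resources.
 */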
struct pci_dev *of_create_pci_dev(struct device_node *node,
				 struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	const char *type;

	dev = alloc_pci_dev();
	if (!dev)
		return NULL;
	type = of_get_property(node, "device_type", NULL);
	if (type == NULL)
		type = "";

	DBG("    create device, devfn: %x, type: %s\n", devfn, type);

	dev->bus = bus;
	dev->sysdata = node;
	dev->dev.parent = bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->devfn = devfn;
	dev->multifunction = 0;		/* maybe a lie? */

	dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
	dev->device = get_int_prop(node, "device-id", 0xffff);
	dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
	dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);

	dev->cfg_size = pci_cfg_space_size(dev);

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
		dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
	dev->class = get_int_prop(node, "class-code", 0);
	dev->revision = get_int_prop(node, "revision-id", 0);

	DBG("    class: 0x%x\n", dev->class);
	DBG("    revision: 0x%x\n", dev->revision);

	dev->current_state = 4;		/* unknown power state */
	dev->error_state = pci_channel_io_normal;
	dev->dma_mask = 0xffffffff;

	if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
		/* a PCI-PCI bridge */
		dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
		dev->rom_base_reg = PCI_ROM_ADDRESS1;
	} else if (!strcmp(type, "cardbus")) {
		dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
	} else {
		dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
		dev->rom_base_reg = PCI_ROM_ADDRESS;
		/* Maybe do a default OF mapping here */
		dev->irq = NO_IRQ;
	}

	pci_parse_of_addrs(node, dev);

	DBG("    adding to system ...\n");

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(of_create_pci_dev);

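/*
 * Scan a bus from the device tree: create a pci_dev for every child node
 * that carries a "reg" property, apply the fixups, then recurse into any
 * bridges that were found.
 */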
void __devinit of_scan_bus(struct device_node *node,
			   struct pci_bus *bus)
{
	struct device_node *child;
	const u32 *reg;
	int reglen, devfn;
	struct pci_dev *dev;

	DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number);

	/* Scan direct children */
	for_each_child_of_node(node, child) {
		DBG("  * %s\n", child->full_name);
		reg = of_get_property(child, "reg", &reglen);
		if (reg == NULL || reglen < 20)
			continue;
		devfn = (reg[0] >> 8) & 0xff;

		/* create a new pci_dev for this device */
		dev = of_create_pci_dev(child, bus, devfn);
		if (!dev)
			continue;
		DBG("    dev header type: %x\n", dev->hdr_type);
	}

	/* Apply all fixups */
	pcibios_fixup_of_probed_bus(bus);

	/* Now scan child buses */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
		    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
			struct device_node *child = pci_device_to_OF_node(dev);
			if (child)
				of_scan_pci_bridge(child, dev);
		}
	}
}
EXPORT_SYMBOL(of_scan_bus);

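/*
 * Create and scan the bus behind a PCI-PCI bridge described in the device
 * tree.  The bridge windows come from its "ranges" property, where each
 * entry is eight cells: a child address (3 cells), a parent address
 * (3 cells) and a size (2 cells).
 */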
void __devinit of_scan_pci_bridge(struct device_node *node,
				  struct pci_dev *dev)
{
	struct pci_bus *bus;
	const u32 *busrange, *ranges;
	int len, i, mode;
	struct resource *res;
	unsigned int flags;
	u64 size;

	DBG("of_scan_pci_bridge(%s)\n", node->full_name);

	/* parse bus-range property */
	busrange = of_get_property(node, "bus-range", &len);
	if (busrange == NULL || len != 8) {
		printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
		       node->full_name);
		return;
	}
	ranges = of_get_property(node, "ranges", &len);
	if (ranges == NULL) {
		printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n",
		       node->full_name);
		return;
	}

	bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
	if (!bus) {
		printk(KERN_ERR "Failed to create pci bus for %s\n",
		       node->full_name);
		return;
	}

	bus->primary = dev->bus->number;
	bus->subordinate = busrange[1];
	bus->bridge_ctl = 0;
	bus->sysdata = node;

	/* parse ranges property */
	/* PCI #address-cells == 3 and #size-cells == 2 always */
	res = &dev->resource[PCI_BRIDGE_RESOURCES];
	for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
		res->flags = 0;
		bus->resource[i] = res;
		++res;
	}
	i = 1;
	for (; len >= 32; len -= 32, ranges += 8) {
		flags = pci_parse_of_flags(ranges[0]);
		size = of_read_number(&ranges[6], 2);
		if (flags == 0 || size == 0)
			continue;
		if (flags & IORESOURCE_IO) {
			res = bus->resource[0];
			if (res->flags) {
				printk(KERN_ERR "PCI: ignoring extra I/O range"
				       " for bridge %s\n", node->full_name);
				continue;
			}
		} else {
			if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
				printk(KERN_ERR "PCI: too many memory ranges"
				       " for bridge %s\n", node->full_name);
				continue;
			}
			res = bus->resource[i];
			++i;
		}
		res->start = of_read_number(&ranges[1], 2);
		res->end = res->start + size - 1;
		res->flags = flags;
	}
	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
		bus->number);
	DBG("    bus name: %s\n", bus->name);

	mode = PCI_PROBE_NORMAL;
	if (ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);
	DBG("    probe mode: %d\n", mode);

	if (mode == PCI_PROBE_DEVTREE)
		of_scan_bus(node, bus);
	else if (mode == PCI_PROBE_NORMAL)
		pci_scan_child_bus(bus);
}
EXPORT_SYMBOL(of_scan_pci_bridge);

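/*
 * Scan one PHB: create its root bus, map its IO space, hand the hose
 * resources over to the bus, then scan below it either from the device
 * tree or with config cycles, depending on the platform's probe mode.
 */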
void __devinit scan_phb(struct pci_controller *hose)
{
	struct pci_bus *bus;
	struct device_node *node = hose->dn;
	int i, mode;

	DBG("PCI: Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");

	/* Create an empty bus for the toplevel */
	bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
	if (bus == NULL) {
		printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		return;
	}
	bus->secondary = hose->first_busno;
	hose->bus = bus;

	/* Get some IO space for the new PHB */
	pcibios_map_io_space(bus);

	/* Wire up PHB bus resources */
	DBG("PCI: PHB IO resource    = %016lx-%016lx [%lx]\n",
	    hose->io_resource.start, hose->io_resource.end,
	    hose->io_resource.flags);
	bus->resource[0] = &hose->io_resource;
	for (i = 0; i < 3; ++i) {
		DBG("PCI: PHB MEM resource %d = %016lx-%016lx [%lx]\n", i,
		    hose->mem_resources[i].start,
		    hose->mem_resources[i].end,
		    hose->mem_resources[i].flags);
		bus->resource[i+1] = &hose->mem_resources[i];
	}
	DBG("PCI: PHB MEM offset     = %016lx\n", hose->pci_mem_offset);
	DBG("PCI: PHB IO  offset     = %08lx\n",
	    (unsigned long)hose->io_base_virt - _IO_BASE);

	/* Get probe mode and perform scan */
	mode = PCI_PROBE_NORMAL;
	if (node && ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);
	DBG("    probe mode: %d\n", mode);
	if (mode == PCI_PROBE_DEVTREE) {
		bus->subordinate = hose->last_busno;
		of_scan_bus(node, bus);
	}

	if (mode == PCI_PROBE_NORMAL)
		hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
}

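/*
 * Boot-time PCI initialisation: hook up phys_mem_access_prot, scan every
 * recorded PHB and let the common code survey and allocate resources.
 */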
static int __init pcibios_init(void)
{
	struct pci_controller *hose, *tmp;

	printk(KERN_INFO "PCI: Probing PCI hardware\n");

	/* For now, override phys_mem_access_prot. If we need it later,
	 * we may move that initialization to each ppc_md
	 */
	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;

	if (pci_probe_only)
		ppc_pci_flags |= PPC_PCI_PROBE_ONLY;

	/* Scan all of the recorded PCI controllers.  */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		scan_phb(hose);
		pci_bus_add_devices(hose->bus);
	}

	/* Call common code to handle resource allocation */
	pcibios_resource_survey();

	printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");

	return 0;
}

subsys_initcall(pcibios_init);

#ifdef CONFIG_HOTPLUG

int pcibios_unmap_io_space(struct pci_bus *bus)
{
	struct pci_controller *hose;

	WARN_ON(bus == NULL);

	/* If this is not a PHB, we only flush the hash table over
	 * the area mapped by this bridge. We don't play with the PTE
	 * mappings since we might have to deal with sub-page alignments
	 * so flushing the hash table is the only sane way to make sure
	 * that no hash entries are covering that removed bridge area
	 * while still allowing other buses to overlap those pages
	 */
	if (bus->self) {
		struct resource *res = bus->resource[0];

		DBG("IO unmapping for PCI-PCI bridge %s\n",
		    pci_name(bus->self));

		__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
					 res->end + _IO_BASE + 1);
		return 0;
	}

	/* Get the host bridge */
	hose = pci_bus_to_host(bus);

	/* Check if we have IOs allocated */
	if (hose->io_base_alloc == 0)
		return 0;

	DBG("IO unmapping for PHB %s\n", hose->dn->full_name);
	DBG("  alloc=0x%p\n", hose->io_base_alloc);

	/* This is a PHB, we fully unmap the IO area */
	vunmap(hose->io_base_alloc);

	return 0;
}
EXPORT_SYMBOL_GPL(pcibios_unmap_io_space);

#endif /* CONFIG_HOTPLUG */

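/*
 * Map a PHB's IO space into the kernel's dedicated PHB_IO_BASE..PHB_IO_END
 * virtual window.  Port numbers then become plain offsets from _IO_BASE,
 * which is what inb()/outb() expect on ppc64.
 */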
int __devinit pcibios_map_io_space(struct pci_bus *bus)
{
	struct vm_struct *area;
	unsigned long phys_page;
	unsigned long size_page;
	unsigned long io_virt_offset;
	struct pci_controller *hose;

	WARN_ON(bus == NULL);

	/* If this is not a PHB there's nothing to do: the page tables still
	 * exist and thus HPTEs will be faulted in when needed
	 */
	if (bus->self) {
		DBG("IO mapping for PCI-PCI bridge %s\n",
		    pci_name(bus->self));
		DBG("  virt=0x%016lx...0x%016lx\n",
		    bus->resource[0]->start + _IO_BASE,
		    bus->resource[0]->end + _IO_BASE);
		return 0;
	}

	/* Get the host bridge */
	hose = pci_bus_to_host(bus);
	phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
	size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE);

	/* Make sure IO area address is clear */
	hose->io_base_alloc = NULL;

	/* If there's no IO to map on that bus, we're done */
	if (hose->pci_io_size == 0 || hose->io_base_phys == 0)
		return 0;

	/* Let's allocate some IO space for that guy. We don't pass
	 * VM_IOREMAP because we don't care about the alignment tricks that
	 * the core does in that case. Maybe we should, for cards with
	 * incomplete address decoding, but I'd rather not deal with
	 * those outside of the reserved 64K legacy region.
	 */
	area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END);
	if (area == NULL)
		return -ENOMEM;
	hose->io_base_alloc = area->addr;
	hose->io_base_virt = (void __iomem *)(area->addr +
					      hose->io_base_phys - phys_page);

	DBG("IO mapping for PHB %s\n", hose->dn->full_name);
	DBG("  phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
	    hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
	DBG("  size=0x%016lx (alloc=0x%016lx)\n",
	    hose->pci_io_size, size_page);

	/* Establish the mapping */
	if (__ioremap_at(phys_page, area->addr, size_page,
			 _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL)
		return -ENOMEM;

	/* Fixup hose IO resource */
	io_virt_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	hose->io_resource.start += io_virt_offset;
	hose->io_resource.end += io_virt_offset;

	DBG("  hose->io_resource=0x%016lx...0x%016lx\n",
	    hose->io_resource.start, hose->io_resource.end);

	return 0;
}
EXPORT_SYMBOL_GPL(pcibios_map_io_space);

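/*
 * Per-device setup run when a device is added: attach its OF node, DMA
 * operations and NUMA node to the device's archdata, then give the
 * platform a chance to do its own DMA setup.
 */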
void __devinit pcibios_setup_new_device(struct pci_dev *dev)
{
	struct dev_archdata *sd = &dev->dev.archdata;

	sd->of_node = pci_device_to_OF_node(dev);

	DBG("PCI: device %s OF node: %s\n", pci_name(dev),
	    sd->of_node ? sd->of_node->full_name : "<none>");

	sd->dma_ops = pci_dma_ops;
#ifdef CONFIG_NUMA
	sd->numa_node = pcibus_to_node(dev->bus);
#else
	sd->numa_node = -1;
#endif
	if (ppc_md.pci_dma_dev_setup)
		ppc_md.pci_dma_dev_setup(dev);
}
EXPORT_SYMBOL(pcibios_setup_new_device);

void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
{
	struct pci_dev *dev;

	if (ppc_md.pci_dma_bus_setup)
		ppc_md.pci_dma_bus_setup(bus);

	list_for_each_entry(dev, &bus->devices, bus_list)
		pcibios_setup_new_device(dev);
}

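/*
 * Translate a CPU physical address inside some PHB's IO window into the
 * corresponding logical port number (an offset from _IO_BASE), or
 * (unsigned int)-1 if no host bridge claims that address.
 */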
unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + hose->pci_io_size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			return base + (address - hose->io_base_phys);
		}
	}
	return (unsigned int)-1;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);


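/*
 * 'which' values for the sys_pciconfig_iobase() syscall below, used by
 * userspace (historically the X server) to learn a host bridge's bus
 * number and IO/memory base addresses.
 */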
#define IOBASE_BRIDGE_NUMBER	0
#define IOBASE_MEMORY		1
#define IOBASE_IO		2
#define IOBASE_ISA_IO		3
#define IOBASE_ISA_MEM		4

long sys_pciconfig_iobase(long which, unsigned long in_bus,
			  unsigned long in_devfn)
{
	struct pci_controller *hose;
	struct list_head *ln;
	struct pci_bus *bus = NULL;
	struct device_node *hose_node;

	/* Argh ! Please forgive me for that hack, but that's the
	 * simplest way to get existing XFree to not lockup on some
	 * G5 machines... So when something asks for bus 0 io base
	 * (bus 0 is HT root), we return the AGP one instead.
	 */
	if (machine_is_compatible("MacRISC4"))
		if (in_bus == 0)
			in_bus = 0xf0;

	/* That syscall isn't quite compatible with PCI domains, but it's
	 * only used on pre-domain setups, so we return the first match
	 */

	for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
		bus = pci_bus_b(ln);
		if (in_bus >= bus->number && in_bus <= bus->subordinate)
			break;
		bus = NULL;
	}
	if (bus == NULL || bus->sysdata == NULL)
		return -ENODEV;

	hose_node = (struct device_node *)bus->sysdata;
	hose = PCI_DN(hose_node)->phb;

	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->pci_mem_offset;
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		return -EINVAL;
	}

	return -EOPNOTSUPP;
}

#ifdef CONFIG_NUMA
int pcibus_to_node(struct pci_bus *bus)
{
	struct pci_controller *phb = pci_bus_to_host(bus);
	return phb->node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif
662