xref: /openbmc/linux/arch/powerpc/kernel/pci_32.c (revision b454cc66)
1 /*
2  * Common pmac/prep/chrp pci routines. -- Cort
3  */
4 
5 #include <linux/kernel.h>
6 #include <linux/pci.h>
7 #include <linux/delay.h>
8 #include <linux/string.h>
9 #include <linux/init.h>
10 #include <linux/capability.h>
11 #include <linux/sched.h>
12 #include <linux/errno.h>
13 #include <linux/bootmem.h>
14 #include <linux/irq.h>
15 #include <linux/list.h>
16 
17 #include <asm/processor.h>
18 #include <asm/io.h>
19 #include <asm/prom.h>
20 #include <asm/sections.h>
21 #include <asm/pci-bridge.h>
22 #include <asm/byteorder.h>
23 #include <asm/uaccess.h>
24 #include <asm/machdep.h>
25 
26 #undef DEBUG
27 
28 #ifdef DEBUG
29 #define DBG(x...) printk(x)
30 #else
31 #define DBG(x...)
32 #endif
33 
34 unsigned long isa_io_base     = 0;
35 unsigned long isa_mem_base    = 0;
36 unsigned long pci_dram_offset = 0;
37 int pcibios_assign_bus_offset = 1;
38 
39 void pcibios_make_OF_bus_map(void);
40 
41 static int pci_relocate_bridge_resource(struct pci_bus *bus, int i);
42 static int probe_resource(struct pci_bus *parent, struct resource *pr,
43 			  struct resource *res, struct resource **conflict);
44 static void update_bridge_base(struct pci_bus *bus, int i);
45 static void pcibios_fixup_resources(struct pci_dev* dev);
46 static void fixup_broken_pcnet32(struct pci_dev* dev);
47 static int reparent_resources(struct resource *parent, struct resource *res);
48 static void fixup_cpc710_pci64(struct pci_dev* dev);
49 #ifdef CONFIG_PPC_OF
50 static u8* pci_to_OF_bus_map;
51 #endif
52 
53 /* By default, we don't re-assign bus numbers. We do this only on
54  * some pmacs
55  */
56 int pci_assign_all_buses;
57 
58 struct pci_controller* hose_head;
59 struct pci_controller** hose_tail = &hose_head;
60 
61 static int pci_bus_count;
62 
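/*
 * Editor's note (not in the original source): the fixup below catches
 * Ethernet-class devices that show up with the Trident vendor ID (see
 * the DECLARE_PCI_FIXUP_HEADER line after the function) and rewrites
 * the vendor ID to AMD, presumably so that the pcnet32 driver can
 * claim these mis-identified LANCE clones.
 */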
63 static void
64 fixup_broken_pcnet32(struct pci_dev* dev)
65 {
66 	if ((dev->class >> 8) == PCI_CLASS_NETWORK_ETHERNET) {
67 		dev->vendor = PCI_VENDOR_ID_AMD;
68 		pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
69 	}
70 }
71 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT,	PCI_ANY_ID,			fixup_broken_pcnet32);
72 
73 static void
74 fixup_cpc710_pci64(struct pci_dev* dev)
75 {
76 	/* Hide the PCI64 BARs from the kernel as their content doesn't
77 	 * fit well in the resource management
78 	 */
79 	dev->resource[0].start = dev->resource[0].end = 0;
80 	dev->resource[0].flags = 0;
81 	dev->resource[1].start = dev->resource[1].end = 0;
82 	dev->resource[1].flags = 0;
83 }
84 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM,	PCI_DEVICE_ID_IBM_CPC710_PCI64,	fixup_cpc710_pci64);
85 
86 static void
87 pcibios_fixup_resources(struct pci_dev *dev)
88 {
89 	struct pci_controller* hose = (struct pci_controller *)dev->sysdata;
90 	int i;
91 	unsigned long offset;
92 
93 	if (!hose) {
94 		printk(KERN_ERR "No hose for PCI dev %s!\n", pci_name(dev));
95 		return;
96 	}
97 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
98 		struct resource *res = dev->resource + i;
99 		if (!res->flags)
100 			continue;
101 		if (res->end == 0xffffffff) {
102 			DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
103 			    pci_name(dev), i, (u64)res->start, (u64)res->end);
104 			res->end -= res->start;
105 			res->start = 0;
106 			res->flags |= IORESOURCE_UNSET;
107 			continue;
108 		}
109 		offset = 0;
110 		if (res->flags & IORESOURCE_MEM) {
111 			offset = hose->pci_mem_offset;
112 		} else if (res->flags & IORESOURCE_IO) {
113 			offset = (unsigned long) hose->io_base_virt
114 				- isa_io_base;
115 		}
116 		if (offset != 0) {
117 			res->start += offset;
118 			res->end += offset;
119 			DBG("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
120 			    i, res->flags, pci_name(dev),
121 			    (u64)res->start - offset, (u64)res->start);
122 		}
123 	}
124 
125 	/* Call machine specific resource fixup */
126 	if (ppc_md.pcibios_fixup_resources)
127 		ppc_md.pcibios_fixup_resources(dev);
128 }
129 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID,		PCI_ANY_ID,			pcibios_fixup_resources);
130 
131 void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
132 			struct resource *res)
133 {
134 	unsigned long offset = 0;
135 	struct pci_controller *hose = dev->sysdata;
136 
137 	if (hose && res->flags & IORESOURCE_IO)
138 		offset = (unsigned long)hose->io_base_virt - isa_io_base;
139 	else if (hose && res->flags & IORESOURCE_MEM)
140 		offset = hose->pci_mem_offset;
141 	region->start = res->start - offset;
142 	region->end = res->end - offset;
143 }
144 EXPORT_SYMBOL(pcibios_resource_to_bus);
145 
146 void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
147 			     struct pci_bus_region *region)
148 {
149 	unsigned long offset = 0;
150 	struct pci_controller *hose = dev->sysdata;
151 
152 	if (hose && res->flags & IORESOURCE_IO)
153 		offset = (unsigned long)hose->io_base_virt - isa_io_base;
154 	else if (hose && res->flags & IORESOURCE_MEM)
155 		offset = hose->pci_mem_offset;
156 	res->start = region->start + offset;
157 	res->end = region->end + offset;
158 }
159 EXPORT_SYMBOL(pcibios_bus_to_resource);
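
/*
 * Editor's note, with an illustrative (made-up) value: on a hose whose
 * pci_mem_offset is 0x80000000, a memory resource the kernel sees at
 * 0x90000000 is reported to the bus side as 0x10000000 by
 * pcibios_resource_to_bus(); pcibios_bus_to_resource() applies the
 * inverse addition.  I/O resources use the hose's io_base_virt minus
 * isa_io_base as the offset instead.
 */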
160 
161 /*
162  * We need to avoid collisions with `mirrored' VGA ports
163  * and other strange ISA hardware, so we always want the
164  * addresses to be allocated in the 0x000-0x0ff region
165  * modulo 0x400.
166  *
167  * Why? Because some silly external IO cards only decode
168  * the low 10 bits of the IO address. The 0x00-0xff region
169  * is reserved for motherboard devices that decode all 16
170  * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
171  * but we want to try to avoid allocating at 0x2900-0x2bff
172  * which might be mirrored at 0x0100-0x03ff.
173  */
174 void pcibios_align_resource(void *data, struct resource *res,
175 				resource_size_t size, resource_size_t align)
176 {
177 	struct pci_dev *dev = data;
178 
179 	if (res->flags & IORESOURCE_IO) {
180 		resource_size_t start = res->start;
181 
182 		if (size > 0x100) {
183 			printk(KERN_ERR "PCI: I/O Region %s/%d too large"
184 			       " (%lld bytes)\n", pci_name(dev),
185 			       res - dev->resource, (unsigned long long)size);
186 		}
187 
188 		if (start & 0x300) {
189 			start = (start + 0x3ff) & ~0x3ff;
190 			res->start = start;
191 		}
192 	}
193 }
194 EXPORT_SYMBOL(pcibios_align_resource);
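
/*
 * Worked example for the alignment rule above (editor's note, values
 * chosen for illustration only): an I/O region proposed at 0x2900 has
 * bits 0x300 set, so pcibios_align_resource() rounds it up to
 * (0x2900 + 0x3ff) & ~0x3ff = 0x2c00, whose low 10 bits fall back in
 * the safe 0x000-0x0ff window; a region proposed at 0x2800 is left
 * alone.
 */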
195 
196 /*
197  *  Handle resources of PCI devices.  If the world were perfect, we could
198  *  just allocate all the resource regions and do nothing more.  It isn't.
199  *  On the other hand, we cannot just re-allocate all devices, as it would
200  *  require us to know lots of host bridge internals.  So we attempt to
201  *  keep as much of the original configuration as possible, but tweak it
202  *  when it's found to be wrong.
203  *
204  *  Known BIOS problems we have to work around:
205  *	- I/O or memory regions not configured
206  *	- regions configured, but not enabled in the command register
207  *	- bogus I/O addresses above 64K used
208  *	- expansion ROMs left enabled (this may sound harmless, but given
209  *	  the fact the PCI specs explicitly allow address decoders to be
210  *	  shared between expansion ROMs and other resource regions, it's
211  *	  at least dangerous)
212  *
213  *  Our solution:
214  *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
215  *	    This gives us fixed barriers on where we can allocate.
216  *	(2) Allocate resources for all enabled devices.  If there is
217  *	    a collision, just mark the resource as unallocated. Also
218  *	    disable expansion ROMs during this step.
219  *	(3) Try to allocate resources for disabled devices.  If the
220  *	    resources were assigned correctly, everything goes well,
221  *	    if they weren't, they won't disturb allocation of other
222  *	    resources.
223  *	(4) Assign new addresses to resources which were either
224  *	    not configured at all or misconfigured.  If explicitly
225  *	    requested by the user, configure expansion ROM address
226  *	    as well.
227  */
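
/*
 * Editor's note: in this file, step (1) above is implemented by
 * pcibios_allocate_bus_resources(), steps (2) and (3) by the two
 * passes of pcibios_allocate_resources(), and step (4) by
 * pcibios_assign_resources(); see pcibios_init() below for the order
 * in which they are called.
 */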
228 
229 static void __init
230 pcibios_allocate_bus_resources(struct list_head *bus_list)
231 {
232 	struct pci_bus *bus;
233 	int i;
234 	struct resource *res, *pr;
235 
236 	/* Depth-First Search on bus tree */
237 	list_for_each_entry(bus, bus_list, node) {
238 		for (i = 0; i < 4; ++i) {
239 			if ((res = bus->resource[i]) == NULL || !res->flags
240 			    || res->start > res->end)
241 				continue;
242 			if (bus->parent == NULL)
243 				pr = (res->flags & IORESOURCE_IO)?
244 					&ioport_resource: &iomem_resource;
245 			else {
246 				pr = pci_find_parent_resource(bus->self, res);
247 				if (pr == res) {
248 					/* this happens when the generic PCI
249 					 * code (wrongly) decides that this
250 					 * bridge is transparent  -- paulus
251 					 */
252 					continue;
253 				}
254 			}
255 
256 			DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
257 			    (u64)res->start, (u64)res->end, res->flags, pr);
258 			if (pr) {
259 				if (request_resource(pr, res) == 0)
260 					continue;
261 				/*
262 				 * Must be a conflict with an existing entry.
263 				 * Move that entry (or entries) under the
264 				 * bridge resource and try again.
265 				 */
266 				if (reparent_resources(pr, res) == 0)
267 					continue;
268 			}
269 			printk(KERN_ERR "PCI: Cannot allocate resource region "
270 			       "%d of PCI bridge %d\n", i, bus->number);
271 			if (pci_relocate_bridge_resource(bus, i))
272 				bus->resource[i] = NULL;
273 		}
274 		pcibios_allocate_bus_resources(&bus->children);
275 	}
276 }
277 
278 /*
279  * Reparent resource children of parent that conflict with res
280  * under res, and make res replace those children.
281  */
282 static int __init
283 reparent_resources(struct resource *parent, struct resource *res)
284 {
285 	struct resource *p, **pp;
286 	struct resource **firstpp = NULL;
287 
288 	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
289 		if (p->end < res->start)
290 			continue;
291 		if (res->end < p->start)
292 			break;
293 		if (p->start < res->start || p->end > res->end)
294 			return -1;	/* not completely contained */
295 		if (firstpp == NULL)
296 			firstpp = pp;
297 	}
298 	if (firstpp == NULL)
299 		return -1;	/* didn't find any conflicting entries? */
300 	res->parent = parent;
301 	res->child = *firstpp;
302 	res->sibling = *pp;
303 	*firstpp = res;
304 	*pp = NULL;
305 	for (p = res->child; p != NULL; p = p->sibling) {
306 		p->parent = res;
307 		DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
308 		    p->name, (u64)p->start, (u64)p->end, res->name);
309 	}
310 	return 0;
311 }
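
/*
 * Editor's note, illustrating reparent_resources() above: if parent's
 * children are A, B, C and D, and res overlaps only B and C, the
 * function leaves parent with children A, res, D and re-links B and C
 * (now fully contained) as children of res.  If any conflicting child
 * sticks out of res, it returns -1 and nothing is changed.
 */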
312 
313 /*
314  * A bridge has been allocated a range which is outside the range
315  * of its parent bridge, so it needs to be moved.
316  */
317 static int __init
318 pci_relocate_bridge_resource(struct pci_bus *bus, int i)
319 {
320 	struct resource *res, *pr, *conflict;
321 	unsigned long try, size;
322 	int j;
323 	struct pci_bus *parent = bus->parent;
324 
325 	if (parent == NULL) {
326 		/* shouldn't ever happen */
327 		printk(KERN_ERR "PCI: can't move host bridge resource\n");
328 		return -1;
329 	}
330 	res = bus->resource[i];
331 	if (res == NULL)
332 		return -1;
333 	pr = NULL;
334 	for (j = 0; j < 4; j++) {
335 		struct resource *r = parent->resource[j];
336 		if (!r)
337 			continue;
338 		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
339 			continue;
340 		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) {
341 			pr = r;
342 			break;
343 		}
344 		if (res->flags & IORESOURCE_PREFETCH)
345 			pr = r;
346 	}
347 	if (pr == NULL)
348 		return -1;
349 	size = res->end - res->start;
350 	if (pr->start > pr->end || size > pr->end - pr->start)
351 		return -1;
352 	try = pr->end;
353 	for (;;) {
354 		res->start = try - size;
355 		res->end = try;
356 		if (probe_resource(bus->parent, pr, res, &conflict) == 0)
357 			break;
358 		if (conflict->start <= pr->start + size)
359 			return -1;
360 		try = conflict->start - 1;
361 	}
362 	if (request_resource(pr, res)) {
363 		DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
364 		    (u64)res->start, (u64)res->end);
365 		return -1;		/* "can't happen" */
366 	}
367 	update_bridge_base(bus, i);
368 	printk(KERN_INFO "PCI: bridge %d resource %d moved to %llx..%llx\n",
369 	       bus->number, i, (unsigned long long)res->start,
370 	       (unsigned long long)res->end);
371 	return 0;
372 }
373 
374 static int __init
375 probe_resource(struct pci_bus *parent, struct resource *pr,
376 	       struct resource *res, struct resource **conflict)
377 {
378 	struct pci_bus *bus;
379 	struct pci_dev *dev;
380 	struct resource *r;
381 	int i;
382 
383 	for (r = pr->child; r != NULL; r = r->sibling) {
384 		if (r->end >= res->start && res->end >= r->start) {
385 			*conflict = r;
386 			return 1;
387 		}
388 	}
389 	list_for_each_entry(bus, &parent->children, node) {
390 		for (i = 0; i < 4; ++i) {
391 			if ((r = bus->resource[i]) == NULL)
392 				continue;
393 			if (!r->flags || r->start > r->end || r == res)
394 				continue;
395 			if (pci_find_parent_resource(bus->self, r) != pr)
396 				continue;
397 			if (r->end >= res->start && res->end >= r->start) {
398 				*conflict = r;
399 				return 1;
400 			}
401 		}
402 	}
403 	list_for_each_entry(dev, &parent->devices, bus_list) {
404 		for (i = 0; i < 6; ++i) {
405 			r = &dev->resource[i];
406 			if (!r->flags || (r->flags & IORESOURCE_UNSET))
407 				continue;
408 			if (pci_find_parent_resource(dev, r) != pr)
409 				continue;
410 			if (r->end >= res->start && res->end >= r->start) {
411 				*conflict = r;
412 				return 1;
413 			}
414 		}
415 	}
416 	return 0;
417 }
418 
419 static void __init
420 update_bridge_base(struct pci_bus *bus, int i)
421 {
422 	struct resource *res = bus->resource[i];
423 	u8 io_base_lo, io_limit_lo;
424 	u16 mem_base, mem_limit;
425 	u16 cmd;
426 	unsigned long start, end, off;
427 	struct pci_dev *dev = bus->self;
428 	struct pci_controller *hose = dev->sysdata;
429 
430 	if (!hose) {
431 		printk(KERN_ERR "update_bridge_base: no hose?\n");
432 		return;
433 	}
434 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
435 	pci_write_config_word(dev, PCI_COMMAND,
436 			      cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY));
437 	if (res->flags & IORESOURCE_IO) {
438 		off = (unsigned long) hose->io_base_virt - isa_io_base;
439 		start = res->start - off;
440 		end = res->end - off;
441 		io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK;
442 		io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK;
443 		if (end > 0xffff)
444 			io_base_lo |= PCI_IO_RANGE_TYPE_32;
445 		else
446 			io_base_lo |= PCI_IO_RANGE_TYPE_16;
447 		pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
448 				start >> 16);
449 		pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
450 				end >> 16);
451 		pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo);
452 		pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo);
453 
454 	} else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
455 		   == IORESOURCE_MEM) {
456 		off = hose->pci_mem_offset;
457 		mem_base = ((res->start - off) >> 16) & PCI_MEMORY_RANGE_MASK;
458 		mem_limit = ((res->end - off) >> 16) & PCI_MEMORY_RANGE_MASK;
459 		pci_write_config_word(dev, PCI_MEMORY_BASE, mem_base);
460 		pci_write_config_word(dev, PCI_MEMORY_LIMIT, mem_limit);
461 
462 	} else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
463 		   == (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
464 		off = hose->pci_mem_offset;
465 		mem_base = ((res->start - off) >> 16) & PCI_PREF_RANGE_MASK;
466 		mem_limit = ((res->end - off) >> 16) & PCI_PREF_RANGE_MASK;
467 		pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, mem_base);
468 		pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, mem_limit);
469 
470 	} else {
471 		DBG(KERN_ERR "PCI: ugh, bridge %s res %d has flags=%lx\n",
472 		    pci_name(dev), i, res->flags);
473 	}
474 	pci_write_config_word(dev, PCI_COMMAND, cmd);
475 }
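
/*
 * Editor's note, with an illustrative window: for a bridge I/O range
 * of 0x1000-0x1fff (after subtracting the hose offset), the code above
 * ends up with io_base_lo = io_limit_lo = 0x10 (address bits 15:12 in
 * the top nibble, low nibble reserved for the range type), writes 0
 * into the UPPER16 registers, and selects the 16-bit range type since
 * the end is below 64K.
 */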
476 
477 static inline void alloc_resource(struct pci_dev *dev, int idx)
478 {
479 	struct resource *pr, *r = &dev->resource[idx];
480 
481 	DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
482 	    pci_name(dev), idx, (u64)r->start, (u64)r->end, r->flags);
483 	pr = pci_find_parent_resource(dev, r);
484 	if (!pr || request_resource(pr, r) < 0) {
485 		printk(KERN_ERR "PCI: Cannot allocate resource region %d"
486 		       " of device %s\n", idx, pci_name(dev));
487 		if (pr)
488 			DBG("PCI:  parent is %p: %016llx-%016llx (f=%lx)\n",
489 			    pr, (u64)pr->start, (u64)pr->end, pr->flags);
490 		/* We'll assign a new address later */
491 		r->flags |= IORESOURCE_UNSET;
492 		r->end -= r->start;
493 		r->start = 0;
494 	}
495 }
496 
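/*
 * Editor's note: the "pass" argument below implements steps (2) and
 * (3) of the strategy comment near the top of this file.  Pass 0
 * claims the resources of devices whose I/O or memory decoding is
 * already enabled in the command register; pass 1 then tries the
 * disabled ones, so a misconfigured but disabled device cannot steal
 * an address range from one that is actually in use.
 */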
497 static void __init
498 pcibios_allocate_resources(int pass)
499 {
500 	struct pci_dev *dev = NULL;
501 	int idx, disabled;
502 	u16 command;
503 	struct resource *r;
504 
505 	for_each_pci_dev(dev) {
506 		pci_read_config_word(dev, PCI_COMMAND, &command);
507 		for (idx = 0; idx < 6; idx++) {
508 			r = &dev->resource[idx];
509 			if (r->parent)		/* Already allocated */
510 				continue;
511 			if (!r->flags || (r->flags & IORESOURCE_UNSET))
512 				continue;	/* Not assigned at all */
513 			if (r->flags & IORESOURCE_IO)
514 				disabled = !(command & PCI_COMMAND_IO);
515 			else
516 				disabled = !(command & PCI_COMMAND_MEMORY);
517 			if (pass == disabled)
518 				alloc_resource(dev, idx);
519 		}
520 		if (pass)
521 			continue;
522 		r = &dev->resource[PCI_ROM_RESOURCE];
523 		if (r->flags & IORESOURCE_ROM_ENABLE) {
524 			/* Turn the ROM off, leave the resource region, but keep it unregistered. */
525 			u32 reg;
526 			DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
527 			r->flags &= ~IORESOURCE_ROM_ENABLE;
528 			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
529 			pci_write_config_dword(dev, dev->rom_base_reg,
530 					       reg & ~PCI_ROM_ADDRESS_ENABLE);
531 		}
532 	}
533 }
534 
535 static void __init
536 pcibios_assign_resources(void)
537 {
538 	struct pci_dev *dev = NULL;
539 	int idx;
540 	struct resource *r;
541 
542 	for_each_pci_dev(dev) {
543 		int class = dev->class >> 8;
544 
545 		/* Don't touch classless devices and host bridges */
546 		if (!class || class == PCI_CLASS_BRIDGE_HOST)
547 			continue;
548 
549 		for (idx = 0; idx < 6; idx++) {
550 			r = &dev->resource[idx];
551 
552 			/*
553 			 * We shall assign a new address to this resource,
554 			 * either because the BIOS (sic) forgot to do so
555 			 * or because we have decided the old address was
556 			 * unusable for some reason.
557 			 */
558 			if ((r->flags & IORESOURCE_UNSET) && r->end &&
559 			    (!ppc_md.pcibios_enable_device_hook ||
560 			     !ppc_md.pcibios_enable_device_hook(dev, 1))) {
561 				r->flags &= ~IORESOURCE_UNSET;
562 				pci_assign_resource(dev, idx);
563 			}
564 		}
565 
566 #if 0 /* don't assign ROMs */
567 		r = &dev->resource[PCI_ROM_RESOURCE];
568 		r->end -= r->start;
569 		r->start = 0;
570 		if (r->end)
571 			pci_assign_resource(dev, PCI_ROM_RESOURCE);
572 #endif
573 	}
574 }
575 
576 
577 int
578 pcibios_enable_resources(struct pci_dev *dev, int mask)
579 {
580 	u16 cmd, old_cmd;
581 	int idx;
582 	struct resource *r;
583 
584 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
585 	old_cmd = cmd;
586 	for (idx=0; idx<6; idx++) {
587 		/* Only set up the requested stuff */
588 		if (!(mask & (1<<idx)))
589 			continue;
590 
591 		r = &dev->resource[idx];
592 		if (r->flags & IORESOURCE_UNSET) {
593 			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
594 			return -EINVAL;
595 		}
596 		if (r->flags & IORESOURCE_IO)
597 			cmd |= PCI_COMMAND_IO;
598 		if (r->flags & IORESOURCE_MEM)
599 			cmd |= PCI_COMMAND_MEMORY;
600 	}
601 	if (dev->resource[PCI_ROM_RESOURCE].start)
602 		cmd |= PCI_COMMAND_MEMORY;
603 	if (cmd != old_cmd) {
604 		printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
605 		pci_write_config_word(dev, PCI_COMMAND, cmd);
606 	}
607 	return 0;
608 }
609 
610 static int next_controller_index;
611 
612 struct pci_controller * __init
613 pcibios_alloc_controller(void)
614 {
615 	struct pci_controller *hose;
616 
617 	hose = (struct pci_controller *)alloc_bootmem(sizeof(*hose));
618 	memset(hose, 0, sizeof(struct pci_controller));
619 
620 	*hose_tail = hose;
621 	hose_tail = &hose->next;
622 
623 	hose->index = next_controller_index++;
624 
625 	return hose;
626 }
627 
628 #ifdef CONFIG_PPC_OF
629 /*
630  * Functions below are used on OpenFirmware machines.
631  */
632 static void
633 make_one_node_map(struct device_node* node, u8 pci_bus)
634 {
635 	const int *bus_range;
636 	int len;
637 
638 	if (pci_bus >= pci_bus_count)
639 		return;
640 	bus_range = get_property(node, "bus-range", &len);
641 	if (bus_range == NULL || len < 2 * sizeof(int)) {
642 		printk(KERN_WARNING "Can't get bus-range for %s, "
643 		       "assuming it starts at 0\n", node->full_name);
644 		pci_to_OF_bus_map[pci_bus] = 0;
645 	} else
646 		pci_to_OF_bus_map[pci_bus] = bus_range[0];
647 
648 	for (node=node->child; node != 0;node = node->sibling) {
649 		struct pci_dev* dev;
650 		const unsigned int *class_code, *reg;
651 
652 		class_code = get_property(node, "class-code", NULL);
653 		if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
654 			(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
655 			continue;
656 		reg = get_property(node, "reg", NULL);
657 		if (!reg)
658 			continue;
659 		dev = pci_find_slot(pci_bus, ((reg[0] >> 8) & 0xff));
660 		if (!dev || !dev->subordinate)
661 			continue;
662 		make_one_node_map(node, dev->subordinate->number);
663 	}
664 }
665 
666 void
667 pcibios_make_OF_bus_map(void)
668 {
669 	int i;
670 	struct pci_controller* hose;
671 	struct property *map_prop;
672 
673 	pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL);
674 	if (!pci_to_OF_bus_map) {
675 		printk(KERN_ERR "Can't allocate OF bus map !\n");
676 		return;
677 	}
678 
679 	/* We fill the bus map with invalid values; that helps
680 	 * debugging.
681 	 */
682 	for (i=0; i<pci_bus_count; i++)
683 		pci_to_OF_bus_map[i] = 0xff;
684 
685 	/* For each hose, we begin searching bridges */
686 	for(hose=hose_head; hose; hose=hose->next) {
687 		struct device_node* node;
688 		node = (struct device_node *)hose->arch_data;
689 		if (!node)
690 			continue;
691 		make_one_node_map(node, hose->first_busno);
692 	}
693 	map_prop = of_find_property(find_path_device("/"),
694 			"pci-OF-bus-map", NULL);
695 	if (map_prop) {
696 		BUG_ON(pci_bus_count > map_prop->length);
697 		memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count);
698 	}
699 #ifdef DEBUG
700 	printk("PCI->OF bus map:\n");
701 	for (i=0; i<pci_bus_count; i++) {
702 		if (pci_to_OF_bus_map[i] == 0xff)
703 			continue;
704 		printk("%d -> %d\n", i, pci_to_OF_bus_map[i]);
705 	}
706 #endif
707 }
708 
709 typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
710 
711 static struct device_node*
712 scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data)
713 {
714 	struct device_node* sub_node;
715 
716 	for (; node != 0;node = node->sibling) {
717 		const unsigned int *class_code;
718 
719 		if (filter(node, data))
720 			return node;
721 
722 		/* For PCI<->PCI bridges or CardBus bridges, we go down
723 		 * Note: some OFs create a parent node "multifunc-device" as
724 	 * a fake root for all functions of a multi-function device;
725 		 * we go down them as well.
726 		 */
727 		class_code = get_property(node, "class-code", NULL);
728 		if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
729 			(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
730 			strcmp(node->name, "multifunc-device"))
731 			continue;
732 		sub_node = scan_OF_pci_childs(node->child, filter, data);
733 		if (sub_node)
734 			return sub_node;
735 	}
736 	return NULL;
737 }
738 
739 static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
740 					       unsigned int devfn)
741 {
742 	struct device_node *np = NULL;
743 	const u32 *reg;
744 	unsigned int psize;
745 
746 	while ((np = of_get_next_child(parent, np)) != NULL) {
747 		reg = get_property(np, "reg", &psize);
748 		if (reg == NULL || psize < 4)
749 			continue;
750 		if (((reg[0] >> 8) & 0xff) == devfn)
751 			return np;
752 	}
753 	return NULL;
754 }
755 
756 
757 static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
758 {
759 	struct device_node *parent, *np;
760 
761 	/* Are we a root bus ? */
762 	if (bus->self == NULL || bus->parent == NULL) {
763 		struct pci_controller *hose = pci_bus_to_hose(bus->number);
764 		if (hose == NULL)
765 			return NULL;
766 		return of_node_get(hose->arch_data);
767 	}
768 
769 	/* not a root bus, we need to get our parent */
770 	parent = scan_OF_for_pci_bus(bus->parent);
771 	if (parent == NULL)
772 		return NULL;
773 
774 	/* now iterate over the children looking for a match */
775 	np = scan_OF_for_pci_dev(parent, bus->self->devfn);
776 	of_node_put(parent);
777 
778 	return np;
779 }
780 
781 /*
782  * Scans the OF tree for a device node matching a PCI device
783  */
784 struct device_node *
785 pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
786 {
787 	struct device_node *parent, *np;
788 
789 	if (!have_of)
790 		return NULL;
791 
792 	DBG("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
793 	parent = scan_OF_for_pci_bus(bus);
794 	if (parent == NULL)
795 		return NULL;
796 	DBG(" parent is %s\n", parent ? parent->full_name : "<NULL>");
797 	np = scan_OF_for_pci_dev(parent, devfn);
798 	of_node_put(parent);
799 	DBG(" result is %s\n", np ? np->full_name : "<NULL>");
800 
801 	/* XXX most callers don't release the returned node
802 	 * mostly because ppc64 doesn't increase the refcount;
803 	 * we need to fix that.
804 	 */
805 	return np;
806 }
807 EXPORT_SYMBOL(pci_busdev_to_OF_node);
808 
809 struct device_node*
810 pci_device_to_OF_node(struct pci_dev *dev)
811 {
812 	return pci_busdev_to_OF_node(dev->bus, dev->devfn);
813 }
814 EXPORT_SYMBOL(pci_device_to_OF_node);
815 
816 /* This routine is meant to be used early during boot, when the
817  * PCI bus numbers have not yet been assigned, and you need to
818  * issue PCI config cycles to an OF device.
819  * It could also be used to "fix" RTAS config cycles if you want
820  * to set pci_assign_all_buses to 1 and still use RTAS for PCI
821  * config cycles.
822  */
823 struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
824 {
825 	if (!have_of)
826 		return NULL;
827 	while(node) {
828 		struct pci_controller* hose;
829 		for (hose=hose_head;hose;hose=hose->next)
830 			if (hose->arch_data == node)
831 				return hose;
832 		node=node->parent;
833 	}
834 	return NULL;
835 }
836 
837 static int
838 find_OF_pci_device_filter(struct device_node* node, void* data)
839 {
840 	return ((void *)node == data);
841 }
842 
843 /*
844  * Returns the PCI device matching a given OF node
845  */
846 int
847 pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
848 {
849 	const unsigned int *reg;
850 	struct pci_controller* hose;
851 	struct pci_dev* dev = NULL;
852 
853 	if (!have_of)
854 		return -ENODEV;
855 	/* Make sure it's really a PCI device */
856 	hose = pci_find_hose_for_OF_device(node);
857 	if (!hose || !hose->arch_data)
858 		return -ENODEV;
859 	if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child,
860 			find_OF_pci_device_filter, (void *)node))
861 		return -ENODEV;
862 	reg = get_property(node, "reg", NULL);
863 	if (!reg)
864 		return -ENODEV;
865 	*bus = (reg[0] >> 16) & 0xff;
866 	*devfn = ((reg[0] >> 8) & 0xff);
867 
868 	/* Ok, here we need some tweak. If we have already renumbered
869 	 * all busses, we can't rely on the OF bus number any more.
870 	 * The pci_to_OF_bus_map is not enough as several PCI busses
871 	 * may match the same OF bus number.
872 	 */
873 	if (!pci_to_OF_bus_map)
874 		return 0;
875 
876 	for_each_pci_dev(dev)
877 		if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
878 				dev->devfn == *devfn) {
879 			*bus = dev->bus->number;
880 			pci_dev_put(dev);
881 			return 0;
882 		}
883 
884 	return -ENODEV;
885 }
886 EXPORT_SYMBOL(pci_device_from_OF_node);
887 
888 void __init
889 pci_process_bridge_OF_ranges(struct pci_controller *hose,
890 			   struct device_node *dev, int primary)
891 {
892 	static unsigned int static_lc_ranges[256] __initdata;
893 	const unsigned int *dt_ranges;
894 	unsigned int *lc_ranges, *ranges, *prev, size;
895 	int rlen = 0, orig_rlen;
896 	int memno = 0;
897 	struct resource *res;
898 	int np, na = prom_n_addr_cells(dev);
899 	np = na + 5;
900 
901 	/* First we try to merge ranges to fix a problem with some pmacs
902 	 * that can have more than 3 ranges, fortunately using contiguous
903 	 * addresses -- BenH
904 	 */
905 	dt_ranges = get_property(dev, "ranges", &rlen);
906 	if (!dt_ranges)
907 		return;
908 	/* Sanity check, though hopefully that never happens */
909 	if (rlen > sizeof(static_lc_ranges)) {
910 		printk(KERN_WARNING "OF ranges property too large !\n");
911 		rlen = sizeof(static_lc_ranges);
912 	}
913 	lc_ranges = static_lc_ranges;
914 	memcpy(lc_ranges, dt_ranges, rlen);
915 	orig_rlen = rlen;
916 
917 	/* Let's work on a copy of the "ranges" property instead of damaging
918 	 * the device-tree image in memory
919 	 */
920 	ranges = lc_ranges;
921 	prev = NULL;
922 	while ((rlen -= np * sizeof(unsigned int)) >= 0) {
923 		if (prev) {
924 			if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
925 				(prev[2] + prev[na+4]) == ranges[2] &&
926 				(prev[na+2] + prev[na+4]) == ranges[na+2]) {
927 				prev[na+4] += ranges[na+4];
928 				ranges[0] = 0;
929 				ranges += np;
930 				continue;
931 			}
932 		}
933 		prev = ranges;
934 		ranges += np;
935 	}
936 
937 	/*
938 	 * The ranges property is laid out as an array of elements,
939 	 * each of which comprises:
940 	 *   cells 0 - 2:	a PCI address
941 	 *   cells 3 or 3+4:	a CPU physical address
942 	 *			(size depending on dev->n_addr_cells)
943 	 *   cells 4+5 or 5+6:	the size of the range
944 	 */
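	/*
	 * Editor's note, a made-up example assuming one CPU address cell
	 * (na == 1, np == 6): the entry
	 *   <0x01000000 0x0 0x0  0xf2000000  0x0 0x00800000>
	 * describes 8MB of PCI I/O space (space code 1 in the high byte
	 * of cell 0) at PCI address 0 and CPU address 0xf2000000; the
	 * code below reads the CPU address from ranges[na+2] and the
	 * size from ranges[na+4].
	 */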
945 	ranges = lc_ranges;
946 	rlen = orig_rlen;
947 	while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
948 		res = NULL;
949 		size = ranges[na+4];
950 		switch ((ranges[0] >> 24) & 0x3) {
951 		case 1:		/* I/O space */
952 			if (ranges[2] != 0)
953 				break;
954 			hose->io_base_phys = ranges[na+2];
955 			/* limit I/O space to 16MB */
956 			if (size > 0x01000000)
957 				size = 0x01000000;
958 			hose->io_base_virt = ioremap(ranges[na+2], size);
959 			if (primary)
960 				isa_io_base = (unsigned long) hose->io_base_virt;
961 			res = &hose->io_resource;
962 			res->flags = IORESOURCE_IO;
963 			res->start = ranges[2];
964 			DBG("PCI: IO 0x%llx -> 0x%llx\n",
965 			    (u64)res->start, (u64)res->start + size - 1);
966 			break;
967 		case 2:		/* memory space */
968 			memno = 0;
969 			if (ranges[1] == 0 && ranges[2] == 0
970 			    && ranges[na+4] <= (16 << 20)) {
971 				/* 1st 16MB, i.e. ISA memory area */
972 				if (primary)
973 					isa_mem_base = ranges[na+2];
974 				memno = 1;
975 			}
976 			while (memno < 3 && hose->mem_resources[memno].flags)
977 				++memno;
978 			if (memno == 0)
979 				hose->pci_mem_offset = ranges[na+2] - ranges[2];
980 			if (memno < 3) {
981 				res = &hose->mem_resources[memno];
982 				res->flags = IORESOURCE_MEM;
983 				if(ranges[0] & 0x40000000)
984 					res->flags |= IORESOURCE_PREFETCH;
985 				res->start = ranges[na+2];
986 				DBG("PCI: MEM[%d] 0x%llx -> 0x%llx\n", memno,
987 				    (u64)res->start, (u64)res->start + size - 1);
988 			}
989 			break;
990 		}
991 		if (res != NULL) {
992 			res->name = dev->full_name;
993 			res->end = res->start + size - 1;
994 			res->parent = NULL;
995 			res->sibling = NULL;
996 			res->child = NULL;
997 		}
998 		ranges += np;
999 	}
1000 }
1001 
1002 /* We create the "pci-OF-bus-map" property now so it appears in the
1003  * /proc device tree
1004  */
1005 void __init
1006 pci_create_OF_bus_map(void)
1007 {
1008 	struct property* of_prop;
1009 
1010 	of_prop = (struct property*) alloc_bootmem(sizeof(struct property) + 256);
1011 	if (of_prop && find_path_device("/")) {
1012 		memset(of_prop, -1, sizeof(struct property) + 256);
1013 		of_prop->name = "pci-OF-bus-map";
1014 		of_prop->length = 256;
1015 		of_prop->value = (unsigned char *)&of_prop[1];
1016 		prom_add_property(find_path_device("/"), of_prop);
1017 	}
1018 }
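
/*
 * Editor's note: the property created above is only a 256-byte
 * placeholder filled with 0xff; once the kernel has assigned its own
 * bus numbers, pcibios_make_OF_bus_map() earlier in this file copies
 * the real PCI-to-OF map into its value.
 */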
1019 
1020 static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
1021 {
1022 	struct pci_dev *pdev;
1023 	struct device_node *np;
1024 
1025 	pdev = to_pci_dev (dev);
1026 	np = pci_device_to_OF_node(pdev);
1027 	if (np == NULL || np->full_name == NULL)
1028 		return 0;
1029 	return sprintf(buf, "%s", np->full_name);
1030 }
1031 static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
1032 
1033 #else /* CONFIG_PPC_OF */
1034 void pcibios_make_OF_bus_map(void)
1035 {
1036 }
1037 #endif /* CONFIG_PPC_OF */
1038 
1039 /* Add sysfs properties */
1040 void pcibios_add_platform_entries(struct pci_dev *pdev)
1041 {
1042 #ifdef CONFIG_PPC_OF
1043 	device_create_file(&pdev->dev, &dev_attr_devspec);
1044 #endif /* CONFIG_PPC_OF */
1045 }
1046 
1047 
1048 #ifdef CONFIG_PPC_PMAC
1049 /*
1050  * This set of routines checks for PCI<->PCI bridges that have closed
1051  * IO resources and have child devices. It tries to re-open an IO
1052  * window on them.
1053  *
1054  * This is a _temporary_ fix to workaround a problem with Apple's OF
1055  * closing IO windows on P2P bridges when the OF drivers of cards
1056  * below this bridge don't claim any IO range (typically ATI or
1057  * Adaptec).
1058  *
1059  * A more complete fix would be to use drivers/pci/setup-bus.c, which
1060  * involves a working pcibios_fixup_pbus_ranges(), some more care about
1061  * ordering when creating the host bus resources, and maybe a few more
1062  * minor tweaks
1063  */
1064 
1065 /* Initialize bridges with base/limit values we have collected */
1066 static void __init
1067 do_update_p2p_io_resource(struct pci_bus *bus, int enable_vga)
1068 {
1069 	struct pci_dev *bridge = bus->self;
1070 	struct pci_controller* hose = (struct pci_controller *)bridge->sysdata;
1071 	u32 l;
1072 	u16 w;
1073 	struct resource res;
1074 
1075 	if (bus->resource[0] == NULL)
1076 		return;
1077  	res = *(bus->resource[0]);
1078 
1079 	DBG("Remapping Bus %d, bridge: %s\n", bus->number, pci_name(bridge));
1080 	res.start -= ((unsigned long) hose->io_base_virt - isa_io_base);
1081 	res.end -= ((unsigned long) hose->io_base_virt - isa_io_base);
1082 	DBG("  IO window: %016llx-%016llx\n", (u64)res.start, (u64)res.end);
1083 
1084 	/* Set up the top and bottom of the PCI I/O segment for this bus. */
1085 	pci_read_config_dword(bridge, PCI_IO_BASE, &l);
1086 	l &= 0xffff000f;
1087 	l |= (res.start >> 8) & 0x00f0;
1088 	l |= res.end & 0xf000;
1089 	pci_write_config_dword(bridge, PCI_IO_BASE, l);
1090 
1091 	if ((l & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
1092 		l = (res.start >> 16) | (res.end & 0xffff0000);
1093 		pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, l);
1094 	}
1095 
1096 	pci_read_config_word(bridge, PCI_COMMAND, &w);
1097 	w |= PCI_COMMAND_IO;
1098 	pci_write_config_word(bridge, PCI_COMMAND, w);
1099 
1100 #if 0 /* Enabling this causes XFree 4.2.0 to hang during PCI probe */
1101 	if (enable_vga) {
1102 		pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &w);
1103 		w |= PCI_BRIDGE_CTL_VGA;
1104 		pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, w);
1105 	}
1106 #endif
1107 }
1108 
1109 /* This function is pretty basic and actually quite broken for the
1110  * general case; it's enough for us right now though. It's supposed
1111  * to tell us if we need to open an IO range at all or not and what
1112  * size.
1113  */
1114 static int __init
1115 check_for_io_childs(struct pci_bus *bus, struct resource* res, int *found_vga)
1116 {
1117 	struct pci_dev *dev;
1118 	int	i;
1119 	int	rc = 0;
1120 
1121 #define push_end(res, mask) do {		\
1122 	BUG_ON((mask+1) & mask);		\
1123 	res->end = (res->end + mask) | mask;	\
1124 } while (0)
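	/*
	 * Editor's note: push_end() grows the accumulated window and
	 * leaves res->end on a (mask+1) boundary minus one; e.g. with
	 * mask 0xfff an end of 0x0fff becomes 0x1fff, i.e. another 4KB
	 * of I/O space is reserved for the device just examined.
	 */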
1125 
1126 	list_for_each_entry(dev, &bus->devices, bus_list) {
1127 		u16 class = dev->class >> 8;
1128 
1129 		if (class == PCI_CLASS_DISPLAY_VGA ||
1130 		    class == PCI_CLASS_NOT_DEFINED_VGA)
1131 			*found_vga = 1;
1132 		if (class >> 8 == PCI_BASE_CLASS_BRIDGE && dev->subordinate)
1133 			rc |= check_for_io_childs(dev->subordinate, res, found_vga);
1134 		if (class == PCI_CLASS_BRIDGE_CARDBUS)
1135 			push_end(res, 0xfff);
1136 
1137 		for (i=0; i<PCI_NUM_RESOURCES; i++) {
1138 			struct resource *r;
1139 			unsigned long r_size;
1140 
1141 			if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI
1142 			    && i >= PCI_BRIDGE_RESOURCES)
1143 				continue;
1144 			r = &dev->resource[i];
1145 			r_size = r->end - r->start;
1146 			if (r_size < 0xfff)
1147 				r_size = 0xfff;
1148 			if (r->flags & IORESOURCE_IO && (r_size) != 0) {
1149 				rc = 1;
1150 				push_end(res, r_size);
1151 			}
1152 		}
1153 	}
1154 
1155 	return rc;
1156 }
1157 
1158 /* Here we scan all P2P bridges of a given level that have a closed
1159  * IO window. Note that the test for the presence of a VGA card should
1160  * be improved to take into account already configured P2P bridges;
1161  * currently we don't see them and might end up configuring 2 bridges
1162  * with VGA pass through enabled
1163  */
1164 static void __init
1165 do_fixup_p2p_level(struct pci_bus *bus)
1166 {
1167 	struct pci_bus *b;
1168 	int i, parent_io;
1169 	int has_vga = 0;
1170 
1171 	for (parent_io=0; parent_io<4; parent_io++)
1172 		if (bus->resource[parent_io]
1173 		    && bus->resource[parent_io]->flags & IORESOURCE_IO)
1174 			break;
1175 	if (parent_io >= 4)
1176 		return;
1177 
1178 	list_for_each_entry(b, &bus->children, node) {
1179 		struct pci_dev *d = b->self;
1180 		struct pci_controller* hose = (struct pci_controller *)d->sysdata;
1181 		struct resource *res = b->resource[0];
1182 		struct resource tmp_res;
1183 		unsigned long max;
1184 		int found_vga = 0;
1185 
1186 		memset(&tmp_res, 0, sizeof(tmp_res));
1187 		tmp_res.start = bus->resource[parent_io]->start;
1188 
1189 		/* We don't let low addresses go through that closed P2P bridge, well,
1190 		 * that may not be necessary but I feel safer that way
1191 		 */
1192 		if (tmp_res.start == 0)
1193 			tmp_res.start = 0x1000;
1194 
1195 		if (!list_empty(&b->devices) && res && res->flags == 0 &&
1196 		    res != bus->resource[parent_io] &&
1197 		    (d->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
1198 		    check_for_io_childs(b, &tmp_res, &found_vga)) {
1199 			u8 io_base_lo;
1200 
1201 			printk(KERN_INFO "Fixing up IO bus %s\n", b->name);
1202 
1203 			if (found_vga) {
1204 				if (has_vga) {
1205 					printk(KERN_WARNING "Skipping VGA, already active"
1206 					    " on bus segment\n");
1207 					found_vga = 0;
1208 				} else
1209 					has_vga = 1;
1210 			}
1211 			pci_read_config_byte(d, PCI_IO_BASE, &io_base_lo);
1212 
1213 			if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32)
1214 				max = ((unsigned long) hose->io_base_virt
1215 					- isa_io_base) + 0xffffffff;
1216 			else
1217 				max = ((unsigned long) hose->io_base_virt
1218 					- isa_io_base) + 0xffff;
1219 
1220 			*res = tmp_res;
1221 			res->flags = IORESOURCE_IO;
1222 			res->name = b->name;
1223 
1224 			/* Find a resource in the parent where we can allocate */
1225 			for (i = 0 ; i < 4; i++) {
1226 				struct resource *r = bus->resource[i];
1227 				if (!r)
1228 					continue;
1229 				if ((r->flags & IORESOURCE_IO) == 0)
1230 					continue;
1231 				DBG("Trying to allocate from %016llx, size %016llx from parent"
1232 				    " res %d: %016llx -> %016llx\n",
1233 					(u64)res->start, (u64)res->end, i, (u64)r->start, (u64)r->end);
1234 
1235 				if (allocate_resource(r, res, res->end + 1, res->start, max,
1236 				    res->end + 1, NULL, NULL) < 0) {
1237 					DBG("Failed !\n");
1238 					continue;
1239 				}
1240 				do_update_p2p_io_resource(b, found_vga);
1241 				break;
1242 			}
1243 		}
1244 		do_fixup_p2p_level(b);
1245 	}
1246 }
1247 
1248 static void
1249 pcibios_fixup_p2p_bridges(void)
1250 {
1251 	struct pci_bus *b;
1252 
1253 	list_for_each_entry(b, &pci_root_buses, node)
1254 		do_fixup_p2p_level(b);
1255 }
1256 
1257 #endif /* CONFIG_PPC_PMAC */
1258 
1259 static int __init
1260 pcibios_init(void)
1261 {
1262 	struct pci_controller *hose;
1263 	struct pci_bus *bus;
1264 	int next_busno;
1265 
1266 	printk(KERN_INFO "PCI: Probing PCI hardware\n");
1267 
1268 	/* Scan all of the recorded PCI controllers.  */
1269 	for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
1270 		if (pci_assign_all_buses)
1271 			hose->first_busno = next_busno;
1272 		hose->last_busno = 0xff;
1273 		bus = pci_scan_bus_parented(hose->parent, hose->first_busno,
1274 					    hose->ops, hose);
1275 		if (bus) {
1276 			pci_bus_add_devices(bus);
1277 			hose->last_busno = bus->subordinate;
		}
1278 		if (pci_assign_all_buses || next_busno <= hose->last_busno)
1279 			next_busno = hose->last_busno + pcibios_assign_bus_offset;
1280 	}
1281 	pci_bus_count = next_busno;
1282 
1283 	/* OpenFirmware based machines need a map of OF bus
1284 	 * numbers vs. kernel bus numbers since we may have to
1285 	 * remap them.
1286 	 */
1287 	if (pci_assign_all_buses && have_of)
1288 		pcibios_make_OF_bus_map();
1289 
1290 	/* Call machine dependent fixup */
1291 	if (ppc_md.pcibios_fixup)
1292 		ppc_md.pcibios_fixup();
1293 
1294 	/* Allocate and assign resources */
1295 	pcibios_allocate_bus_resources(&pci_root_buses);
1296 	pcibios_allocate_resources(0);
1297 	pcibios_allocate_resources(1);
1298 #ifdef CONFIG_PPC_PMAC
1299 	pcibios_fixup_p2p_bridges();
1300 #endif /* CONFIG_PPC_PMAC */
1301 	pcibios_assign_resources();
1302 
1303 	/* Call machine dependent post-init code */
1304 	if (ppc_md.pcibios_after_init)
1305 		ppc_md.pcibios_after_init();
1306 
1307 	return 0;
1308 }
1309 
1310 subsys_initcall(pcibios_init);
1311 
1312 unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
1313 			     unsigned long start, unsigned long size)
1314 {
1315 	return start;
1316 }
1317 
1318 void __init pcibios_fixup_bus(struct pci_bus *bus)
1319 {
1320 	struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
1321 	unsigned long io_offset;
1322 	struct resource *res;
1323 	struct pci_dev *dev;
1324 	int i;
1325 
1326 	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
1327 	if (bus->parent == NULL) {
1328 		/* This is a host bridge - fill in its resources */
1329 		hose->bus = bus;
1330 
1331 		bus->resource[0] = res = &hose->io_resource;
1332 		if (!res->flags) {
1333 			if (io_offset)
1334 				printk(KERN_ERR "I/O resource not set for host"
1335 				       " bridge %d\n", hose->index);
1336 			res->start = 0;
1337 			res->end = IO_SPACE_LIMIT;
1338 			res->flags = IORESOURCE_IO;
1339 		}
1340 		res->start += io_offset;
1341 		res->end += io_offset;
1342 
1343 		for (i = 0; i < 3; ++i) {
1344 			res = &hose->mem_resources[i];
1345 			if (!res->flags) {
1346 				if (i > 0)
1347 					continue;
1348 				printk(KERN_ERR "Memory resource not set for "
1349 				       "host bridge %d\n", hose->index);
1350 				res->start = hose->pci_mem_offset;
1351 				res->end = ~0U;
1352 				res->flags = IORESOURCE_MEM;
1353 			}
1354 			bus->resource[i+1] = res;
1355 		}
1356 	} else {
1357 		/* This is a subordinate bridge */
1358 		pci_read_bridge_bases(bus);
1359 
1360 		for (i = 0; i < 4; ++i) {
1361 			if ((res = bus->resource[i]) == NULL)
1362 				continue;
1363 			if (!res->flags)
1364 				continue;
1365 			if (io_offset && (res->flags & IORESOURCE_IO)) {
1366 				res->start += io_offset;
1367 				res->end += io_offset;
1368 			} else if (hose->pci_mem_offset
1369 				   && (res->flags & IORESOURCE_MEM)) {
1370 				res->start += hose->pci_mem_offset;
1371 				res->end += hose->pci_mem_offset;
1372 			}
1373 		}
1374 	}
1375 
1376 	/* Platform specific bus fixups */
1377 	if (ppc_md.pcibios_fixup_bus)
1378 		ppc_md.pcibios_fixup_bus(bus);
1379 
1380 	/* Read default IRQs and fixup if necessary */
1381 	list_for_each_entry(dev, &bus->devices, bus_list) {
1382 		pci_read_irq_line(dev);
1383 		if (ppc_md.pci_irq_fixup)
1384 			ppc_md.pci_irq_fixup(dev);
1385 	}
1386 }
1387 
1388 char __init *pcibios_setup(char *str)
1389 {
1390 	return str;
1391 }
1392 
1393 /* the next one is stolen from the alpha port... */
1394 void __init
1395 pcibios_update_irq(struct pci_dev *dev, int irq)
1396 {
1397 	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
1398 	/* XXX FIXME - update OF device tree node interrupt property */
1399 }
1400 
1401 #ifdef CONFIG_PPC_MERGE
1402 /* XXX This is a copy of the ppc64 version. This is temporary until we start
1403  * merging the 2 PCI layers
1404  */
1405 /*
1406  * Reads the interrupt pin to determine if the interrupt is used by the card.
1407  * If the interrupt is used, then gets the interrupt line from the
1408  * openfirmware and sets it in the pci_dev and pci_config line.
1409  */
1410 int pci_read_irq_line(struct pci_dev *pci_dev)
1411 {
1412 	struct of_irq oirq;
1413 	unsigned int virq;
1414 
1415 	DBG("Try to map irq for %s...\n", pci_name(pci_dev));
1416 
1417 	/* Try to get a mapping from the device-tree */
1418 	if (of_irq_map_pci(pci_dev, &oirq)) {
1419 		u8 line, pin;
1420 
1421 		/* If that fails, let's fall back to what is in the config
1422 		 * space and map that through the default controller. We
1423 		 * also set the type to level low since that's what PCI
1424 		 * interrupts are. If your platform does differently, then
1425 		 * either provide a proper interrupt tree or don't use this
1426 		 * function.
1427 		 */
1428 		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
1429 			return -1;
1430 		if (pin == 0)
1431 			return -1;
1432 		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
1433 		    line == 0xff) {
1434 			return -1;
1435 		}
1436 		DBG(" -> no map ! Using irq line %d from PCI config\n", line);
1437 
1438 		virq = irq_create_mapping(NULL, line);
1439 		if (virq != NO_IRQ)
1440 			set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
1441 	} else {
1442 		DBG(" -> got one, spec %d cells (0x%08x...) on %s\n",
1443 		    oirq.size, oirq.specifier[0], oirq.controller->full_name);
1444 
1445 		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
1446 					     oirq.size);
1447 	}
1448 	if(virq == NO_IRQ) {
1449 		DBG(" -> failed to map !\n");
1450 		return -1;
1451 	}
1452 	pci_dev->irq = virq;
1453 
1454 	return 0;
1455 }
1456 EXPORT_SYMBOL(pci_read_irq_line);
1457 #endif /* CONFIG_PPC_MERGE */
1458 
1459 int pcibios_enable_device(struct pci_dev *dev, int mask)
1460 {
1461 	u16 cmd, old_cmd;
1462 	int idx;
1463 	struct resource *r;
1464 
1465 	if (ppc_md.pcibios_enable_device_hook)
1466 		if (ppc_md.pcibios_enable_device_hook(dev, 0))
1467 			return -EINVAL;
1468 
1469 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
1470 	old_cmd = cmd;
1471 	for (idx=0; idx<6; idx++) {
1472 		r = &dev->resource[idx];
1473 		if (r->flags & IORESOURCE_UNSET) {
1474 			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
1475 			return -EINVAL;
1476 		}
1477 		if (r->flags & IORESOURCE_IO)
1478 			cmd |= PCI_COMMAND_IO;
1479 		if (r->flags & IORESOURCE_MEM)
1480 			cmd |= PCI_COMMAND_MEMORY;
1481 	}
1482 	if (cmd != old_cmd) {
1483 		printk("PCI: Enabling device %s (%04x -> %04x)\n",
1484 		       pci_name(dev), old_cmd, cmd);
1485 		pci_write_config_word(dev, PCI_COMMAND, cmd);
1486 	}
1487 	return 0;
1488 }
1489 
1490 struct pci_controller*
1491 pci_bus_to_hose(int bus)
1492 {
1493 	struct pci_controller* hose = hose_head;
1494 
1495 	for (; hose; hose = hose->next)
1496 		if (bus >= hose->first_busno && bus <= hose->last_busno)
1497 			return hose;
1498 	return NULL;
1499 }
1500 
1501 void __iomem *
1502 pci_bus_io_base(unsigned int bus)
1503 {
1504 	struct pci_controller *hose;
1505 
1506 	hose = pci_bus_to_hose(bus);
1507 	if (!hose)
1508 		return NULL;
1509 	return hose->io_base_virt;
1510 }
1511 
1512 unsigned long
1513 pci_bus_io_base_phys(unsigned int bus)
1514 {
1515 	struct pci_controller *hose;
1516 
1517 	hose = pci_bus_to_hose(bus);
1518 	if (!hose)
1519 		return 0;
1520 	return hose->io_base_phys;
1521 }
1522 
1523 unsigned long
1524 pci_bus_mem_base_phys(unsigned int bus)
1525 {
1526 	struct pci_controller *hose;
1527 
1528 	hose = pci_bus_to_hose(bus);
1529 	if (!hose)
1530 		return 0;
1531 	return hose->pci_mem_offset;
1532 }
1533 
1534 unsigned long
1535 pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
1536 {
1537 	/* Hack alert again ! See comments in chrp_pci.c
1538 	 */
1539 	struct pci_controller* hose =
1540 		(struct pci_controller *)pdev->sysdata;
1541 	if (hose && res->flags & IORESOURCE_MEM)
1542 		return res->start - hose->pci_mem_offset;
1543 	/* We may want to do something with IOs here... */
1544 	return res->start;
1545 }
1546 
1547 
1548 static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
1549 					       resource_size_t *offset,
1550 					       enum pci_mmap_state mmap_state)
1551 {
1552 	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
1553 	unsigned long io_offset = 0;
1554 	int i, res_bit;
1555 
1556 	if (hose == 0)
1557 		return NULL;		/* should never happen */
1558 
1559 	/* If memory, add on the PCI bridge address offset */
1560 	if (mmap_state == pci_mmap_mem) {
1561 #if 0 /* See comment in pci_resource_to_user() for why this is disabled */
1562 		*offset += hose->pci_mem_offset;
1563 #endif
1564 		res_bit = IORESOURCE_MEM;
1565 	} else {
1566 		io_offset = hose->io_base_virt - (void __iomem *)_IO_BASE;
1567 		*offset += io_offset;
1568 		res_bit = IORESOURCE_IO;
1569 	}
1570 
1571 	/*
1572 	 * Check that the offset requested corresponds to one of the
1573 	 * resources of the device.
1574 	 */
1575 	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
1576 		struct resource *rp = &dev->resource[i];
1577 		int flags = rp->flags;
1578 
1579 		/* treat ROM as memory (should be already) */
1580 		if (i == PCI_ROM_RESOURCE)
1581 			flags |= IORESOURCE_MEM;
1582 
1583 		/* Active and same type? */
1584 		if ((flags & res_bit) == 0)
1585 			continue;
1586 
1587 		/* In the range of this resource? */
1588 		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
1589 			continue;
1590 
1591 		/* found it! construct the final physical address */
1592 		if (mmap_state == pci_mmap_io)
1593 			*offset += hose->io_base_phys - io_offset;
1594 		return rp;
1595 	}
1596 
1597 	return NULL;
1598 }
1599 
1600 /*
1601  * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
1602  * device mapping.
1603  */
1604 static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
1605 				      pgprot_t protection,
1606 				      enum pci_mmap_state mmap_state,
1607 				      int write_combine)
1608 {
1609 	unsigned long prot = pgprot_val(protection);
1610 
1611 	/* Write combine is always 0 on non-memory space mappings. On
1612 	 * memory space, if the user didn't pass 1, we check for a
1613 	 * "prefetchable" resource. This is a bit hackish, but we use
1614 	 * this to work around the inability of /sysfs to provide a write
1615 	 * combine bit
1616 	 */
1617 	if (mmap_state != pci_mmap_mem)
1618 		write_combine = 0;
1619 	else if (write_combine == 0) {
1620 		if (rp->flags & IORESOURCE_PREFETCH)
1621 			write_combine = 1;
1622 	}
1623 
1624 	/* XXX would be nice to have a way to ask for write-through */
1625 	prot |= _PAGE_NO_CACHE;
1626 	if (write_combine)
1627 		prot &= ~_PAGE_GUARDED;
1628 	else
1629 		prot |= _PAGE_GUARDED;
1630 
1631 	return __pgprot(prot);
1632 }
1633 
1634 /*
1635  * This one is used by /dev/mem and fbdev who have no clue about the
1636  * PCI device; it tries to find the PCI device first and calls the
1637  * above routine
1638  */
1639 pgprot_t pci_phys_mem_access_prot(struct file *file,
1640 				  unsigned long pfn,
1641 				  unsigned long size,
1642 				  pgprot_t protection)
1643 {
1644 	struct pci_dev *pdev = NULL;
1645 	struct resource *found = NULL;
1646 	unsigned long prot = pgprot_val(protection);
1647 	unsigned long offset = pfn << PAGE_SHIFT;
1648 	int i;
1649 
1650 	if (page_is_ram(pfn))
1651 		return prot;
1652 
1653 	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
1654 
1655 	for_each_pci_dev(pdev) {
1656 		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
1657 			struct resource *rp = &pdev->resource[i];
1658 			int flags = rp->flags;
1659 
1660 			/* Active and same type? */
1661 			if ((flags & IORESOURCE_MEM) == 0)
1662 				continue;
1663 			/* In the range of this resource? */
1664 			if (offset < (rp->start & PAGE_MASK) ||
1665 			    offset > rp->end)
1666 				continue;
1667 			found = rp;
1668 			break;
1669 		}
1670 		if (found)
1671 			break;
1672 	}
1673 	if (found) {
1674 		if (found->flags & IORESOURCE_PREFETCH)
1675 			prot &= ~_PAGE_GUARDED;
1676 		pci_dev_put(pdev);
1677 	}
1678 
1679 	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);
1680 
1681 	return __pgprot(prot);
1682 }
1683 
1684 
1685 /*
1686  * Perform the actual remap of the pages for a PCI device mapping, as
1687  * appropriate for this architecture.  The region in the process to map
1688  * is described by vm_start and vm_end members of VMA, the base physical
1689  * address is found in vm_pgoff.
1690  * The pci device structure is provided so that architectures may make mapping
1691  * decisions on a per-device or per-bus basis.
1692  *
1693  * Returns a negative error code on failure, zero on success.
1694  */
1695 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
1696 			enum pci_mmap_state mmap_state,
1697 			int write_combine)
1698 {
1699 	resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
1700 	struct resource *rp;
1701 	int ret;
1702 
1703 	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
1704 	if (rp == NULL)
1705 		return -EINVAL;
1706 
1707 	vma->vm_pgoff = offset >> PAGE_SHIFT;
1708 	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
1709 						  vma->vm_page_prot,
1710 						  mmap_state, write_combine);
1711 
1712 	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
1713 			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
1714 
1715 	return ret;
1716 }
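
/*
 * Editor's note: the vm_pgoff handled above is the user-visible value
 * produced by pci_resource_to_user() below; __pci_mmap_make_offset()
 * matches it against one of the device's resources and, for I/O
 * space, converts it into the CPU physical address that
 * remap_pfn_range() expects.
 */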
1717 
1718 /* Obsolete functions. Should be removed once the symbios driver
1719  * is fixed
1720  */
1721 unsigned long
1722 phys_to_bus(unsigned long pa)
1723 {
1724 	struct pci_controller *hose;
1725 	int i;
1726 
1727 	for (hose = hose_head; hose; hose = hose->next) {
1728 		for (i = 0; i < 3; ++i) {
1729 			if (pa >= hose->mem_resources[i].start
1730 			    && pa <= hose->mem_resources[i].end) {
1731 				/*
1732 				 * XXX the hose->pci_mem_offset really
1733 				 * only applies to mem_resources[0].
1734 				 * We need a way to store an offset for
1735 				 * the others.  -- paulus
1736 				 */
1737 				if (i == 0)
1738 					pa -= hose->pci_mem_offset;
1739 				return pa;
1740 			}
1741 		}
1742 	}
1743 	/* hmmm, didn't find it */
1744 	return 0;
1745 }
1746 
1747 unsigned long
1748 pci_phys_to_bus(unsigned long pa, int busnr)
1749 {
1750 	struct pci_controller* hose = pci_bus_to_hose(busnr);
1751 	if (!hose)
1752 		return pa;
1753 	return pa - hose->pci_mem_offset;
1754 }
1755 
1756 unsigned long
1757 pci_bus_to_phys(unsigned int ba, int busnr)
1758 {
1759 	struct pci_controller* hose = pci_bus_to_hose(busnr);
1760 	if (!hose)
1761 		return ba;
1762 	return ba + hose->pci_mem_offset;
1763 }
1764 
1765 /* Provide information on locations of various I/O regions in physical
1766  * memory.  Do this on a per-card basis so that we choose the right
1767  * root bridge.
1768  * Note that the returned IO or memory base is a physical address
1769  */
1770 
1771 long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
1772 {
1773 	struct pci_controller* hose;
1774 	long result = -EOPNOTSUPP;
1775 
1776 	/* Argh ! Please forgive me for that hack, but that's the
1777 	 * simplest way to get existing XFree to not lockup on some
1778 	 * G5 machines... So when something asks for bus 0 io base
1779 	 * (bus 0 is HT root), we return the AGP one instead.
1780 	 */
1781 #ifdef CONFIG_PPC_PMAC
1782 	if (machine_is(powermac) && machine_is_compatible("MacRISC4"))
1783 		if (bus == 0)
1784 			bus = 0xf0;
1785 #endif /* CONFIG_PPC_PMAC */
1786 
1787 	hose = pci_bus_to_hose(bus);
1788 	if (!hose)
1789 		return -ENODEV;
1790 
1791 	switch (which) {
1792 	case IOBASE_BRIDGE_NUMBER:
1793 		return (long)hose->first_busno;
1794 	case IOBASE_MEMORY:
1795 		return (long)hose->pci_mem_offset;
1796 	case IOBASE_IO:
1797 		return (long)hose->io_base_phys;
1798 	case IOBASE_ISA_IO:
1799 		return (long)isa_io_base;
1800 	case IOBASE_ISA_MEM:
1801 		return (long)isa_mem_base;
1802 	}
1803 
1804 	return result;
1805 }
1806 
1807 void pci_resource_to_user(const struct pci_dev *dev, int bar,
1808 			  const struct resource *rsrc,
1809 			  resource_size_t *start, resource_size_t *end)
1810 {
1811 	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
1812 	resource_size_t offset = 0;
1813 
1814 	if (hose == NULL)
1815 		return;
1816 
1817 	if (rsrc->flags & IORESOURCE_IO)
1818 		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1819 
1820 	/* We pass a fully fixed up address to userland for MMIO instead of
1821 	 * a BAR value because X is lame and expects to be able to use that
1822 	 * to pass to /dev/mem !
1823 	 *
1824 	 * That means that we'll have potentially 64 bits values where some
1825 	 * userland apps only expect 32 (like X itself since it thinks only
1826 	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
1827 	 * 32 bits CHRPs :-(
1828 	 *
1829 	 * Hopefully, the sysfs interface is immune to that gunk. Once X
1830 	 * has been fixed (and the fix spread enough), we can re-enable the
1831 	 * 2 lines below and pass down a BAR value to userland. In that case
1832 	 * we'll also have to re-enable the matching code in
1833 	 * __pci_mmap_make_offset().
1834 	 *
1835 	 * BenH.
1836 	 */
1837 #if 0
1838 	else if (rsrc->flags & IORESOURCE_MEM)
1839 		offset = hose->pci_mem_offset;
1840 #endif
1841 
1842 	*start = rsrc->start - offset;
1843 	*end = rsrc->end - offset;
1844 }
1845 
1846 void __init pci_init_resource(struct resource *res, resource_size_t start,
1847 			      resource_size_t end, int flags, char *name)
1848 {
1849 	res->start = start;
1850 	res->end = end;
1851 	res->flags = flags;
1852 	res->name = name;
1853 	res->parent = NULL;
1854 	res->sibling = NULL;
1855 	res->child = NULL;
1856 }
1857 
1858 unsigned long pci_address_to_pio(phys_addr_t address)
1859 {
1860 	struct pci_controller* hose = hose_head;
1861 
1862 	for (; hose; hose = hose->next) {
1863 		unsigned int size = hose->io_resource.end -
1864 			hose->io_resource.start + 1;
1865 		if (address >= hose->io_base_phys &&
1866 		    address < (hose->io_base_phys + size)) {
1867 			unsigned long base =
1868 				(unsigned long)hose->io_base_virt - _IO_BASE;
1869 			return base + (address - hose->io_base_phys);
1870 		}
1871 	}
1872 	return (unsigned int)-1;
1873 }
1874 EXPORT_SYMBOL(pci_address_to_pio);
1875 
1876 /*
1877  * Null PCI config access functions, for the case when we can't
1878  * find a hose.
1879  */
1880 #define NULL_PCI_OP(rw, size, type)					\
1881 static int								\
1882 null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)	\
1883 {									\
1884 	return PCIBIOS_DEVICE_NOT_FOUND;    				\
1885 }
1886 
1887 static int
1888 null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
1889 		 int len, u32 *val)
1890 {
1891 	return PCIBIOS_DEVICE_NOT_FOUND;
1892 }
1893 
1894 static int
1895 null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
1896 		  int len, u32 val)
1897 {
1898 	return PCIBIOS_DEVICE_NOT_FOUND;
1899 }
1900 
1901 static struct pci_ops null_pci_ops =
1902 {
1903 	null_read_config,
1904 	null_write_config
1905 };
1906 
1907 /*
1908  * These functions are used early on before PCI scanning is done
1909  * and all of the pci_dev and pci_bus structures have been created.
1910  */
1911 static struct pci_bus *
1912 fake_pci_bus(struct pci_controller *hose, int busnr)
1913 {
1914 	static struct pci_bus bus;
1915 
1916 	if (hose == 0) {
1917 		hose = pci_bus_to_hose(busnr);
1918 		if (hose == 0)
1919 			printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1920 	}
1921 	bus.number = busnr;
1922 	bus.sysdata = hose;
1923 	bus.ops = hose? hose->ops: &null_pci_ops;
1924 	return &bus;
1925 }
1926 
1927 #define EARLY_PCI_OP(rw, size, type)					\
1928 int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
1929 			       int devfn, int offset, type value)	\
1930 {									\
1931 	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
1932 					    devfn, offset, value);	\
1933 }
1934 
1935 EARLY_PCI_OP(read, byte, u8 *)
1936 EARLY_PCI_OP(read, word, u16 *)
1937 EARLY_PCI_OP(read, dword, u32 *)
1938 EARLY_PCI_OP(write, byte, u8)
1939 EARLY_PCI_OP(write, word, u16)
1940 EARLY_PCI_OP(write, dword, u32)
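
/*
 * Editor's note, an illustrative (hypothetical) use of the early
 * accessors generated above: a platform setup routine that already
 * holds a hose can issue config cycles before the buses are scanned,
 * e.g.
 *
 *	u16 vendor;
 *	early_read_config_word(hose, hose->first_busno, 0,
 *			       PCI_VENDOR_ID, &vendor);
 */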
1941