xref: /openbmc/linux/arch/powerpc/kernel/prom.c (revision 80579e1f)
1 /*
2  * Procedures for creating, accessing and interpreting the device tree.
3  *
4  * Paul Mackerras	August 1996.
5  * Copyright (C) 1996-2005 Paul Mackerras.
6  *
7  *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8  *    {engebret|bergner}@us.ibm.com
9  *
10  *      This program is free software; you can redistribute it and/or
11  *      modify it under the terms of the GNU General Public License
12  *      as published by the Free Software Foundation; either version
13  *      2 of the License, or (at your option) any later version.
14  */
15 
16 #undef DEBUG
17 
18 #include <stdarg.h>
19 #include <linux/config.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/init.h>
23 #include <linux/threads.h>
24 #include <linux/spinlock.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/stringify.h>
28 #include <linux/delay.h>
29 #include <linux/initrd.h>
30 #include <linux/bitops.h>
31 #include <linux/module.h>
32 
33 #include <asm/prom.h>
34 #include <asm/rtas.h>
35 #include <asm/lmb.h>
36 #include <asm/page.h>
37 #include <asm/processor.h>
38 #include <asm/irq.h>
39 #include <asm/io.h>
40 #include <asm/smp.h>
41 #include <asm/system.h>
42 #include <asm/mmu.h>
43 #include <asm/pgtable.h>
44 #include <asm/pci.h>
45 #include <asm/iommu.h>
46 #include <asm/btext.h>
47 #include <asm/sections.h>
48 #include <asm/machdep.h>
49 #include <asm/pSeries_reconfig.h>
50 #include <asm/pci-bridge.h>
51 #ifdef CONFIG_PPC64
52 #include <asm/systemcfg.h>
53 #endif
54 
55 #ifdef DEBUG
56 #define DBG(fmt...) printk(KERN_ERR fmt)
57 #else
58 #define DBG(fmt...)
59 #endif
60 
61 struct pci_reg_property {
62 	struct pci_address addr;
63 	u32 size_hi;
64 	u32 size_lo;
65 };
66 
67 struct isa_reg_property {
68 	u32 space;
69 	u32 address;
70 	u32 size;
71 };
72 
73 
74 typedef int interpret_func(struct device_node *, unsigned long *,
75 			   int, int, int);
76 
77 extern struct rtas_t rtas;
78 extern struct lmb lmb;
79 extern unsigned long klimit;
80 
81 static unsigned long memory_limit;
82 
83 static int __initdata dt_root_addr_cells;
84 static int __initdata dt_root_size_cells;
85 
86 #ifdef CONFIG_PPC64
87 static int __initdata iommu_is_off;
88 int __initdata iommu_force_on;
89 extern unsigned long tce_alloc_start, tce_alloc_end;
90 #endif
91 
92 typedef u32 cell_t;
93 
94 #if 0
95 static struct boot_param_header *initial_boot_params __initdata;
96 #else
97 struct boot_param_header *initial_boot_params;
98 #endif
99 
100 static struct device_node *allnodes = NULL;
101 
102 /* Use when traversing the tree through the allnext, child, sibling,
103  * or parent members of struct device_node.
104  */
105 static DEFINE_RWLOCK(devtree_lock);
106 
107 /* exported to the outside world */
108 struct device_node *of_chosen;
109 
110 struct device_node *dflt_interrupt_controller;
111 int num_interrupt_controllers;
112 
113 /*
114  * Wrapper for allocating memory for various data that needs to be
115  * attached to device nodes as they are processed at boot or when
116  * added to the device tree later (e.g. DLPAR).  At boot there is
117  * already a region reserved so we just increment *mem_start by size;
118  * otherwise we call kmalloc.
119  */
120 static void * prom_alloc(unsigned long size, unsigned long *mem_start)
121 {
122 	unsigned long tmp;
123 
124 	if (!mem_start)
125 		return kmalloc(size, GFP_KERNEL);
126 
127 	tmp = *mem_start;
128 	*mem_start += size;
129 	return (void *)tmp;
130 }
131 
132 /*
133  * Find the device_node with a given phandle.
134  */
135 static struct device_node * find_phandle(phandle ph)
136 {
137 	struct device_node *np;
138 
139 	for (np = allnodes; np != 0; np = np->allnext)
140 		if (np->linux_phandle == ph)
141 			return np;
142 	return NULL;
143 }
144 
145 /*
146  * Find the interrupt parent of a node.
147  */
148 static struct device_node * __devinit intr_parent(struct device_node *p)
149 {
150 	phandle *parp;
151 
152 	parp = (phandle *) get_property(p, "interrupt-parent", NULL);
153 	if (parp == NULL)
154 		return p->parent;
155 	p = find_phandle(*parp);
156 	if (p != NULL)
157 		return p;
158 	/*
159 	 * On a powermac booted with BootX, we don't get to know the
160 	 * phandles for any nodes, so find_phandle will return NULL.
161 	 * Fortunately these machines only have one interrupt controller
162 	 * so there isn't in fact any ambiguity.  -- paulus
163 	 */
164 	if (num_interrupt_controllers == 1)
165 		p = dflt_interrupt_controller;
166 	return p;
167 }
168 
169 /*
170  * Find out the size of each entry of the interrupts property
171  * for a node.
172  */
173 int __devinit prom_n_intr_cells(struct device_node *np)
174 {
175 	struct device_node *p;
176 	unsigned int *icp;
177 
178 	for (p = np; (p = intr_parent(p)) != NULL; ) {
179 		icp = (unsigned int *)
180 			get_property(p, "#interrupt-cells", NULL);
181 		if (icp != NULL)
182 			return *icp;
183 		if (get_property(p, "interrupt-controller", NULL) != NULL
184 		    || get_property(p, "interrupt-map", NULL) != NULL) {
185 			printk("oops, node %s doesn't have #interrupt-cells\n",
186 			       p->full_name);
187 			return 1;
188 		}
189 	}
190 #ifdef DEBUG_IRQ
191 	printk("prom_n_intr_cells failed for %s\n", np->full_name);
192 #endif
193 	return 1;
194 }
195 
196 /*
197  * Map an interrupt from a device up to the platform interrupt
198  * descriptor.
199  */
200 static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
201 				   struct device_node *np, unsigned int *ints,
202 				   int nintrc)
203 {
204 	struct device_node *p, *ipar;
205 	unsigned int *imap, *imask, *ip;
206 	int i, imaplen, match;
207 	int newintrc = 0, newaddrc = 0;
208 	unsigned int *reg;
209 	int naddrc;
210 
211 	reg = (unsigned int *) get_property(np, "reg", NULL);
212 	naddrc = prom_n_addr_cells(np);
213 	p = intr_parent(np);
214 	while (p != NULL) {
215 		if (get_property(p, "interrupt-controller", NULL) != NULL)
216 			/* this node is an interrupt controller, stop here */
217 			break;
218 		imap = (unsigned int *)
219 			get_property(p, "interrupt-map", &imaplen);
220 		if (imap == NULL) {
221 			p = intr_parent(p);
222 			continue;
223 		}
224 		imask = (unsigned int *)
225 			get_property(p, "interrupt-map-mask", NULL);
226 		if (imask == NULL) {
227 			printk("oops, %s has interrupt-map but no mask\n",
228 			       p->full_name);
229 			return 0;
230 		}
231 		imaplen /= sizeof(unsigned int);
232 		match = 0;
233 		ipar = NULL;
234 		while (imaplen > 0 && !match) {
235 			/* check the child-interrupt field */
236 			match = 1;
237 			for (i = 0; i < naddrc && match; ++i)
238 				match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
239 			for (; i < naddrc + nintrc && match; ++i)
240 				match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
241 			imap += naddrc + nintrc;
242 			imaplen -= naddrc + nintrc;
243 			/* grab the interrupt parent */
244 			ipar = find_phandle((phandle) *imap++);
245 			--imaplen;
246 			if (ipar == NULL && num_interrupt_controllers == 1)
247 				/* cope with BootX not giving us phandles */
248 				ipar = dflt_interrupt_controller;
249 			if (ipar == NULL) {
250 				printk("oops, no int parent %x in map of %s\n",
251 				       imap[-1], p->full_name);
252 				return 0;
253 			}
254 			/* find the parent's # addr and intr cells */
255 			ip = (unsigned int *)
256 				get_property(ipar, "#interrupt-cells", NULL);
257 			if (ip == NULL) {
258 				printk("oops, no #interrupt-cells on %s\n",
259 				       ipar->full_name);
260 				return 0;
261 			}
262 			newintrc = *ip;
263 			ip = (unsigned int *)
264 				get_property(ipar, "#address-cells", NULL);
265 			newaddrc = (ip == NULL)? 0: *ip;
266 			imap += newaddrc + newintrc;
267 			imaplen -= newaddrc + newintrc;
268 		}
269 		if (imaplen < 0) {
270 			printk("oops, error decoding int-map on %s, len=%d\n",
271 			       p->full_name, imaplen);
272 			return 0;
273 		}
274 		if (!match) {
275 #ifdef DEBUG_IRQ
276 			printk("oops, no match in %s int-map for %s\n",
277 			       p->full_name, np->full_name);
278 #endif
279 			return 0;
280 		}
281 		p = ipar;
282 		naddrc = newaddrc;
283 		nintrc = newintrc;
284 		ints = imap - nintrc;
285 		reg = ints - naddrc;
286 	}
287 	if (p == NULL) {
288 #ifdef DEBUG_IRQ
289 		printk("hmmm, int tree for %s doesn't have ctrler\n",
290 		       np->full_name);
291 #endif
292 		return 0;
293 	}
294 	*irq = ints;
295 	*ictrler = p;
296 	return nintrc;
297 }
298 
299 static unsigned char map_isa_senses[4] = {
300 	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
301 	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
302 	IRQ_SENSE_EDGE  | IRQ_POLARITY_NEGATIVE,
303 	IRQ_SENSE_EDGE  | IRQ_POLARITY_POSITIVE
304 };
305 
306 static unsigned char map_mpic_senses[4] = {
307 	IRQ_SENSE_EDGE  | IRQ_POLARITY_POSITIVE,
308 	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
309 	/* 2 seems to be used for the 8259 cascade... */
310 	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
311 	IRQ_SENSE_EDGE  | IRQ_POLARITY_NEGATIVE,
312 };
313 
314 static int __devinit finish_node_interrupts(struct device_node *np,
315 					    unsigned long *mem_start,
316 					    int measure_only)
317 {
318 	unsigned int *ints;
319 	int intlen, intrcells, intrcount;
320 	int i, j, n, sense;
321 	unsigned int *irq, virq;
322 	struct device_node *ic;
323 
324 	if (num_interrupt_controllers == 0) {
325 		/*
326 		 * Old machines just have a list of interrupt numbers
327 		 * and no interrupt-controller nodes.
328 		 */
329 		ints = (unsigned int *) get_property(np, "AAPL,interrupts",
330 						     &intlen);
331 		/* XXX old interpret_pci_props looked in parent too */
332 		/* XXX old interpret_macio_props looked for interrupts
333 		   before AAPL,interrupts */
334 		if (ints == NULL)
335 			ints = (unsigned int *) get_property(np, "interrupts",
336 							     &intlen);
337 		if (ints == NULL)
338 			return 0;
339 
340 		np->n_intrs = intlen / sizeof(unsigned int);
341 		np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
342 				       mem_start);
343 		if (!np->intrs)
344 			return -ENOMEM;
345 		if (measure_only)
346 			return 0;
347 
348 		for (i = 0; i < np->n_intrs; ++i) {
349 			np->intrs[i].line = *ints++;
350 			np->intrs[i].sense = IRQ_SENSE_LEVEL
351 				| IRQ_POLARITY_NEGATIVE;
352 		}
353 		return 0;
354 	}
355 
356 	ints = (unsigned int *) get_property(np, "interrupts", &intlen);
357 	if (ints == NULL)
358 		return 0;
359 	intrcells = prom_n_intr_cells(np);
360 	intlen /= intrcells * sizeof(unsigned int);
361 
362 	np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
363 	if (!np->intrs)
364 		return -ENOMEM;
365 
366 	if (measure_only)
367 		return 0;
368 
369 	intrcount = 0;
370 	for (i = 0; i < intlen; ++i, ints += intrcells) {
371 		n = map_interrupt(&irq, &ic, np, ints, intrcells);
372 		if (n <= 0)
373 			continue;
374 
375 		/* don't map IRQ numbers under a cascaded 8259 controller */
376 		if (ic && device_is_compatible(ic, "chrp,iic")) {
377 			np->intrs[intrcount].line = irq[0];
378 			sense = (n > 1)? (irq[1] & 3): 3;
379 			np->intrs[intrcount].sense = map_isa_senses[sense];
380 		} else {
381 			virq = virt_irq_create_mapping(irq[0]);
382 #ifdef CONFIG_PPC64
383 			if (virq == NO_IRQ) {
384 				printk(KERN_CRIT "Could not allocate interrupt"
385 				       " number for %s\n", np->full_name);
386 				continue;
387 			}
388 #endif
389 			np->intrs[intrcount].line = irq_offset_up(virq);
390 			sense = (n > 1)? (irq[1] & 3): 1;
391 			np->intrs[intrcount].sense = map_mpic_senses[sense];
392 		}
393 
394 #ifdef CONFIG_PPC64
395 		/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
396 		if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) {
397 			char *name = get_property(ic->parent, "name", NULL);
398 			if (name && !strcmp(name, "u3"))
399 				np->intrs[intrcount].line += 128;
400 			else if (!(name && !strcmp(name, "mac-io")))
401 				/* ignore other cascaded controllers, such as
402 				   the k2-sata-root */
403 				break;
404 		}
405 #endif
406 		if (n > 2) {
407 			printk("hmmm, got %d intr cells for %s:", n,
408 			       np->full_name);
409 			for (j = 0; j < n; ++j)
410 				printk(" %d", irq[j]);
411 			printk("\n");
412 		}
413 		++intrcount;
414 	}
415 	np->n_intrs = intrcount;
416 
417 	return 0;
418 }
419 
420 static int __devinit interpret_pci_props(struct device_node *np,
421 					 unsigned long *mem_start,
422 					 int naddrc, int nsizec,
423 					 int measure_only)
424 {
425 	struct address_range *adr;
426 	struct pci_reg_property *pci_addrs;
427 	int i, l, n_addrs;
428 
429 	pci_addrs = (struct pci_reg_property *)
430 		get_property(np, "assigned-addresses", &l);
431 	if (!pci_addrs)
432 		return 0;
433 
434 	n_addrs = l / sizeof(*pci_addrs);
435 
436 	adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
437 	if (!adr)
438 		return -ENOMEM;
439 
440  	if (measure_only)
441  		return 0;
442 
443  	np->addrs = adr;
444  	np->n_addrs = n_addrs;
445 
446  	for (i = 0; i < n_addrs; i++) {
447  		adr[i].space = pci_addrs[i].addr.a_hi;
448  		adr[i].address = pci_addrs[i].addr.a_lo |
449 			((u64)pci_addrs[i].addr.a_mid << 32);
450  		adr[i].size = pci_addrs[i].size_lo;
451 	}
452 
453 	return 0;
454 }
455 
456 static int __init interpret_dbdma_props(struct device_node *np,
457 					unsigned long *mem_start,
458 					int naddrc, int nsizec,
459 					int measure_only)
460 {
461 	struct reg_property32 *rp;
462 	struct address_range *adr;
463 	unsigned long base_address;
464 	int i, l;
465 	struct device_node *db;
466 
467 	base_address = 0;
468 	if (!measure_only) {
469 		for (db = np->parent; db != NULL; db = db->parent) {
470 			if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
471 				base_address = db->addrs[0].address;
472 				break;
473 			}
474 		}
475 	}
476 
477 	rp = (struct reg_property32 *) get_property(np, "reg", &l);
478 	if (rp != 0 && l >= sizeof(struct reg_property32)) {
479 		i = 0;
480 		adr = (struct address_range *) (*mem_start);
481 		while ((l -= sizeof(struct reg_property32)) >= 0) {
482 			if (!measure_only) {
483 				adr[i].space = 2;
484 				adr[i].address = rp[i].address + base_address;
485 				adr[i].size = rp[i].size;
486 			}
487 			++i;
488 		}
489 		np->addrs = adr;
490 		np->n_addrs = i;
491 		(*mem_start) += i * sizeof(struct address_range);
492 	}
493 
494 	return 0;
495 }
496 
497 static int __init interpret_macio_props(struct device_node *np,
498 					unsigned long *mem_start,
499 					int naddrc, int nsizec,
500 					int measure_only)
501 {
502 	struct reg_property32 *rp;
503 	struct address_range *adr;
504 	unsigned long base_address;
505 	int i, l;
506 	struct device_node *db;
507 
508 	base_address = 0;
509 	if (!measure_only) {
510 		for (db = np->parent; db != NULL; db = db->parent) {
511 			if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
512 				base_address = db->addrs[0].address;
513 				break;
514 			}
515 		}
516 	}
517 
518 	rp = (struct reg_property32 *) get_property(np, "reg", &l);
519 	if (rp != 0 && l >= sizeof(struct reg_property32)) {
520 		i = 0;
521 		adr = (struct address_range *) (*mem_start);
522 		while ((l -= sizeof(struct reg_property32)) >= 0) {
523 			if (!measure_only) {
524 				adr[i].space = 2;
525 				adr[i].address = rp[i].address + base_address;
526 				adr[i].size = rp[i].size;
527 			}
528 			++i;
529 		}
530 		np->addrs = adr;
531 		np->n_addrs = i;
532 		(*mem_start) += i * sizeof(struct address_range);
533 	}
534 
535 	return 0;
536 }
537 
538 static int __init interpret_isa_props(struct device_node *np,
539 				      unsigned long *mem_start,
540 				      int naddrc, int nsizec,
541 				      int measure_only)
542 {
543 	struct isa_reg_property *rp;
544 	struct address_range *adr;
545 	int i, l;
546 
547 	rp = (struct isa_reg_property *) get_property(np, "reg", &l);
548 	if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
549 		i = 0;
550 		adr = (struct address_range *) (*mem_start);
551 		while ((l -= sizeof(struct isa_reg_property)) >= 0) {
552 			if (!measure_only) {
553 				adr[i].space = rp[i].space;
554 				adr[i].address = rp[i].address;
555 				adr[i].size = rp[i].size;
556 			}
557 			++i;
558 		}
559 		np->addrs = adr;
560 		np->n_addrs = i;
561 		(*mem_start) += i * sizeof(struct address_range);
562 	}
563 
564 	return 0;
565 }
566 
567 static int __init interpret_root_props(struct device_node *np,
568 				       unsigned long *mem_start,
569 				       int naddrc, int nsizec,
570 				       int measure_only)
571 {
572 	struct address_range *adr;
573 	int i, l;
574 	unsigned int *rp;
575 	int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
576 
577 	rp = (unsigned int *) get_property(np, "reg", &l);
578 	if (rp != 0 && l >= rpsize) {
579 		i = 0;
580 		adr = (struct address_range *) (*mem_start);
581 		while ((l -= rpsize) >= 0) {
582 			if (!measure_only) {
583 				adr[i].space = 0;
584 				adr[i].address = rp[naddrc - 1];
585 				adr[i].size = rp[naddrc + nsizec - 1];
586 			}
587 			++i;
588 			rp += naddrc + nsizec;
589 		}
590 		np->addrs = adr;
591 		np->n_addrs = i;
592 		(*mem_start) += i * sizeof(struct address_range);
593 	}
594 
595 	return 0;
596 }
597 
598 static int __devinit finish_node(struct device_node *np,
599 				 unsigned long *mem_start,
600 				 interpret_func *ifunc,
601 				 int naddrc, int nsizec,
602 				 int measure_only)
603 {
604 	struct device_node *child;
605 	int *ip, rc = 0;
606 
607 	/* get the device addresses and interrupts */
608 	if (ifunc != NULL)
609 		rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
610 	if (rc)
611 		goto out;
612 
613 	rc = finish_node_interrupts(np, mem_start, measure_only);
614 	if (rc)
615 		goto out;
616 
617 	/* Look for #address-cells and #size-cells properties. */
618 	ip = (int *) get_property(np, "#address-cells", NULL);
619 	if (ip != NULL)
620 		naddrc = *ip;
621 	ip = (int *) get_property(np, "#size-cells", NULL);
622 	if (ip != NULL)
623 		nsizec = *ip;
624 
625 	if (!strcmp(np->name, "device-tree") || np->parent == NULL)
626 		ifunc = interpret_root_props;
627 	else if (np->type == 0)
628 		ifunc = NULL;
629 	else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
630 		ifunc = interpret_pci_props;
631 	else if (!strcmp(np->type, "dbdma"))
632 		ifunc = interpret_dbdma_props;
633 	else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
634 		ifunc = interpret_macio_props;
635 	else if (!strcmp(np->type, "isa"))
636 		ifunc = interpret_isa_props;
637 	else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
638 		ifunc = interpret_root_props;
639 	else if (!((ifunc == interpret_dbdma_props
640 		    || ifunc == interpret_macio_props)
641 		   && (!strcmp(np->type, "escc")
642 		       || !strcmp(np->type, "media-bay"))))
643 		ifunc = NULL;
644 
645 	for (child = np->child; child != NULL; child = child->sibling) {
646 		rc = finish_node(child, mem_start, ifunc,
647 				 naddrc, nsizec, measure_only);
648 		if (rc)
649 			goto out;
650 	}
651 out:
652 	return rc;
653 }
654 
655 static void __init scan_interrupt_controllers(void)
656 {
657 	struct device_node *np;
658 	int n = 0;
659 	char *name, *ic;
660 	int iclen;
661 
662 	for (np = allnodes; np != NULL; np = np->allnext) {
663 		ic = get_property(np, "interrupt-controller", &iclen);
664 		name = get_property(np, "name", NULL);
665 		/* checking iclen makes sure we don't get a false
666 		   match on /chosen.interrupt_controller */
667 		if ((name != NULL
668 		     && strcmp(name, "interrupt-controller") == 0)
669 		    || (ic != NULL && iclen == 0
670 			&& strcmp(name, "AppleKiwi"))) {
671 			if (n == 0)
672 				dflt_interrupt_controller = np;
673 			++n;
674 		}
675 	}
676 	num_interrupt_controllers = n;
677 }
678 
679 /**
680  * finish_device_tree is called once things are running normally
681  * (i.e. with text and data mapped to the address they were linked at).
682  * It traverses the device tree and fills in some of the additional
683  * fields in each node, such as {n_}addrs and {n_}intrs; the virtual
684  * interrupt mapping is also initialized at this point.
685  */
686 void __init finish_device_tree(void)
687 {
688 	unsigned long start, end, size = 0;
689 
690 	DBG(" -> finish_device_tree\n");
691 
692 #ifdef CONFIG_PPC64
693 	/* Initialize virtual IRQ map */
694 	virt_irq_init();
695 #endif
696 	scan_interrupt_controllers();
697 
698 	/*
699 	 * Finish the device-tree (pre-parsing some properties, etc.).
700 	 * We do this in two passes: one with "measure_only" set, which
701 	 * only measures the amount of memory needed; then we allocate
702 	 * that memory and call finish_node again. However, we must be
703 	 * careful because most routines will now fail when prom_alloc()
704 	 * returns 0, so we make sure our first pass doesn't start at 0.
705 	 * We pre-initialize size to 16 for that reason and then remove
706 	 * those additional 16 bytes afterwards.
707 	 */
708 	size = 16;
709 	finish_node(allnodes, &size, NULL, 0, 0, 1);
710 	size -= 16;
711 	end = start = (unsigned long) __va(lmb_alloc(size, 128));
712 	finish_node(allnodes, &end, NULL, 0, 0, 0);
713 	BUG_ON(end != start + size);
714 
715 	DBG(" <- finish_device_tree\n");
716 }
717 
718 static inline char *find_flat_dt_string(u32 offset)
719 {
720 	return ((char *)initial_boot_params) +
721 		initial_boot_params->off_dt_strings + offset;
722 }
723 
724 /**
725  * This function is used to scan the flattened device-tree; it is
726  * used to extract memory information at boot before we can
727  * unflatten the tree.
728  */
729 static int __init scan_flat_dt(int (*it)(unsigned long node,
730 					 const char *uname, int depth,
731 					 void *data),
732 			       void *data)
733 {
734 	unsigned long p = ((unsigned long)initial_boot_params) +
735 		initial_boot_params->off_dt_struct;
736 	int rc = 0;
737 	int depth = -1;
738 
739 	do {
740 		u32 tag = *((u32 *)p);
741 		char *pathp;
742 
743 		p += 4;
744 		if (tag == OF_DT_END_NODE) {
745 			depth --;
746 			continue;
747 		}
748 		if (tag == OF_DT_NOP)
749 			continue;
750 		if (tag == OF_DT_END)
751 			break;
752 		if (tag == OF_DT_PROP) {
753 			u32 sz = *((u32 *)p);
754 			p += 8;
755 			if (initial_boot_params->version < 0x10)
756 				p = _ALIGN(p, sz >= 8 ? 8 : 4);
757 			p += sz;
758 			p = _ALIGN(p, 4);
759 			continue;
760 		}
761 		if (tag != OF_DT_BEGIN_NODE) {
762 			printk(KERN_WARNING "Invalid tag %x scanning flattened"
763 			       " device tree !\n", tag);
764 			return -EINVAL;
765 		}
766 		depth++;
767 		pathp = (char *)p;
768 		p = _ALIGN(p + strlen(pathp) + 1, 4);
769 		if ((*pathp) == '/') {
770 			char *lp, *np;
771 			for (lp = NULL, np = pathp; *np; np++)
772 				if ((*np) == '/')
773 					lp = np+1;
774 			if (lp != NULL)
775 				pathp = lp;
776 		}
777 		rc = it(p, pathp, depth, data);
778 		if (rc != 0)
779 			break;
780 	} while(1);
781 
782 	return rc;
783 }
784 
785 /**
786  * This function can be used within a scan_flat_dt() callback to get
787  * access to properties.
788  */
789 static void* __init get_flat_dt_prop(unsigned long node, const char *name,
790 				     unsigned long *size)
791 {
792 	unsigned long p = node;
793 
794 	do {
795 		u32 tag = *((u32 *)p);
796 		u32 sz, noff;
797 		const char *nstr;
798 
799 		p += 4;
800 		if (tag == OF_DT_NOP)
801 			continue;
802 		if (tag != OF_DT_PROP)
803 			return NULL;
804 
805 		sz = *((u32 *)p);
806 		noff = *((u32 *)(p + 4));
807 		p += 8;
808 		if (initial_boot_params->version < 0x10)
809 			p = _ALIGN(p, sz >= 8 ? 8 : 4);
810 
811 		nstr = find_flat_dt_string(noff);
812 		if (nstr == NULL) {
813 			printk(KERN_WARNING "Can't find property index"
814 			       " name !\n");
815 			return NULL;
816 		}
817 		if (strcmp(name, nstr) == 0) {
818 			if (size)
819 				*size = sz;
820 			return (void *)p;
821 		}
822 		p += sz;
823 		p = _ALIGN(p, 4);
824 	} while(1);
825 }
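
/*
 * A minimal usage sketch (the callback below is hypothetical, not part
 * of this file): scan_flat_dt() walks every node of the flattened tree
 * and get_flat_dt_prop() pulls a named property out of the node that is
 * currently being visited.
 *
 *	static int __init my_scan_cb(unsigned long node, const char *uname,
 *				     int depth, void *data)
 *	{
 *		char *type = get_flat_dt_prop(node, "device_type", NULL);
 *
 *		if (type == NULL || strcmp(type, "memory") != 0)
 *			return 0;	(keep scanning)
 *		return 1;		(match found, stop the scan)
 *	}
 *
 *	scan_flat_dt(my_scan_cb, NULL);
 */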
826 
827 static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
828 				       unsigned long align)
829 {
830 	void *res;
831 
832 	*mem = _ALIGN(*mem, align);
833 	res = (void *)*mem;
834 	*mem += size;
835 
836 	return res;
837 }
838 
839 static unsigned long __init unflatten_dt_node(unsigned long mem,
840 					      unsigned long *p,
841 					      struct device_node *dad,
842 					      struct device_node ***allnextpp,
843 					      unsigned long fpsize)
844 {
845 	struct device_node *np;
846 	struct property *pp, **prev_pp = NULL;
847 	char *pathp;
848 	u32 tag;
849 	unsigned int l, allocl;
850 	int has_name = 0;
851 	int new_format = 0;
852 
853 	tag = *((u32 *)(*p));
854 	if (tag != OF_DT_BEGIN_NODE) {
855 		printk("Weird tag at start of node: %x\n", tag);
856 		return mem;
857 	}
858 	*p += 4;
859 	pathp = (char *)*p;
860 	l = allocl = strlen(pathp) + 1;
861 	*p = _ALIGN(*p + l, 4);
862 
863 	/* Version 0x10 has a more compact unit name here instead of the full
864 	 * path: we accumulate the full path size using "fpsize" and rebuild
865 	 * it later. We detect this because the first character of the name is
866 	 * not '/'.
867 	 */
868 	if ((*pathp) != '/') {
869 		new_format = 1;
870 		if (fpsize == 0) {
871 			/* Root node: special case. fpsize accounts for the path
872 			 * plus the terminating zero. The root node only has '/', so
873 			 * fpsize should be 2, but we want to avoid first-level
874 			 * nodes having two '/', so we use fpsize 1 here.
875 			 */
876 			fpsize = 1;
877 			allocl = 2;
878 		} else {
879 			/* account for '/' and path size minus terminal 0
880 			 * already in 'l'
881 			 */
882 			fpsize += l;
883 			allocl = fpsize;
884 		}
885 	}
886 
887 
888 	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
889 				__alignof__(struct device_node));
890 	if (allnextpp) {
891 		memset(np, 0, sizeof(*np));
892 		np->full_name = ((char*)np) + sizeof(struct device_node);
893 		if (new_format) {
894 			char *p = np->full_name;
895 			/* rebuild full path for new format */
896 			if (dad && dad->parent) {
897 				strcpy(p, dad->full_name);
898 #ifdef DEBUG
899 				if ((strlen(p) + l + 1) != allocl) {
900 					DBG("%s: p: %d, l: %d, a: %d\n",
901 					    pathp, strlen(p), l, allocl);
902 				}
903 #endif
904 				p += strlen(p);
905 			}
906 			*(p++) = '/';
907 			memcpy(p, pathp, l);
908 		} else
909 			memcpy(np->full_name, pathp, l);
910 		prev_pp = &np->properties;
911 		**allnextpp = np;
912 		*allnextpp = &np->allnext;
913 		if (dad != NULL) {
914 			np->parent = dad;
915 			/* we temporarily use the next field as `last_child'*/
916 			if (dad->next == 0)
917 				dad->child = np;
918 			else
919 				dad->next->sibling = np;
920 			dad->next = np;
921 		}
922 		kref_init(&np->kref);
923 	}
924 	while(1) {
925 		u32 sz, noff;
926 		char *pname;
927 
928 		tag = *((u32 *)(*p));
929 		if (tag == OF_DT_NOP) {
930 			*p += 4;
931 			continue;
932 		}
933 		if (tag != OF_DT_PROP)
934 			break;
935 		*p += 4;
936 		sz = *((u32 *)(*p));
937 		noff = *((u32 *)((*p) + 4));
938 		*p += 8;
939 		if (initial_boot_params->version < 0x10)
940 			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);
941 
942 		pname = find_flat_dt_string(noff);
943 		if (pname == NULL) {
944 			printk("Can't find property name in list !\n");
945 			break;
946 		}
947 		if (strcmp(pname, "name") == 0)
948 			has_name = 1;
949 		l = strlen(pname) + 1;
950 		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
951 					__alignof__(struct property));
952 		if (allnextpp) {
953 			if (strcmp(pname, "linux,phandle") == 0) {
954 				np->node = *((u32 *)*p);
955 				if (np->linux_phandle == 0)
956 					np->linux_phandle = np->node;
957 			}
958 			if (strcmp(pname, "ibm,phandle") == 0)
959 				np->linux_phandle = *((u32 *)*p);
960 			pp->name = pname;
961 			pp->length = sz;
962 			pp->value = (void *)*p;
963 			*prev_pp = pp;
964 			prev_pp = &pp->next;
965 		}
966 		*p = _ALIGN((*p) + sz, 4);
967 	}
968 	/* With version 0x10 we may not have the name property; recreate
969 	 * it here from the unit name if absent.
970 	 */
971 	if (!has_name) {
972 		char *p = pathp, *ps = pathp, *pa = NULL;
973 		int sz;
974 
975 		while (*p) {
976 			if ((*p) == '@')
977 				pa = p;
978 			if ((*p) == '/')
979 				ps = p + 1;
980 			p++;
981 		}
982 		if (pa < ps)
983 			pa = p;
984 		sz = (pa - ps) + 1;
985 		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
986 					__alignof__(struct property));
987 		if (allnextpp) {
988 			pp->name = "name";
989 			pp->length = sz;
990 			pp->value = (unsigned char *)(pp + 1);
991 			*prev_pp = pp;
992 			prev_pp = &pp->next;
993 			memcpy(pp->value, ps, sz - 1);
994 			((char *)pp->value)[sz - 1] = 0;
995 			DBG("fixed up name for %s -> %s\n", pathp, pp->value);
996 		}
997 	}
998 	if (allnextpp) {
999 		*prev_pp = NULL;
1000 		np->name = get_property(np, "name", NULL);
1001 		np->type = get_property(np, "device_type", NULL);
1002 
1003 		if (!np->name)
1004 			np->name = "<NULL>";
1005 		if (!np->type)
1006 			np->type = "<NULL>";
1007 	}
1008 	while (tag == OF_DT_BEGIN_NODE) {
1009 		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
1010 		tag = *((u32 *)(*p));
1011 	}
1012 	if (tag != OF_DT_END_NODE) {
1013 		printk("Weird tag at end of node: %x\n", tag);
1014 		return mem;
1015 	}
1016 	*p += 4;
1017 	return mem;
1018 }
1019 
1020 
1021 /**
1022  * Unflattens the device-tree passed by the firmware, creating the
1023  * tree of struct device_node. It also fills in the "name" and "type"
1024  * pointers of the nodes so the normal device-tree walking functions
1025  * can be used (this used to be done by finish_device_tree).
1026  */
1027 void __init unflatten_device_tree(void)
1028 {
1029 	unsigned long start, mem, size;
1030 	struct device_node **allnextp = &allnodes;
1031 	char *p = NULL;
1032 	int l = 0;
1033 
1034 	DBG(" -> unflatten_device_tree()\n");
1035 
1036 	/* First pass, scan for size */
1037 	start = ((unsigned long)initial_boot_params) +
1038 		initial_boot_params->off_dt_struct;
1039 	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
1040 	size = (size | 3) + 1;
1041 
1042 	DBG("  size is %lx, allocating...\n", size);
1043 
1044 	/* Allocate memory for the expanded device tree */
1045 	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
1046 	if (!mem) {
1047 		DBG("Couldn't allocate memory with lmb_alloc()!\n");
1048 		panic("Couldn't allocate memory with lmb_alloc()!\n");
1049 	}
1050 	mem = (unsigned long) __va(mem);
1051 
1052 	((u32 *)mem)[size / 4] = 0xdeadbeef;
1053 
1054 	DBG("  unflattening %lx...\n", mem);
1055 
1056 	/* Second pass, do actual unflattening */
1057 	start = ((unsigned long)initial_boot_params) +
1058 		initial_boot_params->off_dt_struct;
1059 	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
1060 	if (*((u32 *)start) != OF_DT_END)
1061 		printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
1062 	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
1063 		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
1064 		       ((u32 *)mem)[size / 4] );
1065 	*allnextp = NULL;
1066 
1067 	/* Get pointer to OF "/chosen" node for use everywhere */
1068 	of_chosen = of_find_node_by_path("/chosen");
1069 	if (of_chosen == NULL)
1070 		of_chosen = of_find_node_by_path("/chosen@0");
1071 
1072 	/* Retrieve command line */
1073 	if (of_chosen != NULL) {
1074 		p = (char *)get_property(of_chosen, "bootargs", &l);
1075 		if (p != NULL && l > 0)
1076 			strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
1077 	}
1078 #ifdef CONFIG_CMDLINE
1079 	if (l == 0 || (l == 1 && (*p) == 0))
1080 		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1081 #endif /* CONFIG_CMDLINE */
1082 
1083 	DBG("Command line is: %s\n", cmd_line);
1084 
1085 	DBG(" <- unflatten_device_tree()\n");
1086 }
1087 
1088 
1089 static int __init early_init_dt_scan_cpus(unsigned long node,
1090 					  const char *uname, int depth, void *data)
1091 {
1092 	char *type = get_flat_dt_prop(node, "device_type", NULL);
1093 	u32 *prop;
1094 	unsigned long size = 0;
1095 
1096 	/* We are scanning "cpu" nodes only */
1097 	if (type == NULL || strcmp(type, "cpu") != 0)
1098 		return 0;
1099 
1100 #ifdef CONFIG_PPC_PSERIES
1101 	/* On LPAR, look for the first ibm,pft-size property for the hash table size
1102 	 */
1103 	if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) {
1104 		u32 *pft_size;
1105 		pft_size = get_flat_dt_prop(node, "ibm,pft-size", NULL);
1106 		if (pft_size != NULL) {
1107 			/* pft_size[0] is the NUMA CEC cookie */
1108 			ppc64_pft_size = pft_size[1];
1109 		}
1110 	}
1111 #endif
1112 
1113 	boot_cpuid = 0;
1114 	boot_cpuid_phys = 0;
1115 	if (initial_boot_params && initial_boot_params->version >= 2) {
1116 		/* version 2 of the kexec param format adds the phys cpuid
1117 		 * of booted proc.
1118 		 */
1119 		boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
1120 	} else {
1121 		/* Check if it's the boot-cpu; if so, set its hw index now */
1122 		if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) {
1123 			prop = get_flat_dt_prop(node, "reg", NULL);
1124 			if (prop != NULL)
1125 				boot_cpuid_phys = *prop;
1126 		}
1127 	}
1128 	set_hard_smp_processor_id(0, boot_cpuid_phys);
1129 
1130 #ifdef CONFIG_ALTIVEC
1131 	/* Check if we have a VMX unit and, if so, update the CPU features */
1132 	prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", &size);
1133 	if (prop && (*prop) > 0) {
1134 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1135 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1136 	}
1137 
1138 	/* Same goes for Apple's "altivec" property */
1139 	prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL);
1140 	if (prop) {
1141 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1142 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1143 	}
1144 #endif /* CONFIG_ALTIVEC */
1145 
1146 #ifdef CONFIG_PPC_PSERIES
1147 	/*
1148 	 * Check for an SMT-capable CPU and set the CPU feature. We do
1149 	 * this by looking at the size of the ibm,ppc-interrupt-server#s
1150 	 * property.
1151 	 */
1152 	prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
1153 				       &size);
1154 	cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
1155 	if (prop && ((size / sizeof(u32)) > 1))
1156 		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
1157 #endif
1158 
1159 	return 0;
1160 }
1161 
1162 static int __init early_init_dt_scan_chosen(unsigned long node,
1163 					    const char *uname, int depth, void *data)
1164 {
1165 	u32 *prop;
1166 	unsigned long *lprop;
1167 
1168 	DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1169 
1170 	if (depth != 1 ||
1171 	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
1172 		return 0;
1173 
1174 	/* get platform type */
1175 	prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL);
1176 	if (prop == NULL)
1177 		return 0;
1178 #ifdef CONFIG_PPC64
1179 	systemcfg->platform = *prop;
1180 #else
1181 #ifdef CONFIG_PPC_MULTIPLATFORM
1182 	_machine = *prop;
1183 #endif
1184 #endif
1185 
1186 #ifdef CONFIG_PPC64
1187 	/* check if iommu is forced on or off */
1188 	if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
1189 		iommu_is_off = 1;
1190 	if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
1191 		iommu_force_on = 1;
1192 #endif
1193 
1194  	lprop = get_flat_dt_prop(node, "linux,memory-limit", NULL);
1195  	if (lprop)
1196  		memory_limit = *lprop;
1197 
1198 #ifdef CONFIG_PPC64
1199  	lprop = get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
1200  	if (lprop)
1201  		tce_alloc_start = *lprop;
1202  	lprop = get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
1203  	if (lprop)
1204  		tce_alloc_end = *lprop;
1205 #endif
1206 
1207 #ifdef CONFIG_PPC_RTAS
1208 	/* To help early debugging via the front panel, we retrieve a minimal
1209 	 * set of RTAS information now if available.
1210 	 */
1211 	{
1212 		u64 *basep, *entryp;
1213 
1214 		basep = get_flat_dt_prop(node, "linux,rtas-base", NULL);
1215 		entryp = get_flat_dt_prop(node, "linux,rtas-entry", NULL);
1216 		prop = get_flat_dt_prop(node, "linux,rtas-size", NULL);
1217 		if (basep && entryp && prop) {
1218 			rtas.base = *basep;
1219 			rtas.entry = *entryp;
1220 			rtas.size = *prop;
1221 		}
1222 	}
1223 #endif /* CONFIG_PPC_RTAS */
1224 
1225 	/* break now */
1226 	return 1;
1227 }
1228 
1229 static int __init early_init_dt_scan_root(unsigned long node,
1230 					  const char *uname, int depth, void *data)
1231 {
1232 	u32 *prop;
1233 
1234 	if (depth != 0)
1235 		return 0;
1236 
1237 	prop = get_flat_dt_prop(node, "#size-cells", NULL);
1238 	dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1239 	DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1240 
1241 	prop = get_flat_dt_prop(node, "#address-cells", NULL);
1242 	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1243 	DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1244 
1245 	/* break now */
1246 	return 1;
1247 }
1248 
1249 static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1250 {
1251 	cell_t *p = *cellp;
1252 	unsigned long r;
1253 
1254 	/* Ignore more than 2 cells */
1255 	while (s > sizeof(unsigned long) / 4) {
1256 		p++;
1257 		s--;
1258 	}
1259 	r = *p++;
1260 #ifdef CONFIG_PPC64
1261 	if (s > 1) {
1262 		r <<= 32;
1263 		r |= *(p++);
1264 		s--;
1265 	}
1266 #endif
1267 
1268 	*cellp = p;
1269 	return r;
1270 }
1271 
1272 
1273 static int __init early_init_dt_scan_memory(unsigned long node,
1274 					    const char *uname, int depth, void *data)
1275 {
1276 	char *type = get_flat_dt_prop(node, "device_type", NULL);
1277 	cell_t *reg, *endp;
1278 	unsigned long l;
1279 
1280 	/* We are scanning "memory" nodes only */
1281 	if (type == NULL || strcmp(type, "memory") != 0)
1282 		return 0;
1283 
1284 	reg = (cell_t *)get_flat_dt_prop(node, "reg", &l);
1285 	if (reg == NULL)
1286 		return 0;
1287 
1288 	endp = reg + (l / sizeof(cell_t));
1289 
1290 	DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n",
1291 	    uname, l, reg[0], reg[1], reg[2], reg[3]);
1292 
1293 	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1294 		unsigned long base, size;
1295 
1296 		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1297 		size = dt_mem_next_cell(dt_root_size_cells, &reg);
1298 
1299 		if (size == 0)
1300 			continue;
1301 		DBG(" - %lx ,  %lx\n", base, size);
1302 #ifdef CONFIG_PPC64
1303 		if (iommu_is_off) {
1304 			if (base >= 0x80000000ul)
1305 				continue;
1306 			if ((base + size) > 0x80000000ul)
1307 				size = 0x80000000ul - base;
1308 		}
1309 #endif
1310 		lmb_add(base, size);
1311 	}
1312 	return 0;
1313 }
1314 
1315 static void __init early_reserve_mem(void)
1316 {
1317 	unsigned long base, size;
1318 	unsigned long *reserve_map;
1319 
1320 	reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
1321 					initial_boot_params->off_mem_rsvmap);
1322 	while (1) {
1323 		base = *(reserve_map++);
1324 		size = *(reserve_map++);
1325 		if (size == 0)
1326 			break;
1327 		DBG("reserving: %lx -> %lx\n", base, size);
1328 		lmb_reserve(base, size);
1329 	}
1330 
1331 #if 0
1332 	DBG("memory reserved, lmbs :\n");
1333 	lmb_dump_all();
1334 #endif
1335 }
1336 
1337 void __init early_init_devtree(void *params)
1338 {
1339 	DBG(" -> early_init_devtree()\n");
1340 
1341 	/* Setup flat device-tree pointer */
1342 	initial_boot_params = params;
1343 
1344 	/* Retrieve various bits of information from the /chosen node of the
1345 	 * device-tree, including the platform type, initrd location and
1346 	 * size, TCE reserve, and more ...
1347 	 */
1348 	scan_flat_dt(early_init_dt_scan_chosen, NULL);
1349 
1350 	/* Scan memory nodes and rebuild LMBs */
1351 	lmb_init();
1352 	scan_flat_dt(early_init_dt_scan_root, NULL);
1353 	scan_flat_dt(early_init_dt_scan_memory, NULL);
1354 	lmb_enforce_memory_limit(memory_limit);
1355 	lmb_analyze();
1356 #ifdef CONFIG_PPC64
1357 	systemcfg->physicalMemorySize = lmb_phys_mem_size();
1358 #endif
1359 	lmb_reserve(0, __pa(klimit));
1360 
1361 	DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
1362 
1363 	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
1364 	early_reserve_mem();
1365 
1366 	DBG("Scanning CPUs ...\n");
1367 
1368 	/* Retrieve the hash table size from the flattened tree, plus other
1369 	 * CPU-related information (altivec support, boot CPU ID, ...)
1370 	 */
1371 	scan_flat_dt(early_init_dt_scan_cpus, NULL);
1372 
1373 	DBG(" <- early_init_devtree()\n");
1374 }
1375 
1376 #undef printk
1377 
1378 int
1379 prom_n_addr_cells(struct device_node* np)
1380 {
1381 	int* ip;
1382 	do {
1383 		if (np->parent)
1384 			np = np->parent;
1385 		ip = (int *) get_property(np, "#address-cells", NULL);
1386 		if (ip != NULL)
1387 			return *ip;
1388 	} while (np->parent);
1389 	/* No #address-cells property for the root node, default to 1 */
1390 	return 1;
1391 }
1392 
1393 int
1394 prom_n_size_cells(struct device_node* np)
1395 {
1396 	int* ip;
1397 	do {
1398 		if (np->parent)
1399 			np = np->parent;
1400 		ip = (int *) get_property(np, "#size-cells", NULL);
1401 		if (ip != NULL)
1402 			return *ip;
1403 	} while (np->parent);
1404 	/* No #size-cells property for the root node, default to 1 */
1405 	return 1;
1406 }
1407 
1408 /**
1409  * Work out the sense (active-low level / active-high edge)
1410  * of each interrupt from the device tree.
1411  */
1412 void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1413 {
1414 	struct device_node *np;
1415 	int i, j;
1416 
1417 	/* default to level-triggered */
1418 	memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);
1419 
1420 	for (np = allnodes; np != 0; np = np->allnext) {
1421 		for (j = 0; j < np->n_intrs; j++) {
1422 			i = np->intrs[j].line;
1423 			if (i >= off && i < max)
1424 				senses[i-off] = np->intrs[j].sense;
1425 		}
1426 	}
1427 }
1428 
1429 /**
1430  * Construct and return a list of the device_nodes with a given name.
1431  */
1432 struct device_node *find_devices(const char *name)
1433 {
1434 	struct device_node *head, **prevp, *np;
1435 
1436 	prevp = &head;
1437 	for (np = allnodes; np != 0; np = np->allnext) {
1438 		if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1439 			*prevp = np;
1440 			prevp = &np->next;
1441 		}
1442 	}
1443 	*prevp = NULL;
1444 	return head;
1445 }
1446 EXPORT_SYMBOL(find_devices);
1447 
1448 /**
1449  * Construct and return a list of the device_nodes with a given type.
1450  */
1451 struct device_node *find_type_devices(const char *type)
1452 {
1453 	struct device_node *head, **prevp, *np;
1454 
1455 	prevp = &head;
1456 	for (np = allnodes; np != 0; np = np->allnext) {
1457 		if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1458 			*prevp = np;
1459 			prevp = &np->next;
1460 		}
1461 	}
1462 	*prevp = NULL;
1463 	return head;
1464 }
1465 EXPORT_SYMBOL(find_type_devices);
1466 
1467 /**
1468  * Returns all nodes linked together
1469  */
1470 struct device_node *find_all_nodes(void)
1471 {
1472 	struct device_node *head, **prevp, *np;
1473 
1474 	prevp = &head;
1475 	for (np = allnodes; np != 0; np = np->allnext) {
1476 		*prevp = np;
1477 		prevp = &np->next;
1478 	}
1479 	*prevp = NULL;
1480 	return head;
1481 }
1482 EXPORT_SYMBOL(find_all_nodes);
1483 
1484 /** Checks if the given "compat" string matches one of the strings in
1485  * the device's "compatible" property
1486  */
1487 int device_is_compatible(struct device_node *device, const char *compat)
1488 {
1489 	const char* cp;
1490 	int cplen, l;
1491 
1492 	cp = (char *) get_property(device, "compatible", &cplen);
1493 	if (cp == NULL)
1494 		return 0;
1495 	while (cplen > 0) {
1496 		if (strncasecmp(cp, compat, strlen(compat)) == 0)
1497 			return 1;
1498 		l = strlen(cp) + 1;
1499 		cp += l;
1500 		cplen -= l;
1501 	}
1502 
1503 	return 0;
1504 }
1505 EXPORT_SYMBOL(device_is_compatible);
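
/*
 * Illustrative use only (the "gmac" value and setup_gmac() helper below
 * are just examples): device_is_compatible() is typically used to tell
 * variants of a device apart once a node has been located.
 *
 *	struct device_node *np = of_find_node_by_type(NULL, "network");
 *
 *	if (np && device_is_compatible(np, "gmac"))
 *		setup_gmac(np);
 *	of_node_put(np);
 */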
1506 
1507 
1508 /**
1509  * Indicates whether the root node has a given value in its
1510  * compatible property.
1511  */
1512 int machine_is_compatible(const char *compat)
1513 {
1514 	struct device_node *root;
1515 	int rc = 0;
1516 
1517 	root = of_find_node_by_path("/");
1518 	if (root) {
1519 		rc = device_is_compatible(root, compat);
1520 		of_node_put(root);
1521 	}
1522 	return rc;
1523 }
1524 EXPORT_SYMBOL(machine_is_compatible);
1525 
1526 /**
1527  * Construct and return a list of the device_nodes with a given type
1528  * and compatible property.
1529  */
1530 struct device_node *find_compatible_devices(const char *type,
1531 					    const char *compat)
1532 {
1533 	struct device_node *head, **prevp, *np;
1534 
1535 	prevp = &head;
1536 	for (np = allnodes; np != 0; np = np->allnext) {
1537 		if (type != NULL
1538 		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1539 			continue;
1540 		if (device_is_compatible(np, compat)) {
1541 			*prevp = np;
1542 			prevp = &np->next;
1543 		}
1544 	}
1545 	*prevp = NULL;
1546 	return head;
1547 }
1548 EXPORT_SYMBOL(find_compatible_devices);
1549 
1550 /**
1551  * Find the device_node with a given full_name.
1552  */
1553 struct device_node *find_path_device(const char *path)
1554 {
1555 	struct device_node *np;
1556 
1557 	for (np = allnodes; np != 0; np = np->allnext)
1558 		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1559 			return np;
1560 	return NULL;
1561 }
1562 EXPORT_SYMBOL(find_path_device);
1563 
1564 /*******
1565  *
1566  * New implementation of the OF "find" APIs: these return a refcounted
1567  * object; call of_node_put() when done.  The device tree and list
1568  * are protected by a rw_lock.
1569  *
1570  * Note that property management will need some locking as well,
1571  * this isn't dealt with yet.
1572  *
1573  *******/
1574 
1575 /**
1576  *	of_find_node_by_name - Find a node by its "name" property
1577  *	@from:	The node to start searching from or NULL, the node
1578  *		you pass will not be searched, only the next one
1579  *		will; typically, you pass what the previous call
1580  *		returned. of_node_put() will be called on it
1581  *	@name:	The name string to match against
1582  *
1583  *	Returns a node pointer with refcount incremented, use
1584  *	of_node_put() on it when done.
1585  */
1586 struct device_node *of_find_node_by_name(struct device_node *from,
1587 	const char *name)
1588 {
1589 	struct device_node *np;
1590 
1591 	read_lock(&devtree_lock);
1592 	np = from ? from->allnext : allnodes;
1593 	for (; np != 0; np = np->allnext)
1594 		if (np->name != 0 && strcasecmp(np->name, name) == 0
1595 		    && of_node_get(np))
1596 			break;
1597 	if (from)
1598 		of_node_put(from);
1599 	read_unlock(&devtree_lock);
1600 	return np;
1601 }
1602 EXPORT_SYMBOL(of_find_node_by_name);
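
/*
 * A sketch of the intended iteration idiom (example only): because the
 * reference on the node passed in is dropped by the next call, a whole
 * tree walk needs no explicit of_node_put() except when breaking out
 * early.
 *
 *	struct device_node *np = NULL;
 *
 *	while ((np = of_find_node_by_name(np, "serial")) != NULL) {
 *		(use np here)
 *		if (done) {		(hypothetical early-exit condition)
 *			of_node_put(np);	(drop the ref we still hold)
 *			break;
 *		}
 *	}
 */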
1603 
1604 /**
1605  *	of_find_node_by_type - Find a node by its "device_type" property
1606  *	@from:	The node to start searching from or NULL, the node
1607  *		you pass will not be searched, only the next one
1608  *		will; typically, you pass what the previous call
1609  *		returned. of_node_put() will be called on it
1610  *	@type:	The type string to match against
1611  *
1612  *	Returns a node pointer with refcount incremented, use
1613  *	of_node_put() on it when done.
1614  */
1615 struct device_node *of_find_node_by_type(struct device_node *from,
1616 	const char *type)
1617 {
1618 	struct device_node *np;
1619 
1620 	read_lock(&devtree_lock);
1621 	np = from ? from->allnext : allnodes;
1622 	for (; np != 0; np = np->allnext)
1623 		if (np->type != 0 && strcasecmp(np->type, type) == 0
1624 		    && of_node_get(np))
1625 			break;
1626 	if (from)
1627 		of_node_put(from);
1628 	read_unlock(&devtree_lock);
1629 	return np;
1630 }
1631 EXPORT_SYMBOL(of_find_node_by_type);
1632 
1633 /**
1634  *	of_find_compatible_node - Find a node based on type and one of the
1635  *                                tokens in its "compatible" property
1636  *	@from:		The node to start searching from or NULL, the node
1637  *			you pass will not be searched, only the next one
1638  *			will; typically, you pass what the previous call
1639  *			returned. of_node_put() will be called on it
1640  *	@type:		The type string to match "device_type" or NULL to ignore
1641  *	@compatible:	The string to match to one of the tokens in the device
1642  *			"compatible" list.
1643  *
1644  *	Returns a node pointer with refcount incremented, use
1645  *	of_node_put() on it when done.
1646  */
1647 struct device_node *of_find_compatible_node(struct device_node *from,
1648 	const char *type, const char *compatible)
1649 {
1650 	struct device_node *np;
1651 
1652 	read_lock(&devtree_lock);
1653 	np = from ? from->allnext : allnodes;
1654 	for (; np != 0; np = np->allnext) {
1655 		if (type != NULL
1656 		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1657 			continue;
1658 		if (device_is_compatible(np, compatible) && of_node_get(np))
1659 			break;
1660 	}
1661 	if (from)
1662 		of_node_put(from);
1663 	read_unlock(&devtree_lock);
1664 	return np;
1665 }
1666 EXPORT_SYMBOL(of_find_compatible_node);
1667 
1668 /**
1669  *	of_find_node_by_path - Find a node matching a full OF path
1670  *	@path:	The full path to match
1671  *
1672  *	Returns a node pointer with refcount incremented, use
1673  *	of_node_put() on it when done.
1674  */
1675 struct device_node *of_find_node_by_path(const char *path)
1676 {
1677 	struct device_node *np = allnodes;
1678 
1679 	read_lock(&devtree_lock);
1680 	for (; np != 0; np = np->allnext) {
1681 		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1682 		    && of_node_get(np))
1683 			break;
1684 	}
1685 	read_unlock(&devtree_lock);
1686 	return np;
1687 }
1688 EXPORT_SYMBOL(of_find_node_by_path);
1689 
1690 /**
1691  *	of_find_node_by_phandle - Find a node given a phandle
1692  *	@handle:	phandle of the node to find
1693  *
1694  *	Returns a node pointer with refcount incremented, use
1695  *	of_node_put() on it when done.
1696  */
1697 struct device_node *of_find_node_by_phandle(phandle handle)
1698 {
1699 	struct device_node *np;
1700 
1701 	read_lock(&devtree_lock);
1702 	for (np = allnodes; np != 0; np = np->allnext)
1703 		if (np->linux_phandle == handle)
1704 			break;
1705 	if (np)
1706 		of_node_get(np);
1707 	read_unlock(&devtree_lock);
1708 	return np;
1709 }
1710 EXPORT_SYMBOL(of_find_node_by_phandle);
1711 
1712 /**
1713  *	of_find_all_nodes - Get next node in global list
1714  *	@prev:	Previous node or NULL to start iteration
1715  *		of_node_put() will be called on it
1716  *
1717  *	Returns a node pointer with refcount incremented, use
1718  *	of_node_put() on it when done.
1719  */
1720 struct device_node *of_find_all_nodes(struct device_node *prev)
1721 {
1722 	struct device_node *np;
1723 
1724 	read_lock(&devtree_lock);
1725 	np = prev ? prev->allnext : allnodes;
1726 	for (; np != 0; np = np->allnext)
1727 		if (of_node_get(np))
1728 			break;
1729 	if (prev)
1730 		of_node_put(prev);
1731 	read_unlock(&devtree_lock);
1732 	return np;
1733 }
1734 EXPORT_SYMBOL(of_find_all_nodes);
1735 
1736 /**
1737  *	of_get_parent - Get a node's parent if any
1738  *	@node:	Node to get parent
1739  *
1740  *	Returns a node pointer with refcount incremented, use
1741  *	of_node_put() on it when done.
1742  */
1743 struct device_node *of_get_parent(const struct device_node *node)
1744 {
1745 	struct device_node *np;
1746 
1747 	if (!node)
1748 		return NULL;
1749 
1750 	read_lock(&devtree_lock);
1751 	np = of_node_get(node->parent);
1752 	read_unlock(&devtree_lock);
1753 	return np;
1754 }
1755 EXPORT_SYMBOL(of_get_parent);
1756 
1757 /**
1758  *	of_get_next_child - Iterate over a node's children
1759  *	@node:	parent node
1760  *	@prev:	previous child of the parent node, or NULL to get first
1761  *
1762  *	Returns a node pointer with refcount incremented, use
1763  *	of_node_put() on it when done.
1764  */
1765 struct device_node *of_get_next_child(const struct device_node *node,
1766 	struct device_node *prev)
1767 {
1768 	struct device_node *next;
1769 
1770 	read_lock(&devtree_lock);
1771 	next = prev ? prev->sibling : node->child;
1772 	for (; next != 0; next = next->sibling)
1773 		if (of_node_get(next))
1774 			break;
1775 	if (prev)
1776 		of_node_put(prev);
1777 	read_unlock(&devtree_lock);
1778 	return next;
1779 }
1780 EXPORT_SYMBOL(of_get_next_child);
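
/*
 * A minimal sketch of walking the children of a node with this API
 * (illustrative only):
 *
 *	struct device_node *child = NULL;
 *
 *	while ((child = of_get_next_child(parent, child)) != NULL) {
 *		(look at child)
 *	}
 *
 * As with the other iterators, the reference on the previous child is
 * dropped by the next call, so no explicit of_node_put() is needed
 * unless the loop exits early.
 */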
1781 
1782 /**
1783  *	of_node_get - Increment refcount of a node
1784  *	@node:	Node to inc refcount, NULL is supported to
1785  *		simplify writing of callers
1786  *
1787  *	Returns node.
1788  */
1789 struct device_node *of_node_get(struct device_node *node)
1790 {
1791 	if (node)
1792 		kref_get(&node->kref);
1793 	return node;
1794 }
1795 EXPORT_SYMBOL(of_node_get);
1796 
1797 static inline struct device_node * kref_to_device_node(struct kref *kref)
1798 {
1799 	return container_of(kref, struct device_node, kref);
1800 }
1801 
1802 /**
1803  *	of_node_release - release a dynamically allocated node
1804  *	@kref:  kref element of the node to be released
1805  *
1806  *	In of_node_put() this function is passed to kref_put()
1807  *	as the destructor.
1808  */
1809 static void of_node_release(struct kref *kref)
1810 {
1811 	struct device_node *node = kref_to_device_node(kref);
1812 	struct property *prop = node->properties;
1813 
1814 	if (!OF_IS_DYNAMIC(node))
1815 		return;
1816 	while (prop) {
1817 		struct property *next = prop->next;
1818 		kfree(prop->name);
1819 		kfree(prop->value);
1820 		kfree(prop);
1821 		prop = next;
1822 	}
1823 	kfree(node->intrs);
1824 	kfree(node->addrs);
1825 	kfree(node->full_name);
1826 	kfree(node->data);
1827 	kfree(node);
1828 }
1829 
1830 /**
1831  *	of_node_put - Decrement refcount of a node
1832  *	@node:	Node to dec refcount, NULL is supported to
1833  *		simplify writing of callers
1834  *
1835  */
1836 void of_node_put(struct device_node *node)
1837 {
1838 	if (node)
1839 		kref_put(&node->kref, of_node_release);
1840 }
1841 EXPORT_SYMBOL(of_node_put);
1842 
1843 /*
1844  * Plug a device node into the tree and global list.
1845  */
1846 void of_attach_node(struct device_node *np)
1847 {
1848 	write_lock(&devtree_lock);
1849 	np->sibling = np->parent->child;
1850 	np->allnext = allnodes;
1851 	np->parent->child = np;
1852 	allnodes = np;
1853 	write_unlock(&devtree_lock);
1854 }
1855 
1856 /*
1857  * "Unplug" a node from the device tree.  The caller must hold
1858  * a reference to the node.  The memory associated with the node
1859  * is not freed until its refcount goes to zero.
1860  */
1861 void of_detach_node(const struct device_node *np)
1862 {
1863 	struct device_node *parent;
1864 
1865 	write_lock(&devtree_lock);
1866 
1867 	parent = np->parent;
1868 
1869 	if (allnodes == np)
1870 		allnodes = np->allnext;
1871 	else {
1872 		struct device_node *prev;
1873 		for (prev = allnodes;
1874 		     prev->allnext != np;
1875 		     prev = prev->allnext)
1876 			;
1877 		prev->allnext = np->allnext;
1878 	}
1879 
1880 	if (parent->child == np)
1881 		parent->child = np->sibling;
1882 	else {
1883 		struct device_node *prevsib;
1884 		for (prevsib = np->parent->child;
1885 		     prevsib->sibling != np;
1886 		     prevsib = prevsib->sibling)
1887 			;
1888 		prevsib->sibling = np->sibling;
1889 	}
1890 
1891 	write_unlock(&devtree_lock);
1892 }
1893 
1894 #ifdef CONFIG_PPC_PSERIES
1895 /*
1896  * Fix up the uninitialized fields in a new device node:
1897  * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1898  *
1899  * A lot of boot-time code is duplicated here, because functions such
1900  * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1901  * slab allocator.
1902  *
1903  * This should probably be split up into smaller chunks.
1904  */
1905 
1906 static int of_finish_dynamic_node(struct device_node *node,
1907 				  unsigned long *unused1, int unused2,
1908 				  int unused3, int unused4)
1909 {
1910 	struct device_node *parent = of_get_parent(node);
1911 	int err = 0;
1912 	phandle *ibm_phandle;
1913 
1914 	node->name = get_property(node, "name", NULL);
1915 	node->type = get_property(node, "device_type", NULL);
1916 
1917 	if (!parent) {
1918 		err = -ENODEV;
1919 		goto out;
1920 	}
1921 
1922 	/* We don't support that function on PowerMac, at least
1923 	 * not yet
1924 	 */
1925 	if (systemcfg->platform == PLATFORM_POWERMAC)
1926 		return -ENODEV;
1927 
1928 	/* fix up new node's linux_phandle field */
1929 	if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL)))
1930 		node->linux_phandle = *ibm_phandle;
1931 
1932 out:
1933 	of_node_put(parent);
1934 	return err;
1935 }
1936 
1937 static int prom_reconfig_notifier(struct notifier_block *nb,
1938 				  unsigned long action, void *node)
1939 {
1940 	int err;
1941 
1942 	switch (action) {
1943 	case PSERIES_RECONFIG_ADD:
1944 		err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
1945 		if (err < 0) {
1946 			printk(KERN_ERR "finish_node returned %d\n", err);
1947 			err = NOTIFY_BAD;
1948 		}
1949 		break;
1950 	default:
1951 		err = NOTIFY_DONE;
1952 		break;
1953 	}
1954 	return err;
1955 }
1956 
1957 static struct notifier_block prom_reconfig_nb = {
1958 	.notifier_call = prom_reconfig_notifier,
1959 	.priority = 10, /* This one needs to run first */
1960 };
1961 
1962 static int __init prom_reconfig_setup(void)
1963 {
1964 	return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
1965 }
1966 __initcall(prom_reconfig_setup);
1967 #endif
1968 
1969 /*
1970  * Find a property with a given name for a given node
1971  * and return the value.
1972  */
1973 unsigned char *get_property(struct device_node *np, const char *name,
1974 			    int *lenp)
1975 {
1976 	struct property *pp;
1977 
1978 	for (pp = np->properties; pp != 0; pp = pp->next)
1979 		if (strcmp(pp->name, name) == 0) {
1980 			if (lenp != 0)
1981 				*lenp = pp->length;
1982 			return pp->value;
1983 		}
1984 	return NULL;
1985 }
1986 EXPORT_SYMBOL(get_property);
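
/*
 * Typical use, as a sketch only (the layout of "reg" depends on the bus
 * binding of the node):
 *
 *	int len;
 *	u32 *reg = (u32 *)get_property(np, "reg", &len);
 *
 *	if (reg != NULL && len >= sizeof(u32))
 *		first_cell = reg[0];
 *
 * The returned pointer points straight into the property value owned by
 * the node, so callers must not modify or free it.
 */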
1987 
1988 /*
1989  * Add a property to a node
1990  */
1991 void prom_add_property(struct device_node* np, struct property* prop)
1992 {
1993 	struct property **next = &np->properties;
1994 
1995 	prop->next = NULL;
1996 	while (*next)
1997 		next = &(*next)->next;
1998 	*next = prop;
1999 }
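
/*
 * A hedged example of adding a property to an existing node (the
 * property name and value below are made up; real callers must keep the
 * property memory alive for the lifetime of the node):
 *
 *	static u32 my_value = 1;
 *	static struct property my_prop = {
 *		.name	= "linux,example-flag",
 *		.length	= sizeof(my_value),
 *		.value	= &my_value,
 *	};
 *
 *	prom_add_property(np, &my_prop);
 */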
2000 
2001 /* This was hacked together quickly; check it against the spec! */
2002 static inline unsigned long
2003 bus_space_to_resource_flags(unsigned int bus_space)
2004 {
2005 	u8 space = (bus_space >> 24) & 0xf;
2006 	if (space == 0)
2007 		space = 0x02;
2008 	if (space == 0x02)
2009 		return IORESOURCE_MEM;
2010 	else if (space == 0x01)
2011 		return IORESOURCE_IO;
2012 	else {
2013 		printk(KERN_WARNING "prom.c: bus_space_to_resource_flags(), space: %x\n",
2014 		    	bus_space);
2015 		return 0;
2016 	}
2017 }
2018 
2019 #ifdef CONFIG_PCI
2020 static struct resource *find_parent_pci_resource(struct pci_dev* pdev,
2021 						 struct address_range *range)
2022 {
2023 	unsigned long mask;
2024 	int i;
2025 
2026 	/* Check this one */
2027 	mask = bus_space_to_resource_flags(range->space);
2028 	for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2029 		if ((pdev->resource[i].flags & mask) == mask &&
2030 			pdev->resource[i].start <= range->address &&
2031 			pdev->resource[i].end > range->address) {
2032 				if ((range->address + range->size - 1) > pdev->resource[i].end) {
2033 					/* Add better message */
2034 					printk(KERN_WARNING "PCI/OF resource overlap !\n");
2035 					return NULL;
2036 				}
2037 				break;
2038 			}
2039 	}
2040 	if (i == DEVICE_COUNT_RESOURCE)
2041 		return NULL;
2042 	return &pdev->resource[i];
2043 }
2044 
2045 /*
2046  * Request an OF device resource. Currently handles children of PCI
2047  * devices, or other nodes attached to the root node. Ultimately, we
2048  * should keep a link to the resources in the OF node itself.
2049  */
2050 struct resource *request_OF_resource(struct device_node* node, int index,
2051 				     const char* name_postfix)
2052 {
2053 	struct pci_dev* pcidev;
2054 	u8 pci_bus, pci_devfn;
2055 	unsigned long iomask;
2056 	struct device_node* nd;
2057 	struct resource* parent;
2058 	struct resource *res = NULL;
2059 	int nlen, plen;
2060 
2061 	if (index >= node->n_addrs)
2062 		goto fail;
2063 
2064 	/* Sanity check on bus space */
2065 	iomask = bus_space_to_resource_flags(node->addrs[index].space);
2066 	if (iomask & IORESOURCE_MEM)
2067 		parent = &iomem_resource;
2068 	else if (iomask & IORESOURCE_IO)
2069 		parent = &ioport_resource;
2070 	else
2071 		goto fail;
2072 
2073 	/* Find a PCI parent if any */
2074 	nd = node;
2075 	pcidev = NULL;
2076 	while (nd) {
2077 		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2078 			pcidev = pci_find_slot(pci_bus, pci_devfn);
2079 		if (pcidev) break;
2080 		nd = nd->parent;
2081 	}
2082 	if (pcidev)
2083 		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2084 	if (!parent) {
2085 		printk(KERN_WARNING "request_OF_resource(%s), parent not found\n",
2086 			node->name);
2087 		goto fail;
2088 	}
2089 
2090 	res = __request_region(parent, node->addrs[index].address,
2091 			       node->addrs[index].size, NULL);
2092 	if (!res)
2093 		goto fail;
2094 	nlen = strlen(node->name);
2095 	plen = name_postfix ? strlen(name_postfix) : 0;
2096 	res->name = (const char *)kmalloc(nlen+plen+1, GFP_KERNEL);
2097 	if (res->name) {
2098 		strcpy((char *)res->name, node->name);
2099 		if (plen)
2100 			strcpy((char *)res->name+nlen, name_postfix);
2101 	}
2102 	return res;
2103 fail:
2104 	return NULL;
2105 }
2106 EXPORT_SYMBOL(request_OF_resource);
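
/*
 * Usage sketch (the index and name postfix are illustrative): a driver
 * that wants the first address range of its OF node reserved before
 * mapping it could do something like:
 *
 *	struct resource *r = request_OF_resource(np, 0, " (mydev)");
 *
 *	if (r == NULL)
 *		return -EBUSY;
 *	(map np->addrs[0].address / np->addrs[0].size as needed)
 *
 * and call release_OF_resource(np, 0) on teardown.
 */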
2107 
2108 int release_OF_resource(struct device_node *node, int index)
2109 {
2110 	struct pci_dev* pcidev;
2111 	u8 pci_bus, pci_devfn;
2112 	unsigned long iomask, start, end;
2113 	struct device_node* nd;
2114 	struct resource* parent;
2115 	struct resource *res = NULL;
2116 
2117 	if (index >= node->n_addrs)
2118 		return -EINVAL;
2119 
2120 	/* Sanity check on bus space */
2121 	iomask = bus_space_to_resource_flags(node->addrs[index].space);
2122 	if (iomask & IORESOURCE_MEM)
2123 		parent = &iomem_resource;
2124 	else if (iomask & IORESOURCE_IO)
2125 		parent = &ioport_resource;
2126 	else
2127 		return -EINVAL;
2128 
2129 	/* Find a PCI parent if any */
2130 	nd = node;
2131 	pcidev = NULL;
2132 	while(nd) {
2133 		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2134 			pcidev = pci_find_slot(pci_bus, pci_devfn);
2135 		if (pcidev) break;
2136 		nd = nd->parent;
2137 	}
2138 	if (pcidev)
2139 		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2140 	if (!parent) {
2141 		printk(KERN_WARNING "release_OF_resource(%s), parent not found\n",
2142 			node->name);
2143 		return -ENODEV;
2144 	}
2145 
2146 	/* Find us in the parent and its children */
2147 	res = parent->child;
2148 	start = node->addrs[index].address;
2149 	end = start + node->addrs[index].size - 1;
2150 	while (res) {
2151 		if (res->start == start && res->end == end &&
2152 		    (res->flags & IORESOURCE_BUSY))
2153 		    	break;
2154 		if (res->start <= start && res->end >= end)
2155 			res = res->child;
2156 		else
2157 			res = res->sibling;
2158 	}
2159 	if (!res)
2160 		return -ENODEV;
2161 
2162 	if (res->name) {
2163 		kfree(res->name);
2164 		res->name = NULL;
2165 	}
2166 	release_resource(res);
2167 	kfree(res);
2168 
2169 	return 0;
2170 }
2171 EXPORT_SYMBOL(release_OF_resource);
2172 #endif /* CONFIG_PCI */
2173