xref: /openbmc/linux/arch/powerpc/kernel/prom.c (revision 60dda256)
1 /*
2  * Procedures for creating, accessing and interpreting the device tree.
3  *
4  * Paul Mackerras	August 1996.
5  * Copyright (C) 1996-2005 Paul Mackerras.
6  *
7  *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8  *    {engebret|bergner}@us.ibm.com
9  *
10  *      This program is free software; you can redistribute it and/or
11  *      modify it under the terms of the GNU General Public License
12  *      as published by the Free Software Foundation; either version
13  *      2 of the License, or (at your option) any later version.
14  */
15 
16 #undef DEBUG
17 
18 #include <stdarg.h>
19 #include <linux/config.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/init.h>
23 #include <linux/threads.h>
24 #include <linux/spinlock.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/stringify.h>
28 #include <linux/delay.h>
29 #include <linux/initrd.h>
30 #include <linux/bitops.h>
31 #include <linux/module.h>
32 
33 #include <asm/prom.h>
34 #include <asm/rtas.h>
35 #include <asm/lmb.h>
36 #include <asm/page.h>
37 #include <asm/processor.h>
38 #include <asm/irq.h>
39 #include <asm/io.h>
40 #include <asm/smp.h>
41 #include <asm/system.h>
42 #include <asm/mmu.h>
43 #include <asm/pgtable.h>
44 #include <asm/pci.h>
45 #include <asm/iommu.h>
46 #include <asm/btext.h>
47 #include <asm/sections.h>
48 #include <asm/machdep.h>
49 #include <asm/pSeries_reconfig.h>
50 #include <asm/pci-bridge.h>
51 #ifdef CONFIG_PPC64
52 #include <asm/systemcfg.h>
53 #endif
54 
55 #ifdef DEBUG
56 #define DBG(fmt...) printk(KERN_ERR fmt)
57 #else
58 #define DBG(fmt...)
59 #endif
60 
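/*
 * Layout of one entry of a PCI "assigned-addresses" property: an OF PCI
 * address (the hi/mid/lo cells of struct pci_address) followed by a size
 * split into two 32-bit cells.
 */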
61 struct pci_reg_property {
62 	struct pci_address addr;
63 	u32 size_hi;
64 	u32 size_lo;
65 };
66 
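/*
 * Layout of one entry of an ISA "reg" property: a space selector
 * (I/O vs. memory), then a 32-bit address and size.
 */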
67 struct isa_reg_property {
68 	u32 space;
69 	u32 address;
70 	u32 size;
71 };
72 
73 
74 typedef int interpret_func(struct device_node *, unsigned long *,
75 			   int, int, int);
76 
77 extern struct rtas_t rtas;
78 extern struct lmb lmb;
79 extern unsigned long klimit;
80 
81 static unsigned long memory_limit;
82 
83 static int __initdata dt_root_addr_cells;
84 static int __initdata dt_root_size_cells;
85 
86 #ifdef CONFIG_PPC64
87 static int __initdata iommu_is_off;
88 int __initdata iommu_force_on;
89 extern unsigned long tce_alloc_start, tce_alloc_end;
90 #endif
91 
92 typedef u32 cell_t;
93 
94 #if 0
95 static struct boot_param_header *initial_boot_params __initdata;
96 #else
97 struct boot_param_header *initial_boot_params;
98 #endif
99 
100 static struct device_node *allnodes = NULL;
101 
102 /* use when traversing tree through the allnext, child, sibling,
103  * or parent members of struct device_node.
104  */
105 static DEFINE_RWLOCK(devtree_lock);
106 
107 /* export that to outside world */
108 struct device_node *of_chosen;
109 
110 struct device_node *dflt_interrupt_controller;
111 int num_interrupt_controllers;
112 
113 u32 rtas_data;
114 u32 rtas_entry;
115 
116 /*
117  * Wrapper for allocating memory for various data that needs to be
118  * attached to device nodes as they are processed at boot or when
119  * added to the device tree later (e.g. DLPAR).  At boot there is
120  * already a region reserved so we just increment *mem_start by size;
121  * otherwise we call kmalloc.
122  */
123 static void * prom_alloc(unsigned long size, unsigned long *mem_start)
124 {
125 	unsigned long tmp;
126 
127 	if (!mem_start)
128 		return kmalloc(size, GFP_KERNEL);
129 
130 	tmp = *mem_start;
131 	*mem_start += size;
132 	return (void *)tmp;
133 }
134 
135 /*
136  * Find the device_node with a given phandle.
137  */
138 static struct device_node * find_phandle(phandle ph)
139 {
140 	struct device_node *np;
141 
142 	for (np = allnodes; np != 0; np = np->allnext)
143 		if (np->linux_phandle == ph)
144 			return np;
145 	return NULL;
146 }
147 
148 /*
149  * Find the interrupt parent of a node.
150  */
151 static struct device_node * __devinit intr_parent(struct device_node *p)
152 {
153 	phandle *parp;
154 
155 	parp = (phandle *) get_property(p, "interrupt-parent", NULL);
156 	if (parp == NULL)
157 		return p->parent;
158 	p = find_phandle(*parp);
159 	if (p != NULL)
160 		return p;
161 	/*
162 	 * On a powermac booted with BootX, we don't get to know the
163 	 * phandles for any nodes, so find_phandle will return NULL.
164 	 * Fortunately these machines only have one interrupt controller
165 	 * so there isn't in fact any ambiguity.  -- paulus
166 	 */
167 	if (num_interrupt_controllers == 1)
168 		p = dflt_interrupt_controller;
169 	return p;
170 }
171 
172 /*
173  * Find out the size of each entry of the interrupts property
174  * for a node.
175  */
176 int __devinit prom_n_intr_cells(struct device_node *np)
177 {
178 	struct device_node *p;
179 	unsigned int *icp;
180 
181 	for (p = np; (p = intr_parent(p)) != NULL; ) {
182 		icp = (unsigned int *)
183 			get_property(p, "#interrupt-cells", NULL);
184 		if (icp != NULL)
185 			return *icp;
186 		if (get_property(p, "interrupt-controller", NULL) != NULL
187 		    || get_property(p, "interrupt-map", NULL) != NULL) {
188 			printk("oops, node %s doesn't have #interrupt-cells\n",
189 			       p->full_name);
190 			return 1;
191 		}
192 	}
193 #ifdef DEBUG_IRQ
194 	printk("prom_n_intr_cells failed for %s\n", np->full_name);
195 #endif
196 	return 1;
197 }
198 
199 /*
200  * Map an interrupt from a device up to the platform interrupt
201  * descriptor.
202  */
203 static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
204 				   struct device_node *np, unsigned int *ints,
205 				   int nintrc)
206 {
207 	struct device_node *p, *ipar;
208 	unsigned int *imap, *imask, *ip;
209 	int i, imaplen, match;
210 	int newintrc = 0, newaddrc = 0;
211 	unsigned int *reg;
212 	int naddrc;
213 
214 	reg = (unsigned int *) get_property(np, "reg", NULL);
215 	naddrc = prom_n_addr_cells(np);
216 	p = intr_parent(np);
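	/*
	 * Walk up the interrupt tree, translating the (address, interrupt)
	 * specifier through each parent's interrupt-map until a node with
	 * an "interrupt-controller" property is reached.
	 */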
217 	while (p != NULL) {
218 		if (get_property(p, "interrupt-controller", NULL) != NULL)
219 			/* this node is an interrupt controller, stop here */
220 			break;
221 		imap = (unsigned int *)
222 			get_property(p, "interrupt-map", &imaplen);
223 		if (imap == NULL) {
224 			p = intr_parent(p);
225 			continue;
226 		}
227 		imask = (unsigned int *)
228 			get_property(p, "interrupt-map-mask", NULL);
229 		if (imask == NULL) {
230 			printk("oops, %s has interrupt-map but no mask\n",
231 			       p->full_name);
232 			return 0;
233 		}
234 		imaplen /= sizeof(unsigned int);
235 		match = 0;
236 		ipar = NULL;
237 		while (imaplen > 0 && !match) {
238 			/* check the child-interrupt field */
239 			match = 1;
240 			for (i = 0; i < naddrc && match; ++i)
241 				match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
242 			for (; i < naddrc + nintrc && match; ++i)
243 				match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
244 			imap += naddrc + nintrc;
245 			imaplen -= naddrc + nintrc;
246 			/* grab the interrupt parent */
247 			ipar = find_phandle((phandle) *imap++);
248 			--imaplen;
249 			if (ipar == NULL && num_interrupt_controllers == 1)
250 				/* cope with BootX not giving us phandles */
251 				ipar = dflt_interrupt_controller;
252 			if (ipar == NULL) {
253 				printk("oops, no int parent %x in map of %s\n",
254 				       imap[-1], p->full_name);
255 				return 0;
256 			}
257 			/* find the parent's # addr and intr cells */
258 			ip = (unsigned int *)
259 				get_property(ipar, "#interrupt-cells", NULL);
260 			if (ip == NULL) {
261 				printk("oops, no #interrupt-cells on %s\n",
262 				       ipar->full_name);
263 				return 0;
264 			}
265 			newintrc = *ip;
266 			ip = (unsigned int *)
267 				get_property(ipar, "#address-cells", NULL);
268 			newaddrc = (ip == NULL)? 0: *ip;
269 			imap += newaddrc + newintrc;
270 			imaplen -= newaddrc + newintrc;
271 		}
272 		if (imaplen < 0) {
273 			printk("oops, error decoding int-map on %s, len=%d\n",
274 			       p->full_name, imaplen);
275 			return 0;
276 		}
277 		if (!match) {
278 #ifdef DEBUG_IRQ
279 			printk("oops, no match in %s int-map for %s\n",
280 			       p->full_name, np->full_name);
281 #endif
282 			return 0;
283 		}
284 		p = ipar;
285 		naddrc = newaddrc;
286 		nintrc = newintrc;
287 		ints = imap - nintrc;
288 		reg = ints - naddrc;
289 	}
290 	if (p == NULL) {
291 #ifdef DEBUG_IRQ
292 		printk("hmmm, int tree for %s doesn't have ctrler\n",
293 		       np->full_name);
294 #endif
295 		return 0;
296 	}
297 	*irq = ints;
298 	*ictrler = p;
299 	return nintrc;
300 }
301 
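/*
 * Parse the interrupt properties of a node into np->intrs[] and
 * np->n_intrs.  New-style specifiers are mapped up to their interrupt
 * controller via map_interrupt(); old machines just provide a raw list
 * of interrupt numbers.  With measure_only set, only the memory that
 * will be needed is accounted for.
 */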
302 static int __devinit finish_node_interrupts(struct device_node *np,
303 					    unsigned long *mem_start,
304 					    int measure_only)
305 {
306 	unsigned int *ints;
307 	int intlen, intrcells, intrcount;
308 	int i, j, n;
309 	unsigned int *irq, virq;
310 	struct device_node *ic;
311 
312 	if (num_interrupt_controllers == 0) {
313 		/*
314 		 * Old machines just have a list of interrupt numbers
315 		 * and no interrupt-controller nodes.
316 		 */
317 		ints = (unsigned int *) get_property(np, "AAPL,interrupts",
318 						     &intlen);
319 		/* XXX old interpret_pci_props looked in parent too */
320 		/* XXX old interpret_macio_props looked for interrupts
321 		   before AAPL,interrupts */
322 		if (ints == NULL)
323 			ints = (unsigned int *) get_property(np, "interrupts",
324 							     &intlen);
325 		if (ints == NULL)
326 			return 0;
327 
328 		np->n_intrs = intlen / sizeof(unsigned int);
329 		np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
330 				       mem_start);
331 		if (!np->intrs)
332 			return -ENOMEM;
333 		if (measure_only)
334 			return 0;
335 
336 		for (i = 0; i < np->n_intrs; ++i) {
337 			np->intrs[i].line = *ints++;
338 			np->intrs[i].sense = 1;
339 		}
340 		return 0;
341 	}
342 
343 	ints = (unsigned int *) get_property(np, "interrupts", &intlen);
344 	if (ints == NULL)
345 		return 0;
346 	intrcells = prom_n_intr_cells(np);
347 	intlen /= intrcells * sizeof(unsigned int);
348 
349 	np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
350 	if (!np->intrs)
351 		return -ENOMEM;
352 
353 	if (measure_only)
354 		return 0;
355 
356 	intrcount = 0;
357 	for (i = 0; i < intlen; ++i, ints += intrcells) {
358 		n = map_interrupt(&irq, &ic, np, ints, intrcells);
359 		if (n <= 0)
360 			continue;
361 
362 		/* don't map IRQ numbers under a cascaded 8259 controller */
363 		if (ic && device_is_compatible(ic, "chrp,iic")) {
364 			np->intrs[intrcount].line = irq[0];
365 		} else {
366 #ifdef CONFIG_PPC64
367 			virq = virt_irq_create_mapping(irq[0]);
368 			if (virq == NO_IRQ) {
369 				printk(KERN_CRIT "Could not allocate interrupt"
370 				       " number for %s\n", np->full_name);
371 				continue;
372 			}
373 			virq = irq_offset_up(virq);
374 #else
375 			virq = irq[0];
376 #endif
377 			np->intrs[intrcount].line = virq;
378 		}
379 
380 #ifdef CONFIG_PPC64
381 		/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
382 		if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) {
383 			char *name = get_property(ic->parent, "name", NULL);
384 			if (name && !strcmp(name, "u3"))
385 				np->intrs[intrcount].line += 128;
386 			else if (!(name && !strcmp(name, "mac-io")))
387 				/* ignore other cascaded controllers, such as
388 				   the k2-sata-root */
389 				break;
390 		}
391 #endif
392 		np->intrs[intrcount].sense = 1;
393 		if (n > 1)
394 			np->intrs[intrcount].sense = irq[1];
395 		if (n > 2) {
396 			printk("hmmm, got %d intr cells for %s:", n,
397 			       np->full_name);
398 			for (j = 0; j < n; ++j)
399 				printk(" %d", irq[j]);
400 			printk("\n");
401 		}
402 		++intrcount;
403 	}
404 	np->n_intrs = intrcount;
405 
406 	return 0;
407 }
408 
409 static int __devinit interpret_pci_props(struct device_node *np,
410 					 unsigned long *mem_start,
411 					 int naddrc, int nsizec,
412 					 int measure_only)
413 {
414 	struct address_range *adr;
415 	struct pci_reg_property *pci_addrs;
416 	int i, l, n_addrs;
417 
418 	pci_addrs = (struct pci_reg_property *)
419 		get_property(np, "assigned-addresses", &l);
420 	if (!pci_addrs)
421 		return 0;
422 
423 	n_addrs = l / sizeof(*pci_addrs);
424 
425 	adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
426 	if (!adr)
427 		return -ENOMEM;
428 
429  	if (measure_only)
430  		return 0;
431 
432  	np->addrs = adr;
433  	np->n_addrs = n_addrs;
434 
435  	for (i = 0; i < n_addrs; i++) {
436  		adr[i].space = pci_addrs[i].addr.a_hi;
437  		adr[i].address = pci_addrs[i].addr.a_lo |
438 			((u64)pci_addrs[i].addr.a_mid << 32);
439  		adr[i].size = pci_addrs[i].size_lo;
440 	}
441 
442 	return 0;
443 }
444 
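/*
 * Nodes below a dbdma node have 32-bit "reg" entries that are offsets
 * from the parent's base address; turn them into absolute address_range
 * entries.
 */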
445 static int __init interpret_dbdma_props(struct device_node *np,
446 					unsigned long *mem_start,
447 					int naddrc, int nsizec,
448 					int measure_only)
449 {
450 	struct reg_property32 *rp;
451 	struct address_range *adr;
452 	unsigned long base_address;
453 	int i, l;
454 	struct device_node *db;
455 
456 	base_address = 0;
457 	if (!measure_only) {
458 		for (db = np->parent; db != NULL; db = db->parent) {
459 			if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
460 				base_address = db->addrs[0].address;
461 				break;
462 			}
463 		}
464 	}
465 
466 	rp = (struct reg_property32 *) get_property(np, "reg", &l);
467 	if (rp != 0 && l >= sizeof(struct reg_property32)) {
468 		i = 0;
469 		adr = (struct address_range *) (*mem_start);
470 		while ((l -= sizeof(struct reg_property32)) >= 0) {
471 			if (!measure_only) {
472 				adr[i].space = 2;
473 				adr[i].address = rp[i].address + base_address;
474 				adr[i].size = rp[i].size;
475 			}
476 			++i;
477 		}
478 		np->addrs = adr;
479 		np->n_addrs = i;
480 		(*mem_start) += i * sizeof(struct address_range);
481 	}
482 
483 	return 0;
484 }
485 
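/* Same as interpret_dbdma_props(), but relative to the mac-io node. */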
486 static int __init interpret_macio_props(struct device_node *np,
487 					unsigned long *mem_start,
488 					int naddrc, int nsizec,
489 					int measure_only)
490 {
491 	struct reg_property32 *rp;
492 	struct address_range *adr;
493 	unsigned long base_address;
494 	int i, l;
495 	struct device_node *db;
496 
497 	base_address = 0;
498 	if (!measure_only) {
499 		for (db = np->parent; db != NULL; db = db->parent) {
500 			if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
501 				base_address = db->addrs[0].address;
502 				break;
503 			}
504 		}
505 	}
506 
507 	rp = (struct reg_property32 *) get_property(np, "reg", &l);
508 	if (rp != 0 && l >= sizeof(struct reg_property32)) {
509 		i = 0;
510 		adr = (struct address_range *) (*mem_start);
511 		while ((l -= sizeof(struct reg_property32)) >= 0) {
512 			if (!measure_only) {
513 				adr[i].space = 2;
514 				adr[i].address = rp[i].address + base_address;
515 				adr[i].size = rp[i].size;
516 			}
517 			++i;
518 		}
519 		np->addrs = adr;
520 		np->n_addrs = i;
521 		(*mem_start) += i * sizeof(struct address_range);
522 	}
523 
524 	return 0;
525 }
526 
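/*
 * ISA "reg" entries carry a space selector, an address and a size;
 * copy them into address_range entries as-is.
 */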
527 static int __init interpret_isa_props(struct device_node *np,
528 				      unsigned long *mem_start,
529 				      int naddrc, int nsizec,
530 				      int measure_only)
531 {
532 	struct isa_reg_property *rp;
533 	struct address_range *adr;
534 	int i, l;
535 
536 	rp = (struct isa_reg_property *) get_property(np, "reg", &l);
537 	if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
538 		i = 0;
539 		adr = (struct address_range *) (*mem_start);
540 		while ((l -= sizeof(struct isa_reg_property)) >= 0) {
541 			if (!measure_only) {
542 				adr[i].space = rp[i].space;
543 				adr[i].address = rp[i].address;
544 				adr[i].size = rp[i].size;
545 			}
546 			++i;
547 		}
548 		np->addrs = adr;
549 		np->n_addrs = i;
550 		(*mem_start) += i * sizeof(struct address_range);
551 	}
552 
553 	return 0;
554 }
555 
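/*
 * Nodes handled with root-style "reg" properties use
 * (naddrc + nsizec)-cell entries; only the least significant address
 * and size cells are kept here.
 */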
556 static int __init interpret_root_props(struct device_node *np,
557 				       unsigned long *mem_start,
558 				       int naddrc, int nsizec,
559 				       int measure_only)
560 {
561 	struct address_range *adr;
562 	int i, l;
563 	unsigned int *rp;
564 	int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
565 
566 	rp = (unsigned int *) get_property(np, "reg", &l);
567 	if (rp != 0 && l >= rpsize) {
568 		i = 0;
569 		adr = (struct address_range *) (*mem_start);
570 		while ((l -= rpsize) >= 0) {
571 			if (!measure_only) {
572 				adr[i].space = 0;
573 				adr[i].address = rp[naddrc - 1];
574 				adr[i].size = rp[naddrc + nsizec - 1];
575 			}
576 			++i;
577 			rp += naddrc + nsizec;
578 		}
579 		np->addrs = adr;
580 		np->n_addrs = i;
581 		(*mem_start) += i * sizeof(struct address_range);
582 	}
583 
584 	return 0;
585 }
586 
587 static int __devinit finish_node(struct device_node *np,
588 				 unsigned long *mem_start,
589 				 interpret_func *ifunc,
590 				 int naddrc, int nsizec,
591 				 int measure_only)
592 {
593 	struct device_node *child;
594 	int *ip, rc = 0;
595 
596 	/* get the device addresses and interrupts */
597 	if (ifunc != NULL)
598 		rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
599 	if (rc)
600 		goto out;
601 
602 	rc = finish_node_interrupts(np, mem_start, measure_only);
603 	if (rc)
604 		goto out;
605 
606 	/* Look for #address-cells and #size-cells properties. */
607 	ip = (int *) get_property(np, "#address-cells", NULL);
608 	if (ip != NULL)
609 		naddrc = *ip;
610 	ip = (int *) get_property(np, "#size-cells", NULL);
611 	if (ip != NULL)
612 		nsizec = *ip;
613 
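	/*
	 * Choose the interpreter that will be used for this node's
	 * children.  escc and media-bay nodes keep their parent's
	 * dbdma/macio interpreter; unrecognized types get none.
	 */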
614 	if (!strcmp(np->name, "device-tree") || np->parent == NULL)
615 		ifunc = interpret_root_props;
616 	else if (np->type == 0)
617 		ifunc = NULL;
618 	else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
619 		ifunc = interpret_pci_props;
620 	else if (!strcmp(np->type, "dbdma"))
621 		ifunc = interpret_dbdma_props;
622 	else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
623 		ifunc = interpret_macio_props;
624 	else if (!strcmp(np->type, "isa"))
625 		ifunc = interpret_isa_props;
626 	else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
627 		ifunc = interpret_root_props;
628 	else if (!((ifunc == interpret_dbdma_props
629 		    || ifunc == interpret_macio_props)
630 		   && (!strcmp(np->type, "escc")
631 		       || !strcmp(np->type, "media-bay"))))
632 		ifunc = NULL;
633 
634 	for (child = np->child; child != NULL; child = child->sibling) {
635 		rc = finish_node(child, mem_start, ifunc,
636 				 naddrc, nsizec, measure_only);
637 		if (rc)
638 			goto out;
639 	}
640 out:
641 	return rc;
642 }
643 
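/*
 * Count the interrupt controller nodes in the tree and remember the
 * first one as the default, for machines (e.g. BootX-booted PowerMacs)
 * where no phandles are available.
 */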
644 static void __init scan_interrupt_controllers(void)
645 {
646 	struct device_node *np;
647 	int n = 0;
648 	char *name, *ic;
649 	int iclen;
650 
651 	for (np = allnodes; np != NULL; np = np->allnext) {
652 		ic = get_property(np, "interrupt-controller", &iclen);
653 		name = get_property(np, "name", NULL);
654 		/* checking iclen makes sure we don't get a false
655 		   match on /chosen.interrupt_controller */
656 		if ((name != NULL
657 		     && strcmp(name, "interrupt-controller") == 0)
658 		    || (ic != NULL && iclen == 0
659 			&& strcmp(name, "AppleKiwi"))) {
660 			if (n == 0)
661 				dflt_interrupt_controller = np;
662 			++n;
663 		}
664 	}
665 	num_interrupt_controllers = n;
666 }
667 
668 /**
669  * finish_device_tree is called once things are running normally
670  * (i.e. with text and data mapped to the address they were linked at).
671  * It traverses the device tree and fills in some of the additional
672  * fields in each node, like {n_}addrs and {n_}intrs; the virtual interrupt
673  * mapping is also initialized at this point.
674  */
675 void __init finish_device_tree(void)
676 {
677 	unsigned long start, end, size = 0;
678 
679 	DBG(" -> finish_device_tree\n");
680 
681 #ifdef CONFIG_PPC64
682 	/* Initialize virtual IRQ map */
683 	virt_irq_init();
684 #endif
685 	scan_interrupt_controllers();
686 
687 	/*
688 	 * Finish device-tree (pre-parsing some properties etc...)
689 	 * We do this in 2 passes. One with "measure_only" set, which
690 	 * will only measure the amount of memory needed, then we can
691 	 * allocate that memory, and call finish_node again. However,
692 	 * we must be careful as most routines will fail nowadays when
693 	 * prom_alloc() returns 0, so we must make sure our first pass
694 	 * doesn't start at 0. We pre-initialize size to 16 for that
695 	 * reason and then remove those additional 16 bytes
696 	 */
697 	size = 16;
698 	finish_node(allnodes, &size, NULL, 0, 0, 1);
699 	size -= 16;
700 	end = start = (unsigned long) __va(lmb_alloc(size, 128));
701 	finish_node(allnodes, &end, NULL, 0, 0, 0);
702 	BUG_ON(end != start + size);
703 
704 	DBG(" <- finish_device_tree\n");
705 }
706 
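/*
 * Property names in the flattened tree are kept in a separate string
 * block; look one up by its offset.
 */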
707 static inline char *find_flat_dt_string(u32 offset)
708 {
709 	return ((char *)initial_boot_params) +
710 		initial_boot_params->off_dt_strings + offset;
711 }
712 
713 /**
714  * This function is used to scan the flattened device-tree; it is
715  * used to extract the memory information at boot before we can
716  * unflatten the tree
717  */
718 static int __init scan_flat_dt(int (*it)(unsigned long node,
719 					 const char *uname, int depth,
720 					 void *data),
721 			       void *data)
722 {
723 	unsigned long p = ((unsigned long)initial_boot_params) +
724 		initial_boot_params->off_dt_struct;
725 	int rc = 0;
726 	int depth = -1;
727 
728 	do {
729 		u32 tag = *((u32 *)p);
730 		char *pathp;
731 
732 		p += 4;
733 		if (tag == OF_DT_END_NODE) {
734 			depth --;
735 			continue;
736 		}
737 		if (tag == OF_DT_NOP)
738 			continue;
739 		if (tag == OF_DT_END)
740 			break;
741 		if (tag == OF_DT_PROP) {
742 			u32 sz = *((u32 *)p);
743 			p += 8;
744 			if (initial_boot_params->version < 0x10)
745 				p = _ALIGN(p, sz >= 8 ? 8 : 4);
746 			p += sz;
747 			p = _ALIGN(p, 4);
748 			continue;
749 		}
750 		if (tag != OF_DT_BEGIN_NODE) {
751 			printk(KERN_WARNING "Invalid tag %x scanning flattened"
752 			       " device tree !\n", tag);
753 			return -EINVAL;
754 		}
755 		depth++;
756 		pathp = (char *)p;
757 		p = _ALIGN(p + strlen(pathp) + 1, 4);
758 		if ((*pathp) == '/') {
759 			char *lp, *np;
760 			for (lp = NULL, np = pathp; *np; np++)
761 				if ((*np) == '/')
762 					lp = np+1;
763 			if (lp != NULL)
764 				pathp = lp;
765 		}
766 		rc = it(p, pathp, depth, data);
767 		if (rc != 0)
768 			break;
769 	} while(1);
770 
771 	return rc;
772 }
773 
774 /**
775  * This function can be used within a scan_flat_dt() callback to get
776  * access to properties
777  */
778 static void* __init get_flat_dt_prop(unsigned long node, const char *name,
779 				     unsigned long *size)
780 {
781 	unsigned long p = node;
782 
783 	do {
784 		u32 tag = *((u32 *)p);
785 		u32 sz, noff;
786 		const char *nstr;
787 
788 		p += 4;
789 		if (tag == OF_DT_NOP)
790 			continue;
791 		if (tag != OF_DT_PROP)
792 			return NULL;
793 
794 		sz = *((u32 *)p);
795 		noff = *((u32 *)(p + 4));
796 		p += 8;
797 		if (initial_boot_params->version < 0x10)
798 			p = _ALIGN(p, sz >= 8 ? 8 : 4);
799 
800 		nstr = find_flat_dt_string(noff);
801 		if (nstr == NULL) {
802 			printk(KERN_WARNING "Can't find property index"
803 			       " name !\n");
804 			return NULL;
805 		}
806 		if (strcmp(name, nstr) == 0) {
807 			if (size)
808 				*size = sz;
809 			return (void *)p;
810 		}
811 		p += sz;
812 		p = _ALIGN(p, 4);
813 	} while(1);
814 }
815 
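/*
 * Bump-allocate from the buffer holding the unflattened tree (during
 * the sizing pass this simply advances the running size).
 */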
816 static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
817 				       unsigned long align)
818 {
819 	void *res;
820 
821 	*mem = _ALIGN(*mem, align);
822 	res = (void *)*mem;
823 	*mem += size;
824 
825 	return res;
826 }
827 
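/*
 * Unflatten one node at *p, then recursively its children.  When
 * allnextpp is NULL we are in the sizing pass and "mem" is only
 * advanced, nothing is written; fpsize tracks the length of the full
 * path being rebuilt for version 0x10 (unit-name only) trees.
 */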
828 static unsigned long __init unflatten_dt_node(unsigned long mem,
829 					      unsigned long *p,
830 					      struct device_node *dad,
831 					      struct device_node ***allnextpp,
832 					      unsigned long fpsize)
833 {
834 	struct device_node *np;
835 	struct property *pp, **prev_pp = NULL;
836 	char *pathp;
837 	u32 tag;
838 	unsigned int l, allocl;
839 	int has_name = 0;
840 	int new_format = 0;
841 
842 	tag = *((u32 *)(*p));
843 	if (tag != OF_DT_BEGIN_NODE) {
844 		printk("Weird tag at start of node: %x\n", tag);
845 		return mem;
846 	}
847 	*p += 4;
848 	pathp = (char *)*p;
849 	l = allocl = strlen(pathp) + 1;
850 	*p = _ALIGN(*p + l, 4);
851 
852 	/* version 0x10 has a more compact unit name here instead of the full
853 	 * path. We accumulate the full path size using "fpsize"; we'll rebuild
854 	 * it later. We detect this because the first character of the name is
855 	 * not '/'.
856 	 */
857 	if ((*pathp) != '/') {
858 		new_format = 1;
859 		if (fpsize == 0) {
860 			/* root node: special case. fpsize accounts for path
861 			 * plus terminating zero. root node only has '/', so
862 			 * fpsize should be 2, but we want to avoid first-level
863 			 * nodes having two '/', so we use fpsize 1 here
864 			 */
865 			fpsize = 1;
866 			allocl = 2;
867 		} else {
868 			/* account for '/' and path size minus terminal 0
869 			 * already in 'l'
870 			 */
871 			fpsize += l;
872 			allocl = fpsize;
873 		}
874 	}
875 
876 
877 	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
878 				__alignof__(struct device_node));
879 	if (allnextpp) {
880 		memset(np, 0, sizeof(*np));
881 		np->full_name = ((char*)np) + sizeof(struct device_node);
882 		if (new_format) {
883 			char *p = np->full_name;
884 			/* rebuild full path for new format */
885 			if (dad && dad->parent) {
886 				strcpy(p, dad->full_name);
887 #ifdef DEBUG
888 				if ((strlen(p) + l + 1) != allocl) {
889 					DBG("%s: p: %d, l: %d, a: %d\n",
890 					    pathp, strlen(p), l, allocl);
891 				}
892 #endif
893 				p += strlen(p);
894 			}
895 			*(p++) = '/';
896 			memcpy(p, pathp, l);
897 		} else
898 			memcpy(np->full_name, pathp, l);
899 		prev_pp = &np->properties;
900 		**allnextpp = np;
901 		*allnextpp = &np->allnext;
902 		if (dad != NULL) {
903 			np->parent = dad;
904 			/* we temporarily use the next field as `last_child'*/
905 			if (dad->next == 0)
906 				dad->child = np;
907 			else
908 				dad->next->sibling = np;
909 			dad->next = np;
910 		}
911 		kref_init(&np->kref);
912 	}
913 	while(1) {
914 		u32 sz, noff;
915 		char *pname;
916 
917 		tag = *((u32 *)(*p));
918 		if (tag == OF_DT_NOP) {
919 			*p += 4;
920 			continue;
921 		}
922 		if (tag != OF_DT_PROP)
923 			break;
924 		*p += 4;
925 		sz = *((u32 *)(*p));
926 		noff = *((u32 *)((*p) + 4));
927 		*p += 8;
928 		if (initial_boot_params->version < 0x10)
929 			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);
930 
931 		pname = find_flat_dt_string(noff);
932 		if (pname == NULL) {
933 			printk("Can't find property name in list !\n");
934 			break;
935 		}
936 		if (strcmp(pname, "name") == 0)
937 			has_name = 1;
938 		l = strlen(pname) + 1;
939 		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
940 					__alignof__(struct property));
941 		if (allnextpp) {
942 			if (strcmp(pname, "linux,phandle") == 0) {
943 				np->node = *((u32 *)*p);
944 				if (np->linux_phandle == 0)
945 					np->linux_phandle = np->node;
946 			}
947 			if (strcmp(pname, "ibm,phandle") == 0)
948 				np->linux_phandle = *((u32 *)*p);
949 			pp->name = pname;
950 			pp->length = sz;
951 			pp->value = (void *)*p;
952 			*prev_pp = pp;
953 			prev_pp = &pp->next;
954 		}
955 		*p = _ALIGN((*p) + sz, 4);
956 	}
957 	/* with version 0x10 we may not have the name property, recreate
958 	 * it here from the unit name if absent
959 	 */
960 	if (!has_name) {
961 		char *p = pathp, *ps = pathp, *pa = NULL;
962 		int sz;
963 
964 		while (*p) {
965 			if ((*p) == '@')
966 				pa = p;
967 			if ((*p) == '/')
968 				ps = p + 1;
969 			p++;
970 		}
971 		if (pa < ps)
972 			pa = p;
973 		sz = (pa - ps) + 1;
974 		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
975 					__alignof__(struct property));
976 		if (allnextpp) {
977 			pp->name = "name";
978 			pp->length = sz;
979 			pp->value = (unsigned char *)(pp + 1);
980 			*prev_pp = pp;
981 			prev_pp = &pp->next;
982 			memcpy(pp->value, ps, sz - 1);
983 			((char *)pp->value)[sz - 1] = 0;
984 			DBG("fixed up name for %s -> %s\n", pathp, pp->value);
985 		}
986 	}
987 	if (allnextpp) {
988 		*prev_pp = NULL;
989 		np->name = get_property(np, "name", NULL);
990 		np->type = get_property(np, "device_type", NULL);
991 
992 		if (!np->name)
993 			np->name = "<NULL>";
994 		if (!np->type)
995 			np->type = "<NULL>";
996 	}
997 	while (tag == OF_DT_BEGIN_NODE) {
998 		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
999 		tag = *((u32 *)(*p));
1000 	}
1001 	if (tag != OF_DT_END_NODE) {
1002 		printk("Weird tag at end of node: %x\n", tag);
1003 		return mem;
1004 	}
1005 	*p += 4;
1006 	return mem;
1007 }
1008 
1009 
1010 /**
1011  * unflattens the device-tree passed by the firmware, creating the
1012  * tree of struct device_node. It also fills the "name" and "type"
1013  * pointers of the nodes so the normal device-tree walking functions
1014  * can be used (this used to be done by finish_device_tree)
1015  */
1016 void __init unflatten_device_tree(void)
1017 {
1018 	unsigned long start, mem, size;
1019 	struct device_node **allnextp = &allnodes;
1020 	char *p = NULL;
1021 	int l = 0;
1022 
1023 	DBG(" -> unflatten_device_tree()\n");
1024 
1025 	/* First pass, scan for size */
1026 	start = ((unsigned long)initial_boot_params) +
1027 		initial_boot_params->off_dt_struct;
1028 	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
1029 	size = (size | 3) + 1;
1030 
1031 	DBG("  size is %lx, allocating...\n", size);
1032 
1033 	/* Allocate memory for the expanded device tree */
1034 	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
1035 	if (!mem) {
1036 		DBG("Couldn't allocate memory with lmb_alloc()!\n");
1037 		panic("Couldn't allocate memory with lmb_alloc()!\n");
1038 	}
1039 	mem = (unsigned long) __va(mem);
1040 
1041 	((u32 *)mem)[size / 4] = 0xdeadbeef;
1042 
1043 	DBG("  unflattening %lx...\n", mem);
1044 
1045 	/* Second pass, do actual unflattening */
1046 	start = ((unsigned long)initial_boot_params) +
1047 		initial_boot_params->off_dt_struct;
1048 	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
1049 	if (*((u32 *)start) != OF_DT_END)
1050 		printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
1051 	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
1052 		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
1053 		       ((u32 *)mem)[size / 4] );
1054 	*allnextp = NULL;
1055 
1056 	/* Get pointer to OF "/chosen" node for use everywhere */
1057 	of_chosen = of_find_node_by_path("/chosen");
1058 	if (of_chosen == NULL)
1059 		of_chosen = of_find_node_by_path("/chosen@0");
1060 
1061 	/* Retrieve command line */
1062 	if (of_chosen != NULL) {
1063 		p = (char *)get_property(of_chosen, "bootargs", &l);
1064 		if (p != NULL && l > 0)
1065 			strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
1066 	}
1067 #ifdef CONFIG_CMDLINE
1068 	if (l == 0 || (l == 1 && (*p) == 0))
1069 		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1070 #endif /* CONFIG_CMDLINE */
1071 
1072 	DBG("Command line is: %s\n", cmd_line);
1073 
1074 	DBG(" <- unflatten_device_tree()\n");
1075 }
1076 
1077 
1078 static int __init early_init_dt_scan_cpus(unsigned long node,
1079 					  const char *uname, int depth, void *data)
1080 {
1081 	char *type = get_flat_dt_prop(node, "device_type", NULL);
1082 	u32 *prop;
1083 	unsigned long size = 0;
1084 
1085 	/* We are scanning "cpu" nodes only */
1086 	if (type == NULL || strcmp(type, "cpu") != 0)
1087 		return 0;
1088 
1089 #ifdef CONFIG_PPC_PSERIES
1090 	/* On LPAR, look for the first ibm,pft-size property for the hash table size
1091 	 */
1092 	if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) {
1093 		u32 *pft_size;
1094 		pft_size = get_flat_dt_prop(node, "ibm,pft-size", NULL);
1095 		if (pft_size != NULL) {
1096 			/* pft_size[0] is the NUMA CEC cookie */
1097 			ppc64_pft_size = pft_size[1];
1098 		}
1099 	}
1100 #endif
1101 
1102 #ifdef CONFIG_PPC64
1103 	if (initial_boot_params && initial_boot_params->version >= 2) {
1104 		/* version 2 of the kexec param format adds the phys cpuid
1105 		 * of booted proc.
1106 		 */
1107 		boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
1108 		boot_cpuid = 0;
1109 	} else {
1110 		/* Check if it's the boot-cpu; if so, set its hw index in the paca now */
1111 		if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) {
1112 			prop = get_flat_dt_prop(node, "reg", NULL);
1113 			set_hard_smp_processor_id(0, prop == NULL ? 0 : *prop);
1114 			boot_cpuid_phys = get_hard_smp_processor_id(0);
1115 		}
1116 	}
1117 #endif
1118 
1119 #ifdef CONFIG_ALTIVEC
1120 	/* Check if we have a VMX unit and, if so, update the CPU features */
1121 	prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", &size);
1122 	if (prop && (*prop) > 0) {
1123 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1124 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1125 	}
1126 
1127 	/* Same goes for Apple's "altivec" property */
1128 	prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL);
1129 	if (prop) {
1130 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1131 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1132 	}
1133 #endif /* CONFIG_ALTIVEC */
1134 
1135 #ifdef CONFIG_PPC_PSERIES
1136 	/*
1137 	 * Check for an SMT capable CPU and set the CPU feature. We do
1138 	 * this by looking at the size of the ibm,ppc-interrupt-server#s
1139 	 * property
1140 	 */
1141 	prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
1142 				       &size);
1143 	cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
1144 	if (prop && ((size / sizeof(u32)) > 1))
1145 		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
1146 #endif
1147 
1148 	return 0;
1149 }
1150 
1151 static int __init early_init_dt_scan_chosen(unsigned long node,
1152 					    const char *uname, int depth, void *data)
1153 {
1154 	u32 *prop;
1155 	unsigned long *lprop;
1156 
1157 	DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1158 
1159 	if (depth != 1 ||
1160 	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
1161 		return 0;
1162 
1163 	/* get platform type */
1164 	prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL);
1165 	if (prop == NULL)
1166 		return 0;
1167 #ifdef CONFIG_PPC64
1168 	systemcfg->platform = *prop;
1169 #else
1170 #ifdef CONFIG_PPC_MULTIPLATFORM
1171 	_machine = *prop;
1172 #endif
1173 #endif
1174 
1175 #ifdef CONFIG_PPC64
1176 	/* check if iommu is forced on or off */
1177 	if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
1178 		iommu_is_off = 1;
1179 	if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
1180 		iommu_force_on = 1;
1181 #endif
1182 
1183  	lprop = get_flat_dt_prop(node, "linux,memory-limit", NULL);
1184  	if (lprop)
1185  		memory_limit = *lprop;
1186 
1187 #ifdef CONFIG_PPC64
1188  	lprop = get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
1189  	if (lprop)
1190  		tce_alloc_start = *lprop;
1191  	lprop = get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
1192  	if (lprop)
1193  		tce_alloc_end = *lprop;
1194 #endif
1195 
1196 #ifdef CONFIG_PPC_RTAS
1197 	/* To help early debugging via the front panel, we retrieve a minimal
1198 	 * set of RTAS info now if available
1199 	 */
1200 	{
1201 		u64 *basep, *entryp;
1202 
1203 		basep = get_flat_dt_prop(node, "linux,rtas-base", NULL);
1204 		entryp = get_flat_dt_prop(node, "linux,rtas-entry", NULL);
1205 		prop = get_flat_dt_prop(node, "linux,rtas-size", NULL);
1206 		if (basep && entryp && prop) {
1207 			rtas.base = *basep;
1208 			rtas.entry = *entryp;
1209 			rtas.size = *prop;
1210 		}
1211 	}
1212 #endif /* CONFIG_PPC_RTAS */
1213 
1214 	/* break now */
1215 	return 1;
1216 }
1217 
1218 static int __init early_init_dt_scan_root(unsigned long node,
1219 					  const char *uname, int depth, void *data)
1220 {
1221 	u32 *prop;
1222 
1223 	if (depth != 0)
1224 		return 0;
1225 
1226 	prop = get_flat_dt_prop(node, "#size-cells", NULL);
1227 	dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1228 	DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1229 
1230 	prop = get_flat_dt_prop(node, "#address-cells", NULL);
1231 	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1232 	DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1233 
1234 	/* break now */
1235 	return 1;
1236 }
1237 
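/*
 * Read one address or size value made of "s" cells from a memory node
 * "reg" property, keeping only what fits in an unsigned long.
 */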
1238 static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1239 {
1240 	cell_t *p = *cellp;
1241 	unsigned long r;
1242 
1243 	/* Ignore cells beyond what fits in an unsigned long */
1244 	while (s > sizeof(unsigned long) / 4) {
1245 		p++;
1246 		s--;
1247 	}
1248 	r = *p++;
1249 #ifdef CONFIG_PPC64
1250 	if (s > 1) {
1251 		r <<= 32;
1252 		r |= *(p++);
1253 		s--;
1254 	}
1255 #endif
1256 
1257 	*cellp = p;
1258 	return r;
1259 }
1260 
1261 
1262 static int __init early_init_dt_scan_memory(unsigned long node,
1263 					    const char *uname, int depth, void *data)
1264 {
1265 	char *type = get_flat_dt_prop(node, "device_type", NULL);
1266 	cell_t *reg, *endp;
1267 	unsigned long l;
1268 
1269 	/* We are scanning "memory" nodes only */
1270 	if (type == NULL || strcmp(type, "memory") != 0)
1271 		return 0;
1272 
1273 	reg = (cell_t *)get_flat_dt_prop(node, "reg", &l);
1274 	if (reg == NULL)
1275 		return 0;
1276 
1277 	endp = reg + (l / sizeof(cell_t));
1278 
1279 	DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n",
1280 	    uname, l, reg[0], reg[1], reg[2], reg[3]);
1281 
1282 	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1283 		unsigned long base, size;
1284 
1285 		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1286 		size = dt_mem_next_cell(dt_root_size_cells, &reg);
1287 
1288 		if (size == 0)
1289 			continue;
1290 		DBG(" - %lx ,  %lx\n", base, size);
1291 #ifdef CONFIG_PPC64
1292 		if (iommu_is_off) {
1293 			if (base >= 0x80000000ul)
1294 				continue;
1295 			if ((base + size) > 0x80000000ul)
1296 				size = 0x80000000ul - base;
1297 		}
1298 #endif
1299 		lmb_add(base, size);
1300 	}
1301 	return 0;
1302 }
1303 
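/*
 * The flattened tree carries a memory reserve map: (base, size) pairs
 * terminated by a zero size.  Reserve each range in the LMB allocator
 * so that early allocations stay clear of them.
 */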
1304 static void __init early_reserve_mem(void)
1305 {
1306 	unsigned long base, size;
1307 	unsigned long *reserve_map;
1308 
1309 	reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
1310 					initial_boot_params->off_mem_rsvmap);
1311 	while (1) {
1312 		base = *(reserve_map++);
1313 		size = *(reserve_map++);
1314 		if (size == 0)
1315 			break;
1316 		DBG("reserving: %lx -> %lx\n", base, size);
1317 		lmb_reserve(base, size);
1318 	}
1319 
1320 #if 0
1321 	DBG("memory reserved, lmbs :\n");
1322       	lmb_dump_all();
1323 #endif
1324 }
1325 
1326 void __init early_init_devtree(void *params)
1327 {
1328 	DBG(" -> early_init_devtree()\n");
1329 
1330 	/* Setup flat device-tree pointer */
1331 	initial_boot_params = params;
1332 
1333 	/* Retrieve various bits of information from the /chosen node of the
1334 	 * device-tree, including the platform type, initrd location and
1335 	 * size, TCE reserve, and more ...
1336 	 */
1337 	scan_flat_dt(early_init_dt_scan_chosen, NULL);
1338 
1339 	/* Scan memory nodes and rebuild LMBs */
1340 	lmb_init();
1341 	scan_flat_dt(early_init_dt_scan_root, NULL);
1342 	scan_flat_dt(early_init_dt_scan_memory, NULL);
1343 	lmb_enforce_memory_limit(memory_limit);
1344 	lmb_analyze();
1345 #ifdef CONFIG_PPC64
1346 	systemcfg->physicalMemorySize = lmb_phys_mem_size();
1347 #endif
1348 	lmb_reserve(0, __pa(klimit));
1349 
1350 	DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
1351 
1352 	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
1353 	early_reserve_mem();
1354 
1355 	DBG("Scanning CPUs ...\n");
1356 
1357 	/* Retrieve the hash table size from the flattened tree plus other
1358 	 * CPU-related information (AltiVec support, boot CPU ID, ...)
1359 	 */
1360 	scan_flat_dt(early_init_dt_scan_cpus, NULL);
1361 
1362 	DBG(" <- early_init_devtree()\n");
1363 }
1364 
1365 #undef printk
1366 
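/*
 * #address-cells and #size-cells describe a node's children, so the two
 * helpers below start at the parent and keep walking up until a value
 * is found.
 */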
1367 int
1368 prom_n_addr_cells(struct device_node* np)
1369 {
1370 	int* ip;
1371 	do {
1372 		if (np->parent)
1373 			np = np->parent;
1374 		ip = (int *) get_property(np, "#address-cells", NULL);
1375 		if (ip != NULL)
1376 			return *ip;
1377 	} while (np->parent);
1378 	/* No #address-cells property for the root node, default to 1 */
1379 	return 1;
1380 }
1381 
1382 int
1383 prom_n_size_cells(struct device_node* np)
1384 {
1385 	int* ip;
1386 	do {
1387 		if (np->parent)
1388 			np = np->parent;
1389 		ip = (int *) get_property(np, "#size-cells", NULL);
1390 		if (ip != NULL)
1391 			return *ip;
1392 	} while (np->parent);
1393 	/* No #size-cells property for the root node, default to 1 */
1394 	return 1;
1395 }
1396 
1397 /**
1398  * Work out the sense (active-low level / active-high edge)
1399  * of each interrupt from the device tree.
1400  */
1401 void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1402 {
1403 	struct device_node *np;
1404 	int i, j;
1405 
1406 	/* default to level-triggered */
1407 	memset(senses, 1, max - off);
1408 
1409 	for (np = allnodes; np != 0; np = np->allnext) {
1410 		for (j = 0; j < np->n_intrs; j++) {
1411 			i = np->intrs[j].line;
1412 			if (i >= off && i < max)
1413 				senses[i-off] = np->intrs[j].sense ?
1414 					IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE :
1415 					IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE;
1416 		}
1417 	}
1418 }
1419 
1420 /**
1421  * Construct and return a list of the device_nodes with a given name.
1422  */
1423 struct device_node *find_devices(const char *name)
1424 {
1425 	struct device_node *head, **prevp, *np;
1426 
1427 	prevp = &head;
1428 	for (np = allnodes; np != 0; np = np->allnext) {
1429 		if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1430 			*prevp = np;
1431 			prevp = &np->next;
1432 		}
1433 	}
1434 	*prevp = NULL;
1435 	return head;
1436 }
1437 EXPORT_SYMBOL(find_devices);
1438 
1439 /**
1440  * Construct and return a list of the device_nodes with a given type.
1441  */
1442 struct device_node *find_type_devices(const char *type)
1443 {
1444 	struct device_node *head, **prevp, *np;
1445 
1446 	prevp = &head;
1447 	for (np = allnodes; np != 0; np = np->allnext) {
1448 		if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1449 			*prevp = np;
1450 			prevp = &np->next;
1451 		}
1452 	}
1453 	*prevp = NULL;
1454 	return head;
1455 }
1456 EXPORT_SYMBOL(find_type_devices);
1457 
1458 /**
1459  * Returns all nodes linked together
1460  */
1461 struct device_node *find_all_nodes(void)
1462 {
1463 	struct device_node *head, **prevp, *np;
1464 
1465 	prevp = &head;
1466 	for (np = allnodes; np != 0; np = np->allnext) {
1467 		*prevp = np;
1468 		prevp = &np->next;
1469 	}
1470 	*prevp = NULL;
1471 	return head;
1472 }
1473 EXPORT_SYMBOL(find_all_nodes);
1474 
1475 /** Checks if the given "compat" string matches one of the strings in
1476  * the device's "compatible" property
1477  */
1478 int device_is_compatible(struct device_node *device, const char *compat)
1479 {
1480 	const char* cp;
1481 	int cplen, l;
1482 
1483 	cp = (char *) get_property(device, "compatible", &cplen);
1484 	if (cp == NULL)
1485 		return 0;
1486 	while (cplen > 0) {
1487 		if (strncasecmp(cp, compat, strlen(compat)) == 0)
1488 			return 1;
1489 		l = strlen(cp) + 1;
1490 		cp += l;
1491 		cplen -= l;
1492 	}
1493 
1494 	return 0;
1495 }
1496 EXPORT_SYMBOL(device_is_compatible);
1497 
1498 
1499 /**
1500  * Indicates whether the root node has a given value in its
1501  * compatible property.
1502  */
1503 int machine_is_compatible(const char *compat)
1504 {
1505 	struct device_node *root;
1506 	int rc = 0;
1507 
1508 	root = of_find_node_by_path("/");
1509 	if (root) {
1510 		rc = device_is_compatible(root, compat);
1511 		of_node_put(root);
1512 	}
1513 	return rc;
1514 }
1515 EXPORT_SYMBOL(machine_is_compatible);
1516 
1517 /**
1518  * Construct and return a list of the device_nodes with a given type
1519  * and compatible property.
1520  */
1521 struct device_node *find_compatible_devices(const char *type,
1522 					    const char *compat)
1523 {
1524 	struct device_node *head, **prevp, *np;
1525 
1526 	prevp = &head;
1527 	for (np = allnodes; np != 0; np = np->allnext) {
1528 		if (type != NULL
1529 		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1530 			continue;
1531 		if (device_is_compatible(np, compat)) {
1532 			*prevp = np;
1533 			prevp = &np->next;
1534 		}
1535 	}
1536 	*prevp = NULL;
1537 	return head;
1538 }
1539 EXPORT_SYMBOL(find_compatible_devices);
1540 
1541 /**
1542  * Find the device_node with a given full_name.
1543  */
1544 struct device_node *find_path_device(const char *path)
1545 {
1546 	struct device_node *np;
1547 
1548 	for (np = allnodes; np != 0; np = np->allnext)
1549 		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1550 			return np;
1551 	return NULL;
1552 }
1553 EXPORT_SYMBOL(find_path_device);
1554 
1555 /*******
1556  *
1557  * New implementation of the OF "find" APIs: they return a refcounted
1558  * object; call of_node_put() when done.  The device tree and list
1559  * are protected by a rw_lock.
1560  *
1561  * Note that property management will need some locking as well,
1562  * this isn't dealt with yet.
1563  *
1564  *******/
1565 
1566 /**
1567  *	of_find_node_by_name - Find a node by its "name" property
1568  *	@from:	The node to start searching from or NULL, the node
1569  *		you pass will not be searched, only the next one
1570  *		will; typically, you pass what the previous call
1571  *		returned. of_node_put() will be called on it
1572  *	@name:	The name string to match against
1573  *
1574  *	Returns a node pointer with refcount incremented, use
1575  *	of_node_put() on it when done.
1576  */
1577 struct device_node *of_find_node_by_name(struct device_node *from,
1578 	const char *name)
1579 {
1580 	struct device_node *np;
1581 
1582 	read_lock(&devtree_lock);
1583 	np = from ? from->allnext : allnodes;
1584 	for (; np != 0; np = np->allnext)
1585 		if (np->name != 0 && strcasecmp(np->name, name) == 0
1586 		    && of_node_get(np))
1587 			break;
1588 	if (from)
1589 		of_node_put(from);
1590 	read_unlock(&devtree_lock);
1591 	return np;
1592 }
1593 EXPORT_SYMBOL(of_find_node_by_name);
1594 
1595 /**
1596  *	of_find_node_by_type - Find a node by its "device_type" property
1597  *	@from:	The node to start searching from or NULL, the node
1598  *		you pass will not be searched, only the next one
1599  *		will; typically, you pass what the previous call
1600  *		returned. of_node_put() will be called on it
1601  *	@name:	The type string to match against
1602  *
1603  *	Returns a node pointer with refcount incremented, use
1604  *	of_node_put() on it when done.
1605  */
1606 struct device_node *of_find_node_by_type(struct device_node *from,
1607 	const char *type)
1608 {
1609 	struct device_node *np;
1610 
1611 	read_lock(&devtree_lock);
1612 	np = from ? from->allnext : allnodes;
1613 	for (; np != 0; np = np->allnext)
1614 		if (np->type != 0 && strcasecmp(np->type, type) == 0
1615 		    && of_node_get(np))
1616 			break;
1617 	if (from)
1618 		of_node_put(from);
1619 	read_unlock(&devtree_lock);
1620 	return np;
1621 }
1622 EXPORT_SYMBOL(of_find_node_by_type);
1623 
1624 /**
1625  *	of_find_compatible_node - Find a node based on type and one of the
1626  *                                tokens in its "compatible" property
1627  *	@from:		The node to start searching from or NULL, the node
1628  *			you pass will not be searched, only the next one
1629  *			will; typically, you pass what the previous call
1630  *			returned. of_node_put() will be called on it
1631  *	@type:		The type string to match "device_type" or NULL to ignore
1632  *	@compatible:	The string to match to one of the tokens in the device
1633  *			"compatible" list.
1634  *
1635  *	Returns a node pointer with refcount incremented, use
1636  *	of_node_put() on it when done.
1637  */
1638 struct device_node *of_find_compatible_node(struct device_node *from,
1639 	const char *type, const char *compatible)
1640 {
1641 	struct device_node *np;
1642 
1643 	read_lock(&devtree_lock);
1644 	np = from ? from->allnext : allnodes;
1645 	for (; np != 0; np = np->allnext) {
1646 		if (type != NULL
1647 		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1648 			continue;
1649 		if (device_is_compatible(np, compatible) && of_node_get(np))
1650 			break;
1651 	}
1652 	if (from)
1653 		of_node_put(from);
1654 	read_unlock(&devtree_lock);
1655 	return np;
1656 }
1657 EXPORT_SYMBOL(of_find_compatible_node);
1658 
1659 /**
1660  *	of_find_node_by_path - Find a node matching a full OF path
1661  *	@path:	The full path to match
1662  *
1663  *	Returns a node pointer with refcount incremented, use
1664  *	of_node_put() on it when done.
1665  */
1666 struct device_node *of_find_node_by_path(const char *path)
1667 {
1668 	struct device_node *np = allnodes;
1669 
1670 	read_lock(&devtree_lock);
1671 	for (; np != 0; np = np->allnext) {
1672 		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1673 		    && of_node_get(np))
1674 			break;
1675 	}
1676 	read_unlock(&devtree_lock);
1677 	return np;
1678 }
1679 EXPORT_SYMBOL(of_find_node_by_path);
1680 
1681 /**
1682  *	of_find_node_by_phandle - Find a node given a phandle
1683  *	@handle:	phandle of the node to find
1684  *
1685  *	Returns a node pointer with refcount incremented, use
1686  *	of_node_put() on it when done.
1687  */
1688 struct device_node *of_find_node_by_phandle(phandle handle)
1689 {
1690 	struct device_node *np;
1691 
1692 	read_lock(&devtree_lock);
1693 	for (np = allnodes; np != 0; np = np->allnext)
1694 		if (np->linux_phandle == handle)
1695 			break;
1696 	if (np)
1697 		of_node_get(np);
1698 	read_unlock(&devtree_lock);
1699 	return np;
1700 }
1701 EXPORT_SYMBOL(of_find_node_by_phandle);
1702 
1703 /**
1704  *	of_find_all_nodes - Get next node in global list
1705  *	@prev:	Previous node or NULL to start iteration
1706  *		of_node_put() will be called on it
1707  *
1708  *	Returns a node pointer with refcount incremented, use
1709  *	of_node_put() on it when done.
1710  */
1711 struct device_node *of_find_all_nodes(struct device_node *prev)
1712 {
1713 	struct device_node *np;
1714 
1715 	read_lock(&devtree_lock);
1716 	np = prev ? prev->allnext : allnodes;
1717 	for (; np != 0; np = np->allnext)
1718 		if (of_node_get(np))
1719 			break;
1720 	if (prev)
1721 		of_node_put(prev);
1722 	read_unlock(&devtree_lock);
1723 	return np;
1724 }
1725 EXPORT_SYMBOL(of_find_all_nodes);
1726 
1727 /**
1728  *	of_get_parent - Get a node's parent if any
1729  *	@node:	Node to get parent
1730  *
1731  *	Returns a node pointer with refcount incremented, use
1732  *	of_node_put() on it when done.
1733  */
1734 struct device_node *of_get_parent(const struct device_node *node)
1735 {
1736 	struct device_node *np;
1737 
1738 	if (!node)
1739 		return NULL;
1740 
1741 	read_lock(&devtree_lock);
1742 	np = of_node_get(node->parent);
1743 	read_unlock(&devtree_lock);
1744 	return np;
1745 }
1746 EXPORT_SYMBOL(of_get_parent);
1747 
1748 /**
1749  *	of_get_next_child - Iterate over a node's children
1750  *	@node:	parent node
1751  *	@prev:	previous child of the parent node, or NULL to get first
1752  *
1753  *	Returns a node pointer with refcount incremented, use
1754  *	of_node_put() on it when done.
1755  */
1756 struct device_node *of_get_next_child(const struct device_node *node,
1757 	struct device_node *prev)
1758 {
1759 	struct device_node *next;
1760 
1761 	read_lock(&devtree_lock);
1762 	next = prev ? prev->sibling : node->child;
1763 	for (; next != 0; next = next->sibling)
1764 		if (of_node_get(next))
1765 			break;
1766 	if (prev)
1767 		of_node_put(prev);
1768 	read_unlock(&devtree_lock);
1769 	return next;
1770 }
1771 EXPORT_SYMBOL(of_get_next_child);
1772 
1773 /**
1774  *	of_node_get - Increment refcount of a node
1775  *	@node:	Node to inc refcount, NULL is supported to
1776  *		simplify writing of callers
1777  *
1778  *	Returns node.
1779  */
1780 struct device_node *of_node_get(struct device_node *node)
1781 {
1782 	if (node)
1783 		kref_get(&node->kref);
1784 	return node;
1785 }
1786 EXPORT_SYMBOL(of_node_get);
1787 
1788 static inline struct device_node * kref_to_device_node(struct kref *kref)
1789 {
1790 	return container_of(kref, struct device_node, kref);
1791 }
1792 
1793 /**
1794  *	of_node_release - release a dynamically allocated node
1795  *	@kref:  kref element of the node to be released
1796  *
1797  *	In of_node_put() this function is passed to kref_put()
1798  *	as the destructor.
1799  */
1800 static void of_node_release(struct kref *kref)
1801 {
1802 	struct device_node *node = kref_to_device_node(kref);
1803 	struct property *prop = node->properties;
1804 
1805 	if (!OF_IS_DYNAMIC(node))
1806 		return;
1807 	while (prop) {
1808 		struct property *next = prop->next;
1809 		kfree(prop->name);
1810 		kfree(prop->value);
1811 		kfree(prop);
1812 		prop = next;
1813 	}
1814 	kfree(node->intrs);
1815 	kfree(node->addrs);
1816 	kfree(node->full_name);
1817 	kfree(node->data);
1818 	kfree(node);
1819 }
1820 
1821 /**
1822  *	of_node_put - Decrement refcount of a node
1823  *	@node:	Node to dec refcount, NULL is supported to
1824  *		simplify writing of callers
1825  *
1826  */
1827 void of_node_put(struct device_node *node)
1828 {
1829 	if (node)
1830 		kref_put(&node->kref, of_node_release);
1831 }
1832 EXPORT_SYMBOL(of_node_put);
1833 
1834 /*
1835  * Plug a device node into the tree and global list.
1836  */
1837 void of_attach_node(struct device_node *np)
1838 {
1839 	write_lock(&devtree_lock);
1840 	np->sibling = np->parent->child;
1841 	np->allnext = allnodes;
1842 	np->parent->child = np;
1843 	allnodes = np;
1844 	write_unlock(&devtree_lock);
1845 }
1846 
1847 /*
1848  * "Unplug" a node from the device tree.  The caller must hold
1849  * a reference to the node.  The memory associated with the node
1850  * is not freed until its refcount goes to zero.
1851  */
1852 void of_detach_node(const struct device_node *np)
1853 {
1854 	struct device_node *parent;
1855 
1856 	write_lock(&devtree_lock);
1857 
1858 	parent = np->parent;
1859 
1860 	if (allnodes == np)
1861 		allnodes = np->allnext;
1862 	else {
1863 		struct device_node *prev;
1864 		for (prev = allnodes;
1865 		     prev->allnext != np;
1866 		     prev = prev->allnext)
1867 			;
1868 		prev->allnext = np->allnext;
1869 	}
1870 
1871 	if (parent->child == np)
1872 		parent->child = np->sibling;
1873 	else {
1874 		struct device_node *prevsib;
1875 		for (prevsib = np->parent->child;
1876 		     prevsib->sibling != np;
1877 		     prevsib = prevsib->sibling)
1878 			;
1879 		prevsib->sibling = np->sibling;
1880 	}
1881 
1882 	write_unlock(&devtree_lock);
1883 }
1884 
1885 #ifdef CONFIG_PPC_PSERIES
1886 /*
1887  * Fix up the uninitialized fields in a new device node:
1888  * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1889  *
1890  * A lot of boot-time code is duplicated here, because functions such
1891  * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1892  * slab allocator.
1893  *
1894  * This should probably be split up into smaller chunks.
1895  */
1896 
1897 static int of_finish_dynamic_node(struct device_node *node,
1898 				  unsigned long *unused1, int unused2,
1899 				  int unused3, int unused4)
1900 {
1901 	struct device_node *parent = of_get_parent(node);
1902 	int err = 0;
1903 	phandle *ibm_phandle;
1904 
1905 	node->name = get_property(node, "name", NULL);
1906 	node->type = get_property(node, "device_type", NULL);
1907 
1908 	if (!parent) {
1909 		err = -ENODEV;
1910 		goto out;
1911 	}
1912 
1913 	/* We don't support that function on PowerMac, at least
1914 	 * not yet
1915 	 */
1916 	if (systemcfg->platform == PLATFORM_POWERMAC)
1917 		return -ENODEV;
1918 
1919 	/* fix up new node's linux_phandle field */
1920 	if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL)))
1921 		node->linux_phandle = *ibm_phandle;
1922 
1923 out:
1924 	of_node_put(parent);
1925 	return err;
1926 }
1927 
1928 static int prom_reconfig_notifier(struct notifier_block *nb,
1929 				  unsigned long action, void *node)
1930 {
1931 	int err;
1932 
1933 	switch (action) {
1934 	case PSERIES_RECONFIG_ADD:
1935 		err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
1936 		if (err < 0) {
1937 			printk(KERN_ERR "finish_node returned %d\n", err);
1938 			err = NOTIFY_BAD;
1939 		}
1940 		break;
1941 	default:
1942 		err = NOTIFY_DONE;
1943 		break;
1944 	}
1945 	return err;
1946 }
1947 
1948 static struct notifier_block prom_reconfig_nb = {
1949 	.notifier_call = prom_reconfig_notifier,
1950 	.priority = 10, /* This one needs to run first */
1951 };
1952 
1953 static int __init prom_reconfig_setup(void)
1954 {
1955 	return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
1956 }
1957 __initcall(prom_reconfig_setup);
1958 #endif
1959 
1960 /*
1961  * Find a property with a given name for a given node
1962  * and return the value.
1963  */
1964 unsigned char *get_property(struct device_node *np, const char *name,
1965 			    int *lenp)
1966 {
1967 	struct property *pp;
1968 
1969 	for (pp = np->properties; pp != 0; pp = pp->next)
1970 		if (strcmp(pp->name, name) == 0) {
1971 			if (lenp != 0)
1972 				*lenp = pp->length;
1973 			return pp->value;
1974 		}
1975 	return NULL;
1976 }
1977 EXPORT_SYMBOL(get_property);
1978 
1979 /*
1980  * Add a property to a node
1981  */
1982 void prom_add_property(struct device_node* np, struct property* prop)
1983 {
1984 	struct property **next = &np->properties;
1985 
1986 	prop->next = NULL;
1987 	while (*next)
1988 		next = &(*next)->next;
1989 	*next = prop;
1990 }
1991 
1992 /* I quickly hacked that one, check against spec ! */
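/*
 * Decode the space code (the "ss" bits, 24-25, of an OF PCI phys.hi
 * word): 0 is config space (treated as memory here), 1 is I/O and 2 is
 * 32-bit memory; 64-bit memory space is not handled.
 */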
1993 static inline unsigned long
1994 bus_space_to_resource_flags(unsigned int bus_space)
1995 {
1996 	u8 space = (bus_space >> 24) & 0xf;
1997 	if (space == 0)
1998 		space = 0x02;
1999 	if (space == 0x02)
2000 		return IORESOURCE_MEM;
2001 	else if (space == 0x01)
2002 		return IORESOURCE_IO;
2003 	else {
2004 		printk(KERN_WARNING "prom.c: bus_space_to_resource_flags(), space: %x\n",
2005 		    	bus_space);
2006 		return 0;
2007 	}
2008 }
2009 
2010 #ifdef CONFIG_PCI
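/*
 * Find the pci_dev resource (BAR) that contains the given OF address
 * range, so that the range can be requested as a child of it.
 */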
2011 static struct resource *find_parent_pci_resource(struct pci_dev* pdev,
2012 						 struct address_range *range)
2013 {
2014 	unsigned long mask;
2015 	int i;
2016 
2017 	/* Check this one */
2018 	mask = bus_space_to_resource_flags(range->space);
2019 	for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2020 		if ((pdev->resource[i].flags & mask) == mask &&
2021 			pdev->resource[i].start <= range->address &&
2022 			pdev->resource[i].end > range->address) {
2023 				if ((range->address + range->size - 1) > pdev->resource[i].end) {
2024 					/* Add better message */
2025 					printk(KERN_WARNING "PCI/OF resource overlap !\n");
2026 					return NULL;
2027 				}
2028 				break;
2029 			}
2030 	}
2031 	if (i == DEVICE_COUNT_RESOURCE)
2032 		return NULL;
2033 	return &pdev->resource[i];
2034 }
2035 
2036 /*
2037  * Request an OF device resource. Currently handles children of PCI devices,
2038  * or other nodes attached to the root node. Ultimately, some link to the
2039  * resources should be stored in the OF node.
2040  */
2041 struct resource *request_OF_resource(struct device_node* node, int index,
2042 				     const char* name_postfix)
2043 {
2044 	struct pci_dev* pcidev;
2045 	u8 pci_bus, pci_devfn;
2046 	unsigned long iomask;
2047 	struct device_node* nd;
2048 	struct resource* parent;
2049 	struct resource *res = NULL;
2050 	int nlen, plen;
2051 
2052 	if (index >= node->n_addrs)
2053 		goto fail;
2054 
2055 	/* Sanity check on bus space */
2056 	iomask = bus_space_to_resource_flags(node->addrs[index].space);
2057 	if (iomask & IORESOURCE_MEM)
2058 		parent = &iomem_resource;
2059 	else if (iomask & IORESOURCE_IO)
2060 		parent = &ioport_resource;
2061 	else
2062 		goto fail;
2063 
2064 	/* Find a PCI parent if any */
2065 	nd = node;
2066 	pcidev = NULL;
2067 	while (nd) {
2068 		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2069 			pcidev = pci_find_slot(pci_bus, pci_devfn);
2070 		if (pcidev) break;
2071 		nd = nd->parent;
2072 	}
2073 	if (pcidev)
2074 		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2075 	if (!parent) {
2076 		printk(KERN_WARNING "request_OF_resource(%s), parent not found\n",
2077 			node->name);
2078 		goto fail;
2079 	}
2080 
2081 	res = __request_region(parent, node->addrs[index].address,
2082 			       node->addrs[index].size, NULL);
2083 	if (!res)
2084 		goto fail;
2085 	nlen = strlen(node->name);
2086 	plen = name_postfix ? strlen(name_postfix) : 0;
2087 	res->name = (const char *)kmalloc(nlen+plen+1, GFP_KERNEL);
2088 	if (res->name) {
2089 		strcpy((char *)res->name, node->name);
2090 		if (plen)
2091 			strcpy((char *)res->name+nlen, name_postfix);
2092 	}
2093 	return res;
2094 fail:
2095 	return NULL;
2096 }
2097 EXPORT_SYMBOL(request_OF_resource);
2098 
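/*
 * Undo request_OF_resource(): locate the busy region matching
 * node->addrs[index] under its parent resource and release it.
 */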
2099 int release_OF_resource(struct device_node *node, int index)
2100 {
2101 	struct pci_dev* pcidev;
2102 	u8 pci_bus, pci_devfn;
2103 	unsigned long iomask, start, end;
2104 	struct device_node* nd;
2105 	struct resource* parent;
2106 	struct resource *res = NULL;
2107 
2108 	if (index >= node->n_addrs)
2109 		return -EINVAL;
2110 
2111 	/* Sanity check on bus space */
2112 	iomask = bus_space_to_resource_flags(node->addrs[index].space);
2113 	if (iomask & IORESOURCE_MEM)
2114 		parent = &iomem_resource;
2115 	else if (iomask & IORESOURCE_IO)
2116 		parent = &ioport_resource;
2117 	else
2118 		return -EINVAL;
2119 
2120 	/* Find a PCI parent if any */
2121 	nd = node;
2122 	pcidev = NULL;
2123 	while(nd) {
2124 		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2125 			pcidev = pci_find_slot(pci_bus, pci_devfn);
2126 		if (pcidev) break;
2127 		nd = nd->parent;
2128 	}
2129 	if (pcidev)
2130 		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2131 	if (!parent) {
2132 		printk(KERN_WARNING "release_OF_resource(%s), parent not found\n",
2133 			node->name);
2134 		return -ENODEV;
2135 	}
2136 
2137 	/* Find our region among the parent's children */
2138 	res = parent->child;
2139 	start = node->addrs[index].address;
2140 	end = start + node->addrs[index].size - 1;
2141 	while (res) {
2142 		if (res->start == start && res->end == end &&
2143 		    (res->flags & IORESOURCE_BUSY))
2144 		    	break;
2145 		if (res->start <= start && res->end >= end)
2146 			res = res->child;
2147 		else
2148 			res = res->sibling;
2149 	}
2150 	if (!res)
2151 		return -ENODEV;
2152 
2153 	if (res->name) {
2154 		kfree(res->name);
2155 		res->name = NULL;
2156 	}
2157 	release_resource(res);
2158 	kfree(res);
2159 
2160 	return 0;
2161 }
2162 EXPORT_SYMBOL(release_OF_resource);
2163 #endif /* CONFIG_PCI */
2164