xref: /openbmc/linux/arch/powerpc/kernel/prom.c (revision 40ef8cbc)
1 /*
2  * Procedures for creating, accessing and interpreting the device tree.
3  *
4  * Paul Mackerras	August 1996.
5  * Copyright (C) 1996-2005 Paul Mackerras.
6  *
7  *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8  *    {engebret|bergner}@us.ibm.com
9  *
10  *      This program is free software; you can redistribute it and/or
11  *      modify it under the terms of the GNU General Public License
12  *      as published by the Free Software Foundation; either version
13  *      2 of the License, or (at your option) any later version.
14  */
15 
16 #undef DEBUG
17 
18 #include <stdarg.h>
19 #include <linux/config.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/init.h>
23 #include <linux/threads.h>
24 #include <linux/spinlock.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/stringify.h>
28 #include <linux/delay.h>
29 #include <linux/initrd.h>
30 #include <linux/bitops.h>
31 #include <linux/module.h>
32 
33 #include <asm/prom.h>
34 #include <asm/rtas.h>
35 #include <asm/lmb.h>
36 #include <asm/page.h>
37 #include <asm/processor.h>
38 #include <asm/irq.h>
39 #include <asm/io.h>
40 #include <asm/smp.h>
41 #include <asm/system.h>
42 #include <asm/mmu.h>
43 #include <asm/pgtable.h>
44 #include <asm/pci.h>
45 #include <asm/iommu.h>
46 #include <asm/btext.h>
47 #include <asm/sections.h>
48 #include <asm/machdep.h>
49 #include <asm/pSeries_reconfig.h>
50 #include <asm/pci-bridge.h>
51 #ifdef CONFIG_PPC64
52 #include <asm/systemcfg.h>
53 #endif
54 
55 #ifdef DEBUG
56 #define DBG(fmt...) printk(KERN_ERR fmt)
57 #else
58 #define DBG(fmt...)
59 #endif
60 
61 struct pci_reg_property {
62 	struct pci_address addr;
63 	u32 size_hi;
64 	u32 size_lo;
65 };
66 
67 struct isa_reg_property {
68 	u32 space;
69 	u32 address;
70 	u32 size;
71 };
72 
73 
74 typedef int interpret_func(struct device_node *, unsigned long *,
75 			   int, int, int);
76 
77 extern struct rtas_t rtas;
78 extern struct lmb lmb;
79 extern unsigned long klimit;
80 
81 static unsigned long memory_limit;
82 
83 static int __initdata dt_root_addr_cells;
84 static int __initdata dt_root_size_cells;
85 
86 #ifdef CONFIG_PPC64
87 static int __initdata iommu_is_off;
88 int __initdata iommu_force_on;
89 extern unsigned long tce_alloc_start, tce_alloc_end;
90 #endif
91 
92 typedef u32 cell_t;
93 
94 #if 0
95 static struct boot_param_header *initial_boot_params __initdata;
96 #else
97 struct boot_param_header *initial_boot_params;
98 #endif
99 
100 static struct device_node *allnodes = NULL;
101 
102 /* Use when traversing the tree through the allnext, child, sibling,
103  * or parent members of struct device_node.
104  */
105 static DEFINE_RWLOCK(devtree_lock);
106 
107 /* exported to the outside world */
108 struct device_node *of_chosen;
109 
110 struct device_node *dflt_interrupt_controller;
111 int num_interrupt_controllers;
112 
113 u32 rtas_data;
114 u32 rtas_entry;
115 
116 /*
117  * Wrapper for allocating memory for various data that needs to be
118  * attached to device nodes as they are processed at boot or when
119  * added to the device tree later (e.g. DLPAR).  At boot there is
120  * already a region reserved so we just increment *mem_start by size;
121  * otherwise we call kmalloc.
122  */
123 static void * prom_alloc(unsigned long size, unsigned long *mem_start)
124 {
125 	unsigned long tmp;
126 
127 	if (!mem_start)
128 		return kmalloc(size, GFP_KERNEL);
129 
130 	tmp = *mem_start;
131 	*mem_start += size;
132 	return (void *)tmp;
133 }
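/*
 * Illustrative sketch (not part of the original source): the two ways
 * prom_alloc() above is used.  At boot, callers thread a bump pointer
 * through it; later (e.g. for DLPAR-added nodes) they pass NULL and get
 * kmalloc'ed memory instead.
 *
 *	unsigned long mem = start_of_reserved_region;	(hypothetical name)
 *	struct address_range *a;
 *
 *	a = prom_alloc(4 * sizeof(*a), &mem);	boot: bumps 'mem' by size
 *	a = prom_alloc(4 * sizeof(*a), NULL);	runtime: plain kmalloc()
 */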
134 
135 /*
136  * Find the device_node with a given phandle.
137  */
138 static struct device_node * find_phandle(phandle ph)
139 {
140 	struct device_node *np;
141 
142 	for (np = allnodes; np != 0; np = np->allnext)
143 		if (np->linux_phandle == ph)
144 			return np;
145 	return NULL;
146 }
147 
148 /*
149  * Find the interrupt parent of a node.
150  */
151 static struct device_node * __devinit intr_parent(struct device_node *p)
152 {
153 	phandle *parp;
154 
155 	parp = (phandle *) get_property(p, "interrupt-parent", NULL);
156 	if (parp == NULL)
157 		return p->parent;
158 	p = find_phandle(*parp);
159 	if (p != NULL)
160 		return p;
161 	/*
162 	 * On a powermac booted with BootX, we don't get to know the
163 	 * phandles for any nodes, so find_phandle will return NULL.
164 	 * Fortunately these machines only have one interrupt controller
165 	 * so there isn't in fact any ambiguity.  -- paulus
166 	 */
167 	if (num_interrupt_controllers == 1)
168 		p = dflt_interrupt_controller;
169 	return p;
170 }
171 
172 /*
173  * Find out the size of each entry of the interrupts property
174  * for a node.
175  */
176 int __devinit prom_n_intr_cells(struct device_node *np)
177 {
178 	struct device_node *p;
179 	unsigned int *icp;
180 
181 	for (p = np; (p = intr_parent(p)) != NULL; ) {
182 		icp = (unsigned int *)
183 			get_property(p, "#interrupt-cells", NULL);
184 		if (icp != NULL)
185 			return *icp;
186 		if (get_property(p, "interrupt-controller", NULL) != NULL
187 		    || get_property(p, "interrupt-map", NULL) != NULL) {
188 			printk("oops, node %s doesn't have #interrupt-cells\n",
189 			       p->full_name);
190 			return 1;
191 		}
192 	}
193 #ifdef DEBUG_IRQ
194 	printk("prom_n_intr_cells failed for %s\n", np->full_name);
195 #endif
196 	return 1;
197 }
198 
199 /*
200  * Map an interrupt from a device up to the platform interrupt
201  * descriptor.
202  */
203 static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
204 				   struct device_node *np, unsigned int *ints,
205 				   int nintrc)
206 {
207 	struct device_node *p, *ipar;
208 	unsigned int *imap, *imask, *ip;
209 	int i, imaplen, match;
210 	int newintrc = 0, newaddrc = 0;
211 	unsigned int *reg;
212 	int naddrc;
213 
214 	reg = (unsigned int *) get_property(np, "reg", NULL);
215 	naddrc = prom_n_addr_cells(np);
216 	p = intr_parent(np);
217 	while (p != NULL) {
218 		if (get_property(p, "interrupt-controller", NULL) != NULL)
219 			/* this node is an interrupt controller, stop here */
220 			break;
221 		imap = (unsigned int *)
222 			get_property(p, "interrupt-map", &imaplen);
223 		if (imap == NULL) {
224 			p = intr_parent(p);
225 			continue;
226 		}
227 		imask = (unsigned int *)
228 			get_property(p, "interrupt-map-mask", NULL);
229 		if (imask == NULL) {
230 			printk("oops, %s has interrupt-map but no mask\n",
231 			       p->full_name);
232 			return 0;
233 		}
234 		imaplen /= sizeof(unsigned int);
235 		match = 0;
236 		ipar = NULL;
237 		while (imaplen > 0 && !match) {
238 			/* check the child-interrupt field */
239 			match = 1;
240 			for (i = 0; i < naddrc && match; ++i)
241 				match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
242 			for (; i < naddrc + nintrc && match; ++i)
243 				match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
244 			imap += naddrc + nintrc;
245 			imaplen -= naddrc + nintrc;
246 			/* grab the interrupt parent */
247 			ipar = find_phandle((phandle) *imap++);
248 			--imaplen;
249 			if (ipar == NULL && num_interrupt_controllers == 1)
250 				/* cope with BootX not giving us phandles */
251 				ipar = dflt_interrupt_controller;
252 			if (ipar == NULL) {
253 				printk("oops, no int parent %x in map of %s\n",
254 				       imap[-1], p->full_name);
255 				return 0;
256 			}
257 			/* find the parent's # addr and intr cells */
258 			ip = (unsigned int *)
259 				get_property(ipar, "#interrupt-cells", NULL);
260 			if (ip == NULL) {
261 				printk("oops, no #interrupt-cells on %s\n",
262 				       ipar->full_name);
263 				return 0;
264 			}
265 			newintrc = *ip;
266 			ip = (unsigned int *)
267 				get_property(ipar, "#address-cells", NULL);
268 			newaddrc = (ip == NULL)? 0: *ip;
269 			imap += newaddrc + newintrc;
270 			imaplen -= newaddrc + newintrc;
271 		}
272 		if (imaplen < 0) {
273 			printk("oops, error decoding int-map on %s, len=%d\n",
274 			       p->full_name, imaplen);
275 			return 0;
276 		}
277 		if (!match) {
278 #ifdef DEBUG_IRQ
279 			printk("oops, no match in %s int-map for %s\n",
280 			       p->full_name, np->full_name);
281 #endif
282 			return 0;
283 		}
284 		p = ipar;
285 		naddrc = newaddrc;
286 		nintrc = newintrc;
287 		ints = imap - nintrc;
288 		reg = ints - naddrc;
289 	}
290 	if (p == NULL) {
291 #ifdef DEBUG_IRQ
292 		printk("hmmm, int tree for %s doesn't have ctrler\n",
293 		       np->full_name);
294 #endif
295 		return 0;
296 	}
297 	*irq = ints;
298 	*ictrler = p;
299 	return nintrc;
300 }
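/*
 * For reference (an illustration, not from the original source): each
 * interrupt-map entry walked by map_interrupt() above has the layout
 *
 *	child unit address	(naddrc cells, per the child bus)
 *	child interrupt spec	(nintrc cells, per the child bus)
 *	interrupt parent	(1 phandle cell)
 *	parent unit address	(#address-cells of the interrupt parent)
 *	parent interrupt spec	(#interrupt-cells of the interrupt parent)
 *
 * interrupt-map-mask is applied to the child unit address and interrupt
 * spec cells before comparing them against an entry.
 */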
301 
302 static int __devinit finish_node_interrupts(struct device_node *np,
303 					    unsigned long *mem_start,
304 					    int measure_only)
305 {
306 	unsigned int *ints;
307 	int intlen, intrcells, intrcount;
308 	int i, j, n;
309 	unsigned int *irq, virq;
310 	struct device_node *ic;
311 
312 	ints = (unsigned int *) get_property(np, "interrupts", &intlen);
313 	if (ints == NULL)
314 		return 0;
315 	intrcells = prom_n_intr_cells(np);
316 	intlen /= intrcells * sizeof(unsigned int);
317 
318 	np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
319 	if (!np->intrs)
320 		return -ENOMEM;
321 
322 	if (measure_only)
323 		return 0;
324 
325 	intrcount = 0;
326 	for (i = 0; i < intlen; ++i, ints += intrcells) {
327 		n = map_interrupt(&irq, &ic, np, ints, intrcells);
328 		if (n <= 0)
329 			continue;
330 
331 		/* don't map IRQ numbers under a cascaded 8259 controller */
332 		if (ic && device_is_compatible(ic, "chrp,iic")) {
333 			np->intrs[intrcount].line = irq[0];
334 		} else {
335 #ifdef CONFIG_PPC64
336 			virq = virt_irq_create_mapping(irq[0]);
337 			if (virq == NO_IRQ) {
338 				printk(KERN_CRIT "Could not allocate interrupt"
339 				       " number for %s\n", np->full_name);
340 				continue;
341 			}
342 			virq = irq_offset_up(virq);
343 #else
344 			virq = irq[0];
345 #endif
346 			np->intrs[intrcount].line = virq;
347 		}
348 
349 #ifdef CONFIG_PPC64
350 		/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
351 		if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) {
352 			char *name = get_property(ic->parent, "name", NULL);
353 			if (name && !strcmp(name, "u3"))
354 				np->intrs[intrcount].line += 128;
355 			else if (!(name && !strcmp(name, "mac-io")))
356 				/* ignore other cascaded controllers, such as
357 				   the k2-sata-root */
358 				break;
359 		}
360 #endif
361 		np->intrs[intrcount].sense = 1;
362 		if (n > 1)
363 			np->intrs[intrcount].sense = irq[1];
364 		if (n > 2) {
365 			printk("hmmm, got %d intr cells for %s:", n,
366 			       np->full_name);
367 			for (j = 0; j < n; ++j)
368 				printk(" %d", irq[j]);
369 			printk("\n");
370 		}
371 		++intrcount;
372 	}
373 	np->n_intrs = intrcount;
374 
375 	return 0;
376 }
377 
378 static int __devinit interpret_pci_props(struct device_node *np,
379 					 unsigned long *mem_start,
380 					 int naddrc, int nsizec,
381 					 int measure_only)
382 {
383 	struct address_range *adr;
384 	struct pci_reg_property *pci_addrs;
385 	int i, l, n_addrs;
386 
387 	pci_addrs = (struct pci_reg_property *)
388 		get_property(np, "assigned-addresses", &l);
389 	if (!pci_addrs)
390 		return 0;
391 
392 	n_addrs = l / sizeof(*pci_addrs);
393 
394 	adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
395 	if (!adr)
396 		return -ENOMEM;
397 
398  	if (measure_only)
399  		return 0;
400 
401  	np->addrs = adr;
402  	np->n_addrs = n_addrs;
403 
404  	for (i = 0; i < n_addrs; i++) {
405  		adr[i].space = pci_addrs[i].addr.a_hi;
406  		adr[i].address = pci_addrs[i].addr.a_lo |
407 			((u64)pci_addrs[i].addr.a_mid << 32);
408  		adr[i].size = pci_addrs[i].size_lo;
409 	}
410 
411 	return 0;
412 }
413 
414 static int __init interpret_dbdma_props(struct device_node *np,
415 					unsigned long *mem_start,
416 					int naddrc, int nsizec,
417 					int measure_only)
418 {
419 	struct reg_property32 *rp;
420 	struct address_range *adr;
421 	unsigned long base_address;
422 	int i, l;
423 	struct device_node *db;
424 
425 	base_address = 0;
426 	if (!measure_only) {
427 		for (db = np->parent; db != NULL; db = db->parent) {
428 			if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
429 				base_address = db->addrs[0].address;
430 				break;
431 			}
432 		}
433 	}
434 
435 	rp = (struct reg_property32 *) get_property(np, "reg", &l);
436 	if (rp != 0 && l >= sizeof(struct reg_property32)) {
437 		i = 0;
438 		adr = (struct address_range *) (*mem_start);
439 		while ((l -= sizeof(struct reg_property32)) >= 0) {
440 			if (!measure_only) {
441 				adr[i].space = 2;
442 				adr[i].address = rp[i].address + base_address;
443 				adr[i].size = rp[i].size;
444 			}
445 			++i;
446 		}
447 		np->addrs = adr;
448 		np->n_addrs = i;
449 		(*mem_start) += i * sizeof(struct address_range);
450 	}
451 
452 	return 0;
453 }
454 
455 static int __init interpret_macio_props(struct device_node *np,
456 					unsigned long *mem_start,
457 					int naddrc, int nsizec,
458 					int measure_only)
459 {
460 	struct reg_property32 *rp;
461 	struct address_range *adr;
462 	unsigned long base_address;
463 	int i, l;
464 	struct device_node *db;
465 
466 	base_address = 0;
467 	if (!measure_only) {
468 		for (db = np->parent; db != NULL; db = db->parent) {
469 			if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
470 				base_address = db->addrs[0].address;
471 				break;
472 			}
473 		}
474 	}
475 
476 	rp = (struct reg_property32 *) get_property(np, "reg", &l);
477 	if (rp != 0 && l >= sizeof(struct reg_property32)) {
478 		i = 0;
479 		adr = (struct address_range *) (*mem_start);
480 		while ((l -= sizeof(struct reg_property32)) >= 0) {
481 			if (!measure_only) {
482 				adr[i].space = 2;
483 				adr[i].address = rp[i].address + base_address;
484 				adr[i].size = rp[i].size;
485 			}
486 			++i;
487 		}
488 		np->addrs = adr;
489 		np->n_addrs = i;
490 		(*mem_start) += i * sizeof(struct address_range);
491 	}
492 
493 	return 0;
494 }
495 
496 static int __init interpret_isa_props(struct device_node *np,
497 				      unsigned long *mem_start,
498 				      int naddrc, int nsizec,
499 				      int measure_only)
500 {
501 	struct isa_reg_property *rp;
502 	struct address_range *adr;
503 	int i, l;
504 
505 	rp = (struct isa_reg_property *) get_property(np, "reg", &l);
506 	if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
507 		i = 0;
508 		adr = (struct address_range *) (*mem_start);
509 		while ((l -= sizeof(struct isa_reg_property)) >= 0) {
510 			if (!measure_only) {
511 				adr[i].space = rp[i].space;
512 				adr[i].address = rp[i].address;
513 				adr[i].size = rp[i].size;
514 			}
515 			++i;
516 		}
517 		np->addrs = adr;
518 		np->n_addrs = i;
519 		(*mem_start) += i * sizeof(struct address_range);
520 	}
521 
522 	return 0;
523 }
524 
525 static int __init interpret_root_props(struct device_node *np,
526 				       unsigned long *mem_start,
527 				       int naddrc, int nsizec,
528 				       int measure_only)
529 {
530 	struct address_range *adr;
531 	int i, l;
532 	unsigned int *rp;
533 	int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
534 
535 	rp = (unsigned int *) get_property(np, "reg", &l);
536 	if (rp != 0 && l >= rpsize) {
537 		i = 0;
538 		adr = (struct address_range *) (*mem_start);
539 		while ((l -= rpsize) >= 0) {
540 			if (!measure_only) {
541 				adr[i].space = 0;
542 				adr[i].address = rp[naddrc - 1];
543 				adr[i].size = rp[naddrc + nsizec - 1];
544 			}
545 			++i;
546 			rp += naddrc + nsizec;
547 		}
548 		np->addrs = adr;
549 		np->n_addrs = i;
550 		(*mem_start) += i * sizeof(struct address_range);
551 	}
552 
553 	return 0;
554 }
555 
556 static int __devinit finish_node(struct device_node *np,
557 				 unsigned long *mem_start,
558 				 interpret_func *ifunc,
559 				 int naddrc, int nsizec,
560 				 int measure_only)
561 {
562 	struct device_node *child;
563 	int *ip, rc = 0;
564 
565 	/* get the device addresses and interrupts */
566 	if (ifunc != NULL)
567 		rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
568 	if (rc)
569 		goto out;
570 
571 	rc = finish_node_interrupts(np, mem_start, measure_only);
572 	if (rc)
573 		goto out;
574 
575 	/* Look for #address-cells and #size-cells properties. */
576 	ip = (int *) get_property(np, "#address-cells", NULL);
577 	if (ip != NULL)
578 		naddrc = *ip;
579 	ip = (int *) get_property(np, "#size-cells", NULL);
580 	if (ip != NULL)
581 		nsizec = *ip;
582 
583 	if (!strcmp(np->name, "device-tree") || np->parent == NULL)
584 		ifunc = interpret_root_props;
585 	else if (np->type == 0)
586 		ifunc = NULL;
587 	else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
588 		ifunc = interpret_pci_props;
589 	else if (!strcmp(np->type, "dbdma"))
590 		ifunc = interpret_dbdma_props;
591 	else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
592 		ifunc = interpret_macio_props;
593 	else if (!strcmp(np->type, "isa"))
594 		ifunc = interpret_isa_props;
595 	else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
596 		ifunc = interpret_root_props;
597 	else if (!((ifunc == interpret_dbdma_props
598 		    || ifunc == interpret_macio_props)
599 		   && (!strcmp(np->type, "escc")
600 		       || !strcmp(np->type, "media-bay"))))
601 		ifunc = NULL;
602 
603 	for (child = np->child; child != NULL; child = child->sibling) {
604 		rc = finish_node(child, mem_start, ifunc,
605 				 naddrc, nsizec, measure_only);
606 		if (rc)
607 			goto out;
608 	}
609 out:
610 	return rc;
611 }
612 
613 static void __init scan_interrupt_controllers(void)
614 {
615 	struct device_node *np;
616 	int n = 0;
617 	char *name, *ic;
618 	int iclen;
619 
620 	for (np = allnodes; np != NULL; np = np->allnext) {
621 		ic = get_property(np, "interrupt-controller", &iclen);
622 		name = get_property(np, "name", NULL);
623 		/* checking iclen makes sure we don't get a false
624 		   match on /chosen.interrupt_controller */
625 		if ((name != NULL
626 		     && strcmp(name, "interrupt-controller") == 0)
627 		    || (ic != NULL && iclen == 0
628 			&& strcmp(name, "AppleKiwi"))) {
629 			if (n == 0)
630 				dflt_interrupt_controller = np;
631 			++n;
632 		}
633 	}
634 	num_interrupt_controllers = n;
635 }
636 
637 /**
638  * finish_device_tree is called once things are running normally
639  * (i.e. with text and data mapped to the address they were linked at).
640  * It traverses the device tree and fills in some of the additional
641  * fields in each node, such as {n_}addrs and {n_}intrs.  The virtual
642  * interrupt mapping is also initialized at this point.
643  */
644 void __init finish_device_tree(void)
645 {
646 	unsigned long start, end, size = 0;
647 
648 	DBG(" -> finish_device_tree\n");
649 
650 #ifdef CONFIG_PPC64
651 	/* Initialize virtual IRQ map */
652 	virt_irq_init();
653 #endif
654 	scan_interrupt_controllers();
655 
656 	/*
657 	 * Finish the device tree (pre-parsing some properties, etc.).
658 	 * We do this in two passes: one with "measure_only" set, which
659 	 * only measures the amount of memory needed; then we allocate
660 	 * that memory and call finish_node() again.  However, we must
661 	 * be careful, as most routines will now fail when prom_alloc()
662 	 * returns 0, so we must make sure our first pass doesn't start
663 	 * at 0.  We pre-initialize size to 16 for that reason and then
664 	 * remove those additional 16 bytes afterwards.
665 	 */
666 	size = 16;
667 	finish_node(allnodes, &size, NULL, 0, 0, 1);
668 	size -= 16;
669 	end = start = (unsigned long) __va(lmb_alloc(size, 128));
670 	finish_node(allnodes, &end, NULL, 0, 0, 0);
671 	BUG_ON(end != start + size);
672 
673 	DBG(" <- finish_device_tree\n");
674 }
675 
676 static inline char *find_flat_dt_string(u32 offset)
677 {
678 	return ((char *)initial_boot_params) +
679 		initial_boot_params->off_dt_strings + offset;
680 }
681 
682 /**
683  * This function is used to scan the flattened device tree.  It is
684  * used to extract memory information at boot, before we can
685  * unflatten the tree.
686  */
687 static int __init scan_flat_dt(int (*it)(unsigned long node,
688 					 const char *uname, int depth,
689 					 void *data),
690 			       void *data)
691 {
692 	unsigned long p = ((unsigned long)initial_boot_params) +
693 		initial_boot_params->off_dt_struct;
694 	int rc = 0;
695 	int depth = -1;
696 
697 	do {
698 		u32 tag = *((u32 *)p);
699 		char *pathp;
700 
701 		p += 4;
702 		if (tag == OF_DT_END_NODE) {
703 			depth --;
704 			continue;
705 		}
706 		if (tag == OF_DT_NOP)
707 			continue;
708 		if (tag == OF_DT_END)
709 			break;
710 		if (tag == OF_DT_PROP) {
711 			u32 sz = *((u32 *)p);
712 			p += 8;
713 			if (initial_boot_params->version < 0x10)
714 				p = _ALIGN(p, sz >= 8 ? 8 : 4);
715 			p += sz;
716 			p = _ALIGN(p, 4);
717 			continue;
718 		}
719 		if (tag != OF_DT_BEGIN_NODE) {
720 			printk(KERN_WARNING "Invalid tag %x scanning flattened"
721 			       " device tree !\n", tag);
722 			return -EINVAL;
723 		}
724 		depth++;
725 		pathp = (char *)p;
726 		p = _ALIGN(p + strlen(pathp) + 1, 4);
727 		if ((*pathp) == '/') {
728 			char *lp, *np;
729 			for (lp = NULL, np = pathp; *np; np++)
730 				if ((*np) == '/')
731 					lp = np+1;
732 			if (lp != NULL)
733 				pathp = lp;
734 		}
735 		rc = it(p, pathp, depth, data);
736 		if (rc != 0)
737 			break;
738 	} while(1);
739 
740 	return rc;
741 }
742 
743 /**
744  * This function can be used within a scan_flat_dt() callback to get
745  * access to properties.
746  */
747 static void* __init get_flat_dt_prop(unsigned long node, const char *name,
748 				     unsigned long *size)
749 {
750 	unsigned long p = node;
751 
752 	do {
753 		u32 tag = *((u32 *)p);
754 		u32 sz, noff;
755 		const char *nstr;
756 
757 		p += 4;
758 		if (tag == OF_DT_NOP)
759 			continue;
760 		if (tag != OF_DT_PROP)
761 			return NULL;
762 
763 		sz = *((u32 *)p);
764 		noff = *((u32 *)(p + 4));
765 		p += 8;
766 		if (initial_boot_params->version < 0x10)
767 			p = _ALIGN(p, sz >= 8 ? 8 : 4);
768 
769 		nstr = find_flat_dt_string(noff);
770 		if (nstr == NULL) {
771 			printk(KERN_WARNING "Can't find property index"
772 			       " name !\n");
773 			return NULL;
774 		}
775 		if (strcmp(name, nstr) == 0) {
776 			if (size)
777 				*size = sz;
778 			return (void *)p;
779 		}
780 		p += sz;
781 		p = _ALIGN(p, 4);
782 	} while(1);
783 }
784 
785 static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
786 				       unsigned long align)
787 {
788 	void *res;
789 
790 	*mem = _ALIGN(*mem, align);
791 	res = (void *)*mem;
792 	*mem += size;
793 
794 	return res;
795 }
796 
797 static unsigned long __init unflatten_dt_node(unsigned long mem,
798 					      unsigned long *p,
799 					      struct device_node *dad,
800 					      struct device_node ***allnextpp,
801 					      unsigned long fpsize)
802 {
803 	struct device_node *np;
804 	struct property *pp, **prev_pp = NULL;
805 	char *pathp;
806 	u32 tag;
807 	unsigned int l, allocl;
808 	int has_name = 0;
809 	int new_format = 0;
810 
811 	tag = *((u32 *)(*p));
812 	if (tag != OF_DT_BEGIN_NODE) {
813 		printk("Weird tag at start of node: %x\n", tag);
814 		return mem;
815 	}
816 	*p += 4;
817 	pathp = (char *)*p;
818 	l = allocl = strlen(pathp) + 1;
819 	*p = _ALIGN(*p + l, 4);
820 
821 	/* Version 0x10 has a more compact unit name here instead of the full
822 	 * path.  We accumulate the full path size using "fpsize" and rebuild
823 	 * it later.  We detect this because the first character of the name
824 	 * is not '/'.
825 	 */
826 	if ((*pathp) != '/') {
827 		new_format = 1;
828 		if (fpsize == 0) {
829 			/* Root node: special case.  fpsize accounts for the path
830 			 * plus the terminating zero.  The root node only has '/',
831 			 * so fpsize should be 2, but we want to avoid first-level
832 			 * nodes having two '/', so we use fpsize 1 here.
833 			 */
834 			fpsize = 1;
835 			allocl = 2;
836 		} else {
837 			/* account for '/' and path size minus terminal 0
838 			 * already in 'l'
839 			 */
840 			fpsize += l;
841 			allocl = fpsize;
842 		}
843 	}
844 
845 
846 	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
847 				__alignof__(struct device_node));
848 	if (allnextpp) {
849 		memset(np, 0, sizeof(*np));
850 		np->full_name = ((char*)np) + sizeof(struct device_node);
851 		if (new_format) {
852 			char *p = np->full_name;
853 			/* rebuild full path for new format */
854 			if (dad && dad->parent) {
855 				strcpy(p, dad->full_name);
856 #ifdef DEBUG
857 				if ((strlen(p) + l + 1) != allocl) {
858 					DBG("%s: p: %d, l: %d, a: %d\n",
859 					    pathp, strlen(p), l, allocl);
860 				}
861 #endif
862 				p += strlen(p);
863 			}
864 			*(p++) = '/';
865 			memcpy(p, pathp, l);
866 		} else
867 			memcpy(np->full_name, pathp, l);
868 		prev_pp = &np->properties;
869 		**allnextpp = np;
870 		*allnextpp = &np->allnext;
871 		if (dad != NULL) {
872 			np->parent = dad;
873 			/* we temporarily use the next field as `last_child' */
874 			if (dad->next == 0)
875 				dad->child = np;
876 			else
877 				dad->next->sibling = np;
878 			dad->next = np;
879 		}
880 		kref_init(&np->kref);
881 	}
882 	while(1) {
883 		u32 sz, noff;
884 		char *pname;
885 
886 		tag = *((u32 *)(*p));
887 		if (tag == OF_DT_NOP) {
888 			*p += 4;
889 			continue;
890 		}
891 		if (tag != OF_DT_PROP)
892 			break;
893 		*p += 4;
894 		sz = *((u32 *)(*p));
895 		noff = *((u32 *)((*p) + 4));
896 		*p += 8;
897 		if (initial_boot_params->version < 0x10)
898 			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);
899 
900 		pname = find_flat_dt_string(noff);
901 		if (pname == NULL) {
902 			printk("Can't find property name in list !\n");
903 			break;
904 		}
905 		if (strcmp(pname, "name") == 0)
906 			has_name = 1;
907 		l = strlen(pname) + 1;
908 		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
909 					__alignof__(struct property));
910 		if (allnextpp) {
911 			if (strcmp(pname, "linux,phandle") == 0) {
912 				np->node = *((u32 *)*p);
913 				if (np->linux_phandle == 0)
914 					np->linux_phandle = np->node;
915 			}
916 			if (strcmp(pname, "ibm,phandle") == 0)
917 				np->linux_phandle = *((u32 *)*p);
918 			pp->name = pname;
919 			pp->length = sz;
920 			pp->value = (void *)*p;
921 			*prev_pp = pp;
922 			prev_pp = &pp->next;
923 		}
924 		*p = _ALIGN((*p) + sz, 4);
925 	}
926 	/* With version 0x10 we may not have the "name" property; recreate
927 	 * it here from the unit name if it is absent.
928 	 */
929 	if (!has_name) {
930 		char *p = pathp, *ps = pathp, *pa = NULL;
931 		int sz;
932 
933 		while (*p) {
934 			if ((*p) == '@')
935 				pa = p;
936 			if ((*p) == '/')
937 				ps = p + 1;
938 			p++;
939 		}
940 		if (pa < ps)
941 			pa = p;
942 		sz = (pa - ps) + 1;
943 		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
944 					__alignof__(struct property));
945 		if (allnextpp) {
946 			pp->name = "name";
947 			pp->length = sz;
948 			pp->value = (unsigned char *)(pp + 1);
949 			*prev_pp = pp;
950 			prev_pp = &pp->next;
951 			memcpy(pp->value, ps, sz - 1);
952 			((char *)pp->value)[sz - 1] = 0;
953 			DBG("fixed up name for %s -> %s\n", pathp, pp->value);
954 		}
955 	}
956 	if (allnextpp) {
957 		*prev_pp = NULL;
958 		np->name = get_property(np, "name", NULL);
959 		np->type = get_property(np, "device_type", NULL);
960 
961 		if (!np->name)
962 			np->name = "<NULL>";
963 		if (!np->type)
964 			np->type = "<NULL>";
965 	}
966 	while (tag == OF_DT_BEGIN_NODE) {
967 		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
968 		tag = *((u32 *)(*p));
969 	}
970 	if (tag != OF_DT_END_NODE) {
971 		printk("Weird tag at end of node: %x\n", tag);
972 		return mem;
973 	}
974 	*p += 4;
975 	return mem;
976 }
977 
978 
979 /**
980  * Unflattens the device tree passed by the firmware, creating the
981  * tree of struct device_node.  It also fills in the "name" and "type"
982  * pointers of the nodes so the normal device-tree walking functions
983  * can be used (this used to be done by finish_device_tree).
984  */
985 void __init unflatten_device_tree(void)
986 {
987 	unsigned long start, mem, size;
988 	struct device_node **allnextp = &allnodes;
989 	char *p = NULL;
990 	int l = 0;
991 
992 	DBG(" -> unflatten_device_tree()\n");
993 
994 	/* First pass, scan for size */
995 	start = ((unsigned long)initial_boot_params) +
996 		initial_boot_params->off_dt_struct;
997 	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
998 	size = (size | 3) + 1;
999 
1000 	DBG("  size is %lx, allocating...\n", size);
1001 
1002 	/* Allocate memory for the expanded device tree */
1003 	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
1004 	if (!mem) {
1005 		DBG("Couldn't allocate memory with lmb_alloc()!\n");
1006 		panic("Couldn't allocate memory with lmb_alloc()!\n");
1007 	}
1008 	mem = (unsigned long) __va(mem);
1009 
1010 	((u32 *)mem)[size / 4] = 0xdeadbeef;
1011 
1012 	DBG("  unflattening %lx...\n", mem);
1013 
1014 	/* Second pass, do actual unflattening */
1015 	start = ((unsigned long)initial_boot_params) +
1016 		initial_boot_params->off_dt_struct;
1017 	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
1018 	if (*((u32 *)start) != OF_DT_END)
1019 		printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
1020 	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
1021 		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
1022 		       ((u32 *)mem)[size / 4] );
1023 	*allnextp = NULL;
1024 
1025 	/* Get pointer to OF "/chosen" node for use everywhere */
1026 	of_chosen = of_find_node_by_path("/chosen");
1027 
1028 	/* Retrieve the command line */
1029 	if (of_chosen != NULL) {
1030 		p = (char *)get_property(of_chosen, "bootargs", &l);
1031 		if (p != NULL && l > 0)
1032 			strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
1033 	}
1034 #ifdef CONFIG_CMDLINE
1035 	if (l == 0 || (l == 1 && (*p) == 0))
1036 		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1037 #endif /* CONFIG_CMDLINE */
1038 
1039 	DBG("Command line is: %s\n", cmd_line);
1040 
1041 	DBG(" <- unflatten_device_tree()\n");
1042 }
1043 
1044 
1045 static int __init early_init_dt_scan_cpus(unsigned long node,
1046 					  const char *uname, int depth, void *data)
1047 {
1048 	char *type = get_flat_dt_prop(node, "device_type", NULL);
1049 	u32 *prop;
1050 	unsigned long size = 0;
1051 
1052 	/* We are scanning "cpu" nodes only */
1053 	if (type == NULL || strcmp(type, "cpu") != 0)
1054 		return 0;
1055 
1056 #ifdef CONFIG_PPC_PSERIES
1057 	/* On LPAR, look for the first ibm,pft-size property for the hash table size
1058 	 */
1059 	if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) {
1060 		u32 *pft_size;
1061 		pft_size = get_flat_dt_prop(node, "ibm,pft-size", NULL);
1062 		if (pft_size != NULL) {
1063 			/* pft_size[0] is the NUMA CEC cookie */
1064 			ppc64_pft_size = pft_size[1];
1065 		}
1066 	}
1067 #endif
1068 
1069 #ifdef CONFIG_PPC64
1070 	if (initial_boot_params && initial_boot_params->version >= 2) {
1071 		/* Version 2 of the kexec param format adds the physical
1072 		 * CPU id of the booted processor.
1073 		 */
1074 		boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
1075 		boot_cpuid = 0;
1076 	} else {
1077 		/* Check if it's the boot CPU; if so, set its hw index in the paca now */
1078 		if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) {
1079 			prop = get_flat_dt_prop(node, "reg", NULL);
1080 			set_hard_smp_processor_id(0, prop == NULL ? 0 : *prop);
1081 			boot_cpuid_phys = get_hard_smp_processor_id(0);
1082 		}
1083 	}
1084 #endif
1085 
1086 #ifdef CONFIG_ALTIVEC
1087 	/* Check if we have a VMX unit and, if so, update the CPU features */
1088 	prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", &size);
1089 	if (prop && (*prop) > 0) {
1090 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1091 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1092 	}
1093 
1094 	/* Same goes for Apple's "altivec" property */
1095 	prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL);
1096 	if (prop) {
1097 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1098 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1099 	}
1100 #endif /* CONFIG_ALTIVEC */
1101 
1102 #ifdef CONFIG_PPC_PSERIES
1103 	/*
1104 	 * Check for an SMT-capable CPU and set the CPU feature.  We do
1105 	 * this by looking at the size of the ibm,ppc-interrupt-server#s
1106 	 * property.
1107 	 */
1108 	prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
1109 				       &size);
1110 	cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
1111 	if (prop && ((size / sizeof(u32)) > 1))
1112 		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
1113 #endif
1114 
1115 	return 0;
1116 }
1117 
1118 static int __init early_init_dt_scan_chosen(unsigned long node,
1119 					    const char *uname, int depth, void *data)
1120 {
1121 	u32 *prop;
1122 	unsigned long *lprop;
1123 
1124 	DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1125 
1126 	if (depth != 1 || strcmp(uname, "chosen") != 0)
1127 		return 0;
1128 
1129 	/* get platform type */
1130 	prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL);
1131 	if (prop == NULL)
1132 		return 0;
1133 #ifdef CONFIG_PPC64
1134 	systemcfg->platform = *prop;
1135 #else
1136 	_machine = *prop;
1137 #endif
1138 
1139 #ifdef CONFIG_PPC64
1140 	/* check if iommu is forced on or off */
1141 	if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
1142 		iommu_is_off = 1;
1143 	if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
1144 		iommu_force_on = 1;
1145 #endif
1146 
1147  	lprop = get_flat_dt_prop(node, "linux,memory-limit", NULL);
1148  	if (lprop)
1149  		memory_limit = *lprop;
1150 
1151 #ifdef CONFIG_PPC64
1152  	lprop = get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
1153  	if (lprop)
1154  		tce_alloc_start = *lprop;
1155  	lprop = get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
1156  	if (lprop)
1157  		tce_alloc_end = *lprop;
1158 #endif
1159 
1160 #ifdef CONFIG_PPC_RTAS
1161 	/* To help early debugging via the front panel, we retrieve a minimal
1162 	 * set of RTAS information now, if available.
1163 	 */
1164 	{
1165 		u64 *basep, *entryp;
1166 
1167 		basep = get_flat_dt_prop(node, "linux,rtas-base", NULL);
1168 		entryp = get_flat_dt_prop(node, "linux,rtas-entry", NULL);
1169 		prop = get_flat_dt_prop(node, "linux,rtas-size", NULL);
1170 		if (basep && entryp && prop) {
1171 			rtas.base = *basep;
1172 			rtas.entry = *entryp;
1173 			rtas.size = *prop;
1174 		}
1175 	}
1176 #endif /* CONFIG_PPC_RTAS */
1177 
1178 	/* break now */
1179 	return 1;
1180 }
1181 
1182 static int __init early_init_dt_scan_root(unsigned long node,
1183 					  const char *uname, int depth, void *data)
1184 {
1185 	u32 *prop;
1186 
1187 	if (depth != 0)
1188 		return 0;
1189 
1190 	prop = get_flat_dt_prop(node, "#size-cells", NULL);
1191 	dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1192 	DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1193 
1194 	prop = get_flat_dt_prop(node, "#address-cells", NULL);
1195 	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1196 	DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1197 
1198 	/* break now */
1199 	return 1;
1200 }
1201 
1202 static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1203 {
1204 	cell_t *p = *cellp;
1205 	unsigned long r;
1206 
1207 	/* Ignore more than 2 cells */
1208 	while (s > sizeof(unsigned long) / 4) {
1209 		p++;
1210 		s--;
1211 	}
1212 	r = *p++;
1213 #ifdef CONFIG_PPC64
1214 	if (s > 1) {
1215 		r <<= 32;
1216 		r |= *(p++);
1217 		s--;
1218 	}
1219 #endif
1220 
1221 	*cellp = p;
1222 	return r;
1223 }
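/*
 * Worked example (illustration only): with dt_root_addr_cells == 2 on a
 * 64-bit kernel, the cells { 0x00000001, 0x20000000 } combine into the
 * single value 0x120000000.  On a 32-bit kernel the extra high cells are
 * skipped and only the last cell, 0x20000000, is returned.
 */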
1224 
1225 
1226 static int __init early_init_dt_scan_memory(unsigned long node,
1227 					    const char *uname, int depth, void *data)
1228 {
1229 	char *type = get_flat_dt_prop(node, "device_type", NULL);
1230 	cell_t *reg, *endp;
1231 	unsigned long l;
1232 
1233 	/* We are scanning "memory" nodes only */
1234 	if (type == NULL || strcmp(type, "memory") != 0)
1235 		return 0;
1236 
1237 	reg = (cell_t *)get_flat_dt_prop(node, "reg", &l);
1238 	if (reg == NULL)
1239 		return 0;
1240 
1241 	endp = reg + (l / sizeof(cell_t));
1242 
1243 	DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n",
1244 	    uname, l, reg[0], reg[1], reg[2], reg[3]);
1245 
1246 	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1247 		unsigned long base, size;
1248 
1249 		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1250 		size = dt_mem_next_cell(dt_root_size_cells, &reg);
1251 
1252 		if (size == 0)
1253 			continue;
1254 		DBG(" - %lx ,  %lx\n", base, size);
1255 #ifdef CONFIG_PPC64
1256 		if (iommu_is_off) {
1257 			if (base >= 0x80000000ul)
1258 				continue;
1259 			if ((base + size) > 0x80000000ul)
1260 				size = 0x80000000ul - base;
1261 		}
1262 #endif
1263 		lmb_add(base, size);
1264 	}
1265 	return 0;
1266 }
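/*
 * Illustration (hypothetical values, not from the original source): with
 * dt_root_addr_cells == 1 and dt_root_size_cells == 1, a memory node with
 *
 *	reg = < 0x00000000 0x20000000  0x80000000 0x10000000 >
 *
 * describes 512MB at 0 and 256MB at 2GB; the loop above calls lmb_add()
 * once per region (subject to the iommu_is_off clamp on 64-bit kernels).
 */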
1267 
1268 static void __init early_reserve_mem(void)
1269 {
1270 	unsigned long base, size;
1271 	unsigned long *reserve_map;
1272 
1273 	reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
1274 					initial_boot_params->off_mem_rsvmap);
1275 	while (1) {
1276 		base = *(reserve_map++);
1277 		size = *(reserve_map++);
1278 		if (size == 0)
1279 			break;
1280 		DBG("reserving: %lx -> %lx\n", base, size);
1281 		lmb_reserve(base, size);
1282 	}
1283 
1284 #if 0
1285 	DBG("memory reserved, lmbs :\n");
1286       	lmb_dump_all();
1287 #endif
1288 }
1289 
1290 void __init early_init_devtree(void *params)
1291 {
1292 	DBG(" -> early_init_devtree()\n");
1293 
1294 	/* Setup flat device-tree pointer */
1295 	initial_boot_params = params;
1296 
1297 	/* Retrieve various bits of information from the /chosen node of the
1298 	 * device-tree, including the platform type, initrd location and
1299 	 * size, TCE reserve, and more ...
1300 	 */
1301 	scan_flat_dt(early_init_dt_scan_chosen, NULL);
1302 
1303 	/* Scan memory nodes and rebuild LMBs */
1304 	lmb_init();
1305 	scan_flat_dt(early_init_dt_scan_root, NULL);
1306 	scan_flat_dt(early_init_dt_scan_memory, NULL);
1307 	lmb_enforce_memory_limit(memory_limit);
1308 	lmb_analyze();
1309 #ifdef CONFIG_PPC64
1310 	systemcfg->physicalMemorySize = lmb_phys_mem_size();
1311 #endif
1312 	lmb_reserve(0, __pa(klimit));
1313 
1314 	DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
1315 
1316 	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
1317 	early_reserve_mem();
1318 
1319 	DBG("Scanning CPUs ...\n");
1320 
1321 	/* Retrieve the hash table size from the flattened tree, plus other
1322 	 * CPU-related information (AltiVec support, boot CPU ID, ...)
1323 	 */
1324 	scan_flat_dt(early_init_dt_scan_cpus, NULL);
1325 
1326 #ifdef CONFIG_PPC_PSERIES
1327 	/* If hash size wasn't obtained above, we calculate it now based on
1328 	 * the total RAM size
1329 	 */
1330 	if (ppc64_pft_size == 0) {
1331 		unsigned long rnd_mem_size, pteg_count;
1332 
1333 		/* round mem_size up to next power of 2 */
1334 		rnd_mem_size = 1UL << __ilog2(systemcfg->physicalMemorySize);
1335 		if (rnd_mem_size < systemcfg->physicalMemorySize)
1336 			rnd_mem_size <<= 1;
1337 
1338 		/* # pages / 2 */
1339 		pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);
1340 
1341 		ppc64_pft_size = __ilog2(pteg_count << 7);
1342 	}
1343 
1344 	DBG("Hash pftSize: %x\n", (int)ppc64_pft_size);
1345 #endif
1346 	DBG(" <- early_init_devtree()\n");
1347 }
1348 
1349 #undef printk
1350 
1351 int
1352 prom_n_addr_cells(struct device_node* np)
1353 {
1354 	int* ip;
1355 	do {
1356 		if (np->parent)
1357 			np = np->parent;
1358 		ip = (int *) get_property(np, "#address-cells", NULL);
1359 		if (ip != NULL)
1360 			return *ip;
1361 	} while (np->parent);
1362 	/* No #address-cells property for the root node, default to 1 */
1363 	return 1;
1364 }
1365 
1366 int
1367 prom_n_size_cells(struct device_node* np)
1368 {
1369 	int* ip;
1370 	do {
1371 		if (np->parent)
1372 			np = np->parent;
1373 		ip = (int *) get_property(np, "#size-cells", NULL);
1374 		if (ip != NULL)
1375 			return *ip;
1376 	} while (np->parent);
1377 	/* No #size-cells property for the root node, default to 1 */
1378 	return 1;
1379 }
1380 
1381 /**
1382  * Work out the sense (active-low level / active-high edge)
1383  * of each interrupt from the device tree.
1384  */
1385 void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1386 {
1387 	struct device_node *np;
1388 	int i, j;
1389 
1390 	/* default to level-triggered */
1391 	memset(senses, 1, max - off);
1392 
1393 	for (np = allnodes; np != 0; np = np->allnext) {
1394 		for (j = 0; j < np->n_intrs; j++) {
1395 			i = np->intrs[j].line;
1396 			if (i >= off && i < max)
1397 				senses[i-off] = np->intrs[j].sense ?
1398 					IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE :
1399 					IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE;
1400 		}
1401 	}
1402 }
1403 
1404 /**
1405  * Construct and return a list of the device_nodes with a given name.
1406  */
1407 struct device_node *find_devices(const char *name)
1408 {
1409 	struct device_node *head, **prevp, *np;
1410 
1411 	prevp = &head;
1412 	for (np = allnodes; np != 0; np = np->allnext) {
1413 		if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1414 			*prevp = np;
1415 			prevp = &np->next;
1416 		}
1417 	}
1418 	*prevp = NULL;
1419 	return head;
1420 }
1421 EXPORT_SYMBOL(find_devices);
1422 
1423 /**
1424  * Construct and return a list of the device_nodes with a given type.
1425  */
1426 struct device_node *find_type_devices(const char *type)
1427 {
1428 	struct device_node *head, **prevp, *np;
1429 
1430 	prevp = &head;
1431 	for (np = allnodes; np != 0; np = np->allnext) {
1432 		if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1433 			*prevp = np;
1434 			prevp = &np->next;
1435 		}
1436 	}
1437 	*prevp = NULL;
1438 	return head;
1439 }
1440 EXPORT_SYMBOL(find_type_devices);
1441 
1442 /**
1443  * Returns all nodes linked together
1444  */
1445 struct device_node *find_all_nodes(void)
1446 {
1447 	struct device_node *head, **prevp, *np;
1448 
1449 	prevp = &head;
1450 	for (np = allnodes; np != 0; np = np->allnext) {
1451 		*prevp = np;
1452 		prevp = &np->next;
1453 	}
1454 	*prevp = NULL;
1455 	return head;
1456 }
1457 EXPORT_SYMBOL(find_all_nodes);
1458 
1459 /** Checks if the given "compat" string matches one of the strings in
1460  * the device's "compatible" property
1461  */
1462 int device_is_compatible(struct device_node *device, const char *compat)
1463 {
1464 	const char* cp;
1465 	int cplen, l;
1466 
1467 	cp = (char *) get_property(device, "compatible", &cplen);
1468 	if (cp == NULL)
1469 		return 0;
1470 	while (cplen > 0) {
1471 		if (strncasecmp(cp, compat, strlen(compat)) == 0)
1472 			return 1;
1473 		l = strlen(cp) + 1;
1474 		cp += l;
1475 		cplen -= l;
1476 	}
1477 
1478 	return 0;
1479 }
1480 EXPORT_SYMBOL(device_is_compatible);
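/*
 * Illustration (hypothetical property value): "compatible" is a list of
 * NUL-separated strings, e.g. "pci106b,18\0pci-bridge\0", so
 *
 *	if (device_is_compatible(np, "pci-bridge"))
 *		...
 *
 * matches the second entry.  Note that the strncasecmp() above compares
 * only strlen(compat) characters, so a prefix such as "pci" also matches.
 */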
1481 
1482 
1483 /**
1484  * Indicates whether the root node has a given value in its
1485  * compatible property.
1486  */
1487 int machine_is_compatible(const char *compat)
1488 {
1489 	struct device_node *root;
1490 	int rc = 0;
1491 
1492 	root = of_find_node_by_path("/");
1493 	if (root) {
1494 		rc = device_is_compatible(root, compat);
1495 		of_node_put(root);
1496 	}
1497 	return rc;
1498 }
1499 EXPORT_SYMBOL(machine_is_compatible);
1500 
1501 /**
1502  * Construct and return a list of the device_nodes with a given type
1503  * and compatible property.
1504  */
1505 struct device_node *find_compatible_devices(const char *type,
1506 					    const char *compat)
1507 {
1508 	struct device_node *head, **prevp, *np;
1509 
1510 	prevp = &head;
1511 	for (np = allnodes; np != 0; np = np->allnext) {
1512 		if (type != NULL
1513 		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1514 			continue;
1515 		if (device_is_compatible(np, compat)) {
1516 			*prevp = np;
1517 			prevp = &np->next;
1518 		}
1519 	}
1520 	*prevp = NULL;
1521 	return head;
1522 }
1523 EXPORT_SYMBOL(find_compatible_devices);
1524 
1525 /**
1526  * Find the device_node with a given full_name.
1527  */
1528 struct device_node *find_path_device(const char *path)
1529 {
1530 	struct device_node *np;
1531 
1532 	for (np = allnodes; np != 0; np = np->allnext)
1533 		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1534 			return np;
1535 	return NULL;
1536 }
1537 EXPORT_SYMBOL(find_path_device);
1538 
1539 /*******
1540  *
1541  * New implementation of the OF "find" APIs: they return a refcounted
1542  * object; call of_node_put() when done.  The device tree and list
1543  * are protected by a rw_lock.
1544  *
1545  * Note that property management will need some locking as well,
1546  * this isn't dealt with yet.
1547  *
1548  *******/
1549 
1550 /**
1551  *	of_find_node_by_name - Find a node by its "name" property
1552  *	@from:	The node to start searching from, or NULL; the node
1553  *		you pass will not be searched, only the next one
1554  *		will; typically, you pass what the previous call
1555  *		returned. of_node_put() will be called on it
1556  *	@name:	The name string to match against
1557  *
1558  *	Returns a node pointer with refcount incremented, use
1559  *	of_node_put() on it when done.
1560  */
1561 struct device_node *of_find_node_by_name(struct device_node *from,
1562 	const char *name)
1563 {
1564 	struct device_node *np;
1565 
1566 	read_lock(&devtree_lock);
1567 	np = from ? from->allnext : allnodes;
1568 	for (; np != 0; np = np->allnext)
1569 		if (np->name != 0 && strcasecmp(np->name, name) == 0
1570 		    && of_node_get(np))
1571 			break;
1572 	if (from)
1573 		of_node_put(from);
1574 	read_unlock(&devtree_lock);
1575 	return np;
1576 }
1577 EXPORT_SYMBOL(of_find_node_by_name);
1578 
1579 /**
1580  *	of_find_node_by_type - Find a node by its "device_type" property
1581  *	@from:	The node to start searching from, or NULL; the node
1582  *		you pass will not be searched, only the next one
1583  *		will; typically, you pass what the previous call
1584  *		returned. of_node_put() will be called on it
1585  *	@type:	The type string to match against
1586  *
1587  *	Returns a node pointer with refcount incremented, use
1588  *	of_node_put() on it when done.
1589  */
1590 struct device_node *of_find_node_by_type(struct device_node *from,
1591 	const char *type)
1592 {
1593 	struct device_node *np;
1594 
1595 	read_lock(&devtree_lock);
1596 	np = from ? from->allnext : allnodes;
1597 	for (; np != 0; np = np->allnext)
1598 		if (np->type != 0 && strcasecmp(np->type, type) == 0
1599 		    && of_node_get(np))
1600 			break;
1601 	if (from)
1602 		of_node_put(from);
1603 	read_unlock(&devtree_lock);
1604 	return np;
1605 }
1606 EXPORT_SYMBOL(of_find_node_by_type);
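/*
 * Illustrative caller sketch (not part of the original file): the
 * refcounted find functions drop the reference on the node passed in,
 * so the usual iteration pattern does not leak references:
 *
 *	struct device_node *np = NULL;
 *
 *	while ((np = of_find_node_by_type(np, "pci")) != NULL) {
 *		... use np here; the next call consumes this reference ...
 *	}
 *
 * If you break out of such a loop early, call of_node_put() on the node
 * you are still holding.
 */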
1607 
1608 /**
1609  *	of_find_compatible_node - Find a node based on type and one of the
1610  *                                tokens in its "compatible" property
1611  *	@from:		The node to start searching from, or NULL; the node
1612  *			you pass will not be searched, only the next one
1613  *			will; typically, you pass what the previous call
1614  *			returned. of_node_put() will be called on it
1615  *	@type:		The type string to match "device_type" or NULL to ignore
1616  *	@compatible:	The string to match to one of the tokens in the device
1617  *			"compatible" list.
1618  *
1619  *	Returns a node pointer with refcount incremented, use
1620  *	of_node_put() on it when done.
1621  */
1622 struct device_node *of_find_compatible_node(struct device_node *from,
1623 	const char *type, const char *compatible)
1624 {
1625 	struct device_node *np;
1626 
1627 	read_lock(&devtree_lock);
1628 	np = from ? from->allnext : allnodes;
1629 	for (; np != 0; np = np->allnext) {
1630 		if (type != NULL
1631 		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1632 			continue;
1633 		if (device_is_compatible(np, compatible) && of_node_get(np))
1634 			break;
1635 	}
1636 	if (from)
1637 		of_node_put(from);
1638 	read_unlock(&devtree_lock);
1639 	return np;
1640 }
1641 EXPORT_SYMBOL(of_find_compatible_node);
1642 
1643 /**
1644  *	of_find_node_by_path - Find a node matching a full OF path
1645  *	@path:	The full path to match
1646  *
1647  *	Returns a node pointer with refcount incremented, use
1648  *	of_node_put() on it when done.
1649  */
1650 struct device_node *of_find_node_by_path(const char *path)
1651 {
1652 	struct device_node *np = allnodes;
1653 
1654 	read_lock(&devtree_lock);
1655 	for (; np != 0; np = np->allnext) {
1656 		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1657 		    && of_node_get(np))
1658 			break;
1659 	}
1660 	read_unlock(&devtree_lock);
1661 	return np;
1662 }
1663 EXPORT_SYMBOL(of_find_node_by_path);
1664 
1665 /**
1666  *	of_find_node_by_phandle - Find a node given a phandle
1667  *	@handle:	phandle of the node to find
1668  *
1669  *	Returns a node pointer with refcount incremented, use
1670  *	of_node_put() on it when done.
1671  */
1672 struct device_node *of_find_node_by_phandle(phandle handle)
1673 {
1674 	struct device_node *np;
1675 
1676 	read_lock(&devtree_lock);
1677 	for (np = allnodes; np != 0; np = np->allnext)
1678 		if (np->linux_phandle == handle)
1679 			break;
1680 	if (np)
1681 		of_node_get(np);
1682 	read_unlock(&devtree_lock);
1683 	return np;
1684 }
1685 EXPORT_SYMBOL(of_find_node_by_phandle);
1686 
1687 /**
1688  *	of_find_all_nodes - Get next node in global list
1689  *	@prev:	Previous node or NULL to start iteration
1690  *		of_node_put() will be called on it
1691  *
1692  *	Returns a node pointer with refcount incremented, use
1693  *	of_node_put() on it when done.
1694  */
1695 struct device_node *of_find_all_nodes(struct device_node *prev)
1696 {
1697 	struct device_node *np;
1698 
1699 	read_lock(&devtree_lock);
1700 	np = prev ? prev->allnext : allnodes;
1701 	for (; np != 0; np = np->allnext)
1702 		if (of_node_get(np))
1703 			break;
1704 	if (prev)
1705 		of_node_put(prev);
1706 	read_unlock(&devtree_lock);
1707 	return np;
1708 }
1709 EXPORT_SYMBOL(of_find_all_nodes);
1710 
1711 /**
1712  *	of_get_parent - Get a node's parent if any
1713  *	@node:	Node to get the parent of
1714  *
1715  *	Returns a node pointer with refcount incremented, use
1716  *	of_node_put() on it when done.
1717  */
1718 struct device_node *of_get_parent(const struct device_node *node)
1719 {
1720 	struct device_node *np;
1721 
1722 	if (!node)
1723 		return NULL;
1724 
1725 	read_lock(&devtree_lock);
1726 	np = of_node_get(node->parent);
1727 	read_unlock(&devtree_lock);
1728 	return np;
1729 }
1730 EXPORT_SYMBOL(of_get_parent);
1731 
1732 /**
1733  *	of_get_next_child - Iterate over a node's children
1734  *	@node:	parent node
1735  *	@prev:	previous child of the parent node, or NULL to get first
1736  *
1737  *	Returns a node pointer with refcount incremented, use
1738  *	of_node_put() on it when done.
1739  */
1740 struct device_node *of_get_next_child(const struct device_node *node,
1741 	struct device_node *prev)
1742 {
1743 	struct device_node *next;
1744 
1745 	read_lock(&devtree_lock);
1746 	next = prev ? prev->sibling : node->child;
1747 	for (; next != 0; next = next->sibling)
1748 		if (of_node_get(next))
1749 			break;
1750 	if (prev)
1751 		of_node_put(prev);
1752 	read_unlock(&devtree_lock);
1753 	return next;
1754 }
1755 EXPORT_SYMBOL(of_get_next_child);
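/*
 * Illustrative sketch (not from the original source): iterating the
 * children of a node with the refcounted API.  Passing the previous
 * child back in releases its reference:
 *
 *	struct device_node *child = NULL;
 *
 *	while ((child = of_get_next_child(parent, child)) != NULL) {
 *		... use child ...
 *	}
 */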
1756 
1757 /**
1758  *	of_node_get - Increment refcount of a node
1759  *	@node:	Node to inc refcount, NULL is supported to
1760  *		simplify writing of callers
1761  *
1762  *	Returns node.
1763  */
1764 struct device_node *of_node_get(struct device_node *node)
1765 {
1766 	if (node)
1767 		kref_get(&node->kref);
1768 	return node;
1769 }
1770 EXPORT_SYMBOL(of_node_get);
1771 
1772 static inline struct device_node * kref_to_device_node(struct kref *kref)
1773 {
1774 	return container_of(kref, struct device_node, kref);
1775 }
1776 
1777 /**
1778  *	of_node_release - release a dynamically allocated node
1779  *	@kref:  kref element of the node to be released
1780  *
1781  *	In of_node_put() this function is passed to kref_put()
1782  *	as the destructor.
1783  */
1784 static void of_node_release(struct kref *kref)
1785 {
1786 	struct device_node *node = kref_to_device_node(kref);
1787 	struct property *prop = node->properties;
1788 
1789 	if (!OF_IS_DYNAMIC(node))
1790 		return;
1791 	while (prop) {
1792 		struct property *next = prop->next;
1793 		kfree(prop->name);
1794 		kfree(prop->value);
1795 		kfree(prop);
1796 		prop = next;
1797 	}
1798 	kfree(node->intrs);
1799 	kfree(node->addrs);
1800 	kfree(node->full_name);
1801 	kfree(node->data);
1802 	kfree(node);
1803 }
1804 
1805 /**
1806  *	of_node_put - Decrement refcount of a node
1807  *	@node:	Node to dec refcount, NULL is supported to
1808  *		simplify writing of callers
1809  *
1810  */
1811 void of_node_put(struct device_node *node)
1812 {
1813 	if (node)
1814 		kref_put(&node->kref, of_node_release);
1815 }
1816 EXPORT_SYMBOL(of_node_put);
1817 
1818 /*
1819  * Plug a device node into the tree and global list.
1820  */
1821 void of_attach_node(struct device_node *np)
1822 {
1823 	write_lock(&devtree_lock);
1824 	np->sibling = np->parent->child;
1825 	np->allnext = allnodes;
1826 	np->parent->child = np;
1827 	allnodes = np;
1828 	write_unlock(&devtree_lock);
1829 }
1830 
1831 /*
1832  * "Unplug" a node from the device tree.  The caller must hold
1833  * a reference to the node.  The memory associated with the node
1834  * is not freed until its refcount goes to zero.
1835  */
1836 void of_detach_node(const struct device_node *np)
1837 {
1838 	struct device_node *parent;
1839 
1840 	write_lock(&devtree_lock);
1841 
1842 	parent = np->parent;
1843 
1844 	if (allnodes == np)
1845 		allnodes = np->allnext;
1846 	else {
1847 		struct device_node *prev;
1848 		for (prev = allnodes;
1849 		     prev->allnext != np;
1850 		     prev = prev->allnext)
1851 			;
1852 		prev->allnext = np->allnext;
1853 	}
1854 
1855 	if (parent->child == np)
1856 		parent->child = np->sibling;
1857 	else {
1858 		struct device_node *prevsib;
1859 		for (prevsib = np->parent->child;
1860 		     prevsib->sibling != np;
1861 		     prevsib = prevsib->sibling)
1862 			;
1863 		prevsib->sibling = np->sibling;
1864 	}
1865 
1866 	write_unlock(&devtree_lock);
1867 }
1868 
1869 #ifdef CONFIG_PPC_PSERIES
1870 /*
1871  * Fix up the uninitialized fields in a new device node:
1872  * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1873  *
1874  * A lot of boot-time code is duplicated here, because functions such
1875  * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1876  * slab allocator.
1877  *
1878  * This should probably be split up into smaller chunks.
1879  */
1880 
1881 static int of_finish_dynamic_node(struct device_node *node,
1882 				  unsigned long *unused1, int unused2,
1883 				  int unused3, int unused4)
1884 {
1885 	struct device_node *parent = of_get_parent(node);
1886 	int err = 0;
1887 	phandle *ibm_phandle;
1888 
1889 	node->name = get_property(node, "name", NULL);
1890 	node->type = get_property(node, "device_type", NULL);
1891 
1892 	if (!parent) {
1893 		err = -ENODEV;
1894 		goto out;
1895 	}
1896 
1897 	/* We don't support that function on PowerMac, at least
1898 	 * not yet
1899 	 */
1900 	if (systemcfg->platform == PLATFORM_POWERMAC)
1901 		return -ENODEV;
1902 
1903 	/* fix up new node's linux_phandle field */
1904 	if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL)))
1905 		node->linux_phandle = *ibm_phandle;
1906 
1907 out:
1908 	of_node_put(parent);
1909 	return err;
1910 }
1911 
1912 static int prom_reconfig_notifier(struct notifier_block *nb,
1913 				  unsigned long action, void *node)
1914 {
1915 	int err;
1916 
1917 	switch (action) {
1918 	case PSERIES_RECONFIG_ADD:
1919 		err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
1920 		if (err < 0) {
1921 			printk(KERN_ERR "finish_node returned %d\n", err);
1922 			err = NOTIFY_BAD;
1923 		}
1924 		break;
1925 	default:
1926 		err = NOTIFY_DONE;
1927 		break;
1928 	}
1929 	return err;
1930 }
1931 
1932 static struct notifier_block prom_reconfig_nb = {
1933 	.notifier_call = prom_reconfig_notifier,
1934 	.priority = 10, /* This one needs to run first */
1935 };
1936 
1937 static int __init prom_reconfig_setup(void)
1938 {
1939 	return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
1940 }
1941 __initcall(prom_reconfig_setup);
1942 #endif
1943 
1944 /*
1945  * Find a property with a given name for a given node
1946  * and return the value.
1947  */
1948 unsigned char *get_property(struct device_node *np, const char *name,
1949 			    int *lenp)
1950 {
1951 	struct property *pp;
1952 
1953 	for (pp = np->properties; pp != 0; pp = pp->next)
1954 		if (strcmp(pp->name, name) == 0) {
1955 			if (lenp != 0)
1956 				*lenp = pp->length;
1957 			return pp->value;
1958 		}
1959 	return NULL;
1960 }
1961 EXPORT_SYMBOL(get_property);
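/*
 * Illustration (hypothetical node and property): reading a property and
 * its length in bytes.  The returned pointer refers to storage owned by
 * the device tree and must not be freed by the caller:
 *
 *	int len;
 *	unsigned int *ap;
 *
 *	ap = (unsigned int *)get_property(np, "assigned-addresses", &len);
 *	if (ap != NULL)
 *		n_addrs = len / sizeof(struct pci_reg_property);
 */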
1962 
1963 /*
1964  * Add a property to a node
1965  */
1966 void prom_add_property(struct device_node* np, struct property* prop)
1967 {
1968 	struct property **next = &np->properties;
1969 
1970 	prop->next = NULL;
1971 	while (*next)
1972 		next = &(*next)->next;
1973 	*next = prop;
1974 }
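/*
 * Illustrative sketch (hypothetical property name, not from the original
 * source): adding a property to a node at run time.  The structure must
 * stay allocated for as long as the node holds it:
 *
 *	static u32 my_val = 1;
 *	static struct property my_prop = {
 *		.name	= "linux,example-flag",
 *		.length	= sizeof(my_val),
 *		.value	= (unsigned char *)&my_val,
 *	};
 *
 *	prom_add_property(np, &my_prop);
 */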
1975 
1976 /* I quickly hacked that one, check against spec ! */
1977 static inline unsigned long
1978 bus_space_to_resource_flags(unsigned int bus_space)
1979 {
1980 	u8 space = (bus_space >> 24) & 0xf;
1981 	if (space == 0)
1982 		space = 0x02;
1983 	if (space == 0x02)
1984 		return IORESOURCE_MEM;
1985 	else if (space == 0x01)
1986 		return IORESOURCE_IO;
1987 	else {
1988 		printk(KERN_WARNING "prom.c: bus_space_to_resource_flags(), space: %x\n",
1989 		    	bus_space);
1990 		return 0;
1991 	}
1992 }
1993 
1994 static struct resource *find_parent_pci_resource(struct pci_dev* pdev,
1995 						 struct address_range *range)
1996 {
1997 	unsigned long mask;
1998 	int i;
1999 
2000 	/* Check this one */
2001 	mask = bus_space_to_resource_flags(range->space);
2002 	for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2003 		if ((pdev->resource[i].flags & mask) == mask &&
2004 			pdev->resource[i].start <= range->address &&
2005 			pdev->resource[i].end > range->address) {
2006 				if ((range->address + range->size - 1) > pdev->resource[i].end) {
2007 					/* Add better message */
2008 					printk(KERN_WARNING "PCI/OF resource overlap !\n");
2009 					return NULL;
2010 				}
2011 				break;
2012 			}
2013 	}
2014 	if (i == DEVICE_COUNT_RESOURCE)
2015 		return NULL;
2016 	return &pdev->resource[i];
2017 }
2018 
2019 /*
2020  * Request an OF device resource.  Currently handles children of PCI devices,
2021  * or other nodes attached to the root node.  Ultimately, we should put a
2022  * link to the resources in the OF node.
2023  */
2024 struct resource *request_OF_resource(struct device_node* node, int index,
2025 				     const char* name_postfix)
2026 {
2027 	struct pci_dev* pcidev;
2028 	u8 pci_bus, pci_devfn;
2029 	unsigned long iomask;
2030 	struct device_node* nd;
2031 	struct resource* parent;
2032 	struct resource *res = NULL;
2033 	int nlen, plen;
2034 
2035 	if (index >= node->n_addrs)
2036 		goto fail;
2037 
2038 	/* Sanity check on bus space */
2039 	iomask = bus_space_to_resource_flags(node->addrs[index].space);
2040 	if (iomask & IORESOURCE_MEM)
2041 		parent = &iomem_resource;
2042 	else if (iomask & IORESOURCE_IO)
2043 		parent = &ioport_resource;
2044 	else
2045 		goto fail;
2046 
2047 	/* Find a PCI parent if any */
2048 	nd = node;
2049 	pcidev = NULL;
2050 	while (nd) {
2051 		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2052 			pcidev = pci_find_slot(pci_bus, pci_devfn);
2053 		if (pcidev) break;
2054 		nd = nd->parent;
2055 	}
2056 	if (pcidev)
2057 		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2058 	if (!parent) {
2059 		printk(KERN_WARNING "request_OF_resource(%s), parent not found\n",
2060 			node->name);
2061 		goto fail;
2062 	}
2063 
2064 	res = __request_region(parent, node->addrs[index].address,
2065 			       node->addrs[index].size, NULL);
2066 	if (!res)
2067 		goto fail;
2068 	nlen = strlen(node->name);
2069 	plen = name_postfix ? strlen(name_postfix) : 0;
2070 	res->name = (const char *)kmalloc(nlen+plen+1, GFP_KERNEL);
2071 	if (res->name) {
2072 		strcpy((char *)res->name, node->name);
2073 		if (plen)
2074 			strcpy((char *)res->name+nlen, name_postfix);
2075 	}
2076 	return res;
2077 fail:
2078 	return NULL;
2079 }
2080 EXPORT_SYMBOL(request_OF_resource);
2081 
2082 int release_OF_resource(struct device_node *node, int index)
2083 {
2084 	struct pci_dev* pcidev;
2085 	u8 pci_bus, pci_devfn;
2086 	unsigned long iomask, start, end;
2087 	struct device_node* nd;
2088 	struct resource* parent;
2089 	struct resource *res = NULL;
2090 
2091 	if (index >= node->n_addrs)
2092 		return -EINVAL;
2093 
2094 	/* Sanity check on bus space */
2095 	iomask = bus_space_to_resource_flags(node->addrs[index].space);
2096 	if (iomask & IORESOURCE_MEM)
2097 		parent = &iomem_resource;
2098 	else if (iomask & IORESOURCE_IO)
2099 		parent = &ioport_resource;
2100 	else
2101 		return -EINVAL;
2102 
2103 	/* Find a PCI parent if any */
2104 	nd = node;
2105 	pcidev = NULL;
2106 	while(nd) {
2107 		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2108 			pcidev = pci_find_slot(pci_bus, pci_devfn);
2109 		if (pcidev) break;
2110 		nd = nd->parent;
2111 	}
2112 	if (pcidev)
2113 		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2114 	if (!parent) {
2115 		printk(KERN_WARNING "release_OF_resource(%s), parent not found\n",
2116 			node->name);
2117 		return -ENODEV;
2118 	}
2119 
2120 	/* Find ourselves in the parent and its children */
2121 	res = parent->child;
2122 	start = node->addrs[index].address;
2123 	end = start + node->addrs[index].size - 1;
2124 	while (res) {
2125 		if (res->start == start && res->end == end &&
2126 		    (res->flags & IORESOURCE_BUSY))
2127 		    	break;
2128 		if (res->start <= start && res->end >= end)
2129 			res = res->child;
2130 		else
2131 			res = res->sibling;
2132 	}
2133 	if (!res)
2134 		return -ENODEV;
2135 
2136 	if (res->name) {
2137 		kfree(res->name);
2138 		res->name = NULL;
2139 	}
2140 	release_resource(res);
2141 	kfree(res);
2142 
2143 	return 0;
2144 }
2145 EXPORT_SYMBOL(release_OF_resource);
2146