xref: /openbmc/linux/arch/powerpc/kernel/prom.c (revision 1dfc6772)
1 /*
2  * Procedures for creating, accessing and interpreting the device tree.
3  *
4  * Paul Mackerras	August 1996.
5  * Copyright (C) 1996-2005 Paul Mackerras.
6  *
7  *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8  *    {engebret|bergner}@us.ibm.com
9  *
10  *      This program is free software; you can redistribute it and/or
11  *      modify it under the terms of the GNU General Public License
12  *      as published by the Free Software Foundation; either version
13  *      2 of the License, or (at your option) any later version.
14  */
15 
16 #undef DEBUG
17 
18 #include <stdarg.h>
19 #include <linux/config.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/init.h>
23 #include <linux/threads.h>
24 #include <linux/spinlock.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/stringify.h>
28 #include <linux/delay.h>
29 #include <linux/initrd.h>
30 #include <linux/bitops.h>
31 #include <linux/module.h>
32 
33 #include <asm/prom.h>
34 #include <asm/rtas.h>
35 #include <asm/lmb.h>
36 #include <asm/page.h>
37 #include <asm/processor.h>
38 #include <asm/irq.h>
39 #include <asm/io.h>
40 #include <asm/smp.h>
41 #include <asm/system.h>
42 #include <asm/mmu.h>
43 #include <asm/pgtable.h>
44 #include <asm/pci.h>
45 #include <asm/iommu.h>
46 #include <asm/btext.h>
47 #include <asm/sections.h>
48 #include <asm/machdep.h>
49 #include <asm/pSeries_reconfig.h>
50 #include <asm/pci-bridge.h>
51 
52 #ifdef DEBUG
53 #define DBG(fmt...) printk(KERN_ERR fmt)
54 #else
55 #define DBG(fmt...)
56 #endif
57 
58 struct pci_reg_property {
59 	struct pci_address addr;
60 	u32 size_hi;
61 	u32 size_lo;
62 };
63 
64 struct isa_reg_property {
65 	u32 space;
66 	u32 address;
67 	u32 size;
68 };
69 
70 
71 typedef int interpret_func(struct device_node *, unsigned long *,
72 			   int, int, int);
73 
74 static int __initdata dt_root_addr_cells;
75 static int __initdata dt_root_size_cells;
76 
77 #ifdef CONFIG_PPC64
78 static int __initdata iommu_is_off;
79 int __initdata iommu_force_on;
80 unsigned long tce_alloc_start, tce_alloc_end;
81 #endif
82 
83 typedef u32 cell_t;
84 
85 #if 0
86 static struct boot_param_header *initial_boot_params __initdata;
87 #else
88 struct boot_param_header *initial_boot_params;
89 #endif
90 
91 static struct device_node *allnodes = NULL;
92 
93 /* Use this lock when traversing the tree through the allnext, child,
94  * sibling, or parent members of struct device_node.
95  */
96 static DEFINE_RWLOCK(devtree_lock);
97 
98 /* export that to outside world */
99 struct device_node *of_chosen;
100 
101 struct device_node *dflt_interrupt_controller;
102 int num_interrupt_controllers;
103 
104 /*
105  * Wrapper for allocating memory for various data that needs to be
106  * attached to device nodes as they are processed at boot or when
107  * added to the device tree later (e.g. DLPAR).  At boot there is
108  * already a region reserved so we just increment *mem_start by size;
109  * otherwise we call kmalloc.
110  */
111 static void * prom_alloc(unsigned long size, unsigned long *mem_start)
112 {
113 	unsigned long tmp;
114 
115 	if (!mem_start)
116 		return kmalloc(size, GFP_KERNEL);
117 
118 	tmp = *mem_start;
119 	*mem_start += size;
120 	return (void *)tmp;
121 }
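
/*
 * A minimal sketch (hypothetical helper, not part of the original file) of
 * how callers typically drive prom_alloc(): a first "measure" pass hands in
 * a cursor that starts out as a plain byte count, a second pass hands in a
 * cursor into real memory of exactly that size, so the same allocation code
 * can run twice.  This mirrors the structure of finish_node_interrupts()
 * below; the helper name and blob contents are only illustrative.
 */
static int example_attach_blob(struct device_node *np, int len,
			       unsigned long *mem_start, int measure_only)
{
	void *blob = prom_alloc(len, mem_start);

	if (!blob)
		return -ENOMEM;
	if (measure_only)	/* pass 1: only the accumulated size matters */
		return 0;
	memset(blob, 0, len);	/* pass 2: blob points at real memory */
	np->data = blob;
	return 0;
}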
122 
123 /*
124  * Find the device_node with a given phandle.
125  */
126 static struct device_node * find_phandle(phandle ph)
127 {
128 	struct device_node *np;
129 
130 	for (np = allnodes; np != 0; np = np->allnext)
131 		if (np->linux_phandle == ph)
132 			return np;
133 	return NULL;
134 }
135 
136 /*
137  * Find the interrupt parent of a node.
138  */
139 static struct device_node * __devinit intr_parent(struct device_node *p)
140 {
141 	phandle *parp;
142 
143 	parp = (phandle *) get_property(p, "interrupt-parent", NULL);
144 	if (parp == NULL)
145 		return p->parent;
146 	p = find_phandle(*parp);
147 	if (p != NULL)
148 		return p;
149 	/*
150 	 * On a powermac booted with BootX, we don't get to know the
151 	 * phandles for any nodes, so find_phandle will return NULL.
152 	 * Fortunately these machines only have one interrupt controller
153 	 * so there isn't in fact any ambiguity.  -- paulus
154 	 */
155 	if (num_interrupt_controllers == 1)
156 		p = dflt_interrupt_controller;
157 	return p;
158 }
159 
160 /*
161  * Find out the size of each entry of the interrupts property
162  * for a node.
163  */
164 int __devinit prom_n_intr_cells(struct device_node *np)
165 {
166 	struct device_node *p;
167 	unsigned int *icp;
168 
169 	for (p = np; (p = intr_parent(p)) != NULL; ) {
170 		icp = (unsigned int *)
171 			get_property(p, "#interrupt-cells", NULL);
172 		if (icp != NULL)
173 			return *icp;
174 		if (get_property(p, "interrupt-controller", NULL) != NULL
175 		    || get_property(p, "interrupt-map", NULL) != NULL) {
176 			printk("oops, node %s doesn't have #interrupt-cells\n",
177 			       p->full_name);
178 			return 1;
179 		}
180 	}
181 #ifdef DEBUG_IRQ
182 	printk("prom_n_intr_cells failed for %s\n", np->full_name);
183 #endif
184 	return 1;
185 }
186 
187 /*
188  * Map an interrupt from a device up to the platform interrupt
189  * descriptor.
190  */
191 static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
192 				   struct device_node *np, unsigned int *ints,
193 				   int nintrc)
194 {
195 	struct device_node *p, *ipar;
196 	unsigned int *imap, *imask, *ip;
197 	int i, imaplen, match;
198 	int newintrc = 0, newaddrc = 0;
199 	unsigned int *reg;
200 	int naddrc;
201 
202 	reg = (unsigned int *) get_property(np, "reg", NULL);
203 	naddrc = prom_n_addr_cells(np);
204 	p = intr_parent(np);
205 	while (p != NULL) {
206 		if (get_property(p, "interrupt-controller", NULL) != NULL)
207 			/* this node is an interrupt controller, stop here */
208 			break;
209 		imap = (unsigned int *)
210 			get_property(p, "interrupt-map", &imaplen);
211 		if (imap == NULL) {
212 			p = intr_parent(p);
213 			continue;
214 		}
215 		imask = (unsigned int *)
216 			get_property(p, "interrupt-map-mask", NULL);
217 		if (imask == NULL) {
218 			printk("oops, %s has interrupt-map but no mask\n",
219 			       p->full_name);
220 			return 0;
221 		}
222 		imaplen /= sizeof(unsigned int);
223 		match = 0;
224 		ipar = NULL;
225 		while (imaplen > 0 && !match) {
226 			/* check the child-interrupt field */
227 			match = 1;
228 			for (i = 0; i < naddrc && match; ++i)
229 				match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
230 			for (; i < naddrc + nintrc && match; ++i)
231 				match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
232 			imap += naddrc + nintrc;
233 			imaplen -= naddrc + nintrc;
234 			/* grab the interrupt parent */
235 			ipar = find_phandle((phandle) *imap++);
236 			--imaplen;
237 			if (ipar == NULL && num_interrupt_controllers == 1)
238 				/* cope with BootX not giving us phandles */
239 				ipar = dflt_interrupt_controller;
240 			if (ipar == NULL) {
241 				printk("oops, no int parent %x in map of %s\n",
242 				       imap[-1], p->full_name);
243 				return 0;
244 			}
245 			/* find the parent's # addr and intr cells */
246 			ip = (unsigned int *)
247 				get_property(ipar, "#interrupt-cells", NULL);
248 			if (ip == NULL) {
249 				printk("oops, no #interrupt-cells on %s\n",
250 				       ipar->full_name);
251 				return 0;
252 			}
253 			newintrc = *ip;
254 			ip = (unsigned int *)
255 				get_property(ipar, "#address-cells", NULL);
256 			newaddrc = (ip == NULL)? 0: *ip;
257 			imap += newaddrc + newintrc;
258 			imaplen -= newaddrc + newintrc;
259 		}
260 		if (imaplen < 0) {
261 			printk("oops, error decoding int-map on %s, len=%d\n",
262 			       p->full_name, imaplen);
263 			return 0;
264 		}
265 		if (!match) {
266 #ifdef DEBUG_IRQ
267 			printk("oops, no match in %s int-map for %s\n",
268 			       p->full_name, np->full_name);
269 #endif
270 			return 0;
271 		}
272 		p = ipar;
273 		naddrc = newaddrc;
274 		nintrc = newintrc;
275 		ints = imap - nintrc;
276 		reg = ints - naddrc;
277 	}
278 	if (p == NULL) {
279 #ifdef DEBUG_IRQ
280 		printk("hmmm, int tree for %s doesn't have ctrler\n",
281 		       np->full_name);
282 #endif
283 		return 0;
284 	}
285 	*irq = ints;
286 	*ictrler = p;
287 	return nintrc;
288 }
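
/*
 * A worked sketch (not called anywhere) of the core test performed in the
 * loop above: one interrupt-map entry matches when every child unit-address
 * cell and interrupt cell, ANDed with interrupt-map-mask, equals the
 * corresponding masked cell of the entry.  The cell counts are the caller's
 * naddrc/nintrc, exactly as in map_interrupt().
 */
static int example_imap_entry_matches(unsigned int *reg, unsigned int *ints,
				      unsigned int *imap, unsigned int *imask,
				      int naddrc, int nintrc)
{
	int i;

	for (i = 0; i < naddrc; ++i)
		if (((reg[i] ^ imap[i]) & imask[i]) != 0)
			return 0;
	for (; i < naddrc + nintrc; ++i)
		if (((ints[i - naddrc] ^ imap[i]) & imask[i]) != 0)
			return 0;
	return 1;	/* the next cell of imap is the parent's phandle */
}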
289 
290 static unsigned char map_isa_senses[4] = {
291 	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
292 	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
293 	IRQ_SENSE_EDGE  | IRQ_POLARITY_NEGATIVE,
294 	IRQ_SENSE_EDGE  | IRQ_POLARITY_POSITIVE
295 };
296 
297 static unsigned char map_mpic_senses[4] = {
298 	IRQ_SENSE_EDGE  | IRQ_POLARITY_POSITIVE,
299 	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
300 	/* 2 seems to be used for the 8259 cascade... */
301 	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
302 	IRQ_SENSE_EDGE  | IRQ_POLARITY_NEGATIVE,
303 };
304 
305 static int __devinit finish_node_interrupts(struct device_node *np,
306 					    unsigned long *mem_start,
307 					    int measure_only)
308 {
309 	unsigned int *ints;
310 	int intlen, intrcells, intrcount;
311 	int i, j, n, sense;
312 	unsigned int *irq, virq;
313 	struct device_node *ic;
314 
315 	if (num_interrupt_controllers == 0) {
316 		/*
317 		 * Old machines just have a list of interrupt numbers
318 		 * and no interrupt-controller nodes.
319 		 */
320 		ints = (unsigned int *) get_property(np, "AAPL,interrupts",
321 						     &intlen);
322 		/* XXX old interpret_pci_props looked in parent too */
323 		/* XXX old interpret_macio_props looked for interrupts
324 		   before AAPL,interrupts */
325 		if (ints == NULL)
326 			ints = (unsigned int *) get_property(np, "interrupts",
327 							     &intlen);
328 		if (ints == NULL)
329 			return 0;
330 
331 		np->n_intrs = intlen / sizeof(unsigned int);
332 		np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
333 				       mem_start);
334 		if (!np->intrs)
335 			return -ENOMEM;
336 		if (measure_only)
337 			return 0;
338 
339 		for (i = 0; i < np->n_intrs; ++i) {
340 			np->intrs[i].line = *ints++;
341 			np->intrs[i].sense = IRQ_SENSE_LEVEL
342 				| IRQ_POLARITY_NEGATIVE;
343 		}
344 		return 0;
345 	}
346 
347 	ints = (unsigned int *) get_property(np, "interrupts", &intlen);
348 	if (ints == NULL)
349 		return 0;
350 	intrcells = prom_n_intr_cells(np);
351 	intlen /= intrcells * sizeof(unsigned int);
352 
353 	np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
354 	if (!np->intrs)
355 		return -ENOMEM;
356 
357 	if (measure_only)
358 		return 0;
359 
360 	intrcount = 0;
361 	for (i = 0; i < intlen; ++i, ints += intrcells) {
362 		n = map_interrupt(&irq, &ic, np, ints, intrcells);
363 		if (n <= 0)
364 			continue;
365 
366 		/* don't map IRQ numbers under a cascaded 8259 controller */
367 		if (ic && device_is_compatible(ic, "chrp,iic")) {
368 			np->intrs[intrcount].line = irq[0];
369 			sense = (n > 1)? (irq[1] & 3): 3;
370 			np->intrs[intrcount].sense = map_isa_senses[sense];
371 		} else {
372 			virq = virt_irq_create_mapping(irq[0]);
373 #ifdef CONFIG_PPC64
374 			if (virq == NO_IRQ) {
375 				printk(KERN_CRIT "Could not allocate interrupt"
376 				       " number for %s\n", np->full_name);
377 				continue;
378 			}
379 #endif
380 			np->intrs[intrcount].line = irq_offset_up(virq);
381 			sense = (n > 1)? (irq[1] & 3): 1;
382 			np->intrs[intrcount].sense = map_mpic_senses[sense];
383 		}
384 
385 #ifdef CONFIG_PPC64
386 		/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
387 		if (_machine == PLATFORM_POWERMAC && ic && ic->parent) {
388 			char *name = get_property(ic->parent, "name", NULL);
389 			if (name && !strcmp(name, "u3"))
390 				np->intrs[intrcount].line += 128;
391 			else if (!(name && !strcmp(name, "mac-io")))
392 				/* ignore other cascaded controllers, such as
393 				   the k2-sata-root */
394 				break;
395 		}
396 #endif
397 		if (n > 2) {
398 			printk("hmmm, got %d intr cells for %s:", n,
399 			       np->full_name);
400 			for (j = 0; j < n; ++j)
401 				printk(" %d", irq[j]);
402 			printk("\n");
403 		}
404 		++intrcount;
405 	}
406 	np->n_intrs = intrcount;
407 
408 	return 0;
409 }
410 
411 static int __devinit interpret_pci_props(struct device_node *np,
412 					 unsigned long *mem_start,
413 					 int naddrc, int nsizec,
414 					 int measure_only)
415 {
416 	struct address_range *adr;
417 	struct pci_reg_property *pci_addrs;
418 	int i, l, n_addrs;
419 
420 	pci_addrs = (struct pci_reg_property *)
421 		get_property(np, "assigned-addresses", &l);
422 	if (!pci_addrs)
423 		return 0;
424 
425 	n_addrs = l / sizeof(*pci_addrs);
426 
427 	adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
428 	if (!adr)
429 		return -ENOMEM;
430 
431  	if (measure_only)
432  		return 0;
433 
434  	np->addrs = adr;
435  	np->n_addrs = n_addrs;
436 
437  	for (i = 0; i < n_addrs; i++) {
438  		adr[i].space = pci_addrs[i].addr.a_hi;
439  		adr[i].address = pci_addrs[i].addr.a_lo |
440 			((u64)pci_addrs[i].addr.a_mid << 32);
441  		adr[i].size = pci_addrs[i].size_lo;
442 	}
443 
444 	return 0;
445 }
446 
447 static int __init interpret_dbdma_props(struct device_node *np,
448 					unsigned long *mem_start,
449 					int naddrc, int nsizec,
450 					int measure_only)
451 {
452 	struct reg_property32 *rp;
453 	struct address_range *adr;
454 	unsigned long base_address;
455 	int i, l;
456 	struct device_node *db;
457 
458 	base_address = 0;
459 	if (!measure_only) {
460 		for (db = np->parent; db != NULL; db = db->parent) {
461 			if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
462 				base_address = db->addrs[0].address;
463 				break;
464 			}
465 		}
466 	}
467 
468 	rp = (struct reg_property32 *) get_property(np, "reg", &l);
469 	if (rp != 0 && l >= sizeof(struct reg_property32)) {
470 		i = 0;
471 		adr = (struct address_range *) (*mem_start);
472 		while ((l -= sizeof(struct reg_property32)) >= 0) {
473 			if (!measure_only) {
474 				adr[i].space = 2;
475 				adr[i].address = rp[i].address + base_address;
476 				adr[i].size = rp[i].size;
477 			}
478 			++i;
479 		}
480 		np->addrs = adr;
481 		np->n_addrs = i;
482 		(*mem_start) += i * sizeof(struct address_range);
483 	}
484 
485 	return 0;
486 }
487 
488 static int __init interpret_macio_props(struct device_node *np,
489 					unsigned long *mem_start,
490 					int naddrc, int nsizec,
491 					int measure_only)
492 {
493 	struct reg_property32 *rp;
494 	struct address_range *adr;
495 	unsigned long base_address;
496 	int i, l;
497 	struct device_node *db;
498 
499 	base_address = 0;
500 	if (!measure_only) {
501 		for (db = np->parent; db != NULL; db = db->parent) {
502 			if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
503 				base_address = db->addrs[0].address;
504 				break;
505 			}
506 		}
507 	}
508 
509 	rp = (struct reg_property32 *) get_property(np, "reg", &l);
510 	if (rp != 0 && l >= sizeof(struct reg_property32)) {
511 		i = 0;
512 		adr = (struct address_range *) (*mem_start);
513 		while ((l -= sizeof(struct reg_property32)) >= 0) {
514 			if (!measure_only) {
515 				adr[i].space = 2;
516 				adr[i].address = rp[i].address + base_address;
517 				adr[i].size = rp[i].size;
518 			}
519 			++i;
520 		}
521 		np->addrs = adr;
522 		np->n_addrs = i;
523 		(*mem_start) += i * sizeof(struct address_range);
524 	}
525 
526 	return 0;
527 }
528 
529 static int __init interpret_isa_props(struct device_node *np,
530 				      unsigned long *mem_start,
531 				      int naddrc, int nsizec,
532 				      int measure_only)
533 {
534 	struct isa_reg_property *rp;
535 	struct address_range *adr;
536 	int i, l;
537 
538 	rp = (struct isa_reg_property *) get_property(np, "reg", &l);
539 	if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
540 		i = 0;
541 		adr = (struct address_range *) (*mem_start);
542 		while ((l -= sizeof(struct isa_reg_property)) >= 0) {
543 			if (!measure_only) {
544 				adr[i].space = rp[i].space;
545 				adr[i].address = rp[i].address;
546 				adr[i].size = rp[i].size;
547 			}
548 			++i;
549 		}
550 		np->addrs = adr;
551 		np->n_addrs = i;
552 		(*mem_start) += i * sizeof(struct address_range);
553 	}
554 
555 	return 0;
556 }
557 
558 static int __init interpret_root_props(struct device_node *np,
559 				       unsigned long *mem_start,
560 				       int naddrc, int nsizec,
561 				       int measure_only)
562 {
563 	struct address_range *adr;
564 	int i, l;
565 	unsigned int *rp;
566 	int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
567 
568 	rp = (unsigned int *) get_property(np, "reg", &l);
569 	if (rp != 0 && l >= rpsize) {
570 		i = 0;
571 		adr = (struct address_range *) (*mem_start);
572 		while ((l -= rpsize) >= 0) {
573 			if (!measure_only) {
574 				adr[i].space = 0;
575 				adr[i].address = rp[naddrc - 1];
576 				adr[i].size = rp[naddrc + nsizec - 1];
577 			}
578 			++i;
579 			rp += naddrc + nsizec;
580 		}
581 		np->addrs = adr;
582 		np->n_addrs = i;
583 		(*mem_start) += i * sizeof(struct address_range);
584 	}
585 
586 	return 0;
587 }
588 
589 static int __devinit finish_node(struct device_node *np,
590 				 unsigned long *mem_start,
591 				 interpret_func *ifunc,
592 				 int naddrc, int nsizec,
593 				 int measure_only)
594 {
595 	struct device_node *child;
596 	int *ip, rc = 0;
597 
598 	/* get the device addresses and interrupts */
599 	if (ifunc != NULL)
600 		rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
601 	if (rc)
602 		goto out;
603 
604 	rc = finish_node_interrupts(np, mem_start, measure_only);
605 	if (rc)
606 		goto out;
607 
608 	/* Look for #address-cells and #size-cells properties. */
609 	ip = (int *) get_property(np, "#address-cells", NULL);
610 	if (ip != NULL)
611 		naddrc = *ip;
612 	ip = (int *) get_property(np, "#size-cells", NULL);
613 	if (ip != NULL)
614 		nsizec = *ip;
615 
616 	if (!strcmp(np->name, "device-tree") || np->parent == NULL)
617 		ifunc = interpret_root_props;
618 	else if (np->type == 0)
619 		ifunc = NULL;
620 	else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
621 		ifunc = interpret_pci_props;
622 	else if (!strcmp(np->type, "dbdma"))
623 		ifunc = interpret_dbdma_props;
624 	else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
625 		ifunc = interpret_macio_props;
626 	else if (!strcmp(np->type, "isa"))
627 		ifunc = interpret_isa_props;
628 	else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
629 		ifunc = interpret_root_props;
630 	else if (!((ifunc == interpret_dbdma_props
631 		    || ifunc == interpret_macio_props)
632 		   && (!strcmp(np->type, "escc")
633 		       || !strcmp(np->type, "media-bay"))))
634 		ifunc = NULL;
635 
636 	for (child = np->child; child != NULL; child = child->sibling) {
637 		rc = finish_node(child, mem_start, ifunc,
638 				 naddrc, nsizec, measure_only);
639 		if (rc)
640 			goto out;
641 	}
642 out:
643 	return rc;
644 }
645 
646 static void __init scan_interrupt_controllers(void)
647 {
648 	struct device_node *np;
649 	int n = 0;
650 	char *name, *ic;
651 	int iclen;
652 
653 	for (np = allnodes; np != NULL; np = np->allnext) {
654 		ic = get_property(np, "interrupt-controller", &iclen);
655 		name = get_property(np, "name", NULL);
656 		/* checking iclen makes sure we don't get a false
657 		   match on /chosen.interrupt_controller */
658 		if ((name != NULL
659 		     && strcmp(name, "interrupt-controller") == 0)
660 		    || (ic != NULL && iclen == 0
661 			&& strcmp(name, "AppleKiwi"))) {
662 			if (n == 0)
663 				dflt_interrupt_controller = np;
664 			++n;
665 		}
666 	}
667 	num_interrupt_controllers = n;
668 }
669 
670 /**
671  * finish_device_tree is called once things are running normally
672  * (i.e. with text and data mapped to the address they were linked at).
673  * It traverses the device tree and fills in some of the additional
674  * fields in each node, such as {n_}addrs and {n_}intrs; the virtual
675  * interrupt mapping is also initialized at this point.
676  */
677 void __init finish_device_tree(void)
678 {
679 	unsigned long start, end, size = 0;
680 
681 	DBG(" -> finish_device_tree\n");
682 
683 #ifdef CONFIG_PPC64
684 	/* Initialize virtual IRQ map */
685 	virt_irq_init();
686 #endif
687 	scan_interrupt_controllers();
688 
689 	/*
690 	 * Finish device-tree (pre-parsing some properties etc...)
691 	 * We do this in 2 passes. One with "measure_only" set, which
692 	 * will only measure the amount of memory needed, then we can
693 	 * allocate that memory, and call finish_node again. However,
694 	 * we must be careful as most routines will fail nowadays when
695 	 * prom_alloc() returns 0, so we must make sure our first pass
696 	 * doesn't start at 0. We pre-initialize size to 16 for that
697 	 * reason and then remove those additional 16 bytes
698 	 */
699 	size = 16;
700 	finish_node(allnodes, &size, NULL, 0, 0, 1);
701 	size -= 16;
702 	end = start = (unsigned long) __va(lmb_alloc(size, 128));
703 	finish_node(allnodes, &end, NULL, 0, 0, 0);
704 	BUG_ON(end != start + size);
705 
706 	DBG(" <- finish_device_tree\n");
707 }
708 
709 static inline char *find_flat_dt_string(u32 offset)
710 {
711 	return ((char *)initial_boot_params) +
712 		initial_boot_params->off_dt_strings + offset;
713 }
714 
715 /**
716  * This function is used to scan the flattened device-tree; it is
717  * used to extract memory information at boot, before we can
718  * unflatten the tree.
719  */
720 int __init of_scan_flat_dt(int (*it)(unsigned long node,
721 				     const char *uname, int depth,
722 				     void *data),
723 			   void *data)
724 {
725 	unsigned long p = ((unsigned long)initial_boot_params) +
726 		initial_boot_params->off_dt_struct;
727 	int rc = 0;
728 	int depth = -1;
729 
730 	do {
731 		u32 tag = *((u32 *)p);
732 		char *pathp;
733 
734 		p += 4;
735 		if (tag == OF_DT_END_NODE) {
736 			depth --;
737 			continue;
738 		}
739 		if (tag == OF_DT_NOP)
740 			continue;
741 		if (tag == OF_DT_END)
742 			break;
743 		if (tag == OF_DT_PROP) {
744 			u32 sz = *((u32 *)p);
745 			p += 8;
746 			if (initial_boot_params->version < 0x10)
747 				p = _ALIGN(p, sz >= 8 ? 8 : 4);
748 			p += sz;
749 			p = _ALIGN(p, 4);
750 			continue;
751 		}
752 		if (tag != OF_DT_BEGIN_NODE) {
753 			printk(KERN_WARNING "Invalid tag %x scanning flattened"
754 			       " device tree !\n", tag);
755 			return -EINVAL;
756 		}
757 		depth++;
758 		pathp = (char *)p;
759 		p = _ALIGN(p + strlen(pathp) + 1, 4);
760 		if ((*pathp) == '/') {
761 			char *lp, *np;
762 			for (lp = NULL, np = pathp; *np; np++)
763 				if ((*np) == '/')
764 					lp = np+1;
765 			if (lp != NULL)
766 				pathp = lp;
767 		}
768 		rc = it(p, pathp, depth, data);
769 		if (rc != 0)
770 			break;
771 	} while(1);
772 
773 	return rc;
774 }
775 
776 /**
777  * This function can be used within an of_scan_flat_dt() callback to
778  * get access to properties.
779  */
780 void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
781 				 unsigned long *size)
782 {
783 	unsigned long p = node;
784 
785 	do {
786 		u32 tag = *((u32 *)p);
787 		u32 sz, noff;
788 		const char *nstr;
789 
790 		p += 4;
791 		if (tag == OF_DT_NOP)
792 			continue;
793 		if (tag != OF_DT_PROP)
794 			return NULL;
795 
796 		sz = *((u32 *)p);
797 		noff = *((u32 *)(p + 4));
798 		p += 8;
799 		if (initial_boot_params->version < 0x10)
800 			p = _ALIGN(p, sz >= 8 ? 8 : 4);
801 
802 		nstr = find_flat_dt_string(noff);
803 		if (nstr == NULL) {
804 			printk(KERN_WARNING "Can't find property index"
805 			       " name !\n");
806 			return NULL;
807 		}
808 		if (strcmp(name, nstr) == 0) {
809 			if (size)
810 				*size = sz;
811 			return (void *)p;
812 		}
813 		p += sz;
814 		p = _ALIGN(p, 4);
815 	} while(1);
816 }
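
/*
 * A minimal sketch of how the two helpers above are meant to be used
 * together; the node and property names here ("example",
 * "clock-frequency") are only illustrative.  The callback returns
 * non-zero to stop the walk early, exactly as the real scanners
 * further down in this file do.
 */
static int __init example_scan_cb(unsigned long node, const char *uname,
				  int depth, void *data)
{
	u32 *freq;

	if (depth != 1 || strcmp(uname, "example") != 0)
		return 0;
	freq = of_get_flat_dt_prop(node, "clock-frequency", NULL);
	if (freq)
		*(u32 *)data = *freq;
	return 1;	/* found it, stop scanning */
}

/* hypothetical caller:  u32 f = 0;  of_scan_flat_dt(example_scan_cb, &f); */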
817 
818 static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
819 				       unsigned long align)
820 {
821 	void *res;
822 
823 	*mem = _ALIGN(*mem, align);
824 	res = (void *)*mem;
825 	*mem += size;
826 
827 	return res;
828 }
829 
830 static unsigned long __init unflatten_dt_node(unsigned long mem,
831 					      unsigned long *p,
832 					      struct device_node *dad,
833 					      struct device_node ***allnextpp,
834 					      unsigned long fpsize)
835 {
836 	struct device_node *np;
837 	struct property *pp, **prev_pp = NULL;
838 	char *pathp;
839 	u32 tag;
840 	unsigned int l, allocl;
841 	int has_name = 0;
842 	int new_format = 0;
843 
844 	tag = *((u32 *)(*p));
845 	if (tag != OF_DT_BEGIN_NODE) {
846 		printk("Weird tag at start of node: %x\n", tag);
847 		return mem;
848 	}
849 	*p += 4;
850 	pathp = (char *)*p;
851 	l = allocl = strlen(pathp) + 1;
852 	*p = _ALIGN(*p + l, 4);
853 
854 	/* Version 0x10 has a more compact unit name here instead of the
855 	 * full path.  We accumulate the full path size using "fpsize" and
856 	 * rebuild it later.  We detect this because the first character
857 	 * of the name is not '/'.
858 	 */
859 	if ((*pathp) != '/') {
860 		new_format = 1;
861 		if (fpsize == 0) {
862 			/* root node: special case. fpsize accounts for path
863 			 * plus terminating zero. root node only has '/', so
864 			 * fpsize should be 2, but we want to avoid first-level
865 			 * nodes having two '/', so we use fpsize 1 here
866 			 */
867 			fpsize = 1;
868 			allocl = 2;
869 		} else {
870 			/* account for '/' and path size minus terminal 0
871 			 * already in 'l'
872 			 */
873 			fpsize += l;
874 			allocl = fpsize;
875 		}
876 	}
877 
878 
879 	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
880 				__alignof__(struct device_node));
881 	if (allnextpp) {
882 		memset(np, 0, sizeof(*np));
883 		np->full_name = ((char*)np) + sizeof(struct device_node);
884 		if (new_format) {
885 			char *p = np->full_name;
886 			/* rebuild full path for new format */
887 			if (dad && dad->parent) {
888 				strcpy(p, dad->full_name);
889 #ifdef DEBUG
890 				if ((strlen(p) + l + 1) != allocl) {
891 					DBG("%s: p: %d, l: %d, a: %d\n",
892 					    pathp, strlen(p), l, allocl);
893 				}
894 #endif
895 				p += strlen(p);
896 			}
897 			*(p++) = '/';
898 			memcpy(p, pathp, l);
899 		} else
900 			memcpy(np->full_name, pathp, l);
901 		prev_pp = &np->properties;
902 		**allnextpp = np;
903 		*allnextpp = &np->allnext;
904 		if (dad != NULL) {
905 			np->parent = dad;
906 			/* we temporarily use the next field as `last_child'*/
907 			if (dad->next == 0)
908 				dad->child = np;
909 			else
910 				dad->next->sibling = np;
911 			dad->next = np;
912 		}
913 		kref_init(&np->kref);
914 	}
915 	while(1) {
916 		u32 sz, noff;
917 		char *pname;
918 
919 		tag = *((u32 *)(*p));
920 		if (tag == OF_DT_NOP) {
921 			*p += 4;
922 			continue;
923 		}
924 		if (tag != OF_DT_PROP)
925 			break;
926 		*p += 4;
927 		sz = *((u32 *)(*p));
928 		noff = *((u32 *)((*p) + 4));
929 		*p += 8;
930 		if (initial_boot_params->version < 0x10)
931 			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);
932 
933 		pname = find_flat_dt_string(noff);
934 		if (pname == NULL) {
935 			printk("Can't find property name in list !\n");
936 			break;
937 		}
938 		if (strcmp(pname, "name") == 0)
939 			has_name = 1;
940 		l = strlen(pname) + 1;
941 		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
942 					__alignof__(struct property));
943 		if (allnextpp) {
944 			if (strcmp(pname, "linux,phandle") == 0) {
945 				np->node = *((u32 *)*p);
946 				if (np->linux_phandle == 0)
947 					np->linux_phandle = np->node;
948 			}
949 			if (strcmp(pname, "ibm,phandle") == 0)
950 				np->linux_phandle = *((u32 *)*p);
951 			pp->name = pname;
952 			pp->length = sz;
953 			pp->value = (void *)*p;
954 			*prev_pp = pp;
955 			prev_pp = &pp->next;
956 		}
957 		*p = _ALIGN((*p) + sz, 4);
958 	}
959 	/* with version 0x10 we may not have the name property, recreate
960 	 * it here from the unit name if absent
961 	 */
962 	if (!has_name) {
963 		char *p = pathp, *ps = pathp, *pa = NULL;
964 		int sz;
965 
966 		while (*p) {
967 			if ((*p) == '@')
968 				pa = p;
969 			if ((*p) == '/')
970 				ps = p + 1;
971 			p++;
972 		}
973 		if (pa < ps)
974 			pa = p;
975 		sz = (pa - ps) + 1;
976 		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
977 					__alignof__(struct property));
978 		if (allnextpp) {
979 			pp->name = "name";
980 			pp->length = sz;
981 			pp->value = (unsigned char *)(pp + 1);
982 			*prev_pp = pp;
983 			prev_pp = &pp->next;
984 			memcpy(pp->value, ps, sz - 1);
985 			((char *)pp->value)[sz - 1] = 0;
986 			DBG("fixed up name for %s -> %s\n", pathp, pp->value);
987 		}
988 	}
989 	if (allnextpp) {
990 		*prev_pp = NULL;
991 		np->name = get_property(np, "name", NULL);
992 		np->type = get_property(np, "device_type", NULL);
993 
994 		if (!np->name)
995 			np->name = "<NULL>";
996 		if (!np->type)
997 			np->type = "<NULL>";
998 	}
999 	while (tag == OF_DT_BEGIN_NODE) {
1000 		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
1001 		tag = *((u32 *)(*p));
1002 	}
1003 	if (tag != OF_DT_END_NODE) {
1004 		printk("Weird tag at end of node: %x\n", tag);
1005 		return mem;
1006 	}
1007 	*p += 4;
1008 	return mem;
1009 }
1010 
1011 
1012 /**
1013  * unflattens the device-tree passed by the firmware, creating the
1014  * tree of struct device_node. It also fills the "name" and "type"
1015  * pointers of the nodes so the normal device-tree walking functions
1016  * can be used (this used to be done by finish_device_tree)
1017  */
1018 void __init unflatten_device_tree(void)
1019 {
1020 	unsigned long start, mem, size;
1021 	struct device_node **allnextp = &allnodes;
1022 	char *p = NULL;
1023 	int l = 0;
1024 
1025 	DBG(" -> unflatten_device_tree()\n");
1026 
1027 	/* First pass, scan for size */
1028 	start = ((unsigned long)initial_boot_params) +
1029 		initial_boot_params->off_dt_struct;
1030 	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
1031 	size = (size | 3) + 1;
1032 
1033 	DBG("  size is %lx, allocating...\n", size);
1034 
1035 	/* Allocate memory for the expanded device tree */
1036 	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
1037 	if (!mem) {
1038 		DBG("Couldn't allocate memory with lmb_alloc()!\n");
1039 		panic("Couldn't allocate memory with lmb_alloc()!\n");
1040 	}
1041 	mem = (unsigned long) __va(mem);
1042 
1043 	((u32 *)mem)[size / 4] = 0xdeadbeef;
1044 
1045 	DBG("  unflattening %lx...\n", mem);
1046 
1047 	/* Second pass, do actual unflattening */
1048 	start = ((unsigned long)initial_boot_params) +
1049 		initial_boot_params->off_dt_struct;
1050 	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
1051 	if (*((u32 *)start) != OF_DT_END)
1052 		printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
1053 	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
1054 		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
1055 		       ((u32 *)mem)[size / 4] );
1056 	*allnextp = NULL;
1057 
1058 	/* Get pointer to OF "/chosen" node for use everywhere */
1059 	of_chosen = of_find_node_by_path("/chosen");
1060 	if (of_chosen == NULL)
1061 		of_chosen = of_find_node_by_path("/chosen@0");
1062 
1063 	/* Retrieve command line */
1064 	if (of_chosen != NULL) {
1065 		p = (char *)get_property(of_chosen, "bootargs", &l);
1066 		if (p != NULL && l > 0)
1067 			strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
1068 	}
1069 #ifdef CONFIG_CMDLINE
1070 	if (l == 0 || (l == 1 && (*p) == 0))
1071 		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1072 #endif /* CONFIG_CMDLINE */
1073 
1074 	DBG("Command line is: %s\n", cmd_line);
1075 
1076 	DBG(" <- unflatten_device_tree()\n");
1077 }
1078 
1079 
1080 static int __init early_init_dt_scan_cpus(unsigned long node,
1081 					  const char *uname, int depth, void *data)
1082 {
1083 	u32 *prop;
1084 	unsigned long size;
1085 	char *type = of_get_flat_dt_prop(node, "device_type", &size);
1086 
1087 	/* We are scanning "cpu" nodes only */
1088 	if (type == NULL || strcmp(type, "cpu") != 0)
1089 		return 0;
1090 
1091 	boot_cpuid = 0;
1092 	boot_cpuid_phys = 0;
1093 	if (initial_boot_params && initial_boot_params->version >= 2) {
1094 		/* version 2 of the kexec param format adds the phys cpuid
1095 		 * of booted proc.
1096 		 */
1097 		boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
1098 	} else {
1099 		/* Check if it's the boot-cpu, set its hw index now */
1100 		if (of_get_flat_dt_prop(node,
1101 					"linux,boot-cpu", NULL) != NULL) {
1102 			prop = of_get_flat_dt_prop(node, "reg", NULL);
1103 			if (prop != NULL)
1104 				boot_cpuid_phys = *prop;
1105 		}
1106 	}
1107 	set_hard_smp_processor_id(0, boot_cpuid_phys);
1108 
1109 #ifdef CONFIG_ALTIVEC
1110 	/* Check if we have a VMX unit and, if so, update the CPU features */
1111 	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
1112 	if (prop && (*prop) > 0) {
1113 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1114 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1115 	}
1116 
1117 	/* Same goes for Apple's "altivec" property */
1118 	prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
1119 	if (prop) {
1120 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1121 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1122 	}
1123 #endif /* CONFIG_ALTIVEC */
1124 
1125 #ifdef CONFIG_PPC_PSERIES
1126 	/*
1127 	 * Check for an SMT capable CPU and set the CPU feature. We do
1128 	 * this by looking at the size of the ibm,ppc-interrupt-server#s
1129 	 * property
1130 	 */
1131 	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
1132 				       &size);
1133 	cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
1134 	if (prop && ((size / sizeof(u32)) > 1))
1135 		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
1136 #endif
1137 
1138 	return 0;
1139 }
1140 
1141 static int __init early_init_dt_scan_chosen(unsigned long node,
1142 					    const char *uname, int depth, void *data)
1143 {
1144 	u32 *prop;
1145 	unsigned long *lprop;
1146 
1147 	DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1148 
1149 	if (depth != 1 ||
1150 	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
1151 		return 0;
1152 
1153 	/* get platform type */
1154 	prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
1155 	if (prop == NULL)
1156 		return 0;
1157 #ifdef CONFIG_PPC_MULTIPLATFORM
1158 	_machine = *prop;
1159 #endif
1160 
1161 #ifdef CONFIG_PPC64
1162 	/* check if iommu is forced on or off */
1163 	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
1164 		iommu_is_off = 1;
1165 	if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
1166 		iommu_force_on = 1;
1167 #endif
1168 
1169  	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
1170  	if (lprop)
1171  		memory_limit = *lprop;
1172 
1173 #ifdef CONFIG_PPC64
1174  	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
1175  	if (lprop)
1176  		tce_alloc_start = *lprop;
1177  	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
1178  	if (lprop)
1179  		tce_alloc_end = *lprop;
1180 #endif
1181 
1182 #ifdef CONFIG_PPC_RTAS
1183 	/* To help early debugging via the front panel, we retrieve a minimal
1184 	 * set of RTAS information now, if available
1185 	 */
1186 	{
1187 		u64 *basep, *entryp;
1188 
1189 		basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
1190 		entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
1191 		prop = of_get_flat_dt_prop(node, "linux,rtas-size", NULL);
1192 		if (basep && entryp && prop) {
1193 			rtas.base = *basep;
1194 			rtas.entry = *entryp;
1195 			rtas.size = *prop;
1196 		}
1197 	}
1198 #endif /* CONFIG_PPC_RTAS */
1199 
1200 	/* break now */
1201 	return 1;
1202 }
1203 
1204 static int __init early_init_dt_scan_root(unsigned long node,
1205 					  const char *uname, int depth, void *data)
1206 {
1207 	u32 *prop;
1208 
1209 	if (depth != 0)
1210 		return 0;
1211 
1212 	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
1213 	dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1214 	DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1215 
1216 	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
1217 	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1218 	DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1219 
1220 	/* break now */
1221 	return 1;
1222 }
1223 
1224 static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1225 {
1226 	cell_t *p = *cellp;
1227 	unsigned long r;
1228 
1229 	/* Ignore more than 2 cells */
1230 	while (s > sizeof(unsigned long) / 4) {
1231 		p++;
1232 		s--;
1233 	}
1234 	r = *p++;
1235 #ifdef CONFIG_PPC64
1236 	if (s > 1) {
1237 		r <<= 32;
1238 		r |= *(p++);
1239 		s--;
1240 	}
1241 #endif
1242 
1243 	*cellp = p;
1244 	return r;
1245 }
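
/*
 * A short sketch of how dt_mem_next_cell() is consumed: a "reg" property
 * is laid out as dt_root_addr_cells address cells followed by
 * dt_root_size_cells size cells, so two successive calls walk one
 * (base, size) pair and advance the cell pointer.  This is exactly what
 * the memory scanner below does in its loop; the helper itself is only
 * illustrative.
 */
static void __init example_read_reg_pair(cell_t **reg,
					 unsigned long *base,
					 unsigned long *size)
{
	*base = dt_mem_next_cell(dt_root_addr_cells, reg);
	*size = dt_mem_next_cell(dt_root_size_cells, reg);
}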
1246 
1247 
1248 static int __init early_init_dt_scan_memory(unsigned long node,
1249 					    const char *uname, int depth, void *data)
1250 {
1251 	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
1252 	cell_t *reg, *endp;
1253 	unsigned long l;
1254 
1255 	/* We are scanning "memory" nodes only */
1256 	if (type == NULL) {
1257 		/*
1258 		 * The longtrail doesn't have a device_type on the
1259 		 * /memory node, so look for the node called /memory@0.
1260 		 */
1261 		if (depth != 1 || strcmp(uname, "memory@0") != 0)
1262 			return 0;
1263 	} else if (strcmp(type, "memory") != 0)
1264 		return 0;
1265 
1266 	reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
1267 	if (reg == NULL)
1268 		return 0;
1269 
1270 	endp = reg + (l / sizeof(cell_t));
1271 
1272 	DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
1273 	    uname, l, reg[0], reg[1], reg[2], reg[3]);
1274 
1275 	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1276 		unsigned long base, size;
1277 
1278 		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1279 		size = dt_mem_next_cell(dt_root_size_cells, &reg);
1280 
1281 		if (size == 0)
1282 			continue;
1283 		DBG(" - %lx ,  %lx\n", base, size);
1284 #ifdef CONFIG_PPC64
1285 		if (iommu_is_off) {
1286 			if (base >= 0x80000000ul)
1287 				continue;
1288 			if ((base + size) > 0x80000000ul)
1289 				size = 0x80000000ul - base;
1290 		}
1291 #endif
1292 		lmb_add(base, size);
1293 	}
1294 	return 0;
1295 }
1296 
1297 static void __init early_reserve_mem(void)
1298 {
1299 	unsigned long base, size;
1300 	unsigned long *reserve_map;
1301 
1302 	reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
1303 					initial_boot_params->off_mem_rsvmap);
1304 	while (1) {
1305 		base = *(reserve_map++);
1306 		size = *(reserve_map++);
1307 		if (size == 0)
1308 			break;
1309 		DBG("reserving: %lx -> %lx\n", base, size);
1310 		lmb_reserve(base, size);
1311 	}
1312 
1313 #if 0
1314 	DBG("memory reserved, lmbs :\n");
1315 	lmb_dump_all();
1316 #endif
1317 }
1318 
1319 void __init early_init_devtree(void *params)
1320 {
1321 	DBG(" -> early_init_devtree()\n");
1322 
1323 	/* Setup flat device-tree pointer */
1324 	initial_boot_params = params;
1325 
1326 	/* Retrieve information from the /chosen node of the
1327 	 * device-tree, including the platform type, initrd location and
1328 	 * size, TCE reserve, and more ...
1329 	 */
1330 	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
1331 
1332 	/* Scan memory nodes and rebuild LMBs */
1333 	lmb_init();
1334 	of_scan_flat_dt(early_init_dt_scan_root, NULL);
1335 	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
1336 	lmb_enforce_memory_limit(memory_limit);
1337 	lmb_analyze();
1338 	lmb_reserve(0, __pa(klimit));
1339 
1340 	DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
1341 
1342 	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
1343 	early_reserve_mem();
1344 
1345 	DBG("Scanning CPUs ...\n");
1346 
1347 	/* Retrieve CPU-related information from the flat tree
1348 	 * (altivec support, boot CPU ID, ...)
1349 	 */
1350 	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
1351 
1352 	DBG(" <- early_init_devtree()\n");
1353 }
1354 
1355 #undef printk
1356 
1357 int
1358 prom_n_addr_cells(struct device_node* np)
1359 {
1360 	int* ip;
1361 	do {
1362 		if (np->parent)
1363 			np = np->parent;
1364 		ip = (int *) get_property(np, "#address-cells", NULL);
1365 		if (ip != NULL)
1366 			return *ip;
1367 	} while (np->parent);
1368 	/* No #address-cells property for the root node, default to 1 */
1369 	return 1;
1370 }
1371 EXPORT_SYMBOL(prom_n_addr_cells);
1372 
1373 int
1374 prom_n_size_cells(struct device_node* np)
1375 {
1376 	int* ip;
1377 	do {
1378 		if (np->parent)
1379 			np = np->parent;
1380 		ip = (int *) get_property(np, "#size-cells", NULL);
1381 		if (ip != NULL)
1382 			return *ip;
1383 	} while (np->parent);
1384 	/* No #size-cells property for the root node, default to 1 */
1385 	return 1;
1386 }
1387 EXPORT_SYMBOL(prom_n_size_cells);
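
/*
 * An illustrative helper (hypothetical, not used elsewhere in this file)
 * showing how the two accessors above combine: the number of cells in a
 * child's "reg" entries is determined by the parent's #address-cells and
 * #size-cells, which is why both routines step up to the parent before
 * looking the properties up.
 */
static int example_reg_entry_cells(struct device_node *np)
{
	/* cells per (address, size) tuple in np's "reg" property */
	return prom_n_addr_cells(np) + prom_n_size_cells(np);
}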
1388 
1389 /**
1390  * Work out the sense (active-low level / active-high edge)
1391  * of each interrupt from the device tree.
1392  */
1393 void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1394 {
1395 	struct device_node *np;
1396 	int i, j;
1397 
1398 	/* default to level-triggered */
1399 	memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);
1400 
1401 	for (np = allnodes; np != 0; np = np->allnext) {
1402 		for (j = 0; j < np->n_intrs; j++) {
1403 			i = np->intrs[j].line;
1404 			if (i >= off && i < max)
1405 				senses[i-off] = np->intrs[j].sense;
1406 		}
1407 	}
1408 }
1409 
1410 /**
1411  * Construct and return a list of the device_nodes with a given name.
1412  */
1413 struct device_node *find_devices(const char *name)
1414 {
1415 	struct device_node *head, **prevp, *np;
1416 
1417 	prevp = &head;
1418 	for (np = allnodes; np != 0; np = np->allnext) {
1419 		if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1420 			*prevp = np;
1421 			prevp = &np->next;
1422 		}
1423 	}
1424 	*prevp = NULL;
1425 	return head;
1426 }
1427 EXPORT_SYMBOL(find_devices);
1428 
1429 /**
1430  * Construct and return a list of the device_nodes with a given type.
1431  */
1432 struct device_node *find_type_devices(const char *type)
1433 {
1434 	struct device_node *head, **prevp, *np;
1435 
1436 	prevp = &head;
1437 	for (np = allnodes; np != 0; np = np->allnext) {
1438 		if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1439 			*prevp = np;
1440 			prevp = &np->next;
1441 		}
1442 	}
1443 	*prevp = NULL;
1444 	return head;
1445 }
1446 EXPORT_SYMBOL(find_type_devices);
1447 
1448 /**
1449  * Returns all nodes linked together
1450  */
1451 struct device_node *find_all_nodes(void)
1452 {
1453 	struct device_node *head, **prevp, *np;
1454 
1455 	prevp = &head;
1456 	for (np = allnodes; np != 0; np = np->allnext) {
1457 		*prevp = np;
1458 		prevp = &np->next;
1459 	}
1460 	*prevp = NULL;
1461 	return head;
1462 }
1463 EXPORT_SYMBOL(find_all_nodes);
1464 
1465 /** Checks if the given "compat" string matches one of the strings in
1466  * the device's "compatible" property
1467  */
1468 int device_is_compatible(struct device_node *device, const char *compat)
1469 {
1470 	const char* cp;
1471 	int cplen, l;
1472 
1473 	cp = (char *) get_property(device, "compatible", &cplen);
1474 	if (cp == NULL)
1475 		return 0;
1476 	while (cplen > 0) {
1477 		if (strncasecmp(cp, compat, strlen(compat)) == 0)
1478 			return 1;
1479 		l = strlen(cp) + 1;
1480 		cp += l;
1481 		cplen -= l;
1482 	}
1483 
1484 	return 0;
1485 }
1486 EXPORT_SYMBOL(device_is_compatible);
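
/*
 * A small usage sketch; the compatible string "chrp,iic" is the one
 * already tested in finish_node_interrupts() above, and the helper
 * name is purely illustrative.
 */
static int example_is_cascaded_8259(struct device_node *ic)
{
	return ic != NULL && device_is_compatible(ic, "chrp,iic");
}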
1487 
1488 
1489 /**
1490  * Indicates whether the root node has a given value in its
1491  * compatible property.
1492  */
1493 int machine_is_compatible(const char *compat)
1494 {
1495 	struct device_node *root;
1496 	int rc = 0;
1497 
1498 	root = of_find_node_by_path("/");
1499 	if (root) {
1500 		rc = device_is_compatible(root, compat);
1501 		of_node_put(root);
1502 	}
1503 	return rc;
1504 }
1505 EXPORT_SYMBOL(machine_is_compatible);
1506 
1507 /**
1508  * Construct and return a list of the device_nodes with a given type
1509  * and compatible property.
1510  */
1511 struct device_node *find_compatible_devices(const char *type,
1512 					    const char *compat)
1513 {
1514 	struct device_node *head, **prevp, *np;
1515 
1516 	prevp = &head;
1517 	for (np = allnodes; np != 0; np = np->allnext) {
1518 		if (type != NULL
1519 		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1520 			continue;
1521 		if (device_is_compatible(np, compat)) {
1522 			*prevp = np;
1523 			prevp = &np->next;
1524 		}
1525 	}
1526 	*prevp = NULL;
1527 	return head;
1528 }
1529 EXPORT_SYMBOL(find_compatible_devices);
1530 
1531 /**
1532  * Find the device_node with a given full_name.
1533  */
1534 struct device_node *find_path_device(const char *path)
1535 {
1536 	struct device_node *np;
1537 
1538 	for (np = allnodes; np != 0; np = np->allnext)
1539 		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1540 			return np;
1541 	return NULL;
1542 }
1543 EXPORT_SYMBOL(find_path_device);
1544 
1545 /*******
1546  *
1547  * New implementation of the OF "find" APIs, return a refcounted
1548  * object, call of_node_put() when done.  The device tree and list
1549  * are protected by a rw_lock.
1550  *
1551  * Note that property management will need some locking as well,
1552  * this isn't dealt with yet.
1553  *
1554  *******/
1555 
1556 /**
1557  *	of_find_node_by_name - Find a node by its "name" property
1558  *	@from:	The node to start searching from or NULL, the node
1559  *		you pass will not be searched, only the next one
1560  *		will; typically, you pass what the previous call
1561  *		returned. of_node_put() will be called on it
1562  *	@name:	The name string to match against
1563  *
1564  *	Returns a node pointer with refcount incremented, use
1565  *	of_node_put() on it when done.
1566  */
1567 struct device_node *of_find_node_by_name(struct device_node *from,
1568 	const char *name)
1569 {
1570 	struct device_node *np;
1571 
1572 	read_lock(&devtree_lock);
1573 	np = from ? from->allnext : allnodes;
1574 	for (; np != 0; np = np->allnext)
1575 		if (np->name != 0 && strcasecmp(np->name, name) == 0
1576 		    && of_node_get(np))
1577 			break;
1578 	if (from)
1579 		of_node_put(from);
1580 	read_unlock(&devtree_lock);
1581 	return np;
1582 }
1583 EXPORT_SYMBOL(of_find_node_by_name);
1584 
1585 /**
1586  *	of_find_node_by_type - Find a node by its "device_type" property
1587  *	@from:	The node to start searching from or NULL, the node
1588  *		you pass will not be searched, only the next one
1589  *		will; typically, you pass what the previous call
1590  *		returned. of_node_put() will be called on it
1591  *	@name:	The type string to match against
1592  *
1593  *	Returns a node pointer with refcount incremented, use
1594  *	of_node_put() on it when done.
1595  */
1596 struct device_node *of_find_node_by_type(struct device_node *from,
1597 	const char *type)
1598 {
1599 	struct device_node *np;
1600 
1601 	read_lock(&devtree_lock);
1602 	np = from ? from->allnext : allnodes;
1603 	for (; np != 0; np = np->allnext)
1604 		if (np->type != 0 && strcasecmp(np->type, type) == 0
1605 		    && of_node_get(np))
1606 			break;
1607 	if (from)
1608 		of_node_put(from);
1609 	read_unlock(&devtree_lock);
1610 	return np;
1611 }
1612 EXPORT_SYMBOL(of_find_node_by_type);
1613 
1614 /**
1615  *	of_find_compatible_node - Find a node based on type and one of the
1616  *                                tokens in its "compatible" property
1617  *	@from:		The node to start searching from or NULL, the node
1618  *			you pass will not be searched, only the next one
1619  *			will; typically, you pass what the previous call
1620  *			returned. of_node_put() will be called on it
1621  *	@type:		The type string to match "device_type" or NULL to ignore
1622  *	@compatible:	The string to match to one of the tokens in the device
1623  *			"compatible" list.
1624  *
1625  *	Returns a node pointer with refcount incremented, use
1626  *	of_node_put() on it when done.
1627  */
1628 struct device_node *of_find_compatible_node(struct device_node *from,
1629 	const char *type, const char *compatible)
1630 {
1631 	struct device_node *np;
1632 
1633 	read_lock(&devtree_lock);
1634 	np = from ? from->allnext : allnodes;
1635 	for (; np != 0; np = np->allnext) {
1636 		if (type != NULL
1637 		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1638 			continue;
1639 		if (device_is_compatible(np, compatible) && of_node_get(np))
1640 			break;
1641 	}
1642 	if (from)
1643 		of_node_put(from);
1644 	read_unlock(&devtree_lock);
1645 	return np;
1646 }
1647 EXPORT_SYMBOL(of_find_compatible_node);
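
/*
 * A minimal sketch of the intended iteration pattern for the refcounted
 * "find" API: passing the previous result back in makes the helper drop
 * that reference itself, so only the node we return early with (if any)
 * still needs an of_node_put() from the caller.  The type and compatible
 * strings below are only examples.
 */
static struct device_node *example_find_first_pci(void)
{
	struct device_node *np = NULL;

	while ((np = of_find_compatible_node(np, "pci", "pci")) != NULL) {
		if (np->n_addrs > 0)
			return np;	/* caller must of_node_put(np) */
	}
	return NULL;
}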
1648 
1649 /**
1650  *	of_find_node_by_path - Find a node matching a full OF path
1651  *	@path:	The full path to match
1652  *
1653  *	Returns a node pointer with refcount incremented, use
1654  *	of_node_put() on it when done.
1655  */
1656 struct device_node *of_find_node_by_path(const char *path)
1657 {
1658 	struct device_node *np = allnodes;
1659 
1660 	read_lock(&devtree_lock);
1661 	for (; np != 0; np = np->allnext) {
1662 		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1663 		    && of_node_get(np))
1664 			break;
1665 	}
1666 	read_unlock(&devtree_lock);
1667 	return np;
1668 }
1669 EXPORT_SYMBOL(of_find_node_by_path);
1670 
1671 /**
1672  *	of_find_node_by_phandle - Find a node given a phandle
1673  *	@handle:	phandle of the node to find
1674  *
1675  *	Returns a node pointer with refcount incremented, use
1676  *	of_node_put() on it when done.
1677  */
1678 struct device_node *of_find_node_by_phandle(phandle handle)
1679 {
1680 	struct device_node *np;
1681 
1682 	read_lock(&devtree_lock);
1683 	for (np = allnodes; np != 0; np = np->allnext)
1684 		if (np->linux_phandle == handle)
1685 			break;
1686 	if (np)
1687 		of_node_get(np);
1688 	read_unlock(&devtree_lock);
1689 	return np;
1690 }
1691 EXPORT_SYMBOL(of_find_node_by_phandle);
1692 
1693 /**
1694  *	of_find_all_nodes - Get next node in global list
1695  *	@prev:	Previous node or NULL to start iteration
1696  *		of_node_put() will be called on it
1697  *
1698  *	Returns a node pointer with refcount incremented, use
1699  *	of_node_put() on it when done.
1700  */
1701 struct device_node *of_find_all_nodes(struct device_node *prev)
1702 {
1703 	struct device_node *np;
1704 
1705 	read_lock(&devtree_lock);
1706 	np = prev ? prev->allnext : allnodes;
1707 	for (; np != 0; np = np->allnext)
1708 		if (of_node_get(np))
1709 			break;
1710 	if (prev)
1711 		of_node_put(prev);
1712 	read_unlock(&devtree_lock);
1713 	return np;
1714 }
1715 EXPORT_SYMBOL(of_find_all_nodes);
1716 
1717 /**
1718  *	of_get_parent - Get a node's parent if any
1719  *	@node:	Node to get parent
1720  *
1721  *	Returns a node pointer with refcount incremented, use
1722  *	of_node_put() on it when done.
1723  */
1724 struct device_node *of_get_parent(const struct device_node *node)
1725 {
1726 	struct device_node *np;
1727 
1728 	if (!node)
1729 		return NULL;
1730 
1731 	read_lock(&devtree_lock);
1732 	np = of_node_get(node->parent);
1733 	read_unlock(&devtree_lock);
1734 	return np;
1735 }
1736 EXPORT_SYMBOL(of_get_parent);
1737 
1738 /**
1739  *	of_get_next_child - Iterate over a node's children
1740  *	@node:	parent node
1741  *	@prev:	previous child of the parent node, or NULL to get first
1742  *
1743  *	Returns a node pointer with refcount incremented, use
1744  *	of_node_put() on it when done.
1745  */
1746 struct device_node *of_get_next_child(const struct device_node *node,
1747 	struct device_node *prev)
1748 {
1749 	struct device_node *next;
1750 
1751 	read_lock(&devtree_lock);
1752 	next = prev ? prev->sibling : node->child;
1753 	for (; next != 0; next = next->sibling)
1754 		if (of_node_get(next))
1755 			break;
1756 	if (prev)
1757 		of_node_put(prev);
1758 	read_unlock(&devtree_lock);
1759 	return next;
1760 }
1761 EXPORT_SYMBOL(of_get_next_child);
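
/*
 * A sketch of the usual child-iteration idiom built on the helper above:
 * each call drops the reference on the previous child and takes one on
 * the next, so a plain loop needs no explicit puts unless it breaks out
 * early.  The counting done here is only an illustration.
 */
static int example_count_children(struct device_node *parent)
{
	struct device_node *child = NULL;
	int n = 0;

	while ((child = of_get_next_child(parent, child)) != NULL)
		n++;
	return n;
}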
1762 
1763 /**
1764  *	of_node_get - Increment refcount of a node
1765  *	@node:	Node to inc refcount, NULL is supported to
1766  *		simplify writing of callers
1767  *
1768  *	Returns node.
1769  */
1770 struct device_node *of_node_get(struct device_node *node)
1771 {
1772 	if (node)
1773 		kref_get(&node->kref);
1774 	return node;
1775 }
1776 EXPORT_SYMBOL(of_node_get);
1777 
1778 static inline struct device_node * kref_to_device_node(struct kref *kref)
1779 {
1780 	return container_of(kref, struct device_node, kref);
1781 }
1782 
1783 /**
1784  *	of_node_release - release a dynamically allocated node
1785  *	@kref:  kref element of the node to be released
1786  *
1787  *	In of_node_put() this function is passed to kref_put()
1788  *	as the destructor.
1789  */
1790 static void of_node_release(struct kref *kref)
1791 {
1792 	struct device_node *node = kref_to_device_node(kref);
1793 	struct property *prop = node->properties;
1794 
1795 	if (!OF_IS_DYNAMIC(node))
1796 		return;
1797 	while (prop) {
1798 		struct property *next = prop->next;
1799 		kfree(prop->name);
1800 		kfree(prop->value);
1801 		kfree(prop);
1802 		prop = next;
1803 	}
1804 	kfree(node->intrs);
1805 	kfree(node->addrs);
1806 	kfree(node->full_name);
1807 	kfree(node->data);
1808 	kfree(node);
1809 }
1810 
1811 /**
1812  *	of_node_put - Decrement refcount of a node
1813  *	@node:	Node to dec refcount, NULL is supported to
1814  *		simplify writing of callers
1815  *
1816  */
1817 void of_node_put(struct device_node *node)
1818 {
1819 	if (node)
1820 		kref_put(&node->kref, of_node_release);
1821 }
1822 EXPORT_SYMBOL(of_node_put);
1823 
1824 /*
1825  * Plug a device node into the tree and global list.
1826  */
1827 void of_attach_node(struct device_node *np)
1828 {
1829 	write_lock(&devtree_lock);
1830 	np->sibling = np->parent->child;
1831 	np->allnext = allnodes;
1832 	np->parent->child = np;
1833 	allnodes = np;
1834 	write_unlock(&devtree_lock);
1835 }
1836 
1837 /*
1838  * "Unplug" a node from the device tree.  The caller must hold
1839  * a reference to the node.  The memory associated with the node
1840  * is not freed until its refcount goes to zero.
1841  */
1842 void of_detach_node(const struct device_node *np)
1843 {
1844 	struct device_node *parent;
1845 
1846 	write_lock(&devtree_lock);
1847 
1848 	parent = np->parent;
1849 
1850 	if (allnodes == np)
1851 		allnodes = np->allnext;
1852 	else {
1853 		struct device_node *prev;
1854 		for (prev = allnodes;
1855 		     prev->allnext != np;
1856 		     prev = prev->allnext)
1857 			;
1858 		prev->allnext = np->allnext;
1859 	}
1860 
1861 	if (parent->child == np)
1862 		parent->child = np->sibling;
1863 	else {
1864 		struct device_node *prevsib;
1865 		for (prevsib = np->parent->child;
1866 		     prevsib->sibling != np;
1867 		     prevsib = prevsib->sibling)
1868 			;
1869 		prevsib->sibling = np->sibling;
1870 	}
1871 
1872 	write_unlock(&devtree_lock);
1873 }
1874 
1875 #ifdef CONFIG_PPC_PSERIES
1876 /*
1877  * Fix up the uninitialized fields in a new device node:
1878  * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1879  *
1880  * A lot of boot-time code is duplicated here, because functions such
1881  * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1882  * slab allocator.
1883  *
1884  * This should probably be split up into smaller chunks.
1885  */
1886 
1887 static int of_finish_dynamic_node(struct device_node *node,
1888 				  unsigned long *unused1, int unused2,
1889 				  int unused3, int unused4)
1890 {
1891 	struct device_node *parent = of_get_parent(node);
1892 	int err = 0;
1893 	phandle *ibm_phandle;
1894 
1895 	node->name = get_property(node, "name", NULL);
1896 	node->type = get_property(node, "device_type", NULL);
1897 
1898 	if (!parent) {
1899 		err = -ENODEV;
1900 		goto out;
1901 	}
1902 
1903 	/* We don't support this function on PowerMac, at least
1904 	 * not yet
1905 	 */
1906 	if (_machine == PLATFORM_POWERMAC) {
1907 		err = -ENODEV;
1908 		goto out;	/* drop the reference taken by of_get_parent() */
1909 	}
1908 
1909 	/* fix up new node's linux_phandle field */
1910 	if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL)))
1911 		node->linux_phandle = *ibm_phandle;
1912 
1913 out:
1914 	of_node_put(parent);
1915 	return err;
1916 }
1917 
1918 static int prom_reconfig_notifier(struct notifier_block *nb,
1919 				  unsigned long action, void *node)
1920 {
1921 	int err;
1922 
1923 	switch (action) {
1924 	case PSERIES_RECONFIG_ADD:
1925 		err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
1926 		if (err < 0) {
1927 			printk(KERN_ERR "finish_node returned %d\n", err);
1928 			err = NOTIFY_BAD;
1929 		}
1930 		break;
1931 	default:
1932 		err = NOTIFY_DONE;
1933 		break;
1934 	}
1935 	return err;
1936 }
1937 
1938 static struct notifier_block prom_reconfig_nb = {
1939 	.notifier_call = prom_reconfig_notifier,
1940 	.priority = 10, /* This one needs to run first */
1941 };
1942 
1943 static int __init prom_reconfig_setup(void)
1944 {
1945 	return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
1946 }
1947 __initcall(prom_reconfig_setup);
1948 #endif
1949 
1950 /*
1951  * Find a property with a given name for a given node
1952  * and return the value.
1953  */
1954 unsigned char *get_property(struct device_node *np, const char *name,
1955 			    int *lenp)
1956 {
1957 	struct property *pp;
1958 
1959 	for (pp = np->properties; pp != 0; pp = pp->next)
1960 		if (strcmp(pp->name, name) == 0) {
1961 			if (lenp != 0)
1962 				*lenp = pp->length;
1963 			return pp->value;
1964 		}
1965 	return NULL;
1966 }
1967 EXPORT_SYMBOL(get_property);
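
/*
 * A short usage sketch: get_property() returns a pointer straight into
 * the node's property list (no copy), and the optional length output is
 * how callers distinguish, say, a single cell from an array.  The helper
 * and property name are hypothetical.
 */
static int example_get_one_cell(struct device_node *np, const char *name,
				unsigned int *val)
{
	int len;
	unsigned int *prop = (unsigned int *) get_property(np, name, &len);

	if (prop == NULL || len < (int)sizeof(*prop))
		return -ENODEV;
	*val = *prop;
	return 0;
}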
1968 
1969 /*
1970  * Add a property to a node
1971  */
1972 int prom_add_property(struct device_node* np, struct property* prop)
1973 {
1974 	struct property **next;
1975 
1976 	prop->next = NULL;
1977 	write_lock(&devtree_lock);
1978 	next = &np->properties;
1979 	while (*next) {
1980 		if (strcmp(prop->name, (*next)->name) == 0) {
1981 			/* duplicate ! don't insert it */
1982 			write_unlock(&devtree_lock);
1983 			return -1;
1984 		}
1985 		next = &(*next)->next;
1986 	}
1987 	*next = prop;
1988 	write_unlock(&devtree_lock);
1989 
1990 #ifdef CONFIG_PROC_DEVICETREE
1991 	/* try to add to proc as well if it was initialized */
1992 	if (np->pde)
1993 		proc_device_tree_add_prop(np->pde, prop);
1994 #endif /* CONFIG_PROC_DEVICETREE */
1995 
1996 	return 0;
1997 }
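
/*
 * A minimal sketch of building and attaching a property with the helper
 * above.  The name and value are purely illustrative; note that for
 * dynamically allocated nodes the property, its name and its value must
 * all come from kmalloc(), since of_node_release() will kfree() them.
 * Static storage is used here so the property stays valid after it has
 * been linked into the node.
 */
static int example_add_marker_prop(struct device_node *np)
{
	static struct property prop;
	static u32 value = 1;

	prop.name = "linux,example-marker";	/* illustrative name */
	prop.value = &value;
	prop.length = sizeof(value);
	return prom_add_property(np, &prop);	/* -1 if it already exists */
}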
1998 
1999 /* I quickly hacked that one, check against spec ! */
2000 static inline unsigned long
2001 bus_space_to_resource_flags(unsigned int bus_space)
2002 {
2003 	u8 space = (bus_space >> 24) & 0xf;
2004 	if (space == 0)
2005 		space = 0x02;
2006 	if (space == 0x02)
2007 		return IORESOURCE_MEM;
2008 	else if (space == 0x01)
2009 		return IORESOURCE_IO;
2010 	else {
2011 		printk(KERN_WARNING "prom.c: bus_space_to_resource_flags(), space: %x\n",
2012 		    	bus_space);
2013 		return 0;
2014 	}
2015 }
2016 
2017 #ifdef CONFIG_PCI
2018 static struct resource *find_parent_pci_resource(struct pci_dev* pdev,
2019 						 struct address_range *range)
2020 {
2021 	unsigned long mask;
2022 	int i;
2023 
2024 	/* Check this one */
2025 	mask = bus_space_to_resource_flags(range->space);
2026 	for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2027 		if ((pdev->resource[i].flags & mask) == mask &&
2028 			pdev->resource[i].start <= range->address &&
2029 			pdev->resource[i].end > range->address) {
2030 				if ((range->address + range->size - 1) > pdev->resource[i].end) {
2031 					/* Add better message */
2032 					printk(KERN_WARNING "PCI/OF resource overlap !\n");
2033 					return NULL;
2034 				}
2035 				break;
2036 			}
2037 	}
2038 	if (i == DEVICE_COUNT_RESOURCE)
2039 		return NULL;
2040 	return &pdev->resource[i];
2041 }
2042 
2043 /*
2044  * Request an OF device resource. Currently handles children of PCI devices,
2045  * or other nodes attached to the root node. Ultimately, put some
2046  * link to resources in the OF node.
2047  */
2048 struct resource *request_OF_resource(struct device_node* node, int index,
2049 				     const char* name_postfix)
2050 {
2051 	struct pci_dev* pcidev;
2052 	u8 pci_bus, pci_devfn;
2053 	unsigned long iomask;
2054 	struct device_node* nd;
2055 	struct resource* parent;
2056 	struct resource *res = NULL;
2057 	int nlen, plen;
2058 
2059 	if (index >= node->n_addrs)
2060 		goto fail;
2061 
2062 	/* Sanity check on bus space */
2063 	iomask = bus_space_to_resource_flags(node->addrs[index].space);
2064 	if (iomask & IORESOURCE_MEM)
2065 		parent = &iomem_resource;
2066 	else if (iomask & IORESOURCE_IO)
2067 		parent = &ioport_resource;
2068 	else
2069 		goto fail;
2070 
2071 	/* Find a PCI parent if any */
2072 	nd = node;
2073 	pcidev = NULL;
2074 	while (nd) {
2075 		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2076 			pcidev = pci_find_slot(pci_bus, pci_devfn);
2077 		if (pcidev) break;
2078 		nd = nd->parent;
2079 	}
2080 	if (pcidev)
2081 		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2082 	if (!parent) {
2083 		printk(KERN_WARNING "request_OF_resource(%s), parent not found\n",
2084 			node->name);
2085 		goto fail;
2086 	}
2087 
2088 	res = __request_region(parent, node->addrs[index].address,
2089 			       node->addrs[index].size, NULL);
2090 	if (!res)
2091 		goto fail;
2092 	nlen = strlen(node->name);
2093 	plen = name_postfix ? strlen(name_postfix) : 0;
2094 	res->name = (const char *)kmalloc(nlen+plen+1, GFP_KERNEL);
2095 	if (res->name) {
2096 		strcpy((char *)res->name, node->name);
2097 		if (plen)
2098 			strcpy((char *)res->name+nlen, name_postfix);
2099 	}
2100 	return res;
2101 fail:
2102 	return NULL;
2103 }
2104 EXPORT_SYMBOL(request_OF_resource);
2105 
2106 int release_OF_resource(struct device_node *node, int index)
2107 {
2108 	struct pci_dev* pcidev;
2109 	u8 pci_bus, pci_devfn;
2110 	unsigned long iomask, start, end;
2111 	struct device_node* nd;
2112 	struct resource* parent;
2113 	struct resource *res = NULL;
2114 
2115 	if (index >= node->n_addrs)
2116 		return -EINVAL;
2117 
2118 	/* Sanity check on bus space */
2119 	iomask = bus_space_to_resource_flags(node->addrs[index].space);
2120 	if (iomask & IORESOURCE_MEM)
2121 		parent = &iomem_resource;
2122 	else if (iomask & IORESOURCE_IO)
2123 		parent = &ioport_resource;
2124 	else
2125 		return -EINVAL;
2126 
2127 	/* Find a PCI parent if any */
2128 	nd = node;
2129 	pcidev = NULL;
2130 	while(nd) {
2131 		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2132 			pcidev = pci_find_slot(pci_bus, pci_devfn);
2133 		if (pcidev) break;
2134 		nd = nd->parent;
2135 	}
2136 	if (pcidev)
2137 		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2138 	if (!parent) {
2139 		printk(KERN_WARNING "release_OF_resource(%s), parent not found\n",
2140 			node->name);
2141 		return -ENODEV;
2142 	}
2143 
2144 	/* Find us in the parent and its children */
2145 	res = parent->child;
2146 	start = node->addrs[index].address;
2147 	end = start + node->addrs[index].size - 1;
2148 	while (res) {
2149 		if (res->start == start && res->end == end &&
2150 		    (res->flags & IORESOURCE_BUSY))
2151 		    	break;
2152 		if (res->start <= start && res->end >= end)
2153 			res = res->child;
2154 		else
2155 			res = res->sibling;
2156 	}
2157 	if (!res)
2158 		return -ENODEV;
2159 
2160 	if (res->name) {
2161 		kfree(res->name);
2162 		res->name = NULL;
2163 	}
2164 	release_resource(res);
2165 	kfree(res);
2166 
2167 	return 0;
2168 }
2169 EXPORT_SYMBOL(release_OF_resource);
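
/*
 * A sketch of the expected pairing of request_OF_resource() and
 * release_OF_resource() above: the index refers to node->addrs[] and the
 * postfix is only appended to the resource name.  The caller and strings
 * here are hypothetical.
 */
static int example_claim_first_range(struct device_node *np)
{
	struct resource *r = request_OF_resource(np, 0, " (example)");

	if (r == NULL)
		return -EBUSY;
	/* ... use the range ... */
	return release_OF_resource(np, 0);
}
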
2170 #endif /* CONFIG_PCI */
2171