xref: /openbmc/linux/arch/powerpc/kernel/prom.c (revision cf00a8d1)
1 /*
2  * Procedures for creating, accessing and interpreting the device tree.
3  *
4  * Paul Mackerras	August 1996.
5  * Copyright (C) 1996-2005 Paul Mackerras.
6  *
7  *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8  *    {engebret|bergner}@us.ibm.com
9  *
10  *      This program is free software; you can redistribute it and/or
11  *      modify it under the terms of the GNU General Public License
12  *      as published by the Free Software Foundation; either version
13  *      2 of the License, or (at your option) any later version.
14  */
15 
16 #undef DEBUG
17 
18 #include <stdarg.h>
19 #include <linux/config.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/init.h>
23 #include <linux/threads.h>
24 #include <linux/spinlock.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/stringify.h>
28 #include <linux/delay.h>
29 #include <linux/initrd.h>
30 #include <linux/bitops.h>
31 #include <linux/module.h>
32 
33 #include <asm/prom.h>
34 #include <asm/rtas.h>
35 #include <asm/lmb.h>
36 #include <asm/page.h>
37 #include <asm/processor.h>
38 #include <asm/irq.h>
39 #include <asm/io.h>
40 #include <asm/smp.h>
41 #include <asm/system.h>
42 #include <asm/mmu.h>
43 #include <asm/pgtable.h>
44 #include <asm/pci.h>
45 #include <asm/iommu.h>
46 #include <asm/btext.h>
47 #include <asm/sections.h>
48 #include <asm/machdep.h>
49 #include <asm/pSeries_reconfig.h>
50 #include <asm/pci-bridge.h>
51 #ifdef CONFIG_PPC64
52 #include <asm/systemcfg.h>
53 #endif
54 
55 #ifdef DEBUG
56 #define DBG(fmt...) printk(KERN_ERR fmt)
57 #else
58 #define DBG(fmt...)
59 #endif
60 
61 struct pci_reg_property {
62 	struct pci_address addr;
63 	u32 size_hi;
64 	u32 size_lo;
65 };
66 
67 struct isa_reg_property {
68 	u32 space;
69 	u32 address;
70 	u32 size;
71 };
72 
73 
74 typedef int interpret_func(struct device_node *, unsigned long *,
75 			   int, int, int);
76 
77 extern struct rtas_t rtas;
78 extern struct lmb lmb;
79 extern unsigned long klimit;
80 
81 static int __initdata dt_root_addr_cells;
82 static int __initdata dt_root_size_cells;
83 
84 #ifdef CONFIG_PPC64
85 static int __initdata iommu_is_off;
86 int __initdata iommu_force_on;
87 unsigned long tce_alloc_start, tce_alloc_end;
88 #endif
89 
90 typedef u32 cell_t;
91 
92 #if 0
93 static struct boot_param_header *initial_boot_params __initdata;
94 #else
95 struct boot_param_header *initial_boot_params;
96 #endif
97 
98 static struct device_node *allnodes = NULL;
99 
100 /* Use this lock when traversing the tree through the allnext, child,
101  * sibling, or parent members of struct device_node.
102  */
103 static DEFINE_RWLOCK(devtree_lock);
104 
105 /* export that to outside world */
106 struct device_node *of_chosen;
107 
108 struct device_node *dflt_interrupt_controller;
109 int num_interrupt_controllers;
110 
111 /*
112  * Wrapper for allocating memory for various data that needs to be
113  * attached to device nodes as they are processed at boot or when
114  * added to the device tree later (e.g. DLPAR).  At boot there is
115  * already a region reserved so we just increment *mem_start by size;
116  * otherwise we call kmalloc.
117  */
118 static void * prom_alloc(unsigned long size, unsigned long *mem_start)
119 {
120 	unsigned long tmp;
121 
122 	if (!mem_start)
123 		return kmalloc(size, GFP_KERNEL);
124 
125 	tmp = *mem_start;
126 	*mem_start += size;
127 	return (void *)tmp;
128 }
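
/*
 * Usage sketch (illustrative only): during the measuring pass mem_start
 * holds a running size counter, so prom_alloc() merely advances it; during
 * the real pass it holds a virtual address inside a region already reserved
 * from the LMB.  After boot (mem_start == NULL) it falls back to kmalloc():
 *
 *	unsigned long mem = 16;			// measuring-pass counter
 *	(void) prom_alloc(32, &mem);		// mem is now 48
 *	void *p = prom_alloc(32, NULL);		// run time: kmalloc(32, GFP_KERNEL)
 */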
129 
130 /*
131  * Find the device_node with a given phandle.
132  */
133 static struct device_node * find_phandle(phandle ph)
134 {
135 	struct device_node *np;
136 
137 	for (np = allnodes; np != 0; np = np->allnext)
138 		if (np->linux_phandle == ph)
139 			return np;
140 	return NULL;
141 }
142 
143 /*
144  * Find the interrupt parent of a node.
145  */
146 static struct device_node * __devinit intr_parent(struct device_node *p)
147 {
148 	phandle *parp;
149 
150 	parp = (phandle *) get_property(p, "interrupt-parent", NULL);
151 	if (parp == NULL)
152 		return p->parent;
153 	p = find_phandle(*parp);
154 	if (p != NULL)
155 		return p;
156 	/*
157 	 * On a powermac booted with BootX, we don't get to know the
158 	 * phandles for any nodes, so find_phandle will return NULL.
159 	 * Fortunately these machines only have one interrupt controller
160 	 * so there isn't in fact any ambiguity.  -- paulus
161 	 */
162 	if (num_interrupt_controllers == 1)
163 		p = dflt_interrupt_controller;
164 	return p;
165 }
166 
167 /*
168  * Find out the size of each entry of the interrupts property
169  * for a node.
170  */
171 int __devinit prom_n_intr_cells(struct device_node *np)
172 {
173 	struct device_node *p;
174 	unsigned int *icp;
175 
176 	for (p = np; (p = intr_parent(p)) != NULL; ) {
177 		icp = (unsigned int *)
178 			get_property(p, "#interrupt-cells", NULL);
179 		if (icp != NULL)
180 			return *icp;
181 		if (get_property(p, "interrupt-controller", NULL) != NULL
182 		    || get_property(p, "interrupt-map", NULL) != NULL) {
183 			printk("oops, node %s doesn't have #interrupt-cells\n",
184 			       p->full_name);
185 			return 1;
186 		}
187 	}
188 #ifdef DEBUG_IRQ
189 	printk("prom_n_intr_cells failed for %s\n", np->full_name);
190 #endif
191 	return 1;
192 }
193 
194 /*
195  * Map an interrupt from a device up to the platform interrupt
196  * descriptor.
197  */
198 static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
199 				   struct device_node *np, unsigned int *ints,
200 				   int nintrc)
201 {
202 	struct device_node *p, *ipar;
203 	unsigned int *imap, *imask, *ip;
204 	int i, imaplen, match;
205 	int newintrc = 0, newaddrc = 0;
206 	unsigned int *reg;
207 	int naddrc;
208 
209 	reg = (unsigned int *) get_property(np, "reg", NULL);
210 	naddrc = prom_n_addr_cells(np);
211 	p = intr_parent(np);
212 	while (p != NULL) {
213 		if (get_property(p, "interrupt-controller", NULL) != NULL)
214 			/* this node is an interrupt controller, stop here */
215 			break;
216 		imap = (unsigned int *)
217 			get_property(p, "interrupt-map", &imaplen);
218 		if (imap == NULL) {
219 			p = intr_parent(p);
220 			continue;
221 		}
222 		imask = (unsigned int *)
223 			get_property(p, "interrupt-map-mask", NULL);
224 		if (imask == NULL) {
225 			printk("oops, %s has interrupt-map but no mask\n",
226 			       p->full_name);
227 			return 0;
228 		}
229 		imaplen /= sizeof(unsigned int);
230 		match = 0;
231 		ipar = NULL;
232 		while (imaplen > 0 && !match) {
233 			/* check the child-interrupt field */
234 			match = 1;
235 			for (i = 0; i < naddrc && match; ++i)
236 				match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
237 			for (; i < naddrc + nintrc && match; ++i)
238 				match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
239 			imap += naddrc + nintrc;
240 			imaplen -= naddrc + nintrc;
241 			/* grab the interrupt parent */
242 			ipar = find_phandle((phandle) *imap++);
243 			--imaplen;
244 			if (ipar == NULL && num_interrupt_controllers == 1)
245 				/* cope with BootX not giving us phandles */
246 				ipar = dflt_interrupt_controller;
247 			if (ipar == NULL) {
248 				printk("oops, no int parent %x in map of %s\n",
249 				       imap[-1], p->full_name);
250 				return 0;
251 			}
252 			/* find the parent's # addr and intr cells */
253 			ip = (unsigned int *)
254 				get_property(ipar, "#interrupt-cells", NULL);
255 			if (ip == NULL) {
256 				printk("oops, no #interrupt-cells on %s\n",
257 				       ipar->full_name);
258 				return 0;
259 			}
260 			newintrc = *ip;
261 			ip = (unsigned int *)
262 				get_property(ipar, "#address-cells", NULL);
263 			newaddrc = (ip == NULL)? 0: *ip;
264 			imap += newaddrc + newintrc;
265 			imaplen -= newaddrc + newintrc;
266 		}
267 		if (imaplen < 0) {
268 			printk("oops, error decoding int-map on %s, len=%d\n",
269 			       p->full_name, imaplen);
270 			return 0;
271 		}
272 		if (!match) {
273 #ifdef DEBUG_IRQ
274 			printk("oops, no match in %s int-map for %s\n",
275 			       p->full_name, np->full_name);
276 #endif
277 			return 0;
278 		}
279 		p = ipar;
280 		naddrc = newaddrc;
281 		nintrc = newintrc;
282 		ints = imap - nintrc;
283 		reg = ints - naddrc;
284 	}
285 	if (p == NULL) {
286 #ifdef DEBUG_IRQ
287 		printk("hmmm, int tree for %s doesn't have ctrler\n",
288 		       np->full_name);
289 #endif
290 		return 0;
291 	}
292 	*irq = ints;
293 	*ictrler = p;
294 	return nintrc;
295 }
296 
297 static unsigned char map_isa_senses[4] = {
298 	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
299 	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
300 	IRQ_SENSE_EDGE  | IRQ_POLARITY_NEGATIVE,
301 	IRQ_SENSE_EDGE  | IRQ_POLARITY_POSITIVE
302 };
303 
304 static unsigned char map_mpic_senses[4] = {
305 	IRQ_SENSE_EDGE  | IRQ_POLARITY_POSITIVE,
306 	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
307 	/* 2 seems to be used for the 8259 cascade... */
308 	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
309 	IRQ_SENSE_EDGE  | IRQ_POLARITY_NEGATIVE,
310 };
311 
312 static int __devinit finish_node_interrupts(struct device_node *np,
313 					    unsigned long *mem_start,
314 					    int measure_only)
315 {
316 	unsigned int *ints;
317 	int intlen, intrcells, intrcount;
318 	int i, j, n, sense;
319 	unsigned int *irq, virq;
320 	struct device_node *ic;
321 
322 	if (num_interrupt_controllers == 0) {
323 		/*
324 		 * Old machines just have a list of interrupt numbers
325 		 * and no interrupt-controller nodes.
326 		 */
327 		ints = (unsigned int *) get_property(np, "AAPL,interrupts",
328 						     &intlen);
329 		/* XXX old interpret_pci_props looked in parent too */
330 		/* XXX old interpret_macio_props looked for interrupts
331 		   before AAPL,interrupts */
332 		if (ints == NULL)
333 			ints = (unsigned int *) get_property(np, "interrupts",
334 							     &intlen);
335 		if (ints == NULL)
336 			return 0;
337 
338 		np->n_intrs = intlen / sizeof(unsigned int);
339 		np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
340 				       mem_start);
341 		if (!np->intrs)
342 			return -ENOMEM;
343 		if (measure_only)
344 			return 0;
345 
346 		for (i = 0; i < np->n_intrs; ++i) {
347 			np->intrs[i].line = *ints++;
348 			np->intrs[i].sense = IRQ_SENSE_LEVEL
349 				| IRQ_POLARITY_NEGATIVE;
350 		}
351 		return 0;
352 	}
353 
354 	ints = (unsigned int *) get_property(np, "interrupts", &intlen);
355 	if (ints == NULL)
356 		return 0;
357 	intrcells = prom_n_intr_cells(np);
358 	intlen /= intrcells * sizeof(unsigned int);
359 
360 	np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
361 	if (!np->intrs)
362 		return -ENOMEM;
363 
364 	if (measure_only)
365 		return 0;
366 
367 	intrcount = 0;
368 	for (i = 0; i < intlen; ++i, ints += intrcells) {
369 		n = map_interrupt(&irq, &ic, np, ints, intrcells);
370 		if (n <= 0)
371 			continue;
372 
373 		/* don't map IRQ numbers under a cascaded 8259 controller */
374 		if (ic && device_is_compatible(ic, "chrp,iic")) {
375 			np->intrs[intrcount].line = irq[0];
376 			sense = (n > 1)? (irq[1] & 3): 3;
377 			np->intrs[intrcount].sense = map_isa_senses[sense];
378 		} else {
379 			virq = virt_irq_create_mapping(irq[0]);
380 #ifdef CONFIG_PPC64
381 			if (virq == NO_IRQ) {
382 				printk(KERN_CRIT "Could not allocate interrupt"
383 				       " number for %s\n", np->full_name);
384 				continue;
385 			}
386 #endif
387 			np->intrs[intrcount].line = irq_offset_up(virq);
388 			sense = (n > 1)? (irq[1] & 3): 1;
389 			np->intrs[intrcount].sense = map_mpic_senses[sense];
390 		}
391 
392 #ifdef CONFIG_PPC64
393 		/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
394 		if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) {
395 			char *name = get_property(ic->parent, "name", NULL);
396 			if (name && !strcmp(name, "u3"))
397 				np->intrs[intrcount].line += 128;
398 			else if (!(name && !strcmp(name, "mac-io")))
399 				/* ignore other cascaded controllers, such as
400 				   the k2-sata-root */
401 				break;
402 		}
403 #endif
404 		if (n > 2) {
405 			printk("hmmm, got %d intr cells for %s:", n,
406 			       np->full_name);
407 			for (j = 0; j < n; ++j)
408 				printk(" %d", irq[j]);
409 			printk("\n");
410 		}
411 		++intrcount;
412 	}
413 	np->n_intrs = intrcount;
414 
415 	return 0;
416 }
417 
418 static int __devinit interpret_pci_props(struct device_node *np,
419 					 unsigned long *mem_start,
420 					 int naddrc, int nsizec,
421 					 int measure_only)
422 {
423 	struct address_range *adr;
424 	struct pci_reg_property *pci_addrs;
425 	int i, l, n_addrs;
426 
427 	pci_addrs = (struct pci_reg_property *)
428 		get_property(np, "assigned-addresses", &l);
429 	if (!pci_addrs)
430 		return 0;
431 
432 	n_addrs = l / sizeof(*pci_addrs);
433 
434 	adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
435 	if (!adr)
436 		return -ENOMEM;
437 
438  	if (measure_only)
439  		return 0;
440 
441  	np->addrs = adr;
442  	np->n_addrs = n_addrs;
443 
444  	for (i = 0; i < n_addrs; i++) {
445  		adr[i].space = pci_addrs[i].addr.a_hi;
446  		adr[i].address = pci_addrs[i].addr.a_lo |
447 			((u64)pci_addrs[i].addr.a_mid << 32);
448  		adr[i].size = pci_addrs[i].size_lo;
449 	}
450 
451 	return 0;
452 }
453 
454 static int __init interpret_dbdma_props(struct device_node *np,
455 					unsigned long *mem_start,
456 					int naddrc, int nsizec,
457 					int measure_only)
458 {
459 	struct reg_property32 *rp;
460 	struct address_range *adr;
461 	unsigned long base_address;
462 	int i, l;
463 	struct device_node *db;
464 
465 	base_address = 0;
466 	if (!measure_only) {
467 		for (db = np->parent; db != NULL; db = db->parent) {
468 			if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
469 				base_address = db->addrs[0].address;
470 				break;
471 			}
472 		}
473 	}
474 
475 	rp = (struct reg_property32 *) get_property(np, "reg", &l);
476 	if (rp != 0 && l >= sizeof(struct reg_property32)) {
477 		i = 0;
478 		adr = (struct address_range *) (*mem_start);
479 		while ((l -= sizeof(struct reg_property32)) >= 0) {
480 			if (!measure_only) {
481 				adr[i].space = 2;
482 				adr[i].address = rp[i].address + base_address;
483 				adr[i].size = rp[i].size;
484 			}
485 			++i;
486 		}
487 		np->addrs = adr;
488 		np->n_addrs = i;
489 		(*mem_start) += i * sizeof(struct address_range);
490 	}
491 
492 	return 0;
493 }
494 
495 static int __init interpret_macio_props(struct device_node *np,
496 					unsigned long *mem_start,
497 					int naddrc, int nsizec,
498 					int measure_only)
499 {
500 	struct reg_property32 *rp;
501 	struct address_range *adr;
502 	unsigned long base_address;
503 	int i, l;
504 	struct device_node *db;
505 
506 	base_address = 0;
507 	if (!measure_only) {
508 		for (db = np->parent; db != NULL; db = db->parent) {
509 			if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
510 				base_address = db->addrs[0].address;
511 				break;
512 			}
513 		}
514 	}
515 
516 	rp = (struct reg_property32 *) get_property(np, "reg", &l);
517 	if (rp != 0 && l >= sizeof(struct reg_property32)) {
518 		i = 0;
519 		adr = (struct address_range *) (*mem_start);
520 		while ((l -= sizeof(struct reg_property32)) >= 0) {
521 			if (!measure_only) {
522 				adr[i].space = 2;
523 				adr[i].address = rp[i].address + base_address;
524 				adr[i].size = rp[i].size;
525 			}
526 			++i;
527 		}
528 		np->addrs = adr;
529 		np->n_addrs = i;
530 		(*mem_start) += i * sizeof(struct address_range);
531 	}
532 
533 	return 0;
534 }
535 
536 static int __init interpret_isa_props(struct device_node *np,
537 				      unsigned long *mem_start,
538 				      int naddrc, int nsizec,
539 				      int measure_only)
540 {
541 	struct isa_reg_property *rp;
542 	struct address_range *adr;
543 	int i, l;
544 
545 	rp = (struct isa_reg_property *) get_property(np, "reg", &l);
546 	if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
547 		i = 0;
548 		adr = (struct address_range *) (*mem_start);
549 		while ((l -= sizeof(struct isa_reg_property)) >= 0) {
550 			if (!measure_only) {
551 				adr[i].space = rp[i].space;
552 				adr[i].address = rp[i].address;
553 				adr[i].size = rp[i].size;
554 			}
555 			++i;
556 		}
557 		np->addrs = adr;
558 		np->n_addrs = i;
559 		(*mem_start) += i * sizeof(struct address_range);
560 	}
561 
562 	return 0;
563 }
564 
565 static int __init interpret_root_props(struct device_node *np,
566 				       unsigned long *mem_start,
567 				       int naddrc, int nsizec,
568 				       int measure_only)
569 {
570 	struct address_range *adr;
571 	int i, l;
572 	unsigned int *rp;
573 	int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
574 
575 	rp = (unsigned int *) get_property(np, "reg", &l);
576 	if (rp != 0 && l >= rpsize) {
577 		i = 0;
578 		adr = (struct address_range *) (*mem_start);
579 		while ((l -= rpsize) >= 0) {
580 			if (!measure_only) {
581 				adr[i].space = 0;
582 				adr[i].address = rp[naddrc - 1];
583 				adr[i].size = rp[naddrc + nsizec - 1];
584 			}
585 			++i;
586 			rp += naddrc + nsizec;
587 		}
588 		np->addrs = adr;
589 		np->n_addrs = i;
590 		(*mem_start) += i * sizeof(struct address_range);
591 	}
592 
593 	return 0;
594 }
595 
596 static int __devinit finish_node(struct device_node *np,
597 				 unsigned long *mem_start,
598 				 interpret_func *ifunc,
599 				 int naddrc, int nsizec,
600 				 int measure_only)
601 {
602 	struct device_node *child;
603 	int *ip, rc = 0;
604 
605 	/* get the device addresses and interrupts */
606 	if (ifunc != NULL)
607 		rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
608 	if (rc)
609 		goto out;
610 
611 	rc = finish_node_interrupts(np, mem_start, measure_only);
612 	if (rc)
613 		goto out;
614 
615 	/* Look for #address-cells and #size-cells properties. */
616 	ip = (int *) get_property(np, "#address-cells", NULL);
617 	if (ip != NULL)
618 		naddrc = *ip;
619 	ip = (int *) get_property(np, "#size-cells", NULL);
620 	if (ip != NULL)
621 		nsizec = *ip;
622 
623 	if (!strcmp(np->name, "device-tree") || np->parent == NULL)
624 		ifunc = interpret_root_props;
625 	else if (np->type == 0)
626 		ifunc = NULL;
627 	else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
628 		ifunc = interpret_pci_props;
629 	else if (!strcmp(np->type, "dbdma"))
630 		ifunc = interpret_dbdma_props;
631 	else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
632 		ifunc = interpret_macio_props;
633 	else if (!strcmp(np->type, "isa"))
634 		ifunc = interpret_isa_props;
635 	else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
636 		ifunc = interpret_root_props;
637 	else if (!((ifunc == interpret_dbdma_props
638 		    || ifunc == interpret_macio_props)
639 		   && (!strcmp(np->type, "escc")
640 		       || !strcmp(np->type, "media-bay"))))
641 		ifunc = NULL;
642 
643 	for (child = np->child; child != NULL; child = child->sibling) {
644 		rc = finish_node(child, mem_start, ifunc,
645 				 naddrc, nsizec, measure_only);
646 		if (rc)
647 			goto out;
648 	}
649 out:
650 	return rc;
651 }
652 
653 static void __init scan_interrupt_controllers(void)
654 {
655 	struct device_node *np;
656 	int n = 0;
657 	char *name, *ic;
658 	int iclen;
659 
660 	for (np = allnodes; np != NULL; np = np->allnext) {
661 		ic = get_property(np, "interrupt-controller", &iclen);
662 		name = get_property(np, "name", NULL);
663 		/* checking iclen makes sure we don't get a false
664 		   match on /chosen.interrupt_controller */
665 		if ((name != NULL
666 		     && strcmp(name, "interrupt-controller") == 0)
667 		    || (ic != NULL && iclen == 0
668 			&& strcmp(name, "AppleKiwi"))) {
669 			if (n == 0)
670 				dflt_interrupt_controller = np;
671 			++n;
672 		}
673 	}
674 	num_interrupt_controllers = n;
675 }
676 
677 /**
678  * finish_device_tree is called once things are running normally
679  * (i.e. with text and data mapped to the address they were linked at).
680  * It traverses the device tree and fills in some of the additional
681  * fields in each node, like {n_}addrs and {n_}intrs; the virt interrupt
682  * mapping is also initialized at this point.
683  */
684 void __init finish_device_tree(void)
685 {
686 	unsigned long start, end, size = 0;
687 
688 	DBG(" -> finish_device_tree\n");
689 
690 #ifdef CONFIG_PPC64
691 	/* Initialize virtual IRQ map */
692 	virt_irq_init();
693 #endif
694 	scan_interrupt_controllers();
695 
696 	/*
697 	 * Finish the device-tree (pre-parsing some properties, etc.).
698 	 * We do this in two passes: one with "measure_only" set, which
699 	 * only measures the amount of memory needed; then we allocate
700 	 * that memory and call finish_node again.  However, we must be
701 	 * careful, as most routines will now fail when prom_alloc()
702 	 * returns 0, so we make sure our first pass doesn't start at 0.
703 	 * We pre-initialize size to 16 for that reason and then remove
704 	 * those additional 16 bytes afterwards.
705 	 */
706 	size = 16;
707 	finish_node(allnodes, &size, NULL, 0, 0, 1);
708 	size -= 16;
709 	end = start = (unsigned long) __va(lmb_alloc(size, 128));
710 	finish_node(allnodes, &end, NULL, 0, 0, 0);
711 	BUG_ON(end != start + size);
712 
713 	DBG(" <- finish_device_tree\n");
714 }
715 
716 static inline char *find_flat_dt_string(u32 offset)
717 {
718 	return ((char *)initial_boot_params) +
719 		initial_boot_params->off_dt_strings + offset;
720 }
721 
722 /**
723  * This function is used to scan the flattened device-tree; it is
724  * used to extract memory information at boot, before we can
725  * unflatten the tree.
726  */
727 static int __init scan_flat_dt(int (*it)(unsigned long node,
728 					 const char *uname, int depth,
729 					 void *data),
730 			       void *data)
731 {
732 	unsigned long p = ((unsigned long)initial_boot_params) +
733 		initial_boot_params->off_dt_struct;
734 	int rc = 0;
735 	int depth = -1;
736 
737 	do {
738 		u32 tag = *((u32 *)p);
739 		char *pathp;
740 
741 		p += 4;
742 		if (tag == OF_DT_END_NODE) {
743 			depth --;
744 			continue;
745 		}
746 		if (tag == OF_DT_NOP)
747 			continue;
748 		if (tag == OF_DT_END)
749 			break;
750 		if (tag == OF_DT_PROP) {
751 			u32 sz = *((u32 *)p);
752 			p += 8;
753 			if (initial_boot_params->version < 0x10)
754 				p = _ALIGN(p, sz >= 8 ? 8 : 4);
755 			p += sz;
756 			p = _ALIGN(p, 4);
757 			continue;
758 		}
759 		if (tag != OF_DT_BEGIN_NODE) {
760 			printk(KERN_WARNING "Invalid tag %x scanning flattened"
761 			       " device tree !\n", tag);
762 			return -EINVAL;
763 		}
764 		depth++;
765 		pathp = (char *)p;
766 		p = _ALIGN(p + strlen(pathp) + 1, 4);
767 		if ((*pathp) == '/') {
768 			char *lp, *np;
769 			for (lp = NULL, np = pathp; *np; np++)
770 				if ((*np) == '/')
771 					lp = np+1;
772 			if (lp != NULL)
773 				pathp = lp;
774 		}
775 		rc = it(p, pathp, depth, data);
776 		if (rc != 0)
777 			break;
778 	} while(1);
779 
780 	return rc;
781 }
782 
783 /**
784  * This function can be used within a scan_flat_dt callback to get
785  * access to properties.
786  */
787 static void* __init get_flat_dt_prop(unsigned long node, const char *name,
788 				     unsigned long *size)
789 {
790 	unsigned long p = node;
791 
792 	do {
793 		u32 tag = *((u32 *)p);
794 		u32 sz, noff;
795 		const char *nstr;
796 
797 		p += 4;
798 		if (tag == OF_DT_NOP)
799 			continue;
800 		if (tag != OF_DT_PROP)
801 			return NULL;
802 
803 		sz = *((u32 *)p);
804 		noff = *((u32 *)(p + 4));
805 		p += 8;
806 		if (initial_boot_params->version < 0x10)
807 			p = _ALIGN(p, sz >= 8 ? 8 : 4);
808 
809 		nstr = find_flat_dt_string(noff);
810 		if (nstr == NULL) {
811 			printk(KERN_WARNING "Can't find property index"
812 			       " name !\n");
813 			return NULL;
814 		}
815 		if (strcmp(name, nstr) == 0) {
816 			if (size)
817 				*size = sz;
818 			return (void *)p;
819 		}
820 		p += sz;
821 		p = _ALIGN(p, 4);
822 	} while(1);
823 }
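
/*
 * Illustrative sketch of a scan_flat_dt() callback using get_flat_dt_prop().
 * The callback and the "linux,example" property are hypothetical; the real
 * callbacks (early_init_dt_scan_*) appear further down in this file:
 *
 *	static int __init dt_scan_example(unsigned long node, const char *uname,
 *					  int depth, void *data)
 *	{
 *		u32 *prop;
 *
 *		if (depth != 0)
 *			return 0;
 *		prop = get_flat_dt_prop(node, "linux,example", NULL);
 *		if (prop != NULL)
 *			*(u32 *)data = *prop;
 *		return 1;	// a non-zero return stops the scan
 *	}
 *
 * which would be invoked as scan_flat_dt(dt_scan_example, &value);
 */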
824 
825 static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
826 				       unsigned long align)
827 {
828 	void *res;
829 
830 	*mem = _ALIGN(*mem, align);
831 	res = (void *)*mem;
832 	*mem += size;
833 
834 	return res;
835 }
836 
837 static unsigned long __init unflatten_dt_node(unsigned long mem,
838 					      unsigned long *p,
839 					      struct device_node *dad,
840 					      struct device_node ***allnextpp,
841 					      unsigned long fpsize)
842 {
843 	struct device_node *np;
844 	struct property *pp, **prev_pp = NULL;
845 	char *pathp;
846 	u32 tag;
847 	unsigned int l, allocl;
848 	int has_name = 0;
849 	int new_format = 0;
850 
851 	tag = *((u32 *)(*p));
852 	if (tag != OF_DT_BEGIN_NODE) {
853 		printk("Weird tag at start of node: %x\n", tag);
854 		return mem;
855 	}
856 	*p += 4;
857 	pathp = (char *)*p;
858 	l = allocl = strlen(pathp) + 1;
859 	*p = _ALIGN(*p + l, 4);
860 
861 	/* Version 0x10 has a more compact unit name here instead of the full
862 	 * path.  We accumulate the full path size using "fpsize"; we'll rebuild
863 	 * it later.  We detect this because the first character of the name is
864 	 * not '/'.
865 	 */
866 	if ((*pathp) != '/') {
867 		new_format = 1;
868 		if (fpsize == 0) {
869 			/* Root node: special case.  fpsize accounts for the path
870 			 * plus the terminating zero.  The root node only has '/',
871 			 * so fpsize should be 2, but we want to avoid first-level
872 			 * nodes having two '/', so we use fpsize 1 here.
873 			 */
874 			fpsize = 1;
875 			allocl = 2;
876 		} else {
877 			/* account for '/' and path size minus terminal 0
878 			 * already in 'l'
879 			 */
880 			fpsize += l;
881 			allocl = fpsize;
882 		}
883 	}
884 
885 
886 	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
887 				__alignof__(struct device_node));
888 	if (allnextpp) {
889 		memset(np, 0, sizeof(*np));
890 		np->full_name = ((char*)np) + sizeof(struct device_node);
891 		if (new_format) {
892 			char *p = np->full_name;
893 			/* rebuild full path for new format */
894 			if (dad && dad->parent) {
895 				strcpy(p, dad->full_name);
896 #ifdef DEBUG
897 				if ((strlen(p) + l + 1) != allocl) {
898 					DBG("%s: p: %d, l: %d, a: %d\n",
899 					    pathp, strlen(p), l, allocl);
900 				}
901 #endif
902 				p += strlen(p);
903 			}
904 			*(p++) = '/';
905 			memcpy(p, pathp, l);
906 		} else
907 			memcpy(np->full_name, pathp, l);
908 		prev_pp = &np->properties;
909 		**allnextpp = np;
910 		*allnextpp = &np->allnext;
911 		if (dad != NULL) {
912 			np->parent = dad;
913 			/* we temporarily use the next field as `last_child'*/
914 			if (dad->next == 0)
915 				dad->child = np;
916 			else
917 				dad->next->sibling = np;
918 			dad->next = np;
919 		}
920 		kref_init(&np->kref);
921 	}
922 	while(1) {
923 		u32 sz, noff;
924 		char *pname;
925 
926 		tag = *((u32 *)(*p));
927 		if (tag == OF_DT_NOP) {
928 			*p += 4;
929 			continue;
930 		}
931 		if (tag != OF_DT_PROP)
932 			break;
933 		*p += 4;
934 		sz = *((u32 *)(*p));
935 		noff = *((u32 *)((*p) + 4));
936 		*p += 8;
937 		if (initial_boot_params->version < 0x10)
938 			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);
939 
940 		pname = find_flat_dt_string(noff);
941 		if (pname == NULL) {
942 			printk("Can't find property name in list !\n");
943 			break;
944 		}
945 		if (strcmp(pname, "name") == 0)
946 			has_name = 1;
947 		l = strlen(pname) + 1;
948 		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
949 					__alignof__(struct property));
950 		if (allnextpp) {
951 			if (strcmp(pname, "linux,phandle") == 0) {
952 				np->node = *((u32 *)*p);
953 				if (np->linux_phandle == 0)
954 					np->linux_phandle = np->node;
955 			}
956 			if (strcmp(pname, "ibm,phandle") == 0)
957 				np->linux_phandle = *((u32 *)*p);
958 			pp->name = pname;
959 			pp->length = sz;
960 			pp->value = (void *)*p;
961 			*prev_pp = pp;
962 			prev_pp = &pp->next;
963 		}
964 		*p = _ALIGN((*p) + sz, 4);
965 	}
966 	/* With version 0x10 we may not have the "name" property; recreate
967 	 * it here from the unit name if absent.
968 	 */
969 	if (!has_name) {
970 		char *p = pathp, *ps = pathp, *pa = NULL;
971 		int sz;
972 
973 		while (*p) {
974 			if ((*p) == '@')
975 				pa = p;
976 			if ((*p) == '/')
977 				ps = p + 1;
978 			p++;
979 		}
980 		if (pa < ps)
981 			pa = p;
982 		sz = (pa - ps) + 1;
983 		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
984 					__alignof__(struct property));
985 		if (allnextpp) {
986 			pp->name = "name";
987 			pp->length = sz;
988 			pp->value = (unsigned char *)(pp + 1);
989 			*prev_pp = pp;
990 			prev_pp = &pp->next;
991 			memcpy(pp->value, ps, sz - 1);
992 			((char *)pp->value)[sz - 1] = 0;
993 			DBG("fixed up name for %s -> %s\n", pathp, pp->value);
994 		}
995 	}
996 	if (allnextpp) {
997 		*prev_pp = NULL;
998 		np->name = get_property(np, "name", NULL);
999 		np->type = get_property(np, "device_type", NULL);
1000 
1001 		if (!np->name)
1002 			np->name = "<NULL>";
1003 		if (!np->type)
1004 			np->type = "<NULL>";
1005 	}
1006 	while (tag == OF_DT_BEGIN_NODE) {
1007 		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
1008 		tag = *((u32 *)(*p));
1009 	}
1010 	if (tag != OF_DT_END_NODE) {
1011 		printk("Weird tag at end of node: %x\n", tag);
1012 		return mem;
1013 	}
1014 	*p += 4;
1015 	return mem;
1016 }
1017 
1018 
1019 /**
1020  * Unflattens the device-tree passed by the firmware, creating the
1021  * tree of struct device_node. It also fills the "name" and "type"
1022  * pointers of the nodes so the normal device-tree walking functions
1023  * can be used (this used to be done by finish_device_tree)
1024  */
1025 void __init unflatten_device_tree(void)
1026 {
1027 	unsigned long start, mem, size;
1028 	struct device_node **allnextp = &allnodes;
1029 	char *p = NULL;
1030 	int l = 0;
1031 
1032 	DBG(" -> unflatten_device_tree()\n");
1033 
1034 	/* First pass, scan for size */
1035 	start = ((unsigned long)initial_boot_params) +
1036 		initial_boot_params->off_dt_struct;
1037 	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
1038 	size = (size | 3) + 1;
1039 
1040 	DBG("  size is %lx, allocating...\n", size);
1041 
1042 	/* Allocate memory for the expanded device tree */
1043 	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
1044 	if (!mem) {
1045 		DBG("Couldn't allocate memory with lmb_alloc()!\n");
1046 		panic("Couldn't allocate memory with lmb_alloc()!\n");
1047 	}
1048 	mem = (unsigned long) __va(mem);
1049 
1050 	((u32 *)mem)[size / 4] = 0xdeadbeef;
1051 
1052 	DBG("  unflattening %lx...\n", mem);
1053 
1054 	/* Second pass, do actual unflattening */
1055 	start = ((unsigned long)initial_boot_params) +
1056 		initial_boot_params->off_dt_struct;
1057 	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
1058 	if (*((u32 *)start) != OF_DT_END)
1059 		printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
1060 	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
1061 		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
1062 		       ((u32 *)mem)[size / 4] );
1063 	*allnextp = NULL;
1064 
1065 	/* Get pointer to OF "/chosen" node for use everywhere */
1066 	of_chosen = of_find_node_by_path("/chosen");
1067 	if (of_chosen == NULL)
1068 		of_chosen = of_find_node_by_path("/chosen@0");
1069 
1070 	/* Retrieve command line */
1071 	if (of_chosen != NULL) {
1072 		p = (char *)get_property(of_chosen, "bootargs", &l);
1073 		if (p != NULL && l > 0)
1074 			strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
1075 	}
1076 #ifdef CONFIG_CMDLINE
1077 	if (l == 0 || (l == 1 && (*p) == 0))
1078 		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1079 #endif /* CONFIG_CMDLINE */
1080 
1081 	DBG("Command line is: %s\n", cmd_line);
1082 
1083 	DBG(" <- unflatten_device_tree()\n");
1084 }
1085 
1086 
1087 static int __init early_init_dt_scan_cpus(unsigned long node,
1088 					  const char *uname, int depth, void *data)
1089 {
1090 	char *type = get_flat_dt_prop(node, "device_type", NULL);
1091 	u32 *prop;
1092 	unsigned long size = 0;
1093 
1094 	/* We are scanning "cpu" nodes only */
1095 	if (type == NULL || strcmp(type, "cpu") != 0)
1096 		return 0;
1097 
1098 #ifdef CONFIG_PPC_PSERIES
1099 	/* On LPAR, look for the first ibm,pft-size property for the
1100 	 * hash table size */
1101 	if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) {
1102 		u32 *pft_size;
1103 		pft_size = get_flat_dt_prop(node, "ibm,pft-size", NULL);
1104 		if (pft_size != NULL) {
1105 			/* pft_size[0] is the NUMA CEC cookie */
1106 			ppc64_pft_size = pft_size[1];
1107 		}
1108 	}
1109 #endif
1110 
1111 	boot_cpuid = 0;
1112 	boot_cpuid_phys = 0;
1113 	if (initial_boot_params && initial_boot_params->version >= 2) {
1114 		/* version 2 of the kexec param format adds the phys cpuid
1115 		 * of booted proc.
1116 		 */
1117 		boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
1118 	} else {
1119 		/* Check if it's the boot-cpu; if so, set its hw index now */
1120 		if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) {
1121 			prop = get_flat_dt_prop(node, "reg", NULL);
1122 			if (prop != NULL)
1123 				boot_cpuid_phys = *prop;
1124 		}
1125 	}
1126 	set_hard_smp_processor_id(0, boot_cpuid_phys);
1127 
1128 #ifdef CONFIG_ALTIVEC
1129 	/* Check if we have a VMX unit and, if so, update the CPU features */
1130 	prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", &size);
1131 	if (prop && (*prop) > 0) {
1132 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1133 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1134 	}
1135 
1136 	/* Same goes for Apple's "altivec" property */
1137 	prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL);
1138 	if (prop) {
1139 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1140 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1141 	}
1142 #endif /* CONFIG_ALTIVEC */
1143 
1144 #ifdef CONFIG_PPC_PSERIES
1145 	/*
1146 	 * Check for an SMT-capable CPU and set the CPU feature.  We do
1147 	 * this by looking at the size of the ibm,ppc-interrupt-server#s
1148 	 * property.
1149 	 */
1150 	prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
1151 				       &size);
1152 	cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
1153 	if (prop && ((size / sizeof(u32)) > 1))
1154 		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
1155 #endif
1156 
1157 	return 0;
1158 }
1159 
1160 static int __init early_init_dt_scan_chosen(unsigned long node,
1161 					    const char *uname, int depth, void *data)
1162 {
1163 	u32 *prop;
1164 	unsigned long *lprop;
1165 
1166 	DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1167 
1168 	if (depth != 1 ||
1169 	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
1170 		return 0;
1171 
1172 	/* get platform type */
1173 	prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL);
1174 	if (prop == NULL)
1175 		return 0;
1176 #ifdef CONFIG_PPC64
1177 	systemcfg->platform = *prop;
1178 #else
1179 #ifdef CONFIG_PPC_MULTIPLATFORM
1180 	_machine = *prop;
1181 #endif
1182 #endif
1183 
1184 #ifdef CONFIG_PPC64
1185 	/* check if iommu is forced on or off */
1186 	if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
1187 		iommu_is_off = 1;
1188 	if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
1189 		iommu_force_on = 1;
1190 #endif
1191 
1192  	lprop = get_flat_dt_prop(node, "linux,memory-limit", NULL);
1193  	if (lprop)
1194  		memory_limit = *lprop;
1195 
1196 #ifdef CONFIG_PPC64
1197  	lprop = get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
1198  	if (lprop)
1199  		tce_alloc_start = *lprop;
1200  	lprop = get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
1201  	if (lprop)
1202  		tce_alloc_end = *lprop;
1203 #endif
1204 
1205 #ifdef CONFIG_PPC_RTAS
1206 	/* To help early debugging via the front panel, we retrieve a minimal
1207 	 * set of RTAS info now if available.
1208 	 */
1209 	{
1210 		u64 *basep, *entryp;
1211 
1212 		basep = get_flat_dt_prop(node, "linux,rtas-base", NULL);
1213 		entryp = get_flat_dt_prop(node, "linux,rtas-entry", NULL);
1214 		prop = get_flat_dt_prop(node, "linux,rtas-size", NULL);
1215 		if (basep && entryp && prop) {
1216 			rtas.base = *basep;
1217 			rtas.entry = *entryp;
1218 			rtas.size = *prop;
1219 		}
1220 	}
1221 #endif /* CONFIG_PPC_RTAS */
1222 
1223 	/* break now */
1224 	return 1;
1225 }
1226 
1227 static int __init early_init_dt_scan_root(unsigned long node,
1228 					  const char *uname, int depth, void *data)
1229 {
1230 	u32 *prop;
1231 
1232 	if (depth != 0)
1233 		return 0;
1234 
1235 	prop = get_flat_dt_prop(node, "#size-cells", NULL);
1236 	dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1237 	DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1238 
1239 	prop = get_flat_dt_prop(node, "#address-cells", NULL);
1240 	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1241 	DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1242 
1243 	/* break now */
1244 	return 1;
1245 }
1246 
1247 static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1248 {
1249 	cell_t *p = *cellp;
1250 	unsigned long r;
1251 
1252 	/* Skip any extra cells beyond what fits in an unsigned long */
1253 	while (s > sizeof(unsigned long) / 4) {
1254 		p++;
1255 		s--;
1256 	}
1257 	r = *p++;
1258 #ifdef CONFIG_PPC64
1259 	if (s > 1) {
1260 		r <<= 32;
1261 		r |= *(p++);
1262 		s--;
1263 	}
1264 #endif
1265 
1266 	*cellp = p;
1267 	return r;
1268 }
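
/*
 * Worked example (hypothetical values): with dt_root_addr_cells == 2 and a
 * "reg" entry of <0x00000001 0x80000000>, dt_mem_next_cell() returns
 * 0x180000000 on a 64-bit kernel (the two cells are combined), while a
 * 32-bit kernel skips the most-significant cell and returns 0x80000000,
 * since only sizeof(unsigned long)/4 cells fit in the result.
 */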
1269 
1270 
1271 static int __init early_init_dt_scan_memory(unsigned long node,
1272 					    const char *uname, int depth, void *data)
1273 {
1274 	char *type = get_flat_dt_prop(node, "device_type", NULL);
1275 	cell_t *reg, *endp;
1276 	unsigned long l;
1277 
1278 	/* We are scanning "memory" nodes only */
1279 	if (type == NULL || strcmp(type, "memory") != 0)
1280 		return 0;
1281 
1282 	reg = (cell_t *)get_flat_dt_prop(node, "reg", &l);
1283 	if (reg == NULL)
1284 		return 0;
1285 
1286 	endp = reg + (l / sizeof(cell_t));
1287 
1288 	DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n",
1289 	    uname, l, reg[0], reg[1], reg[2], reg[3]);
1290 
1291 	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1292 		unsigned long base, size;
1293 
1294 		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1295 		size = dt_mem_next_cell(dt_root_size_cells, &reg);
1296 
1297 		if (size == 0)
1298 			continue;
1299 		DBG(" - %lx ,  %lx\n", base, size);
1300 #ifdef CONFIG_PPC64
1301 		if (iommu_is_off) {
1302 			if (base >= 0x80000000ul)
1303 				continue;
1304 			if ((base + size) > 0x80000000ul)
1305 				size = 0x80000000ul - base;
1306 		}
1307 #endif
1308 		lmb_add(base, size);
1309 	}
1310 	return 0;
1311 }
1312 
1313 static void __init early_reserve_mem(void)
1314 {
1315 	unsigned long base, size;
1316 	unsigned long *reserve_map;
1317 
1318 	reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
1319 					initial_boot_params->off_mem_rsvmap);
1320 	while (1) {
1321 		base = *(reserve_map++);
1322 		size = *(reserve_map++);
1323 		if (size == 0)
1324 			break;
1325 		DBG("reserving: %lx -> %lx\n", base, size);
1326 		lmb_reserve(base, size);
1327 	}
1328 
1329 #if 0
1330 	DBG("memory reserved, lmbs :\n");
1331 	lmb_dump_all();
1332 #endif
1333 }
1334 
1335 void __init early_init_devtree(void *params)
1336 {
1337 	DBG(" -> early_init_devtree()\n");
1338 
1339 	/* Setup flat device-tree pointer */
1340 	initial_boot_params = params;
1341 
1342 	/* Retrieve various information from the /chosen node of the
1343 	 * device-tree, including the platform type, initrd location and
1344 	 * size, TCE reserve, and more ...
1345 	 */
1346 	scan_flat_dt(early_init_dt_scan_chosen, NULL);
1347 
1348 	/* Scan memory nodes and rebuild LMBs */
1349 	lmb_init();
1350 	scan_flat_dt(early_init_dt_scan_root, NULL);
1351 	scan_flat_dt(early_init_dt_scan_memory, NULL);
1352 	lmb_enforce_memory_limit(memory_limit);
1353 	lmb_analyze();
1354 #ifdef CONFIG_PPC64
1355 	systemcfg->physicalMemorySize = lmb_phys_mem_size();
1356 #endif
1357 	lmb_reserve(0, __pa(klimit));
1358 
1359 	DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
1360 
1361 	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
1362 	early_reserve_mem();
1363 
1364 	DBG("Scanning CPUs ...\n");
1365 
1366 	/* Retrieve the hash table size from the flattened tree, plus other
1367 	 * CPU-related information (AltiVec support, boot CPU ID, ...)
1368 	 */
1369 	scan_flat_dt(early_init_dt_scan_cpus, NULL);
1370 
1371 	DBG(" <- early_init_devtree()\n");
1372 }
1373 
1374 #undef printk
1375 
1376 int
1377 prom_n_addr_cells(struct device_node* np)
1378 {
1379 	int* ip;
1380 	do {
1381 		if (np->parent)
1382 			np = np->parent;
1383 		ip = (int *) get_property(np, "#address-cells", NULL);
1384 		if (ip != NULL)
1385 			return *ip;
1386 	} while (np->parent);
1387 	/* No #address-cells property for the root node, default to 1 */
1388 	return 1;
1389 }
1390 
1391 int
1392 prom_n_size_cells(struct device_node* np)
1393 {
1394 	int* ip;
1395 	do {
1396 		if (np->parent)
1397 			np = np->parent;
1398 		ip = (int *) get_property(np, "#size-cells", NULL);
1399 		if (ip != NULL)
1400 			return *ip;
1401 	} while (np->parent);
1402 	/* No #size-cells property for the root node, default to 1 */
1403 	return 1;
1404 }
1405 
1406 /**
1407  * Work out the sense (active-low level / active-high edge)
1408  * of each interrupt from the device tree.
1409  */
1410 void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1411 {
1412 	struct device_node *np;
1413 	int i, j;
1414 
1415 	/* default to level-triggered */
1416 	memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);
1417 
1418 	for (np = allnodes; np != 0; np = np->allnext) {
1419 		for (j = 0; j < np->n_intrs; j++) {
1420 			i = np->intrs[j].line;
1421 			if (i >= off && i < max)
1422 				senses[i-off] = np->intrs[j].sense;
1423 		}
1424 	}
1425 }
1426 
1427 /**
1428  * Construct and return a list of the device_nodes with a given name.
1429  */
1430 struct device_node *find_devices(const char *name)
1431 {
1432 	struct device_node *head, **prevp, *np;
1433 
1434 	prevp = &head;
1435 	for (np = allnodes; np != 0; np = np->allnext) {
1436 		if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1437 			*prevp = np;
1438 			prevp = &np->next;
1439 		}
1440 	}
1441 	*prevp = NULL;
1442 	return head;
1443 }
1444 EXPORT_SYMBOL(find_devices);
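
/*
 * Usage sketch (illustrative; the "serial" name and helper are hypothetical):
 * the list built by find_devices() is chained through the nodes' "next"
 * pointers, so a caller typically walks it as
 *
 *	struct device_node *np;
 *
 *	for (np = find_devices("serial"); np != NULL; np = np->next)
 *		do_something_with(np);
 */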
1445 
1446 /**
1447  * Construct and return a list of the device_nodes with a given type.
1448  */
1449 struct device_node *find_type_devices(const char *type)
1450 {
1451 	struct device_node *head, **prevp, *np;
1452 
1453 	prevp = &head;
1454 	for (np = allnodes; np != 0; np = np->allnext) {
1455 		if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1456 			*prevp = np;
1457 			prevp = &np->next;
1458 		}
1459 	}
1460 	*prevp = NULL;
1461 	return head;
1462 }
1463 EXPORT_SYMBOL(find_type_devices);
1464 
1465 /**
1466  * Returns all nodes linked together
1467  */
1468 struct device_node *find_all_nodes(void)
1469 {
1470 	struct device_node *head, **prevp, *np;
1471 
1472 	prevp = &head;
1473 	for (np = allnodes; np != 0; np = np->allnext) {
1474 		*prevp = np;
1475 		prevp = &np->next;
1476 	}
1477 	*prevp = NULL;
1478 	return head;
1479 }
1480 EXPORT_SYMBOL(find_all_nodes);
1481 
1482 /** Checks if the given "compat" string matches one of the strings in
1483  * the device's "compatible" property
1484  */
1485 int device_is_compatible(struct device_node *device, const char *compat)
1486 {
1487 	const char* cp;
1488 	int cplen, l;
1489 
1490 	cp = (char *) get_property(device, "compatible", &cplen);
1491 	if (cp == NULL)
1492 		return 0;
1493 	while (cplen > 0) {
1494 		if (strncasecmp(cp, compat, strlen(compat)) == 0)
1495 			return 1;
1496 		l = strlen(cp) + 1;
1497 		cp += l;
1498 		cplen -= l;
1499 	}
1500 
1501 	return 0;
1502 }
1503 EXPORT_SYMBOL(device_is_compatible);
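
/*
 * Usage sketch (illustrative; the node pointer is assumed to have been found
 * elsewhere): device_is_compatible() is typically used to distinguish
 * variants once a node has been located by name or type, e.g.
 *
 *	if (np != NULL && device_is_compatible(np, "chrp,iic"))
 *		handle_cascaded_8259(np);	// hypothetical helper
 *
 * as finish_node_interrupts() does above for the cascaded 8259 case.
 */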
1504 
1505 
1506 /**
1507  * Indicates whether the root node has a given value in its
1508  * compatible property.
1509  */
1510 int machine_is_compatible(const char *compat)
1511 {
1512 	struct device_node *root;
1513 	int rc = 0;
1514 
1515 	root = of_find_node_by_path("/");
1516 	if (root) {
1517 		rc = device_is_compatible(root, compat);
1518 		of_node_put(root);
1519 	}
1520 	return rc;
1521 }
1522 EXPORT_SYMBOL(machine_is_compatible);
1523 
1524 /**
1525  * Construct and return a list of the device_nodes with a given type
1526  * and compatible property.
1527  */
1528 struct device_node *find_compatible_devices(const char *type,
1529 					    const char *compat)
1530 {
1531 	struct device_node *head, **prevp, *np;
1532 
1533 	prevp = &head;
1534 	for (np = allnodes; np != 0; np = np->allnext) {
1535 		if (type != NULL
1536 		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1537 			continue;
1538 		if (device_is_compatible(np, compat)) {
1539 			*prevp = np;
1540 			prevp = &np->next;
1541 		}
1542 	}
1543 	*prevp = NULL;
1544 	return head;
1545 }
1546 EXPORT_SYMBOL(find_compatible_devices);
1547 
1548 /**
1549  * Find the device_node with a given full_name.
1550  */
1551 struct device_node *find_path_device(const char *path)
1552 {
1553 	struct device_node *np;
1554 
1555 	for (np = allnodes; np != 0; np = np->allnext)
1556 		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1557 			return np;
1558 	return NULL;
1559 }
1560 EXPORT_SYMBOL(find_path_device);
1561 
1562 /*******
1563  *
1564  * New implementation of the OF "find" APIs: they return a refcounted
1565  * object; call of_node_put() when done.  The device tree and list
1566  * are protected by a rw_lock.
1567  *
1568  * Note that property management will need some locking as well,
1569  * this isn't dealt with yet.
1570  *
1571  *******/
1572 
1573 /**
1574  *	of_find_node_by_name - Find a node by its "name" property
1575  *	@from:	The node to start searching from, or NULL; the node
1576  *		you pass will not be searched, only the next one
1577  *		will; typically, you pass what the previous call
1578  *		returned. of_node_put() will be called on it
1579  *	@name:	The name string to match against
1580  *
1581  *	Returns a node pointer with refcount incremented, use
1582  *	of_node_put() on it when done.
1583  */
1584 struct device_node *of_find_node_by_name(struct device_node *from,
1585 	const char *name)
1586 {
1587 	struct device_node *np;
1588 
1589 	read_lock(&devtree_lock);
1590 	np = from ? from->allnext : allnodes;
1591 	for (; np != 0; np = np->allnext)
1592 		if (np->name != 0 && strcasecmp(np->name, name) == 0
1593 		    && of_node_get(np))
1594 			break;
1595 	if (from)
1596 		of_node_put(from);
1597 	read_unlock(&devtree_lock);
1598 	return np;
1599 }
1600 EXPORT_SYMBOL(of_find_node_by_name);
1601 
1602 /**
1603  *	of_find_node_by_type - Find a node by its "device_type" property
1604  *	@from:	The node to start searching from, or NULL; the node
1605  *		you pass will not be searched, only the next one
1606  *		will; typically, you pass what the previous call
1607  *		returned. of_node_put() will be called on it
1608  *	@type:	The type string to match against
1609  *
1610  *	Returns a node pointer with refcount incremented, use
1611  *	of_node_put() on it when done.
1612  */
1613 struct device_node *of_find_node_by_type(struct device_node *from,
1614 	const char *type)
1615 {
1616 	struct device_node *np;
1617 
1618 	read_lock(&devtree_lock);
1619 	np = from ? from->allnext : allnodes;
1620 	for (; np != 0; np = np->allnext)
1621 		if (np->type != 0 && strcasecmp(np->type, type) == 0
1622 		    && of_node_get(np))
1623 			break;
1624 	if (from)
1625 		of_node_put(from);
1626 	read_unlock(&devtree_lock);
1627 	return np;
1628 }
1629 EXPORT_SYMBOL(of_find_node_by_type);
1630 
1631 /**
1632  *	of_find_compatible_node - Find a node based on type and one of the
1633  *                                tokens in its "compatible" property
1634  *	@from:		The node to start searching from, or NULL; the node
1635  *			you pass will not be searched, only the next one
1636  *			will; typically, you pass what the previous call
1637  *			returned. of_node_put() will be called on it
1638  *	@type:		The type string to match "device_type" or NULL to ignore
1639  *	@compatible:	The string to match to one of the tokens in the device
1640  *			"compatible" list.
1641  *
1642  *	Returns a node pointer with refcount incremented, use
1643  *	of_node_put() on it when done.
1644  */
1645 struct device_node *of_find_compatible_node(struct device_node *from,
1646 	const char *type, const char *compatible)
1647 {
1648 	struct device_node *np;
1649 
1650 	read_lock(&devtree_lock);
1651 	np = from ? from->allnext : allnodes;
1652 	for (; np != 0; np = np->allnext) {
1653 		if (type != NULL
1654 		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1655 			continue;
1656 		if (device_is_compatible(np, compatible) && of_node_get(np))
1657 			break;
1658 	}
1659 	if (from)
1660 		of_node_put(from);
1661 	read_unlock(&devtree_lock);
1662 	return np;
1663 }
1664 EXPORT_SYMBOL(of_find_compatible_node);
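
/*
 * Usage sketch (illustrative; the compatible string and helper are
 * hypothetical): the "from" argument makes it easy to iterate over every
 * match, and the refcounting works out because each call puts "from" and
 * gets the node it returns:
 *
 *	struct device_node *np = NULL;
 *
 *	while ((np = of_find_compatible_node(np, NULL, "example,device")) != NULL)
 *		do_something_with(np);	// take an extra reference if np is kept
 */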
1665 
1666 /**
1667  *	of_find_node_by_path - Find a node matching a full OF path
1668  *	@path:	The full path to match
1669  *
1670  *	Returns a node pointer with refcount incremented, use
1671  *	of_node_put() on it when done.
1672  */
1673 struct device_node *of_find_node_by_path(const char *path)
1674 {
1675 	struct device_node *np = allnodes;
1676 
1677 	read_lock(&devtree_lock);
1678 	for (; np != 0; np = np->allnext) {
1679 		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1680 		    && of_node_get(np))
1681 			break;
1682 	}
1683 	read_unlock(&devtree_lock);
1684 	return np;
1685 }
1686 EXPORT_SYMBOL(of_find_node_by_path);
1687 
1688 /**
1689  *	of_find_node_by_phandle - Find a node given a phandle
1690  *	@handle:	phandle of the node to find
1691  *
1692  *	Returns a node pointer with refcount incremented, use
1693  *	of_node_put() on it when done.
1694  */
1695 struct device_node *of_find_node_by_phandle(phandle handle)
1696 {
1697 	struct device_node *np;
1698 
1699 	read_lock(&devtree_lock);
1700 	for (np = allnodes; np != 0; np = np->allnext)
1701 		if (np->linux_phandle == handle)
1702 			break;
1703 	if (np)
1704 		of_node_get(np);
1705 	read_unlock(&devtree_lock);
1706 	return np;
1707 }
1708 EXPORT_SYMBOL(of_find_node_by_phandle);
1709 
1710 /**
1711  *	of_find_all_nodes - Get next node in global list
1712  *	@prev:	Previous node or NULL to start iteration
1713  *		of_node_put() will be called on it
1714  *
1715  *	Returns a node pointer with refcount incremented, use
1716  *	of_node_put() on it when done.
1717  */
1718 struct device_node *of_find_all_nodes(struct device_node *prev)
1719 {
1720 	struct device_node *np;
1721 
1722 	read_lock(&devtree_lock);
1723 	np = prev ? prev->allnext : allnodes;
1724 	for (; np != 0; np = np->allnext)
1725 		if (of_node_get(np))
1726 			break;
1727 	if (prev)
1728 		of_node_put(prev);
1729 	read_unlock(&devtree_lock);
1730 	return np;
1731 }
1732 EXPORT_SYMBOL(of_find_all_nodes);
1733 
1734 /**
1735  *	of_get_parent - Get a node's parent if any
1736  *	@node:	Node to get parent
1737  *
1738  *	Returns a node pointer with refcount incremented, use
1739  *	of_node_put() on it when done.
1740  */
1741 struct device_node *of_get_parent(const struct device_node *node)
1742 {
1743 	struct device_node *np;
1744 
1745 	if (!node)
1746 		return NULL;
1747 
1748 	read_lock(&devtree_lock);
1749 	np = of_node_get(node->parent);
1750 	read_unlock(&devtree_lock);
1751 	return np;
1752 }
1753 EXPORT_SYMBOL(of_get_parent);
1754 
1755 /**
1756  *	of_get_next_child - Iterate over a node's children
1757  *	@node:	parent node
1758  *	@prev:	previous child of the parent node, or NULL to get first
1759  *
1760  *	Returns a node pointer with refcount incremented, use
1761  *	of_node_put() on it when done.
1762  */
1763 struct device_node *of_get_next_child(const struct device_node *node,
1764 	struct device_node *prev)
1765 {
1766 	struct device_node *next;
1767 
1768 	read_lock(&devtree_lock);
1769 	next = prev ? prev->sibling : node->child;
1770 	for (; next != 0; next = next->sibling)
1771 		if (of_node_get(next))
1772 			break;
1773 	if (prev)
1774 		of_node_put(prev);
1775 	read_unlock(&devtree_lock);
1776 	return next;
1777 }
1778 EXPORT_SYMBOL(of_get_next_child);
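
/*
 * Usage sketch (illustrative; "parent" and the helper are assumed to exist):
 * iterating over a node's children, with the refcounting handled by
 * of_get_next_child() itself:
 *
 *	struct device_node *child = NULL;
 *
 *	while ((child = of_get_next_child(parent, child)) != NULL)
 *		do_something_with(child);
 */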
1779 
1780 /**
1781  *	of_node_get - Increment refcount of a node
1782  *	@node:	Node to inc refcount, NULL is supported to
1783  *		simplify writing of callers
1784  *
1785  *	Returns node.
1786  */
1787 struct device_node *of_node_get(struct device_node *node)
1788 {
1789 	if (node)
1790 		kref_get(&node->kref);
1791 	return node;
1792 }
1793 EXPORT_SYMBOL(of_node_get);
1794 
1795 static inline struct device_node * kref_to_device_node(struct kref *kref)
1796 {
1797 	return container_of(kref, struct device_node, kref);
1798 }
1799 
1800 /**
1801  *	of_node_release - release a dynamically allocated node
1802  *	@kref:  kref element of the node to be released
1803  *
1804  *	In of_node_put() this function is passed to kref_put()
1805  *	as the destructor.
1806  */
1807 static void of_node_release(struct kref *kref)
1808 {
1809 	struct device_node *node = kref_to_device_node(kref);
1810 	struct property *prop = node->properties;
1811 
1812 	if (!OF_IS_DYNAMIC(node))
1813 		return;
1814 	while (prop) {
1815 		struct property *next = prop->next;
1816 		kfree(prop->name);
1817 		kfree(prop->value);
1818 		kfree(prop);
1819 		prop = next;
1820 	}
1821 	kfree(node->intrs);
1822 	kfree(node->addrs);
1823 	kfree(node->full_name);
1824 	kfree(node->data);
1825 	kfree(node);
1826 }
1827 
1828 /**
1829  *	of_node_put - Decrement refcount of a node
1830  *	@node:	Node to dec refcount, NULL is supported to
1831  *		simplify writing of callers
1832  *
1833  */
1834 void of_node_put(struct device_node *node)
1835 {
1836 	if (node)
1837 		kref_put(&node->kref, of_node_release);
1838 }
1839 EXPORT_SYMBOL(of_node_put);
1840 
1841 /*
1842  * Plug a device node into the tree and global list.
1843  */
1844 void of_attach_node(struct device_node *np)
1845 {
1846 	write_lock(&devtree_lock);
1847 	np->sibling = np->parent->child;
1848 	np->allnext = allnodes;
1849 	np->parent->child = np;
1850 	allnodes = np;
1851 	write_unlock(&devtree_lock);
1852 }
1853 
1854 /*
1855  * "Unplug" a node from the device tree.  The caller must hold
1856  * a reference to the node.  The memory associated with the node
1857  * is not freed until its refcount goes to zero.
1858  */
1859 void of_detach_node(const struct device_node *np)
1860 {
1861 	struct device_node *parent;
1862 
1863 	write_lock(&devtree_lock);
1864 
1865 	parent = np->parent;
1866 
1867 	if (allnodes == np)
1868 		allnodes = np->allnext;
1869 	else {
1870 		struct device_node *prev;
1871 		for (prev = allnodes;
1872 		     prev->allnext != np;
1873 		     prev = prev->allnext)
1874 			;
1875 		prev->allnext = np->allnext;
1876 	}
1877 
1878 	if (parent->child == np)
1879 		parent->child = np->sibling;
1880 	else {
1881 		struct device_node *prevsib;
1882 		for (prevsib = np->parent->child;
1883 		     prevsib->sibling != np;
1884 		     prevsib = prevsib->sibling)
1885 			;
1886 		prevsib->sibling = np->sibling;
1887 	}
1888 
1889 	write_unlock(&devtree_lock);
1890 }
1891 
1892 #ifdef CONFIG_PPC_PSERIES
1893 /*
1894  * Fix up the uninitialized fields in a new device node:
1895  * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1896  *
1897  * A lot of boot-time code is duplicated here, because functions such
1898  * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1899  * slab allocator.
1900  *
1901  * This should probably be split up into smaller chunks.
1902  */
1903 
1904 static int of_finish_dynamic_node(struct device_node *node,
1905 				  unsigned long *unused1, int unused2,
1906 				  int unused3, int unused4)
1907 {
1908 	struct device_node *parent = of_get_parent(node);
1909 	int err = 0;
1910 	phandle *ibm_phandle;
1911 
1912 	node->name = get_property(node, "name", NULL);
1913 	node->type = get_property(node, "device_type", NULL);
1914 
1915 	if (!parent) {
1916 		err = -ENODEV;
1917 		goto out;
1918 	}
1919 
1920 	/* We don't support that function on PowerMac, at least
1921 	 * not yet
1922 	 */
1923 	if (systemcfg->platform == PLATFORM_POWERMAC)
1924 		return -ENODEV;
1925 
1926 	/* fix up new node's linux_phandle field */
1927 	if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL)))
1928 		node->linux_phandle = *ibm_phandle;
1929 
1930 out:
1931 	of_node_put(parent);
1932 	return err;
1933 }
1934 
1935 static int prom_reconfig_notifier(struct notifier_block *nb,
1936 				  unsigned long action, void *node)
1937 {
1938 	int err;
1939 
1940 	switch (action) {
1941 	case PSERIES_RECONFIG_ADD:
1942 		err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
1943 		if (err < 0) {
1944 			printk(KERN_ERR "finish_node returned %d\n", err);
1945 			err = NOTIFY_BAD;
1946 		}
1947 		break;
1948 	default:
1949 		err = NOTIFY_DONE;
1950 		break;
1951 	}
1952 	return err;
1953 }
1954 
1955 static struct notifier_block prom_reconfig_nb = {
1956 	.notifier_call = prom_reconfig_notifier,
1957 	.priority = 10, /* This one needs to run first */
1958 };
1959 
1960 static int __init prom_reconfig_setup(void)
1961 {
1962 	return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
1963 }
1964 __initcall(prom_reconfig_setup);
1965 #endif
1966 
1967 /*
1968  * Find a property with a given name for a given node
1969  * and return the value.
1970  */
1971 unsigned char *get_property(struct device_node *np, const char *name,
1972 			    int *lenp)
1973 {
1974 	struct property *pp;
1975 
1976 	for (pp = np->properties; pp != 0; pp = pp->next)
1977 		if (strcmp(pp->name, name) == 0) {
1978 			if (lenp != 0)
1979 				*lenp = pp->length;
1980 			return pp->value;
1981 		}
1982 	return NULL;
1983 }
1984 EXPORT_SYMBOL(get_property);
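
/*
 * Usage sketch (illustrative; np is assumed to have been found elsewhere):
 * get_property() returns a pointer into the node's property list and
 * optionally the length in bytes; callers cast to the expected type, as the
 * rest of this file does for "reg", "name", etc.:
 *
 *	int len;
 *	unsigned int *reg = (unsigned int *)get_property(np, "reg", &len);
 *
 *	if (reg != NULL)
 *		printk("first reg cell: %x (%d bytes total)\n", reg[0], len);
 */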
1985 
1986 /*
1987  * Add a property to a node
1988  */
1989 void prom_add_property(struct device_node* np, struct property* prop)
1990 {
1991 	struct property **next = &np->properties;
1992 
1993 	prop->next = NULL;
1994 	while (*next)
1995 		next = &(*next)->next;
1996 	*next = prop;
1997 }
1998 
1999 /* I quickly hacked that one, check against spec ! */
2000 static inline unsigned long
2001 bus_space_to_resource_flags(unsigned int bus_space)
2002 {
2003 	u8 space = (bus_space >> 24) & 0xf;
2004 	if (space == 0)
2005 		space = 0x02;
2006 	if (space == 0x02)
2007 		return IORESOURCE_MEM;
2008 	else if (space == 0x01)
2009 		return IORESOURCE_IO;
2010 	else {
2011 		printk(KERN_WARNING "prom.c: bus_space_to_resource_flags(), space: %x\n",
2012 		    	bus_space);
2013 		return 0;
2014 	}
2015 }
2016 
2017 #ifdef CONFIG_PCI
2018 static struct resource *find_parent_pci_resource(struct pci_dev* pdev,
2019 						 struct address_range *range)
2020 {
2021 	unsigned long mask;
2022 	int i;
2023 
2024 	/* Check this one */
2025 	mask = bus_space_to_resource_flags(range->space);
2026 	for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2027 		if ((pdev->resource[i].flags & mask) == mask &&
2028 			pdev->resource[i].start <= range->address &&
2029 			pdev->resource[i].end > range->address) {
2030 				if ((range->address + range->size - 1) > pdev->resource[i].end) {
2031 					/* Add better message */
2032 					printk(KERN_WARNING "PCI/OF resource overlap !\n");
2033 					return NULL;
2034 				}
2035 				break;
2036 			}
2037 	}
2038 	if (i == DEVICE_COUNT_RESOURCE)
2039 		return NULL;
2040 	return &pdev->resource[i];
2041 }
2042 
2043 /*
2044  * Request an OF device resource.  Currently handles children of PCI
2045  * devices, or other nodes attached to the root node.  Ultimately, put
2046  * some link to resources in the OF node.
2047  */
2048 struct resource *request_OF_resource(struct device_node* node, int index,
2049 				     const char* name_postfix)
2050 {
2051 	struct pci_dev* pcidev;
2052 	u8 pci_bus, pci_devfn;
2053 	unsigned long iomask;
2054 	struct device_node* nd;
2055 	struct resource* parent;
2056 	struct resource *res = NULL;
2057 	int nlen, plen;
2058 
2059 	if (index >= node->n_addrs)
2060 		goto fail;
2061 
2062 	/* Sanity check on bus space */
2063 	iomask = bus_space_to_resource_flags(node->addrs[index].space);
2064 	if (iomask & IORESOURCE_MEM)
2065 		parent = &iomem_resource;
2066 	else if (iomask & IORESOURCE_IO)
2067 		parent = &ioport_resource;
2068 	else
2069 		goto fail;
2070 
2071 	/* Find a PCI parent if any */
2072 	nd = node;
2073 	pcidev = NULL;
2074 	while (nd) {
2075 		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2076 			pcidev = pci_find_slot(pci_bus, pci_devfn);
2077 		if (pcidev) break;
2078 		nd = nd->parent;
2079 	}
2080 	if (pcidev)
2081 		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2082 	if (!parent) {
2083 		printk(KERN_WARNING "request_OF_resource(%s), parent not found\n",
2084 			node->name);
2085 		goto fail;
2086 	}
2087 
2088 	res = __request_region(parent, node->addrs[index].address,
2089 			       node->addrs[index].size, NULL);
2090 	if (!res)
2091 		goto fail;
2092 	nlen = strlen(node->name);
2093 	plen = name_postfix ? strlen(name_postfix) : 0;
2094 	res->name = (const char *)kmalloc(nlen+plen+1, GFP_KERNEL);
2095 	if (res->name) {
2096 		strcpy((char *)res->name, node->name);
2097 		if (plen)
2098 			strcpy((char *)res->name+nlen, name_postfix);
2099 	}
2100 	return res;
2101 fail:
2102 	return NULL;
2103 }
2104 EXPORT_SYMBOL(request_OF_resource);
2105 
2106 int release_OF_resource(struct device_node *node, int index)
2107 {
2108 	struct pci_dev* pcidev;
2109 	u8 pci_bus, pci_devfn;
2110 	unsigned long iomask, start, end;
2111 	struct device_node* nd;
2112 	struct resource* parent;
2113 	struct resource *res = NULL;
2114 
2115 	if (index >= node->n_addrs)
2116 		return -EINVAL;
2117 
2118 	/* Sanity check on bus space */
2119 	iomask = bus_space_to_resource_flags(node->addrs[index].space);
2120 	if (iomask & IORESOURCE_MEM)
2121 		parent = &iomem_resource;
2122 	else if (iomask & IORESOURCE_IO)
2123 		parent = &ioport_resource;
2124 	else
2125 		return -EINVAL;
2126 
2127 	/* Find a PCI parent if any */
2128 	nd = node;
2129 	pcidev = NULL;
2130 	while(nd) {
2131 		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2132 			pcidev = pci_find_slot(pci_bus, pci_devfn);
2133 		if (pcidev) break;
2134 		nd = nd->parent;
2135 	}
2136 	if (pcidev)
2137 		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2138 	if (!parent) {
2139 		printk(KERN_WARNING "release_OF_resource(%s), parent not found\n",
2140 			node->name);
2141 		return -ENODEV;
2142 	}
2143 
2144 	/* Find us in the parent and its children */
2145 	res = parent->child;
2146 	start = node->addrs[index].address;
2147 	end = start + node->addrs[index].size - 1;
2148 	while (res) {
2149 		if (res->start == start && res->end == end &&
2150 		    (res->flags & IORESOURCE_BUSY))
2151 		    	break;
2152 		if (res->start <= start && res->end >= end)
2153 			res = res->child;
2154 		else
2155 			res = res->sibling;
2156 	}
2157 	if (!res)
2158 		return -ENODEV;
2159 
2160 	if (res->name) {
2161 		kfree(res->name);
2162 		res->name = NULL;
2163 	}
2164 	release_resource(res);
2165 	kfree(res);
2166 
2167 	return 0;
2168 }
2169 EXPORT_SYMBOL(release_OF_resource);
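
/*
 * Usage sketch (illustrative; the node, index and postfix are hypothetical):
 * a driver claiming the first address range of an OF node and releasing it
 * again on teardown:
 *
 *	struct resource *r = request_OF_resource(np, 0, " (sample)");
 *
 *	if (r == NULL)
 *		return -EBUSY;
 *	...
 *	release_OF_resource(np, 0);
 */
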
2170 #endif /* CONFIG_PCI */
2171