xref: /openbmc/linux/arch/powerpc/kernel/prom.c (revision dcee3036)
1 /*
2  * Procedures for creating, accessing and interpreting the device tree.
3  *
4  * Paul Mackerras	August 1996.
5  * Copyright (C) 1996-2005 Paul Mackerras.
6  *
7  *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8  *    {engebret|bergner}@us.ibm.com
9  *
10  *      This program is free software; you can redistribute it and/or
11  *      modify it under the terms of the GNU General Public License
12  *      as published by the Free Software Foundation; either version
13  *      2 of the License, or (at your option) any later version.
14  */
15 
16 #undef DEBUG
17 
18 #include <stdarg.h>
19 #include <linux/config.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/init.h>
23 #include <linux/threads.h>
24 #include <linux/spinlock.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/stringify.h>
28 #include <linux/delay.h>
29 #include <linux/initrd.h>
30 #include <linux/bitops.h>
31 #include <linux/module.h>
32 #include <linux/kexec.h>
33 
34 #include <asm/prom.h>
35 #include <asm/rtas.h>
36 #include <asm/lmb.h>
37 #include <asm/page.h>
38 #include <asm/processor.h>
39 #include <asm/irq.h>
40 #include <asm/io.h>
41 #include <asm/kdump.h>
42 #include <asm/smp.h>
43 #include <asm/system.h>
44 #include <asm/mmu.h>
45 #include <asm/pgtable.h>
46 #include <asm/pci.h>
47 #include <asm/iommu.h>
48 #include <asm/btext.h>
49 #include <asm/sections.h>
50 #include <asm/machdep.h>
51 #include <asm/pSeries_reconfig.h>
52 #include <asm/pci-bridge.h>
53 
54 #ifdef DEBUG
55 #define DBG(fmt...) printk(KERN_ERR fmt)
56 #else
57 #define DBG(fmt...)
58 #endif
59 
60 struct pci_reg_property {
61 	struct pci_address addr;
62 	u32 size_hi;
63 	u32 size_lo;
64 };
65 
66 struct isa_reg_property {
67 	u32 space;
68 	u32 address;
69 	u32 size;
70 };
71 
72 
73 typedef int interpret_func(struct device_node *, unsigned long *,
74 			   int, int, int);
75 
76 static int __initdata dt_root_addr_cells;
77 static int __initdata dt_root_size_cells;
78 
79 #ifdef CONFIG_PPC64
80 static int __initdata iommu_is_off;
81 int __initdata iommu_force_on;
82 unsigned long tce_alloc_start, tce_alloc_end;
83 #endif
84 
85 typedef u32 cell_t;
86 
87 #if 0
88 static struct boot_param_header *initial_boot_params __initdata;
89 #else
90 struct boot_param_header *initial_boot_params;
91 #endif
92 
93 static struct device_node *allnodes = NULL;
94 
95 /* Use this lock when traversing the tree through the allnext, child,
96  * sibling, or parent members of struct device_node.
97  */
98 static DEFINE_RWLOCK(devtree_lock);
99 
100 /* exported to the outside world */
101 struct device_node *of_chosen;
102 
103 struct device_node *dflt_interrupt_controller;
104 int num_interrupt_controllers;
105 
106 /*
107  * Wrapper for allocating memory for various data that needs to be
108  * attached to device nodes as they are processed at boot or when
109  * added to the device tree later (e.g. DLPAR).  At boot there is
110  * already a region reserved so we just increment *mem_start by size;
111  * otherwise we call kmalloc.
112  */
113 static void * prom_alloc(unsigned long size, unsigned long *mem_start)
114 {
115 	unsigned long tmp;
116 
117 	if (!mem_start)
118 		return kmalloc(size, GFP_KERNEL);
119 
120 	tmp = *mem_start;
121 	*mem_start += size;
122 	return (void *)tmp;
123 }
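
/*
 * Illustrative sketch (not compiled, hence the #if 0): how the two
 * prom_alloc() modes described above are typically used.  The function
 * name and the sizes are hypothetical, for illustration only.
 */
#if 0
static void prom_alloc_example(unsigned long *mem_start)
{
	void *early_buf, *late_buf;

	/* At boot: carve 64 bytes out of the pre-reserved region;
	 * *mem_start is simply advanced by the requested size. */
	early_buf = prom_alloc(64, mem_start);

	/* After boot (e.g. DLPAR add): there is no reserved region, so
	 * pass NULL and prom_alloc() falls back to kmalloc(GFP_KERNEL). */
	late_buf = prom_alloc(64, NULL);
	kfree(late_buf);
}
#endif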
124 
125 /*
126  * Find the device_node with a given phandle.
127  */
128 static struct device_node * find_phandle(phandle ph)
129 {
130 	struct device_node *np;
131 
132 	for (np = allnodes; np != 0; np = np->allnext)
133 		if (np->linux_phandle == ph)
134 			return np;
135 	return NULL;
136 }
137 
138 /*
139  * Find the interrupt parent of a node.
140  */
141 static struct device_node * __devinit intr_parent(struct device_node *p)
142 {
143 	phandle *parp;
144 
145 	parp = (phandle *) get_property(p, "interrupt-parent", NULL);
146 	if (parp == NULL)
147 		return p->parent;
148 	p = find_phandle(*parp);
149 	if (p != NULL)
150 		return p;
151 	/*
152 	 * On a powermac booted with BootX, we don't get to know the
153 	 * phandles for any nodes, so find_phandle will return NULL.
154 	 * Fortunately these machines only have one interrupt controller
155 	 * so there isn't in fact any ambiguity.  -- paulus
156 	 */
157 	if (num_interrupt_controllers == 1)
158 		p = dflt_interrupt_controller;
159 	return p;
160 }
161 
162 /*
163  * Find out the size of each entry of the interrupts property
164  * for a node.
165  */
166 int __devinit prom_n_intr_cells(struct device_node *np)
167 {
168 	struct device_node *p;
169 	unsigned int *icp;
170 
171 	for (p = np; (p = intr_parent(p)) != NULL; ) {
172 		icp = (unsigned int *)
173 			get_property(p, "#interrupt-cells", NULL);
174 		if (icp != NULL)
175 			return *icp;
176 		if (get_property(p, "interrupt-controller", NULL) != NULL
177 		    || get_property(p, "interrupt-map", NULL) != NULL) {
178 			printk("oops, node %s doesn't have #interrupt-cells\n",
179 			       p->full_name);
180 			return 1;
181 		}
182 	}
183 #ifdef DEBUG_IRQ
184 	printk("prom_n_intr_cells failed for %s\n", np->full_name);
185 #endif
186 	return 1;
187 }
188 
189 /*
190  * Map an interrupt from a device up to the platform interrupt
191  * descriptor.
192  */
193 static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
194 				   struct device_node *np, unsigned int *ints,
195 				   int nintrc)
196 {
197 	struct device_node *p, *ipar;
198 	unsigned int *imap, *imask, *ip;
199 	int i, imaplen, match;
200 	int newintrc = 0, newaddrc = 0;
201 	unsigned int *reg;
202 	int naddrc;
203 
204 	reg = (unsigned int *) get_property(np, "reg", NULL);
205 	naddrc = prom_n_addr_cells(np);
206 	p = intr_parent(np);
207 	while (p != NULL) {
208 		if (get_property(p, "interrupt-controller", NULL) != NULL)
209 			/* this node is an interrupt controller, stop here */
210 			break;
211 		imap = (unsigned int *)
212 			get_property(p, "interrupt-map", &imaplen);
213 		if (imap == NULL) {
214 			p = intr_parent(p);
215 			continue;
216 		}
217 		imask = (unsigned int *)
218 			get_property(p, "interrupt-map-mask", NULL);
219 		if (imask == NULL) {
220 			printk("oops, %s has interrupt-map but no mask\n",
221 			       p->full_name);
222 			return 0;
223 		}
224 		imaplen /= sizeof(unsigned int);
225 		match = 0;
226 		ipar = NULL;
227 		while (imaplen > 0 && !match) {
228 			/* check the child-interrupt field */
229 			match = 1;
230 			for (i = 0; i < naddrc && match; ++i)
231 				match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
232 			for (; i < naddrc + nintrc && match; ++i)
233 				match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
234 			imap += naddrc + nintrc;
235 			imaplen -= naddrc + nintrc;
236 			/* grab the interrupt parent */
237 			ipar = find_phandle((phandle) *imap++);
238 			--imaplen;
239 			if (ipar == NULL && num_interrupt_controllers == 1)
240 				/* cope with BootX not giving us phandles */
241 				ipar = dflt_interrupt_controller;
242 			if (ipar == NULL) {
243 				printk("oops, no int parent %x in map of %s\n",
244 				       imap[-1], p->full_name);
245 				return 0;
246 			}
247 			/* find the parent's # addr and intr cells */
248 			ip = (unsigned int *)
249 				get_property(ipar, "#interrupt-cells", NULL);
250 			if (ip == NULL) {
251 				printk("oops, no #interrupt-cells on %s\n",
252 				       ipar->full_name);
253 				return 0;
254 			}
255 			newintrc = *ip;
256 			ip = (unsigned int *)
257 				get_property(ipar, "#address-cells", NULL);
258 			newaddrc = (ip == NULL)? 0: *ip;
259 			imap += newaddrc + newintrc;
260 			imaplen -= newaddrc + newintrc;
261 		}
262 		if (imaplen < 0) {
263 			printk("oops, error decoding int-map on %s, len=%d\n",
264 			       p->full_name, imaplen);
265 			return 0;
266 		}
267 		if (!match) {
268 #ifdef DEBUG_IRQ
269 			printk("oops, no match in %s int-map for %s\n",
270 			       p->full_name, np->full_name);
271 #endif
272 			return 0;
273 		}
274 		p = ipar;
275 		naddrc = newaddrc;
276 		nintrc = newintrc;
277 		ints = imap - nintrc;
278 		reg = ints - naddrc;
279 	}
280 	if (p == NULL) {
281 #ifdef DEBUG_IRQ
282 		printk("hmmm, int tree for %s doesn't have ctrler\n",
283 		       np->full_name);
284 #endif
285 		return 0;
286 	}
287 	*irq = ints;
288 	*ictrler = p;
289 	return nintrc;
290 }
291 
292 static unsigned char map_isa_senses[4] = {
293 	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
294 	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
295 	IRQ_SENSE_EDGE  | IRQ_POLARITY_NEGATIVE,
296 	IRQ_SENSE_EDGE  | IRQ_POLARITY_POSITIVE
297 };
298 
299 static unsigned char map_mpic_senses[4] = {
300 	IRQ_SENSE_EDGE  | IRQ_POLARITY_POSITIVE,
301 	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
302 	/* 2 seems to be used for the 8259 cascade... */
303 	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
304 	IRQ_SENSE_EDGE  | IRQ_POLARITY_NEGATIVE,
305 };
306 
307 static int __devinit finish_node_interrupts(struct device_node *np,
308 					    unsigned long *mem_start,
309 					    int measure_only)
310 {
311 	unsigned int *ints;
312 	int intlen, intrcells, intrcount;
313 	int i, j, n, sense;
314 	unsigned int *irq, virq;
315 	struct device_node *ic;
316 
317 	if (num_interrupt_controllers == 0) {
318 		/*
319 		 * Old machines just have a list of interrupt numbers
320 		 * and no interrupt-controller nodes.
321 		 */
322 		ints = (unsigned int *) get_property(np, "AAPL,interrupts",
323 						     &intlen);
324 		/* XXX old interpret_pci_props looked in parent too */
325 		/* XXX old interpret_macio_props looked for interrupts
326 		   before AAPL,interrupts */
327 		if (ints == NULL)
328 			ints = (unsigned int *) get_property(np, "interrupts",
329 							     &intlen);
330 		if (ints == NULL)
331 			return 0;
332 
333 		np->n_intrs = intlen / sizeof(unsigned int);
334 		np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
335 				       mem_start);
336 		if (!np->intrs)
337 			return -ENOMEM;
338 		if (measure_only)
339 			return 0;
340 
341 		for (i = 0; i < np->n_intrs; ++i) {
342 			np->intrs[i].line = *ints++;
343 			np->intrs[i].sense = IRQ_SENSE_LEVEL
344 				| IRQ_POLARITY_NEGATIVE;
345 		}
346 		return 0;
347 	}
348 
349 	ints = (unsigned int *) get_property(np, "interrupts", &intlen);
350 	if (ints == NULL)
351 		return 0;
352 	intrcells = prom_n_intr_cells(np);
353 	intlen /= intrcells * sizeof(unsigned int);
354 
355 	np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
356 	if (!np->intrs)
357 		return -ENOMEM;
358 
359 	if (measure_only)
360 		return 0;
361 
362 	intrcount = 0;
363 	for (i = 0; i < intlen; ++i, ints += intrcells) {
364 		n = map_interrupt(&irq, &ic, np, ints, intrcells);
365 		if (n <= 0)
366 			continue;
367 
368 		/* don't map IRQ numbers under a cascaded 8259 controller */
369 		if (ic && device_is_compatible(ic, "chrp,iic")) {
370 			np->intrs[intrcount].line = irq[0];
371 			sense = (n > 1)? (irq[1] & 3): 3;
372 			np->intrs[intrcount].sense = map_isa_senses[sense];
373 		} else {
374 			virq = virt_irq_create_mapping(irq[0]);
375 #ifdef CONFIG_PPC64
376 			if (virq == NO_IRQ) {
377 				printk(KERN_CRIT "Could not allocate interrupt"
378 				       " number for %s\n", np->full_name);
379 				continue;
380 			}
381 #endif
382 			np->intrs[intrcount].line = irq_offset_up(virq);
383 			sense = (n > 1)? (irq[1] & 3): 1;
384 			np->intrs[intrcount].sense = map_mpic_senses[sense];
385 		}
386 
387 #ifdef CONFIG_PPC64
388 		/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
389 		if (_machine == PLATFORM_POWERMAC && ic && ic->parent) {
390 			char *name = get_property(ic->parent, "name", NULL);
391 			if (name && !strcmp(name, "u3"))
392 				np->intrs[intrcount].line += 128;
393 			else if (!(name && !strcmp(name, "mac-io")))
394 				/* ignore other cascaded controllers, such as
395 				   the k2-sata-root */
396 				break;
397 		}
398 #endif
399 		if (n > 2) {
400 			printk("hmmm, got %d intr cells for %s:", n,
401 			       np->full_name);
402 			for (j = 0; j < n; ++j)
403 				printk(" %d", irq[j]);
404 			printk("\n");
405 		}
406 		++intrcount;
407 	}
408 	np->n_intrs = intrcount;
409 
410 	return 0;
411 }
412 
413 static int __devinit interpret_pci_props(struct device_node *np,
414 					 unsigned long *mem_start,
415 					 int naddrc, int nsizec,
416 					 int measure_only)
417 {
418 	struct address_range *adr;
419 	struct pci_reg_property *pci_addrs;
420 	int i, l, n_addrs;
421 
422 	pci_addrs = (struct pci_reg_property *)
423 		get_property(np, "assigned-addresses", &l);
424 	if (!pci_addrs)
425 		return 0;
426 
427 	n_addrs = l / sizeof(*pci_addrs);
428 
429 	adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
430 	if (!adr)
431 		return -ENOMEM;
432 
433  	if (measure_only)
434  		return 0;
435 
436  	np->addrs = adr;
437  	np->n_addrs = n_addrs;
438 
439  	for (i = 0; i < n_addrs; i++) {
440  		adr[i].space = pci_addrs[i].addr.a_hi;
441  		adr[i].address = pci_addrs[i].addr.a_lo |
442 			((u64)pci_addrs[i].addr.a_mid << 32);
443  		adr[i].size = pci_addrs[i].size_lo;
444 	}
445 
446 	return 0;
447 }
448 
449 static int __init interpret_dbdma_props(struct device_node *np,
450 					unsigned long *mem_start,
451 					int naddrc, int nsizec,
452 					int measure_only)
453 {
454 	struct reg_property32 *rp;
455 	struct address_range *adr;
456 	unsigned long base_address;
457 	int i, l;
458 	struct device_node *db;
459 
460 	base_address = 0;
461 	if (!measure_only) {
462 		for (db = np->parent; db != NULL; db = db->parent) {
463 			if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
464 				base_address = db->addrs[0].address;
465 				break;
466 			}
467 		}
468 	}
469 
470 	rp = (struct reg_property32 *) get_property(np, "reg", &l);
471 	if (rp != 0 && l >= sizeof(struct reg_property32)) {
472 		i = 0;
473 		adr = (struct address_range *) (*mem_start);
474 		while ((l -= sizeof(struct reg_property32)) >= 0) {
475 			if (!measure_only) {
476 				adr[i].space = 2;
477 				adr[i].address = rp[i].address + base_address;
478 				adr[i].size = rp[i].size;
479 			}
480 			++i;
481 		}
482 		np->addrs = adr;
483 		np->n_addrs = i;
484 		(*mem_start) += i * sizeof(struct address_range);
485 	}
486 
487 	return 0;
488 }
489 
490 static int __init interpret_macio_props(struct device_node *np,
491 					unsigned long *mem_start,
492 					int naddrc, int nsizec,
493 					int measure_only)
494 {
495 	struct reg_property32 *rp;
496 	struct address_range *adr;
497 	unsigned long base_address;
498 	int i, l;
499 	struct device_node *db;
500 
501 	base_address = 0;
502 	if (!measure_only) {
503 		for (db = np->parent; db != NULL; db = db->parent) {
504 			if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
505 				base_address = db->addrs[0].address;
506 				break;
507 			}
508 		}
509 	}
510 
511 	rp = (struct reg_property32 *) get_property(np, "reg", &l);
512 	if (rp != 0 && l >= sizeof(struct reg_property32)) {
513 		i = 0;
514 		adr = (struct address_range *) (*mem_start);
515 		while ((l -= sizeof(struct reg_property32)) >= 0) {
516 			if (!measure_only) {
517 				adr[i].space = 2;
518 				adr[i].address = rp[i].address + base_address;
519 				adr[i].size = rp[i].size;
520 			}
521 			++i;
522 		}
523 		np->addrs = adr;
524 		np->n_addrs = i;
525 		(*mem_start) += i * sizeof(struct address_range);
526 	}
527 
528 	return 0;
529 }
530 
531 static int __init interpret_isa_props(struct device_node *np,
532 				      unsigned long *mem_start,
533 				      int naddrc, int nsizec,
534 				      int measure_only)
535 {
536 	struct isa_reg_property *rp;
537 	struct address_range *adr;
538 	int i, l;
539 
540 	rp = (struct isa_reg_property *) get_property(np, "reg", &l);
541 	if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
542 		i = 0;
543 		adr = (struct address_range *) (*mem_start);
544 		while ((l -= sizeof(struct isa_reg_property)) >= 0) {
545 			if (!measure_only) {
546 				adr[i].space = rp[i].space;
547 				adr[i].address = rp[i].address;
548 				adr[i].size = rp[i].size;
549 			}
550 			++i;
551 		}
552 		np->addrs = adr;
553 		np->n_addrs = i;
554 		(*mem_start) += i * sizeof(struct address_range);
555 	}
556 
557 	return 0;
558 }
559 
560 static int __init interpret_root_props(struct device_node *np,
561 				       unsigned long *mem_start,
562 				       int naddrc, int nsizec,
563 				       int measure_only)
564 {
565 	struct address_range *adr;
566 	int i, l;
567 	unsigned int *rp;
568 	int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
569 
570 	rp = (unsigned int *) get_property(np, "reg", &l);
571 	if (rp != 0 && l >= rpsize) {
572 		i = 0;
573 		adr = (struct address_range *) (*mem_start);
574 		while ((l -= rpsize) >= 0) {
575 			if (!measure_only) {
576 				adr[i].space = 0;
577 				adr[i].address = rp[naddrc - 1];
578 				adr[i].size = rp[naddrc + nsizec - 1];
579 			}
580 			++i;
581 			rp += naddrc + nsizec;
582 		}
583 		np->addrs = adr;
584 		np->n_addrs = i;
585 		(*mem_start) += i * sizeof(struct address_range);
586 	}
587 
588 	return 0;
589 }
590 
591 static int __devinit finish_node(struct device_node *np,
592 				 unsigned long *mem_start,
593 				 interpret_func *ifunc,
594 				 int naddrc, int nsizec,
595 				 int measure_only)
596 {
597 	struct device_node *child;
598 	int *ip, rc = 0;
599 
600 	/* get the device addresses and interrupts */
601 	if (ifunc != NULL)
602 		rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
603 	if (rc)
604 		goto out;
605 
606 	rc = finish_node_interrupts(np, mem_start, measure_only);
607 	if (rc)
608 		goto out;
609 
610 	/* Look for #address-cells and #size-cells properties. */
611 	ip = (int *) get_property(np, "#address-cells", NULL);
612 	if (ip != NULL)
613 		naddrc = *ip;
614 	ip = (int *) get_property(np, "#size-cells", NULL);
615 	if (ip != NULL)
616 		nsizec = *ip;
617 
618 	if (!strcmp(np->name, "device-tree") || np->parent == NULL)
619 		ifunc = interpret_root_props;
620 	else if (np->type == 0)
621 		ifunc = NULL;
622 	else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
623 		ifunc = interpret_pci_props;
624 	else if (!strcmp(np->type, "dbdma"))
625 		ifunc = interpret_dbdma_props;
626 	else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
627 		ifunc = interpret_macio_props;
628 	else if (!strcmp(np->type, "isa"))
629 		ifunc = interpret_isa_props;
630 	else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
631 		ifunc = interpret_root_props;
632 	else if (!((ifunc == interpret_dbdma_props
633 		    || ifunc == interpret_macio_props)
634 		   && (!strcmp(np->type, "escc")
635 		       || !strcmp(np->type, "media-bay"))))
636 		ifunc = NULL;
637 
638 	for (child = np->child; child != NULL; child = child->sibling) {
639 		rc = finish_node(child, mem_start, ifunc,
640 				 naddrc, nsizec, measure_only);
641 		if (rc)
642 			goto out;
643 	}
644 out:
645 	return rc;
646 }
647 
648 static void __init scan_interrupt_controllers(void)
649 {
650 	struct device_node *np;
651 	int n = 0;
652 	char *name, *ic;
653 	int iclen;
654 
655 	for (np = allnodes; np != NULL; np = np->allnext) {
656 		ic = get_property(np, "interrupt-controller", &iclen);
657 		name = get_property(np, "name", NULL);
658 		/* checking iclen makes sure we don't get a false
659 		   match on /chosen.interrupt_controller */
660 		if ((name != NULL
661 		     && strcmp(name, "interrupt-controller") == 0)
662 		    || (ic != NULL && iclen == 0
663 			&& strcmp(name, "AppleKiwi"))) {
664 			if (n == 0)
665 				dflt_interrupt_controller = np;
666 			++n;
667 		}
668 	}
669 	num_interrupt_controllers = n;
670 }
671 
672 /**
673  * finish_device_tree is called once things are running normally
674  * (i.e. with text and data mapped to the address they were linked at).
675  * It traverses the device tree and fills in some of the additional
676  * fields in each node, such as {n_}addrs and {n_}intrs; the virtual
677  * interrupt mapping is also initialized at this point.
678  */
679 void __init finish_device_tree(void)
680 {
681 	unsigned long start, end, size = 0;
682 
683 	DBG(" -> finish_device_tree\n");
684 
685 #ifdef CONFIG_PPC64
686 	/* Initialize virtual IRQ map */
687 	virt_irq_init();
688 #endif
689 	scan_interrupt_controllers();
690 
691 	/*
692 	 * Finish the device tree (pre-parsing some properties, etc...).
693 	 * We do this in two passes: one with "measure_only" set, which
694 	 * only measures the amount of memory needed, after which we can
695 	 * allocate that memory and call finish_node() again. However,
696 	 * we must be careful, as most routines will fail nowadays when
697 	 * prom_alloc() returns 0, so we must make sure our first pass
698 	 * doesn't start at 0. We pre-initialize size to 16 for that
699 	 * reason and then remove those additional 16 bytes.
700 	 */
701 	size = 16;
702 	finish_node(allnodes, &size, NULL, 0, 0, 1);
703 	size -= 16;
704 	end = start = (unsigned long) __va(lmb_alloc(size, 128));
705 	finish_node(allnodes, &end, NULL, 0, 0, 0);
706 	BUG_ON(end != start + size);
707 
708 	DBG(" <- finish_device_tree\n");
709 }
710 
711 static inline char *find_flat_dt_string(u32 offset)
712 {
713 	return ((char *)initial_boot_params) +
714 		initial_boot_params->off_dt_strings + offset;
715 }
716 
717 /**
718  * This function is used to scan the flattened device tree; it is
719  * used to extract the memory information at boot, before we can
720  * unflatten the tree.
721  */
722 int __init of_scan_flat_dt(int (*it)(unsigned long node,
723 				     const char *uname, int depth,
724 				     void *data),
725 			   void *data)
726 {
727 	unsigned long p = ((unsigned long)initial_boot_params) +
728 		initial_boot_params->off_dt_struct;
729 	int rc = 0;
730 	int depth = -1;
731 
732 	do {
733 		u32 tag = *((u32 *)p);
734 		char *pathp;
735 
736 		p += 4;
737 		if (tag == OF_DT_END_NODE) {
738 			depth --;
739 			continue;
740 		}
741 		if (tag == OF_DT_NOP)
742 			continue;
743 		if (tag == OF_DT_END)
744 			break;
745 		if (tag == OF_DT_PROP) {
746 			u32 sz = *((u32 *)p);
747 			p += 8;
748 			if (initial_boot_params->version < 0x10)
749 				p = _ALIGN(p, sz >= 8 ? 8 : 4);
750 			p += sz;
751 			p = _ALIGN(p, 4);
752 			continue;
753 		}
754 		if (tag != OF_DT_BEGIN_NODE) {
755 			printk(KERN_WARNING "Invalid tag %x scanning flattened"
756 			       " device tree !\n", tag);
757 			return -EINVAL;
758 		}
759 		depth++;
760 		pathp = (char *)p;
761 		p = _ALIGN(p + strlen(pathp) + 1, 4);
762 		if ((*pathp) == '/') {
763 			char *lp, *np;
764 			for (lp = NULL, np = pathp; *np; np++)
765 				if ((*np) == '/')
766 					lp = np+1;
767 			if (lp != NULL)
768 				pathp = lp;
769 		}
770 		rc = it(p, pathp, depth, data);
771 		if (rc != 0)
772 			break;
773 	} while(1);
774 
775 	return rc;
776 }
777 
778 /**
779  * This function can be used within an of_scan_flat_dt() callback to
780  * get access to properties.
781  */
782 void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
783 				 unsigned long *size)
784 {
785 	unsigned long p = node;
786 
787 	do {
788 		u32 tag = *((u32 *)p);
789 		u32 sz, noff;
790 		const char *nstr;
791 
792 		p += 4;
793 		if (tag == OF_DT_NOP)
794 			continue;
795 		if (tag != OF_DT_PROP)
796 			return NULL;
797 
798 		sz = *((u32 *)p);
799 		noff = *((u32 *)(p + 4));
800 		p += 8;
801 		if (initial_boot_params->version < 0x10)
802 			p = _ALIGN(p, sz >= 8 ? 8 : 4);
803 
804 		nstr = find_flat_dt_string(noff);
805 		if (nstr == NULL) {
806 			printk(KERN_WARNING "Can't find property index"
807 			       " name !\n");
808 			return NULL;
809 		}
810 		if (strcmp(name, nstr) == 0) {
811 			if (size)
812 				*size = sz;
813 			return (void *)p;
814 		}
815 		p += sz;
816 		p = _ALIGN(p, 4);
817 	} while(1);
818 }
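
/*
 * Illustrative sketch (not compiled, hence the #if 0): a minimal
 * of_scan_flat_dt() callback that uses of_get_flat_dt_prop(), in the
 * same style as the early_init_dt_scan_*() callbacks further down.
 * The function name is hypothetical.
 */
#if 0
static int __init example_dt_scan(unsigned long node, const char *uname,
				  int depth, void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;	/* keep scanning */

	DBG("found a memory node: %s (depth %d)\n", uname, depth);
	return 1;		/* a non-zero return stops the scan */
}

/* Would be invoked as: of_scan_flat_dt(example_dt_scan, NULL); */
#endif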
819 
820 static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
821 				       unsigned long align)
822 {
823 	void *res;
824 
825 	*mem = _ALIGN(*mem, align);
826 	res = (void *)*mem;
827 	*mem += size;
828 
829 	return res;
830 }
831 
832 static unsigned long __init unflatten_dt_node(unsigned long mem,
833 					      unsigned long *p,
834 					      struct device_node *dad,
835 					      struct device_node ***allnextpp,
836 					      unsigned long fpsize)
837 {
838 	struct device_node *np;
839 	struct property *pp, **prev_pp = NULL;
840 	char *pathp;
841 	u32 tag;
842 	unsigned int l, allocl;
843 	int has_name = 0;
844 	int new_format = 0;
845 
846 	tag = *((u32 *)(*p));
847 	if (tag != OF_DT_BEGIN_NODE) {
848 		printk("Weird tag at start of node: %x\n", tag);
849 		return mem;
850 	}
851 	*p += 4;
852 	pathp = (char *)*p;
853 	l = allocl = strlen(pathp) + 1;
854 	*p = _ALIGN(*p + l, 4);
855 
856 	/* Version 0x10 has a more compact unit name here instead of the full
857 	 * path. We accumulate the full path size using "fpsize" and rebuild
858 	 * it later. We detect this because the first character of the name is
859 	 * not '/'.
860 	 */
861 	if ((*pathp) != '/') {
862 		new_format = 1;
863 		if (fpsize == 0) {
864 			/* Root node: special case. fpsize accounts for the path
865 			 * plus the terminating zero. The root node only has '/', so
866 			 * fpsize should be 2, but we want to avoid first-level
867 			 * nodes having two '/', so we use fpsize 1 here.
868 			 */
869 			fpsize = 1;
870 			allocl = 2;
871 		} else {
872 			/* account for '/' and path size minus terminal 0
873 			 * already in 'l'
874 			 */
875 			fpsize += l;
876 			allocl = fpsize;
877 		}
878 	}
879 
880 
881 	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
882 				__alignof__(struct device_node));
883 	if (allnextpp) {
884 		memset(np, 0, sizeof(*np));
885 		np->full_name = ((char*)np) + sizeof(struct device_node);
886 		if (new_format) {
887 			char *p = np->full_name;
888 			/* rebuild full path for new format */
889 			if (dad && dad->parent) {
890 				strcpy(p, dad->full_name);
891 #ifdef DEBUG
892 				if ((strlen(p) + l + 1) != allocl) {
893 					DBG("%s: p: %d, l: %d, a: %d\n",
894 					    pathp, strlen(p), l, allocl);
895 				}
896 #endif
897 				p += strlen(p);
898 			}
899 			*(p++) = '/';
900 			memcpy(p, pathp, l);
901 		} else
902 			memcpy(np->full_name, pathp, l);
903 		prev_pp = &np->properties;
904 		**allnextpp = np;
905 		*allnextpp = &np->allnext;
906 		if (dad != NULL) {
907 			np->parent = dad;
908 			/* we temporarily use the next field as `last_child'*/
909 			if (dad->next == 0)
910 				dad->child = np;
911 			else
912 				dad->next->sibling = np;
913 			dad->next = np;
914 		}
915 		kref_init(&np->kref);
916 	}
917 	while(1) {
918 		u32 sz, noff;
919 		char *pname;
920 
921 		tag = *((u32 *)(*p));
922 		if (tag == OF_DT_NOP) {
923 			*p += 4;
924 			continue;
925 		}
926 		if (tag != OF_DT_PROP)
927 			break;
928 		*p += 4;
929 		sz = *((u32 *)(*p));
930 		noff = *((u32 *)((*p) + 4));
931 		*p += 8;
932 		if (initial_boot_params->version < 0x10)
933 			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);
934 
935 		pname = find_flat_dt_string(noff);
936 		if (pname == NULL) {
937 			printk("Can't find property name in list !\n");
938 			break;
939 		}
940 		if (strcmp(pname, "name") == 0)
941 			has_name = 1;
942 		l = strlen(pname) + 1;
943 		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
944 					__alignof__(struct property));
945 		if (allnextpp) {
946 			if (strcmp(pname, "linux,phandle") == 0) {
947 				np->node = *((u32 *)*p);
948 				if (np->linux_phandle == 0)
949 					np->linux_phandle = np->node;
950 			}
951 			if (strcmp(pname, "ibm,phandle") == 0)
952 				np->linux_phandle = *((u32 *)*p);
953 			pp->name = pname;
954 			pp->length = sz;
955 			pp->value = (void *)*p;
956 			*prev_pp = pp;
957 			prev_pp = &pp->next;
958 		}
959 		*p = _ALIGN((*p) + sz, 4);
960 	}
961 	/* With version 0x10 we may not have the name property; recreate
962 	 * it here from the unit name if absent.
963 	 */
964 	if (!has_name) {
965 		char *p = pathp, *ps = pathp, *pa = NULL;
966 		int sz;
967 
968 		while (*p) {
969 			if ((*p) == '@')
970 				pa = p;
971 			if ((*p) == '/')
972 				ps = p + 1;
973 			p++;
974 		}
975 		if (pa < ps)
976 			pa = p;
977 		sz = (pa - ps) + 1;
978 		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
979 					__alignof__(struct property));
980 		if (allnextpp) {
981 			pp->name = "name";
982 			pp->length = sz;
983 			pp->value = (unsigned char *)(pp + 1);
984 			*prev_pp = pp;
985 			prev_pp = &pp->next;
986 			memcpy(pp->value, ps, sz - 1);
987 			((char *)pp->value)[sz - 1] = 0;
988 			DBG("fixed up name for %s -> %s\n", pathp, pp->value);
989 		}
990 	}
991 	if (allnextpp) {
992 		*prev_pp = NULL;
993 		np->name = get_property(np, "name", NULL);
994 		np->type = get_property(np, "device_type", NULL);
995 
996 		if (!np->name)
997 			np->name = "<NULL>";
998 		if (!np->type)
999 			np->type = "<NULL>";
1000 	}
1001 	while (tag == OF_DT_BEGIN_NODE) {
1002 		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
1003 		tag = *((u32 *)(*p));
1004 	}
1005 	if (tag != OF_DT_END_NODE) {
1006 		printk("Weird tag at end of node: %x\n", tag);
1007 		return mem;
1008 	}
1009 	*p += 4;
1010 	return mem;
1011 }
1012 
1013 
1014 /**
1015  * Unflattens the device tree passed by the firmware, creating the
1016  * tree of struct device_node. It also fills in the "name" and "type"
1017  * pointers of the nodes so that the normal device-tree walking
1018  * functions can be used (this used to be done by finish_device_tree).
1019  */
1020 void __init unflatten_device_tree(void)
1021 {
1022 	unsigned long start, mem, size;
1023 	struct device_node **allnextp = &allnodes;
1024 	char *p = NULL;
1025 	int l = 0;
1026 
1027 	DBG(" -> unflatten_device_tree()\n");
1028 
1029 	/* First pass, scan for size */
1030 	start = ((unsigned long)initial_boot_params) +
1031 		initial_boot_params->off_dt_struct;
1032 	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
1033 	size = (size | 3) + 1;
1034 
1035 	DBG("  size is %lx, allocating...\n", size);
1036 
1037 	/* Allocate memory for the expanded device tree */
1038 	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
1039 	if (!mem) {
1040 		DBG("Couldn't allocate memory with lmb_alloc()!\n");
1041 		panic("Couldn't allocate memory with lmb_alloc()!\n");
1042 	}
1043 	mem = (unsigned long) __va(mem);
1044 
1045 	((u32 *)mem)[size / 4] = 0xdeadbeef;
1046 
1047 	DBG("  unflattening %lx...\n", mem);
1048 
1049 	/* Second pass, do actual unflattening */
1050 	start = ((unsigned long)initial_boot_params) +
1051 		initial_boot_params->off_dt_struct;
1052 	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
1053 	if (*((u32 *)start) != OF_DT_END)
1054 		printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
1055 	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
1056 		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
1057 		       ((u32 *)mem)[size / 4] );
1058 	*allnextp = NULL;
1059 
1060 	/* Get pointer to OF "/chosen" node for use everywhere */
1061 	of_chosen = of_find_node_by_path("/chosen");
1062 	if (of_chosen == NULL)
1063 		of_chosen = of_find_node_by_path("/chosen@0");
1064 
1065 	/* Retrieve the command line */
1066 	if (of_chosen != NULL) {
1067 		p = (char *)get_property(of_chosen, "bootargs", &l);
1068 		if (p != NULL && l > 0)
1069 			strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
1070 	}
1071 #ifdef CONFIG_CMDLINE
1072 	if (l == 0 || (l == 1 && (*p) == 0))
1073 		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1074 #endif /* CONFIG_CMDLINE */
1075 
1076 	DBG("Command line is: %s\n", cmd_line);
1077 
1078 	DBG(" <- unflatten_device_tree()\n");
1079 }
1080 
1081 
1082 static int __init early_init_dt_scan_cpus(unsigned long node,
1083 					  const char *uname, int depth, void *data)
1084 {
1085 	u32 *prop;
1086 	unsigned long size;
1087 	char *type = of_get_flat_dt_prop(node, "device_type", &size);
1088 
1089 	/* We are scanning "cpu" nodes only */
1090 	if (type == NULL || strcmp(type, "cpu") != 0)
1091 		return 0;
1092 
1093 	boot_cpuid = 0;
1094 	boot_cpuid_phys = 0;
1095 	if (initial_boot_params && initial_boot_params->version >= 2) {
1096 		/* Version 2 of the kexec param format adds the phys cpuid
1097 		 * of the booted processor.
1098 		 */
1099 		boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
1100 	} else {
1101 		/* Check if it's the boot CPU; if so, set its hw index now */
1102 		if (of_get_flat_dt_prop(node,
1103 					"linux,boot-cpu", NULL) != NULL) {
1104 			prop = of_get_flat_dt_prop(node, "reg", NULL);
1105 			if (prop != NULL)
1106 				boot_cpuid_phys = *prop;
1107 		}
1108 	}
1109 	set_hard_smp_processor_id(0, boot_cpuid_phys);
1110 
1111 #ifdef CONFIG_ALTIVEC
1112 	/* Check if we have a VMX and, if so, update the CPU features */
1113 	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
1114 	if (prop && (*prop) > 0) {
1115 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1116 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1117 	}
1118 
1119 	/* Same goes for Apple's "altivec" property */
1120 	prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
1121 	if (prop) {
1122 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1123 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1124 	}
1125 #endif /* CONFIG_ALTIVEC */
1126 
1127 #ifdef CONFIG_PPC_PSERIES
1128 	/*
1129 	 * Check for an SMT-capable CPU and set the CPU feature. We do
1130 	 * this by looking at the size of the ibm,ppc-interrupt-server#s
1131 	 * property.
1132 	 */
1133 	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
1134 				       &size);
1135 	cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
1136 	if (prop && ((size / sizeof(u32)) > 1))
1137 		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
1138 #endif
1139 
1140 	return 0;
1141 }
1142 
1143 static int __init early_init_dt_scan_chosen(unsigned long node,
1144 					    const char *uname, int depth, void *data)
1145 {
1146 	u32 *prop;
1147 	unsigned long *lprop;
1148 
1149 	DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1150 
1151 	if (depth != 1 ||
1152 	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
1153 		return 0;
1154 
1155 	/* get platform type */
1156 	prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
1157 	if (prop == NULL)
1158 		return 0;
1159 #ifdef CONFIG_PPC_MULTIPLATFORM
1160 	_machine = *prop;
1161 #endif
1162 
1163 #ifdef CONFIG_PPC64
1164 	/* check if iommu is forced on or off */
1165 	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
1166 		iommu_is_off = 1;
1167 	if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
1168 		iommu_force_on = 1;
1169 #endif
1170 
1171  	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
1172  	if (lprop)
1173  		memory_limit = *lprop;
1174 
1175 #ifdef CONFIG_PPC64
1176  	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
1177  	if (lprop)
1178  		tce_alloc_start = *lprop;
1179  	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
1180  	if (lprop)
1181  		tce_alloc_end = *lprop;
1182 #endif
1183 
1184 #ifdef CONFIG_PPC_RTAS
1185 	/* To help early debugging via the front panel, we retrieve a minimal
1186 	 * set of RTAS information now, if available.
1187 	 */
1188 	{
1189 		u64 *basep, *entryp;
1190 
1191 		basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
1192 		entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
1193 		prop = of_get_flat_dt_prop(node, "linux,rtas-size", NULL);
1194 		if (basep && entryp && prop) {
1195 			rtas.base = *basep;
1196 			rtas.entry = *entryp;
1197 			rtas.size = *prop;
1198 		}
1199 	}
1200 #endif /* CONFIG_PPC_RTAS */
1201 
1202 #ifdef CONFIG_KEXEC
1203        lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
1204        if (lprop)
1205                crashk_res.start = *lprop;
1206 
1207        lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
1208        if (lprop)
1209                crashk_res.end = crashk_res.start + *lprop - 1;
1210 #endif
1211 
1212 	/* break now */
1213 	return 1;
1214 }
1215 
1216 static int __init early_init_dt_scan_root(unsigned long node,
1217 					  const char *uname, int depth, void *data)
1218 {
1219 	u32 *prop;
1220 
1221 	if (depth != 0)
1222 		return 0;
1223 
1224 	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
1225 	dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1226 	DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1227 
1228 	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
1229 	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1230 	DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1231 
1232 	/* break now */
1233 	return 1;
1234 }
1235 
1236 static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1237 {
1238 	cell_t *p = *cellp;
1239 	unsigned long r;
1240 
1241 	/* Ignore more than 2 cells */
1242 	while (s > sizeof(unsigned long) / 4) {
1243 		p++;
1244 		s--;
1245 	}
1246 	r = *p++;
1247 #ifdef CONFIG_PPC64
1248 	if (s > 1) {
1249 		r <<= 32;
1250 		r |= *(p++);
1251 		s--;
1252 	}
1253 #endif
1254 
1255 	*cellp = p;
1256 	return r;
1257 }
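
/*
 * Illustrative sketch (not compiled, hence the #if 0): how
 * dt_mem_next_cell() consumes a "reg" entry.  The cell values are made
 * up; with dt_root_addr_cells = 2 and dt_root_size_cells = 1, the base
 * is assembled from two cells and the size from one.
 */
#if 0
static void __init dt_mem_next_cell_example(void)
{
	static cell_t example_reg[] = { 0x00000001, 0x00000000, 0x10000000 };
	cell_t *reg = example_reg;
	unsigned long base, size;

	base = dt_mem_next_cell(2, &reg);	/* 0x100000000 on ppc64;
						   the high cell is skipped
						   on ppc32 */
	size = dt_mem_next_cell(1, &reg);	/* 0x10000000 */
	DBG("example: base %lx size %lx\n", base, size);
}
#endif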
1258 
1259 
1260 static int __init early_init_dt_scan_memory(unsigned long node,
1261 					    const char *uname, int depth, void *data)
1262 {
1263 	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
1264 	cell_t *reg, *endp;
1265 	unsigned long l;
1266 
1267 	/* We are scanning "memory" nodes only */
1268 	if (type == NULL) {
1269 		/*
1270 		 * The longtrail doesn't have a device_type on the
1271 		 * /memory node, so look for the node called /memory@0.
1272 		 */
1273 		if (depth != 1 || strcmp(uname, "memory@0") != 0)
1274 			return 0;
1275 	} else if (strcmp(type, "memory") != 0)
1276 		return 0;
1277 
1278 	reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
1279 	if (reg == NULL)
1280 		return 0;
1281 
1282 	endp = reg + (l / sizeof(cell_t));
1283 
1284 	DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
1285 	    uname, l, reg[0], reg[1], reg[2], reg[3]);
1286 
1287 	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1288 		unsigned long base, size;
1289 
1290 		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1291 		size = dt_mem_next_cell(dt_root_size_cells, &reg);
1292 
1293 		if (size == 0)
1294 			continue;
1295 		DBG(" - %lx ,  %lx\n", base, size);
1296 #ifdef CONFIG_PPC64
1297 		if (iommu_is_off) {
1298 			if (base >= 0x80000000ul)
1299 				continue;
1300 			if ((base + size) > 0x80000000ul)
1301 				size = 0x80000000ul - base;
1302 		}
1303 #endif
1304 		lmb_add(base, size);
1305 	}
1306 	return 0;
1307 }
1308 
1309 static void __init early_reserve_mem(void)
1310 {
1311 	unsigned long base, size;
1312 	unsigned long *reserve_map;
1313 
1314 	reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
1315 					initial_boot_params->off_mem_rsvmap);
1316 	while (1) {
1317 		base = *(reserve_map++);
1318 		size = *(reserve_map++);
1319 		if (size == 0)
1320 			break;
1321 		DBG("reserving: %lx -> %lx\n", base, size);
1322 		lmb_reserve(base, size);
1323 	}
1324 
1325 #if 0
1326 	DBG("memory reserved, lmbs :\n");
1327       	lmb_dump_all();
1328 #endif
1329 }
1330 
1331 void __init early_init_devtree(void *params)
1332 {
1333 	DBG(" -> early_init_devtree()\n");
1334 
1335 	/* Setup flat device-tree pointer */
1336 	initial_boot_params = params;
1337 
1338 	/* Retrieve various information from the /chosen node of the
1339 	 * device-tree, including the platform type, initrd location and
1340 	 * size, TCE reserve, and more ...
1341 	 */
1342 	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
1343 
1344 	/* Scan memory nodes and rebuild LMBs */
1345 	lmb_init();
1346 	of_scan_flat_dt(early_init_dt_scan_root, NULL);
1347 	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
1348 	lmb_enforce_memory_limit(memory_limit);
1349 	lmb_analyze();
1350 
1351 	DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
1352 
1353 	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
1354 	lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
1355 #ifdef CONFIG_CRASH_DUMP
1356 	lmb_reserve(0, KDUMP_RESERVE_LIMIT);
1357 #endif
1358 	early_reserve_mem();
1359 
1360 	DBG("Scanning CPUs ...\n");
1361 
1362 	/* Retrieve CPU-related information from the flat tree
1363 	 * (altivec support, boot CPU ID, ...)
1364 	 */
1365 	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
1366 
1367 	DBG(" <- early_init_devtree()\n");
1368 }
1369 
1370 #undef printk
1371 
1372 int
1373 prom_n_addr_cells(struct device_node* np)
1374 {
1375 	int* ip;
1376 	do {
1377 		if (np->parent)
1378 			np = np->parent;
1379 		ip = (int *) get_property(np, "#address-cells", NULL);
1380 		if (ip != NULL)
1381 			return *ip;
1382 	} while (np->parent);
1383 	/* No #address-cells property for the root node, default to 1 */
1384 	return 1;
1385 }
1386 EXPORT_SYMBOL(prom_n_addr_cells);
1387 
1388 int
1389 prom_n_size_cells(struct device_node* np)
1390 {
1391 	int* ip;
1392 	do {
1393 		if (np->parent)
1394 			np = np->parent;
1395 		ip = (int *) get_property(np, "#size-cells", NULL);
1396 		if (ip != NULL)
1397 			return *ip;
1398 	} while (np->parent);
1399 	/* No #size-cells property for the root node, default to 1 */
1400 	return 1;
1401 }
1402 EXPORT_SYMBOL(prom_n_size_cells);
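
/*
 * Illustrative sketch (not compiled, hence the #if 0): using the parent's
 * #address-cells / #size-cells values to interpret a child's "reg"
 * property.  The function name is hypothetical.
 */
#if 0
static void reg_cells_example(struct device_node *np)
{
	int naddr = prom_n_addr_cells(np);
	int nsize = prom_n_size_cells(np);
	int len;
	unsigned int *reg = (unsigned int *)get_property(np, "reg", &len);

	if (reg && len >= (int)((naddr + nsize) * sizeof(unsigned int)))
		DBG("%s: first address cell %x, first size cell %x\n",
		    np->full_name, reg[0], reg[naddr]);
}
#endif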
1403 
1404 /**
1405  * Work out the sense (active-low level / active-high edge)
1406  * of each interrupt from the device tree.
1407  */
1408 void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1409 {
1410 	struct device_node *np;
1411 	int i, j;
1412 
1413 	/* default to level-triggered */
1414 	memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);
1415 
1416 	for (np = allnodes; np != 0; np = np->allnext) {
1417 		for (j = 0; j < np->n_intrs; j++) {
1418 			i = np->intrs[j].line;
1419 			if (i >= off && i < max)
1420 				senses[i-off] = np->intrs[j].sense;
1421 		}
1422 	}
1423 }
1424 
1425 /**
1426  * Construct and return a list of the device_nodes with a given name.
1427  */
1428 struct device_node *find_devices(const char *name)
1429 {
1430 	struct device_node *head, **prevp, *np;
1431 
1432 	prevp = &head;
1433 	for (np = allnodes; np != 0; np = np->allnext) {
1434 		if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1435 			*prevp = np;
1436 			prevp = &np->next;
1437 		}
1438 	}
1439 	*prevp = NULL;
1440 	return head;
1441 }
1442 EXPORT_SYMBOL(find_devices);
1443 
1444 /**
1445  * Construct and return a list of the device_nodes with a given type.
1446  */
1447 struct device_node *find_type_devices(const char *type)
1448 {
1449 	struct device_node *head, **prevp, *np;
1450 
1451 	prevp = &head;
1452 	for (np = allnodes; np != 0; np = np->allnext) {
1453 		if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1454 			*prevp = np;
1455 			prevp = &np->next;
1456 		}
1457 	}
1458 	*prevp = NULL;
1459 	return head;
1460 }
1461 EXPORT_SYMBOL(find_type_devices);
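
/*
 * Illustrative sketch (not compiled, hence the #if 0): the legacy
 * find_devices() / find_type_devices() helpers chain their matches
 * through the "next" field, so the result list is walked without any
 * refcounting.  The "serial" type is just an example.
 */
#if 0
static void legacy_find_example(void)
{
	struct device_node *np;

	for (np = find_type_devices("serial"); np != NULL; np = np->next)
		DBG("serial node: %s\n", np->full_name);
}
#endif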
1462 
1463 /**
1464  * Returns all nodes linked together
1465  */
1466 struct device_node *find_all_nodes(void)
1467 {
1468 	struct device_node *head, **prevp, *np;
1469 
1470 	prevp = &head;
1471 	for (np = allnodes; np != 0; np = np->allnext) {
1472 		*prevp = np;
1473 		prevp = &np->next;
1474 	}
1475 	*prevp = NULL;
1476 	return head;
1477 }
1478 EXPORT_SYMBOL(find_all_nodes);
1479 
1480 /** Checks if the given "compat" string matches one of the strings in
1481  * the device's "compatible" property
1482  */
1483 int device_is_compatible(struct device_node *device, const char *compat)
1484 {
1485 	const char* cp;
1486 	int cplen, l;
1487 
1488 	cp = (char *) get_property(device, "compatible", &cplen);
1489 	if (cp == NULL)
1490 		return 0;
1491 	while (cplen > 0) {
1492 		if (strncasecmp(cp, compat, strlen(compat)) == 0)
1493 			return 1;
1494 		l = strlen(cp) + 1;
1495 		cp += l;
1496 		cplen -= l;
1497 	}
1498 
1499 	return 0;
1500 }
1501 EXPORT_SYMBOL(device_is_compatible);
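
/*
 * Illustrative sketch (not compiled, hence the #if 0): "compatible" is a
 * list of NUL-separated strings, so a device can match more than one
 * name; device_is_compatible() returns non-zero on the first match.
 * The function name is hypothetical.
 */
#if 0
static void compatible_example(struct device_node *np)
{
	if (device_is_compatible(np, "chrp,iic"))
		DBG("%s matches chrp,iic\n", np->full_name);
}
#endif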
1502 
1503 
1504 /**
1505  * Indicates whether the root node has a given value in its
1506  * compatible property.
1507  */
1508 int machine_is_compatible(const char *compat)
1509 {
1510 	struct device_node *root;
1511 	int rc = 0;
1512 
1513 	root = of_find_node_by_path("/");
1514 	if (root) {
1515 		rc = device_is_compatible(root, compat);
1516 		of_node_put(root);
1517 	}
1518 	return rc;
1519 }
1520 EXPORT_SYMBOL(machine_is_compatible);
1521 
1522 /**
1523  * Construct and return a list of the device_nodes with a given type
1524  * and compatible property.
1525  */
1526 struct device_node *find_compatible_devices(const char *type,
1527 					    const char *compat)
1528 {
1529 	struct device_node *head, **prevp, *np;
1530 
1531 	prevp = &head;
1532 	for (np = allnodes; np != 0; np = np->allnext) {
1533 		if (type != NULL
1534 		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1535 			continue;
1536 		if (device_is_compatible(np, compat)) {
1537 			*prevp = np;
1538 			prevp = &np->next;
1539 		}
1540 	}
1541 	*prevp = NULL;
1542 	return head;
1543 }
1544 EXPORT_SYMBOL(find_compatible_devices);
1545 
1546 /**
1547  * Find the device_node with a given full_name.
1548  */
1549 struct device_node *find_path_device(const char *path)
1550 {
1551 	struct device_node *np;
1552 
1553 	for (np = allnodes; np != 0; np = np->allnext)
1554 		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1555 			return np;
1556 	return NULL;
1557 }
1558 EXPORT_SYMBOL(find_path_device);
1559 
1560 /*******
1561  *
1562  * New implementation of the OF "find" APIs; they return a refcounted
1563  * object, so call of_node_put() when done.  The device tree and list
1564  * are protected by a rw_lock.
1565  *
1566  * Note that property management will need some locking as well;
1567  * this isn't dealt with yet.
1568  *
1569  *******/
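
/*
 * Illustrative sketch (not compiled, hence the #if 0): typical use of the
 * refcounted find API described above.  Each successful call returns a
 * node with its refcount incremented; passing it back as "from" drops
 * that reference, and the final NULL result ends the loop cleanly.
 */
#if 0
static void refcounted_find_example(void)
{
	struct device_node *np = NULL;

	while ((np = of_find_node_by_type(np, "pci")) != NULL)
		DBG("found PCI bus node %s\n", np->full_name);

	/* Only an early break out of such a loop needs an explicit
	 * of_node_put(np) on the node that was being held. */
}
#endif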
1570 
1571 /**
1572  *	of_find_node_by_name - Find a node by its "name" property
1573  *	@from:	The node to start searching from, or NULL; the node
1574  *		you pass will not be searched, only the next one
1575  *		will.  Typically, you pass what the previous call
1576  *		returned; of_node_put() will be called on it.
1577  *	@name:	The name string to match against
1578  *
1579  *	Returns a node pointer with refcount incremented, use
1580  *	of_node_put() on it when done.
1581  */
1582 struct device_node *of_find_node_by_name(struct device_node *from,
1583 	const char *name)
1584 {
1585 	struct device_node *np;
1586 
1587 	read_lock(&devtree_lock);
1588 	np = from ? from->allnext : allnodes;
1589 	for (; np != 0; np = np->allnext)
1590 		if (np->name != 0 && strcasecmp(np->name, name) == 0
1591 		    && of_node_get(np))
1592 			break;
1593 	if (from)
1594 		of_node_put(from);
1595 	read_unlock(&devtree_lock);
1596 	return np;
1597 }
1598 EXPORT_SYMBOL(of_find_node_by_name);
1599 
1600 /**
1601  *	of_find_node_by_type - Find a node by its "device_type" property
1602  *	@from:	The node to start searching from, or NULL; the node
1603  *		you pass will not be searched, only the next one
1604  *		will.  Typically, you pass what the previous call
1605  *		returned; of_node_put() will be called on it.
1606  *	@type:	The type string to match against
1607  *
1608  *	Returns a node pointer with refcount incremented, use
1609  *	of_node_put() on it when done.
1610  */
1611 struct device_node *of_find_node_by_type(struct device_node *from,
1612 	const char *type)
1613 {
1614 	struct device_node *np;
1615 
1616 	read_lock(&devtree_lock);
1617 	np = from ? from->allnext : allnodes;
1618 	for (; np != 0; np = np->allnext)
1619 		if (np->type != 0 && strcasecmp(np->type, type) == 0
1620 		    && of_node_get(np))
1621 			break;
1622 	if (from)
1623 		of_node_put(from);
1624 	read_unlock(&devtree_lock);
1625 	return np;
1626 }
1627 EXPORT_SYMBOL(of_find_node_by_type);
1628 
1629 /**
1630  *	of_find_compatible_node - Find a node based on type and one of the
1631  *                                tokens in its "compatible" property
1632  *	@from:		The node to start searching from, or NULL; the node
1633  *			you pass will not be searched, only the next one
1634  *			will.  Typically, you pass what the previous call
1635  *			returned; of_node_put() will be called on it.
1636  *	@type:		The type string to match against "device_type", or NULL to ignore
1637  *	@compatible:	The string to match to one of the tokens in the device
1638  *			"compatible" list.
1639  *
1640  *	Returns a node pointer with refcount incremented, use
1641  *	of_node_put() on it when done.
1642  */
1643 struct device_node *of_find_compatible_node(struct device_node *from,
1644 	const char *type, const char *compatible)
1645 {
1646 	struct device_node *np;
1647 
1648 	read_lock(&devtree_lock);
1649 	np = from ? from->allnext : allnodes;
1650 	for (; np != 0; np = np->allnext) {
1651 		if (type != NULL
1652 		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1653 			continue;
1654 		if (device_is_compatible(np, compatible) && of_node_get(np))
1655 			break;
1656 	}
1657 	if (from)
1658 		of_node_put(from);
1659 	read_unlock(&devtree_lock);
1660 	return np;
1661 }
1662 EXPORT_SYMBOL(of_find_compatible_node);
1663 
1664 /**
1665  *	of_find_node_by_path - Find a node matching a full OF path
1666  *	@path:	The full path to match
1667  *
1668  *	Returns a node pointer with refcount incremented, use
1669  *	of_node_put() on it when done.
1670  */
1671 struct device_node *of_find_node_by_path(const char *path)
1672 {
1673 	struct device_node *np = allnodes;
1674 
1675 	read_lock(&devtree_lock);
1676 	for (; np != 0; np = np->allnext) {
1677 		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1678 		    && of_node_get(np))
1679 			break;
1680 	}
1681 	read_unlock(&devtree_lock);
1682 	return np;
1683 }
1684 EXPORT_SYMBOL(of_find_node_by_path);
1685 
1686 /**
1687  *	of_find_node_by_phandle - Find a node given a phandle
1688  *	@handle:	phandle of the node to find
1689  *
1690  *	Returns a node pointer with refcount incremented, use
1691  *	of_node_put() on it when done.
1692  */
1693 struct device_node *of_find_node_by_phandle(phandle handle)
1694 {
1695 	struct device_node *np;
1696 
1697 	read_lock(&devtree_lock);
1698 	for (np = allnodes; np != 0; np = np->allnext)
1699 		if (np->linux_phandle == handle)
1700 			break;
1701 	if (np)
1702 		of_node_get(np);
1703 	read_unlock(&devtree_lock);
1704 	return np;
1705 }
1706 EXPORT_SYMBOL(of_find_node_by_phandle);
1707 
1708 /**
1709  *	of_find_all_nodes - Get next node in global list
1710  *	@prev:	Previous node or NULL to start iteration
1711  *		of_node_put() will be called on it
1712  *
1713  *	Returns a node pointer with refcount incremented, use
1714  *	of_node_put() on it when done.
1715  */
1716 struct device_node *of_find_all_nodes(struct device_node *prev)
1717 {
1718 	struct device_node *np;
1719 
1720 	read_lock(&devtree_lock);
1721 	np = prev ? prev->allnext : allnodes;
1722 	for (; np != 0; np = np->allnext)
1723 		if (of_node_get(np))
1724 			break;
1725 	if (prev)
1726 		of_node_put(prev);
1727 	read_unlock(&devtree_lock);
1728 	return np;
1729 }
1730 EXPORT_SYMBOL(of_find_all_nodes);
1731 
1732 /**
1733  *	of_get_parent - Get a node's parent if any
1734  *	@node:	Node to get the parent of
1735  *
1736  *	Returns a node pointer with refcount incremented, use
1737  *	of_node_put() on it when done.
1738  */
1739 struct device_node *of_get_parent(const struct device_node *node)
1740 {
1741 	struct device_node *np;
1742 
1743 	if (!node)
1744 		return NULL;
1745 
1746 	read_lock(&devtree_lock);
1747 	np = of_node_get(node->parent);
1748 	read_unlock(&devtree_lock);
1749 	return np;
1750 }
1751 EXPORT_SYMBOL(of_get_parent);
1752 
1753 /**
1754  *	of_get_next_child - Iterate over a node's children
1755  *	@node:	parent node
1756  *	@prev:	previous child of the parent node, or NULL to get first
1757  *
1758  *	Returns a node pointer with refcount incremented, use
1759  *	of_node_put() on it when done.
1760  */
1761 struct device_node *of_get_next_child(const struct device_node *node,
1762 	struct device_node *prev)
1763 {
1764 	struct device_node *next;
1765 
1766 	read_lock(&devtree_lock);
1767 	next = prev ? prev->sibling : node->child;
1768 	for (; next != 0; next = next->sibling)
1769 		if (of_node_get(next))
1770 			break;
1771 	if (prev)
1772 		of_node_put(prev);
1773 	read_unlock(&devtree_lock);
1774 	return next;
1775 }
1776 EXPORT_SYMBOL(of_get_next_child);
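
/*
 * Illustrative sketch (not compiled, hence the #if 0): iterating the
 * children of a node with of_get_next_child().  Passing the previous
 * child back releases its reference, so only an early exit would need
 * an explicit of_node_put().  The function name is hypothetical.
 */
#if 0
static void child_iter_example(struct device_node *parent)
{
	struct device_node *child = NULL;

	while ((child = of_get_next_child(parent, child)) != NULL)
		DBG("child: %s\n", child->full_name);
}
#endif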
1777 
1778 /**
1779  *	of_node_get - Increment refcount of a node
1780  *	@node:	Node to inc refcount; NULL is supported to
1781  *		simplify writing of callers
1782  *
1783  *	Returns node.
1784  */
1785 struct device_node *of_node_get(struct device_node *node)
1786 {
1787 	if (node)
1788 		kref_get(&node->kref);
1789 	return node;
1790 }
1791 EXPORT_SYMBOL(of_node_get);
1792 
1793 static inline struct device_node * kref_to_device_node(struct kref *kref)
1794 {
1795 	return container_of(kref, struct device_node, kref);
1796 }
1797 
1798 /**
1799  *	of_node_release - release a dynamically allocated node
1800  *	@kref:  kref element of the node to be released
1801  *
1802  *	In of_node_put() this function is passed to kref_put()
1803  *	as the destructor.
1804  */
1805 static void of_node_release(struct kref *kref)
1806 {
1807 	struct device_node *node = kref_to_device_node(kref);
1808 	struct property *prop = node->properties;
1809 
1810 	if (!OF_IS_DYNAMIC(node))
1811 		return;
1812 	while (prop) {
1813 		struct property *next = prop->next;
1814 		kfree(prop->name);
1815 		kfree(prop->value);
1816 		kfree(prop);
1817 		prop = next;
1818 	}
1819 	kfree(node->intrs);
1820 	kfree(node->addrs);
1821 	kfree(node->full_name);
1822 	kfree(node->data);
1823 	kfree(node);
1824 }
1825 
1826 /**
1827  *	of_node_put - Decrement refcount of a node
1828  *	@node:	Node to dec refcount; NULL is supported to
1829  *		simplify writing of callers
1830  *
1831  */
1832 void of_node_put(struct device_node *node)
1833 {
1834 	if (node)
1835 		kref_put(&node->kref, of_node_release);
1836 }
1837 EXPORT_SYMBOL(of_node_put);
1838 
1839 /*
1840  * Plug a device node into the tree and global list.
1841  */
1842 void of_attach_node(struct device_node *np)
1843 {
1844 	write_lock(&devtree_lock);
1845 	np->sibling = np->parent->child;
1846 	np->allnext = allnodes;
1847 	np->parent->child = np;
1848 	allnodes = np;
1849 	write_unlock(&devtree_lock);
1850 }
1851 
1852 /*
1853  * "Unplug" a node from the device tree.  The caller must hold
1854  * a reference to the node.  The memory associated with the node
1855  * is not freed until its refcount goes to zero.
1856  */
1857 void of_detach_node(const struct device_node *np)
1858 {
1859 	struct device_node *parent;
1860 
1861 	write_lock(&devtree_lock);
1862 
1863 	parent = np->parent;
1864 
1865 	if (allnodes == np)
1866 		allnodes = np->allnext;
1867 	else {
1868 		struct device_node *prev;
1869 		for (prev = allnodes;
1870 		     prev->allnext != np;
1871 		     prev = prev->allnext)
1872 			;
1873 		prev->allnext = np->allnext;
1874 	}
1875 
1876 	if (parent->child == np)
1877 		parent->child = np->sibling;
1878 	else {
1879 		struct device_node *prevsib;
1880 		for (prevsib = np->parent->child;
1881 		     prevsib->sibling != np;
1882 		     prevsib = prevsib->sibling)
1883 			;
1884 		prevsib->sibling = np->sibling;
1885 	}
1886 
1887 	write_unlock(&devtree_lock);
1888 }
1889 
1890 #ifdef CONFIG_PPC_PSERIES
1891 /*
1892  * Fix up the uninitialized fields in a new device node:
1893  * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1894  *
1895  * A lot of boot-time code is duplicated here, because functions such
1896  * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1897  * slab allocator.
1898  *
1899  * This should probably be split up into smaller chunks.
1900  */
1901 
1902 static int of_finish_dynamic_node(struct device_node *node,
1903 				  unsigned long *unused1, int unused2,
1904 				  int unused3, int unused4)
1905 {
1906 	struct device_node *parent = of_get_parent(node);
1907 	int err = 0;
1908 	phandle *ibm_phandle;
1909 
1910 	node->name = get_property(node, "name", NULL);
1911 	node->type = get_property(node, "device_type", NULL);
1912 
1913 	if (!parent) {
1914 		err = -ENODEV;
1915 		goto out;
1916 	}
1917 
1918 	/* We don't support this function on PowerMac, at least
1919 	 * not yet.
1920 	 */
1921 	if (_machine == PLATFORM_POWERMAC)
1922 		return -ENODEV;
1923 
1924 	/* fix up new node's linux_phandle field */
1925 	if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL)))
1926 		node->linux_phandle = *ibm_phandle;
1927 
1928 out:
1929 	of_node_put(parent);
1930 	return err;
1931 }
1932 
1933 static int prom_reconfig_notifier(struct notifier_block *nb,
1934 				  unsigned long action, void *node)
1935 {
1936 	int err;
1937 
1938 	switch (action) {
1939 	case PSERIES_RECONFIG_ADD:
1940 		err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
1941 		if (err < 0) {
1942 			printk(KERN_ERR "finish_node returned %d\n", err);
1943 			err = NOTIFY_BAD;
1944 		}
1945 		break;
1946 	default:
1947 		err = NOTIFY_DONE;
1948 		break;
1949 	}
1950 	return err;
1951 }
1952 
1953 static struct notifier_block prom_reconfig_nb = {
1954 	.notifier_call = prom_reconfig_notifier,
1955 	.priority = 10, /* This one needs to run first */
1956 };
1957 
1958 static int __init prom_reconfig_setup(void)
1959 {
1960 	return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
1961 }
1962 __initcall(prom_reconfig_setup);
1963 #endif
1964 
1965 /*
1966  * Find a property with a given name for a given node
1967  * and return the value.
1968  */
1969 unsigned char *get_property(struct device_node *np, const char *name,
1970 			    int *lenp)
1971 {
1972 	struct property *pp;
1973 
1974 	for (pp = np->properties; pp != 0; pp = pp->next)
1975 		if (strcmp(pp->name, name) == 0) {
1976 			if (lenp != 0)
1977 				*lenp = pp->length;
1978 			return pp->value;
1979 		}
1980 	return NULL;
1981 }
1982 EXPORT_SYMBOL(get_property);
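
/*
 * Illustrative sketch (not compiled, hence the #if 0): reading a property
 * value and its length with get_property().  "model" and "reg" are just
 * example property names.
 */
#if 0
static void get_property_example(struct device_node *np)
{
	int len;
	char *model = (char *)get_property(np, "model", NULL);
	unsigned int *reg = (unsigned int *)get_property(np, "reg", &len);

	if (model)
		DBG("%s: model %s\n", np->full_name, model);
	if (reg)
		DBG("%s: reg has %d cells\n", np->full_name,
		    len / (int)sizeof(unsigned int));
}
#endif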
1983 
1984 /*
1985  * Add a property to a node
1986  */
1987 int prom_add_property(struct device_node* np, struct property* prop)
1988 {
1989 	struct property **next;
1990 
1991 	prop->next = NULL;
1992 	write_lock(&devtree_lock);
1993 	next = &np->properties;
1994 	while (*next) {
1995 		if (strcmp(prop->name, (*next)->name) == 0) {
1996 			/* duplicate! don't insert it */
1997 			write_unlock(&devtree_lock);
1998 			return -1;
1999 		}
2000 		next = &(*next)->next;
2001 	}
2002 	*next = prop;
2003 	write_unlock(&devtree_lock);
2004 
2005 #ifdef CONFIG_PROC_DEVICETREE
2006 	/* try to add to proc as well if it was initialized */
2007 	if (np->pde)
2008 		proc_device_tree_add_prop(np->pde, prop);
2009 #endif /* CONFIG_PROC_DEVICETREE */
2010 
2011 	return 0;
2012 }
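
/*
 * Illustrative sketch (not compiled, hence the #if 0): building a
 * struct property and attaching it with prom_add_property().  The
 * property name and value are hypothetical; note that the structure
 * and its value are linked into the node as-is, so they must remain
 * allocated for as long as the node exists.
 */
#if 0
static int add_property_example(struct device_node *np)
{
	static u32 value = 1;
	static struct property prop = {
		.name	= "linux,example-flag",
		.length	= sizeof(value),
		.value	= (unsigned char *)&value,
	};

	return prom_add_property(np, &prop);	/* -1 if it already exists */
}
#endif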
2013 
2014 /* I quickly hacked that one, check against spec! */
2015 static inline unsigned long
2016 bus_space_to_resource_flags(unsigned int bus_space)
2017 {
2018 	u8 space = (bus_space >> 24) & 0xf;
2019 	if (space == 0)
2020 		space = 0x02;
2021 	if (space == 0x02)
2022 		return IORESOURCE_MEM;
2023 	else if (space == 0x01)
2024 		return IORESOURCE_IO;
2025 	else {
2026 		printk(KERN_WARNING "prom.c: bus_space_to_resource_flags(), space: %x\n",
2027 		    	bus_space);
2028 		return 0;
2029 	}
2030 }
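
/*
 * Illustrative sketch (not compiled, hence the #if 0): this helper looks
 * at bits 24-27 of the first cell of an OF PCI address (the "ss" space
 * code of phys.hi), so e.g. 0x82000000 decodes as 32-bit memory space
 * and 0x81000000 as I/O space.
 */
#if 0
static void bus_space_example(void)
{
	BUG_ON(bus_space_to_resource_flags(0x82000000) != IORESOURCE_MEM);
	BUG_ON(bus_space_to_resource_flags(0x81000000) != IORESOURCE_IO);
}
#endif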
2031 
2032 #ifdef CONFIG_PCI
2033 static struct resource *find_parent_pci_resource(struct pci_dev* pdev,
2034 						 struct address_range *range)
2035 {
2036 	unsigned long mask;
2037 	int i;
2038 
2039 	/* Check this one */
2040 	mask = bus_space_to_resource_flags(range->space);
2041 	for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2042 		if ((pdev->resource[i].flags & mask) == mask &&
2043 			pdev->resource[i].start <= range->address &&
2044 			pdev->resource[i].end > range->address) {
2045 				if ((range->address + range->size - 1) > pdev->resource[i].end) {
2046 					/* Add better message */
2047 					printk(KERN_WARNING "PCI/OF resource overlap !\n");
2048 					return NULL;
2049 				}
2050 				break;
2051 			}
2052 	}
2053 	if (i == DEVICE_COUNT_RESOURCE)
2054 		return NULL;
2055 	return &pdev->resource[i];
2056 }
2057 
2058 /*
2059  * Request an OF device resource. Currently handles children of PCI devices,
2060  * or other nodes attached to the root node. Ultimately, some link to the
2061  * resources should be put in the OF node itself.
2062  */
2063 struct resource *request_OF_resource(struct device_node* node, int index,
2064 				     const char* name_postfix)
2065 {
2066 	struct pci_dev* pcidev;
2067 	u8 pci_bus, pci_devfn;
2068 	unsigned long iomask;
2069 	struct device_node* nd;
2070 	struct resource* parent;
2071 	struct resource *res = NULL;
2072 	int nlen, plen;
2073 
2074 	if (index >= node->n_addrs)
2075 		goto fail;
2076 
2077 	/* Sanity check on bus space */
2078 	iomask = bus_space_to_resource_flags(node->addrs[index].space);
2079 	if (iomask & IORESOURCE_MEM)
2080 		parent = &iomem_resource;
2081 	else if (iomask & IORESOURCE_IO)
2082 		parent = &ioport_resource;
2083 	else
2084 		goto fail;
2085 
2086 	/* Find a PCI parent if any */
2087 	nd = node;
2088 	pcidev = NULL;
2089 	while (nd) {
2090 		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2091 			pcidev = pci_find_slot(pci_bus, pci_devfn);
2092 		if (pcidev) break;
2093 		nd = nd->parent;
2094 	}
2095 	if (pcidev)
2096 		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2097 	if (!parent) {
2098 		printk(KERN_WARNING "request_OF_resource(%s), parent not found\n",
2099 			node->name);
2100 		goto fail;
2101 	}
2102 
2103 	res = __request_region(parent, node->addrs[index].address,
2104 			       node->addrs[index].size, NULL);
2105 	if (!res)
2106 		goto fail;
2107 	nlen = strlen(node->name);
2108 	plen = name_postfix ? strlen(name_postfix) : 0;
2109 	res->name = (const char *)kmalloc(nlen+plen+1, GFP_KERNEL);
2110 	if (res->name) {
2111 		strcpy((char *)res->name, node->name);
2112 		if (plen)
2113 			strcpy((char *)res->name+nlen, name_postfix);
2114 	}
2115 	return res;
2116 fail:
2117 	return NULL;
2118 }
2119 EXPORT_SYMBOL(request_OF_resource);
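
/*
 * Illustrative sketch (not compiled, hence the #if 0): pairing
 * request_OF_resource() with release_OF_resource() for address index 0
 * of a node.  The name postfix and function name are hypothetical.
 */
#if 0
static int of_resource_example(struct device_node *np)
{
	struct resource *res = request_OF_resource(np, 0, " (example)");

	if (res == NULL)
		return -EBUSY;
	/* ... use the region ... */
	return release_OF_resource(np, 0);
}
#endif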
2120 
2121 int release_OF_resource(struct device_node *node, int index)
2122 {
2123 	struct pci_dev* pcidev;
2124 	u8 pci_bus, pci_devfn;
2125 	unsigned long iomask, start, end;
2126 	struct device_node* nd;
2127 	struct resource* parent;
2128 	struct resource *res = NULL;
2129 
2130 	if (index >= node->n_addrs)
2131 		return -EINVAL;
2132 
2133 	/* Sanity check on bus space */
2134 	iomask = bus_space_to_resource_flags(node->addrs[index].space);
2135 	if (iomask & IORESOURCE_MEM)
2136 		parent = &iomem_resource;
2137 	else if (iomask & IORESOURCE_IO)
2138 		parent = &ioport_resource;
2139 	else
2140 		return -EINVAL;
2141 
2142 	/* Find a PCI parent if any */
2143 	nd = node;
2144 	pcidev = NULL;
2145 	while(nd) {
2146 		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2147 			pcidev = pci_find_slot(pci_bus, pci_devfn);
2148 		if (pcidev) break;
2149 		nd = nd->parent;
2150 	}
2151 	if (pcidev)
2152 		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2153 	if (!parent) {
2154 		printk(KERN_WARNING "release_OF_resource(%s), parent not found\n",
2155 			node->name);
2156 		return -ENODEV;
2157 	}
2158 
2159 	/* Find us in the parent and its children */
2160 	res = parent->child;
2161 	start = node->addrs[index].address;
2162 	end = start + node->addrs[index].size - 1;
2163 	while (res) {
2164 		if (res->start == start && res->end == end &&
2165 		    (res->flags & IORESOURCE_BUSY))
2166 		    	break;
2167 		if (res->start <= start && res->end >= end)
2168 			res = res->child;
2169 		else
2170 			res = res->sibling;
2171 	}
2172 	if (!res)
2173 		return -ENODEV;
2174 
2175 	if (res->name) {
2176 		kfree(res->name);
2177 		res->name = NULL;
2178 	}
2179 	release_resource(res);
2180 	kfree(res);
2181 
2182 	return 0;
2183 }
2184 EXPORT_SYMBOL(release_OF_resource);
2185 #endif /* CONFIG_PCI */
2186