xref: /openbmc/linux/arch/powerpc/kernel/prom.c (revision 0cc4746c)
1 /*
2  * Procedures for creating, accessing and interpreting the device tree.
3  *
4  * Paul Mackerras	August 1996.
5  * Copyright (C) 1996-2005 Paul Mackerras.
6  *
7  *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8  *    {engebret|bergner}@us.ibm.com
9  *
10  *      This program is free software; you can redistribute it and/or
11  *      modify it under the terms of the GNU General Public License
12  *      as published by the Free Software Foundation; either version
13  *      2 of the License, or (at your option) any later version.
14  */
15 
16 #undef DEBUG
17 
18 #include <stdarg.h>
19 #include <linux/config.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/init.h>
23 #include <linux/threads.h>
24 #include <linux/spinlock.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/stringify.h>
28 #include <linux/delay.h>
29 #include <linux/initrd.h>
30 #include <linux/bitops.h>
31 #include <linux/module.h>
32 
33 #include <asm/prom.h>
34 #include <asm/rtas.h>
35 #include <asm/lmb.h>
36 #include <asm/page.h>
37 #include <asm/processor.h>
38 #include <asm/irq.h>
39 #include <asm/io.h>
40 #include <asm/kdump.h>
41 #include <asm/smp.h>
42 #include <asm/system.h>
43 #include <asm/mmu.h>
44 #include <asm/pgtable.h>
45 #include <asm/pci.h>
46 #include <asm/iommu.h>
47 #include <asm/btext.h>
48 #include <asm/sections.h>
49 #include <asm/machdep.h>
50 #include <asm/pSeries_reconfig.h>
51 #include <asm/pci-bridge.h>
52 
53 #ifdef DEBUG
54 #define DBG(fmt...) printk(KERN_ERR fmt)
55 #else
56 #define DBG(fmt...)
57 #endif
58 
59 struct pci_reg_property {
60 	struct pci_address addr;
61 	u32 size_hi;
62 	u32 size_lo;
63 };
64 
65 struct isa_reg_property {
66 	u32 space;
67 	u32 address;
68 	u32 size;
69 };
70 
71 
72 typedef int interpret_func(struct device_node *, unsigned long *,
73 			   int, int, int);
74 
75 static int __initdata dt_root_addr_cells;
76 static int __initdata dt_root_size_cells;
77 
78 #ifdef CONFIG_PPC64
79 static int __initdata iommu_is_off;
80 int __initdata iommu_force_on;
81 unsigned long tce_alloc_start, tce_alloc_end;
82 #endif
83 
84 typedef u32 cell_t;
85 
86 #if 0
87 static struct boot_param_header *initial_boot_params __initdata;
88 #else
89 struct boot_param_header *initial_boot_params;
90 #endif
91 
92 static struct device_node *allnodes = NULL;
93 
94 /* use when traversing tree through the allnext, child, sibling,
95  * or parent members of struct device_node.
96  */
97 static DEFINE_RWLOCK(devtree_lock);
98 
99 /* exported to the outside world */
100 struct device_node *of_chosen;
101 
102 struct device_node *dflt_interrupt_controller;
103 int num_interrupt_controllers;
104 
105 /*
106  * Wrapper for allocating memory for various data that needs to be
107  * attached to device nodes as they are processed at boot or when
108  * added to the device tree later (e.g. DLPAR).  At boot there is
109  * already a region reserved so we just increment *mem_start by size;
110  * otherwise we call kmalloc.
111  */
112 static void * prom_alloc(unsigned long size, unsigned long *mem_start)
113 {
114 	unsigned long tmp;
115 
116 	if (!mem_start)
117 		return kmalloc(size, GFP_KERNEL);
118 
119 	tmp = *mem_start;
120 	*mem_start += size;
121 	return (void *)tmp;
122 }
123 
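/*
 * Illustrative sketch (not part of the original file): the two call
 * patterns of prom_alloc().  "reserved" below is a hypothetical cursor
 * into the region pre-reserved at boot time.
 *
 *	void *a, *b;
 *
 *	a = prom_alloc(64, &reserved);	// boot: bump-allocate, advance the cursor by 64
 *	b = prom_alloc(64, NULL);	// later (e.g. DLPAR): falls back to kmalloc(GFP_KERNEL)
 */
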
124 /*
125  * Find the device_node with a given phandle.
126  */
127 static struct device_node * find_phandle(phandle ph)
128 {
129 	struct device_node *np;
130 
131 	for (np = allnodes; np != 0; np = np->allnext)
132 		if (np->linux_phandle == ph)
133 			return np;
134 	return NULL;
135 }
136 
137 /*
138  * Find the interrupt parent of a node.
139  */
140 static struct device_node * __devinit intr_parent(struct device_node *p)
141 {
142 	phandle *parp;
143 
144 	parp = (phandle *) get_property(p, "interrupt-parent", NULL);
145 	if (parp == NULL)
146 		return p->parent;
147 	p = find_phandle(*parp);
148 	if (p != NULL)
149 		return p;
150 	/*
151 	 * On a powermac booted with BootX, we don't get to know the
152 	 * phandles for any nodes, so find_phandle will return NULL.
153 	 * Fortunately these machines only have one interrupt controller
154 	 * so there isn't in fact any ambiguity.  -- paulus
155 	 */
156 	if (num_interrupt_controllers == 1)
157 		p = dflt_interrupt_controller;
158 	return p;
159 }
160 
161 /*
162  * Find out the size of each entry of the interrupts property
163  * for a node.
164  */
165 int __devinit prom_n_intr_cells(struct device_node *np)
166 {
167 	struct device_node *p;
168 	unsigned int *icp;
169 
170 	for (p = np; (p = intr_parent(p)) != NULL; ) {
171 		icp = (unsigned int *)
172 			get_property(p, "#interrupt-cells", NULL);
173 		if (icp != NULL)
174 			return *icp;
175 		if (get_property(p, "interrupt-controller", NULL) != NULL
176 		    || get_property(p, "interrupt-map", NULL) != NULL) {
177 			printk("oops, node %s doesn't have #interrupt-cells\n",
178 			       p->full_name);
179 			return 1;
180 		}
181 	}
182 #ifdef DEBUG_IRQ
183 	printk("prom_n_intr_cells failed for %s\n", np->full_name);
184 #endif
185 	return 1;
186 }
187 
188 /*
189  * Map an interrupt from a device up to the platform interrupt
190  * descriptor.
191  */
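
/*
 * Layout sketch (added for illustration, not from the original file): each
 * "interrupt-map" entry this routine walks has the form
 *
 *	<child unit address   : naddrc cells>
 *	<child intr specifier : nintrc cells>
 *	<interrupt parent     : 1 phandle cell>
 *	<parent unit address  : parent's #address-cells>
 *	<parent intr specifier: parent's #interrupt-cells>
 *
 * and "interrupt-map-mask" selects which of the child cells take part in
 * the comparison.
 */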
192 static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
193 				   struct device_node *np, unsigned int *ints,
194 				   int nintrc)
195 {
196 	struct device_node *p, *ipar;
197 	unsigned int *imap, *imask, *ip;
198 	int i, imaplen, match;
199 	int newintrc = 0, newaddrc = 0;
200 	unsigned int *reg;
201 	int naddrc;
202 
203 	reg = (unsigned int *) get_property(np, "reg", NULL);
204 	naddrc = prom_n_addr_cells(np);
205 	p = intr_parent(np);
206 	while (p != NULL) {
207 		if (get_property(p, "interrupt-controller", NULL) != NULL)
208 			/* this node is an interrupt controller, stop here */
209 			break;
210 		imap = (unsigned int *)
211 			get_property(p, "interrupt-map", &imaplen);
212 		if (imap == NULL) {
213 			p = intr_parent(p);
214 			continue;
215 		}
216 		imask = (unsigned int *)
217 			get_property(p, "interrupt-map-mask", NULL);
218 		if (imask == NULL) {
219 			printk("oops, %s has interrupt-map but no mask\n",
220 			       p->full_name);
221 			return 0;
222 		}
223 		imaplen /= sizeof(unsigned int);
224 		match = 0;
225 		ipar = NULL;
226 		while (imaplen > 0 && !match) {
227 			/* check the child-interrupt field */
228 			match = 1;
229 			for (i = 0; i < naddrc && match; ++i)
230 				match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
231 			for (; i < naddrc + nintrc && match; ++i)
232 				match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
233 			imap += naddrc + nintrc;
234 			imaplen -= naddrc + nintrc;
235 			/* grab the interrupt parent */
236 			ipar = find_phandle((phandle) *imap++);
237 			--imaplen;
238 			if (ipar == NULL && num_interrupt_controllers == 1)
239 				/* cope with BootX not giving us phandles */
240 				ipar = dflt_interrupt_controller;
241 			if (ipar == NULL) {
242 				printk("oops, no int parent %x in map of %s\n",
243 				       imap[-1], p->full_name);
244 				return 0;
245 			}
246 			/* find the parent's # addr and intr cells */
247 			ip = (unsigned int *)
248 				get_property(ipar, "#interrupt-cells", NULL);
249 			if (ip == NULL) {
250 				printk("oops, no #interrupt-cells on %s\n",
251 				       ipar->full_name);
252 				return 0;
253 			}
254 			newintrc = *ip;
255 			ip = (unsigned int *)
256 				get_property(ipar, "#address-cells", NULL);
257 			newaddrc = (ip == NULL)? 0: *ip;
258 			imap += newaddrc + newintrc;
259 			imaplen -= newaddrc + newintrc;
260 		}
261 		if (imaplen < 0) {
262 			printk("oops, error decoding int-map on %s, len=%d\n",
263 			       p->full_name, imaplen);
264 			return 0;
265 		}
266 		if (!match) {
267 #ifdef DEBUG_IRQ
268 			printk("oops, no match in %s int-map for %s\n",
269 			       p->full_name, np->full_name);
270 #endif
271 			return 0;
272 		}
273 		p = ipar;
274 		naddrc = newaddrc;
275 		nintrc = newintrc;
276 		ints = imap - nintrc;
277 		reg = ints - naddrc;
278 	}
279 	if (p == NULL) {
280 #ifdef DEBUG_IRQ
281 		printk("hmmm, int tree for %s doesn't have ctrler\n",
282 		       np->full_name);
283 #endif
284 		return 0;
285 	}
286 	*irq = ints;
287 	*ictrler = p;
288 	return nintrc;
289 }
290 
291 static unsigned char map_isa_senses[4] = {
292 	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
293 	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
294 	IRQ_SENSE_EDGE  | IRQ_POLARITY_NEGATIVE,
295 	IRQ_SENSE_EDGE  | IRQ_POLARITY_POSITIVE
296 };
297 
298 static unsigned char map_mpic_senses[4] = {
299 	IRQ_SENSE_EDGE  | IRQ_POLARITY_POSITIVE,
300 	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
301 	/* 2 seems to be used for the 8259 cascade... */
302 	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
303 	IRQ_SENSE_EDGE  | IRQ_POLARITY_NEGATIVE,
304 };
305 
306 static int __devinit finish_node_interrupts(struct device_node *np,
307 					    unsigned long *mem_start,
308 					    int measure_only)
309 {
310 	unsigned int *ints;
311 	int intlen, intrcells, intrcount;
312 	int i, j, n, sense;
313 	unsigned int *irq, virq;
314 	struct device_node *ic;
315 
316 	if (num_interrupt_controllers == 0) {
317 		/*
318 		 * Old machines just have a list of interrupt numbers
319 		 * and no interrupt-controller nodes.
320 		 */
321 		ints = (unsigned int *) get_property(np, "AAPL,interrupts",
322 						     &intlen);
323 		/* XXX old interpret_pci_props looked in parent too */
324 		/* XXX old interpret_macio_props looked for interrupts
325 		   before AAPL,interrupts */
326 		if (ints == NULL)
327 			ints = (unsigned int *) get_property(np, "interrupts",
328 							     &intlen);
329 		if (ints == NULL)
330 			return 0;
331 
332 		np->n_intrs = intlen / sizeof(unsigned int);
333 		np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
334 				       mem_start);
335 		if (!np->intrs)
336 			return -ENOMEM;
337 		if (measure_only)
338 			return 0;
339 
340 		for (i = 0; i < np->n_intrs; ++i) {
341 			np->intrs[i].line = *ints++;
342 			np->intrs[i].sense = IRQ_SENSE_LEVEL
343 				| IRQ_POLARITY_NEGATIVE;
344 		}
345 		return 0;
346 	}
347 
348 	ints = (unsigned int *) get_property(np, "interrupts", &intlen);
349 	if (ints == NULL)
350 		return 0;
351 	intrcells = prom_n_intr_cells(np);
352 	intlen /= intrcells * sizeof(unsigned int);
353 
354 	np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
355 	if (!np->intrs)
356 		return -ENOMEM;
357 
358 	if (measure_only)
359 		return 0;
360 
361 	intrcount = 0;
362 	for (i = 0; i < intlen; ++i, ints += intrcells) {
363 		n = map_interrupt(&irq, &ic, np, ints, intrcells);
364 		if (n <= 0)
365 			continue;
366 
367 		/* don't map IRQ numbers under a cascaded 8259 controller */
368 		if (ic && device_is_compatible(ic, "chrp,iic")) {
369 			np->intrs[intrcount].line = irq[0];
370 			sense = (n > 1)? (irq[1] & 3): 3;
371 			np->intrs[intrcount].sense = map_isa_senses[sense];
372 		} else {
373 			virq = virt_irq_create_mapping(irq[0]);
374 #ifdef CONFIG_PPC64
375 			if (virq == NO_IRQ) {
376 				printk(KERN_CRIT "Could not allocate interrupt"
377 				       " number for %s\n", np->full_name);
378 				continue;
379 			}
380 #endif
381 			np->intrs[intrcount].line = irq_offset_up(virq);
382 			sense = (n > 1)? (irq[1] & 3): 1;
383 			np->intrs[intrcount].sense = map_mpic_senses[sense];
384 		}
385 
386 #ifdef CONFIG_PPC64
387 		/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
388 		if (_machine == PLATFORM_POWERMAC && ic && ic->parent) {
389 			char *name = get_property(ic->parent, "name", NULL);
390 			if (name && !strcmp(name, "u3"))
391 				np->intrs[intrcount].line += 128;
392 			else if (!(name && !strcmp(name, "mac-io")))
393 				/* ignore other cascaded controllers, such as
394 				   the k2-sata-root */
395 				break;
396 		}
397 #endif
398 		if (n > 2) {
399 			printk("hmmm, got %d intr cells for %s:", n,
400 			       np->full_name);
401 			for (j = 0; j < n; ++j)
402 				printk(" %d", irq[j]);
403 			printk("\n");
404 		}
405 		++intrcount;
406 	}
407 	np->n_intrs = intrcount;
408 
409 	return 0;
410 }
411 
412 static int __devinit interpret_pci_props(struct device_node *np,
413 					 unsigned long *mem_start,
414 					 int naddrc, int nsizec,
415 					 int measure_only)
416 {
417 	struct address_range *adr;
418 	struct pci_reg_property *pci_addrs;
419 	int i, l, n_addrs;
420 
421 	pci_addrs = (struct pci_reg_property *)
422 		get_property(np, "assigned-addresses", &l);
423 	if (!pci_addrs)
424 		return 0;
425 
426 	n_addrs = l / sizeof(*pci_addrs);
427 
428 	adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
429 	if (!adr)
430 		return -ENOMEM;
431 
432  	if (measure_only)
433  		return 0;
434 
435  	np->addrs = adr;
436  	np->n_addrs = n_addrs;
437 
438  	for (i = 0; i < n_addrs; i++) {
439  		adr[i].space = pci_addrs[i].addr.a_hi;
440  		adr[i].address = pci_addrs[i].addr.a_lo |
441 			((u64)pci_addrs[i].addr.a_mid << 32);
442  		adr[i].size = pci_addrs[i].size_lo;
443 	}
444 
445 	return 0;
446 }
447 
448 static int __init interpret_dbdma_props(struct device_node *np,
449 					unsigned long *mem_start,
450 					int naddrc, int nsizec,
451 					int measure_only)
452 {
453 	struct reg_property32 *rp;
454 	struct address_range *adr;
455 	unsigned long base_address;
456 	int i, l;
457 	struct device_node *db;
458 
459 	base_address = 0;
460 	if (!measure_only) {
461 		for (db = np->parent; db != NULL; db = db->parent) {
462 			if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
463 				base_address = db->addrs[0].address;
464 				break;
465 			}
466 		}
467 	}
468 
469 	rp = (struct reg_property32 *) get_property(np, "reg", &l);
470 	if (rp != 0 && l >= sizeof(struct reg_property32)) {
471 		i = 0;
472 		adr = (struct address_range *) (*mem_start);
473 		while ((l -= sizeof(struct reg_property32)) >= 0) {
474 			if (!measure_only) {
475 				adr[i].space = 2;
476 				adr[i].address = rp[i].address + base_address;
477 				adr[i].size = rp[i].size;
478 			}
479 			++i;
480 		}
481 		np->addrs = adr;
482 		np->n_addrs = i;
483 		(*mem_start) += i * sizeof(struct address_range);
484 	}
485 
486 	return 0;
487 }
488 
489 static int __init interpret_macio_props(struct device_node *np,
490 					unsigned long *mem_start,
491 					int naddrc, int nsizec,
492 					int measure_only)
493 {
494 	struct reg_property32 *rp;
495 	struct address_range *adr;
496 	unsigned long base_address;
497 	int i, l;
498 	struct device_node *db;
499 
500 	base_address = 0;
501 	if (!measure_only) {
502 		for (db = np->parent; db != NULL; db = db->parent) {
503 			if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
504 				base_address = db->addrs[0].address;
505 				break;
506 			}
507 		}
508 	}
509 
510 	rp = (struct reg_property32 *) get_property(np, "reg", &l);
511 	if (rp != 0 && l >= sizeof(struct reg_property32)) {
512 		i = 0;
513 		adr = (struct address_range *) (*mem_start);
514 		while ((l -= sizeof(struct reg_property32)) >= 0) {
515 			if (!measure_only) {
516 				adr[i].space = 2;
517 				adr[i].address = rp[i].address + base_address;
518 				adr[i].size = rp[i].size;
519 			}
520 			++i;
521 		}
522 		np->addrs = adr;
523 		np->n_addrs = i;
524 		(*mem_start) += i * sizeof(struct address_range);
525 	}
526 
527 	return 0;
528 }
529 
530 static int __init interpret_isa_props(struct device_node *np,
531 				      unsigned long *mem_start,
532 				      int naddrc, int nsizec,
533 				      int measure_only)
534 {
535 	struct isa_reg_property *rp;
536 	struct address_range *adr;
537 	int i, l;
538 
539 	rp = (struct isa_reg_property *) get_property(np, "reg", &l);
540 	if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
541 		i = 0;
542 		adr = (struct address_range *) (*mem_start);
543 		while ((l -= sizeof(struct isa_reg_property)) >= 0) {
544 			if (!measure_only) {
545 				adr[i].space = rp[i].space;
546 				adr[i].address = rp[i].address;
547 				adr[i].size = rp[i].size;
548 			}
549 			++i;
550 		}
551 		np->addrs = adr;
552 		np->n_addrs = i;
553 		(*mem_start) += i * sizeof(struct address_range);
554 	}
555 
556 	return 0;
557 }
558 
559 static int __init interpret_root_props(struct device_node *np,
560 				       unsigned long *mem_start,
561 				       int naddrc, int nsizec,
562 				       int measure_only)
563 {
564 	struct address_range *adr;
565 	int i, l;
566 	unsigned int *rp;
567 	int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
568 
569 	rp = (unsigned int *) get_property(np, "reg", &l);
570 	if (rp != 0 && l >= rpsize) {
571 		i = 0;
572 		adr = (struct address_range *) (*mem_start);
573 		while ((l -= rpsize) >= 0) {
574 			if (!measure_only) {
575 				adr[i].space = 0;
576 				adr[i].address = rp[naddrc - 1];
577 				adr[i].size = rp[naddrc + nsizec - 1];
578 			}
579 			++i;
580 			rp += naddrc + nsizec;
581 		}
582 		np->addrs = adr;
583 		np->n_addrs = i;
584 		(*mem_start) += i * sizeof(struct address_range);
585 	}
586 
587 	return 0;
588 }
589 
590 static int __devinit finish_node(struct device_node *np,
591 				 unsigned long *mem_start,
592 				 interpret_func *ifunc,
593 				 int naddrc, int nsizec,
594 				 int measure_only)
595 {
596 	struct device_node *child;
597 	int *ip, rc = 0;
598 
599 	/* get the device addresses and interrupts */
600 	if (ifunc != NULL)
601 		rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
602 	if (rc)
603 		goto out;
604 
605 	rc = finish_node_interrupts(np, mem_start, measure_only);
606 	if (rc)
607 		goto out;
608 
609 	/* Look for #address-cells and #size-cells properties. */
610 	ip = (int *) get_property(np, "#address-cells", NULL);
611 	if (ip != NULL)
612 		naddrc = *ip;
613 	ip = (int *) get_property(np, "#size-cells", NULL);
614 	if (ip != NULL)
615 		nsizec = *ip;
616 
617 	if (!strcmp(np->name, "device-tree") || np->parent == NULL)
618 		ifunc = interpret_root_props;
619 	else if (np->type == 0)
620 		ifunc = NULL;
621 	else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
622 		ifunc = interpret_pci_props;
623 	else if (!strcmp(np->type, "dbdma"))
624 		ifunc = interpret_dbdma_props;
625 	else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
626 		ifunc = interpret_macio_props;
627 	else if (!strcmp(np->type, "isa"))
628 		ifunc = interpret_isa_props;
629 	else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
630 		ifunc = interpret_root_props;
631 	else if (!((ifunc == interpret_dbdma_props
632 		    || ifunc == interpret_macio_props)
633 		   && (!strcmp(np->type, "escc")
634 		       || !strcmp(np->type, "media-bay"))))
635 		ifunc = NULL;
636 
637 	for (child = np->child; child != NULL; child = child->sibling) {
638 		rc = finish_node(child, mem_start, ifunc,
639 				 naddrc, nsizec, measure_only);
640 		if (rc)
641 			goto out;
642 	}
643 out:
644 	return rc;
645 }
646 
647 static void __init scan_interrupt_controllers(void)
648 {
649 	struct device_node *np;
650 	int n = 0;
651 	char *name, *ic;
652 	int iclen;
653 
654 	for (np = allnodes; np != NULL; np = np->allnext) {
655 		ic = get_property(np, "interrupt-controller", &iclen);
656 		name = get_property(np, "name", NULL);
657 		/* checking iclen makes sure we don't get a false
658 		   match on /chosen.interrupt_controller */
659 		if ((name != NULL
660 		     && strcmp(name, "interrupt-controller") == 0)
661 		    || (ic != NULL && iclen == 0
662 			&& strcmp(name, "AppleKiwi"))) {
663 			if (n == 0)
664 				dflt_interrupt_controller = np;
665 			++n;
666 		}
667 	}
668 	num_interrupt_controllers = n;
669 }
670 
671 /**
672  * finish_device_tree is called once things are running normally
673  * (i.e. with text and data mapped to the address they were linked at).
674  * It traverses the device tree and fills in some of the additional
675  * fields in each node, like {n_}addrs and {n_}intrs.  The virtual
676  * interrupt mapping is also initialized at this point.
677  */
678 void __init finish_device_tree(void)
679 {
680 	unsigned long start, end, size = 0;
681 
682 	DBG(" -> finish_device_tree\n");
683 
684 #ifdef CONFIG_PPC64
685 	/* Initialize virtual IRQ map */
686 	virt_irq_init();
687 #endif
688 	scan_interrupt_controllers();
689 
690 	/*
691 	 * Finish device-tree (pre-parsing some properties etc...)
692 	 * We do this in two passes: the first, with "measure_only" set,
693 	 * only measures the amount of memory needed; we then allocate
694 	 * that memory and call finish_node again.  However, we must be
695 	 * careful, as most routines will fail nowadays when prom_alloc()
696 	 * returns 0, so we must make sure our first pass doesn't start
697 	 * at 0.  We pre-initialize size to 16 for that reason, and we
698 	 * remove those additional 16 bytes again afterwards.
699 	 */
700 	size = 16;
701 	finish_node(allnodes, &size, NULL, 0, 0, 1);
702 	size -= 16;
703 	end = start = (unsigned long) __va(lmb_alloc(size, 128));
704 	finish_node(allnodes, &end, NULL, 0, 0, 0);
705 	BUG_ON(end != start + size);
706 
707 	DBG(" <- finish_device_tree\n");
708 }
709 
710 static inline char *find_flat_dt_string(u32 offset)
711 {
712 	return ((char *)initial_boot_params) +
713 		initial_boot_params->off_dt_strings + offset;
714 }
715 
716 /**
717  * This function is used to scan the flattened device-tree.  It is
718  * used to extract the memory information at boot, before we can
719  * unflatten the tree.
720  */
721 int __init of_scan_flat_dt(int (*it)(unsigned long node,
722 				     const char *uname, int depth,
723 				     void *data),
724 			   void *data)
725 {
726 	unsigned long p = ((unsigned long)initial_boot_params) +
727 		initial_boot_params->off_dt_struct;
728 	int rc = 0;
729 	int depth = -1;
730 
731 	do {
732 		u32 tag = *((u32 *)p);
733 		char *pathp;
734 
735 		p += 4;
736 		if (tag == OF_DT_END_NODE) {
737 			depth --;
738 			continue;
739 		}
740 		if (tag == OF_DT_NOP)
741 			continue;
742 		if (tag == OF_DT_END)
743 			break;
744 		if (tag == OF_DT_PROP) {
745 			u32 sz = *((u32 *)p);
746 			p += 8;
747 			if (initial_boot_params->version < 0x10)
748 				p = _ALIGN(p, sz >= 8 ? 8 : 4);
749 			p += sz;
750 			p = _ALIGN(p, 4);
751 			continue;
752 		}
753 		if (tag != OF_DT_BEGIN_NODE) {
754 			printk(KERN_WARNING "Invalid tag %x scanning flattened"
755 			       " device tree !\n", tag);
756 			return -EINVAL;
757 		}
758 		depth++;
759 		pathp = (char *)p;
760 		p = _ALIGN(p + strlen(pathp) + 1, 4);
761 		if ((*pathp) == '/') {
762 			char *lp, *np;
763 			for (lp = NULL, np = pathp; *np; np++)
764 				if ((*np) == '/')
765 					lp = np+1;
766 			if (lp != NULL)
767 				pathp = lp;
768 		}
769 		rc = it(p, pathp, depth, data);
770 		if (rc != 0)
771 			break;
772 	} while(1);
773 
774 	return rc;
775 }
776 
777 /**
778  * This function can be used within an of_scan_flat_dt() callback to get
779  * access to properties.
780  */
781 void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
782 				 unsigned long *size)
783 {
784 	unsigned long p = node;
785 
786 	do {
787 		u32 tag = *((u32 *)p);
788 		u32 sz, noff;
789 		const char *nstr;
790 
791 		p += 4;
792 		if (tag == OF_DT_NOP)
793 			continue;
794 		if (tag != OF_DT_PROP)
795 			return NULL;
796 
797 		sz = *((u32 *)p);
798 		noff = *((u32 *)(p + 4));
799 		p += 8;
800 		if (initial_boot_params->version < 0x10)
801 			p = _ALIGN(p, sz >= 8 ? 8 : 4);
802 
803 		nstr = find_flat_dt_string(noff);
804 		if (nstr == NULL) {
805 			printk(KERN_WARNING "Can't find property index"
806 			       " name !\n");
807 			return NULL;
808 		}
809 		if (strcmp(name, nstr) == 0) {
810 			if (size)
811 				*size = sz;
812 			return (void *)p;
813 		}
814 		p += sz;
815 		p = _ALIGN(p, 4);
816 	} while(1);
817 }
818 
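/*
 * Hedged usage sketch (not in the original file): a minimal of_scan_flat_dt()
 * callback that looks up a property with of_get_flat_dt_prop(), in the style
 * of the early_init_dt_scan_*() helpers further down.
 */
#if 0
static int __init example_scan_cb(unsigned long node, const char *uname,
				  int depth, void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	if (type == NULL || strcmp(type, "example") != 0)
		return 0;	/* keep scanning */
	/* ... pull more properties out of 'node' here ... */
	return 1;		/* non-zero stops the scan */
}
/* caller:	of_scan_flat_dt(example_scan_cb, NULL); */
#endif
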
819 static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
820 				       unsigned long align)
821 {
822 	void *res;
823 
824 	*mem = _ALIGN(*mem, align);
825 	res = (void *)*mem;
826 	*mem += size;
827 
828 	return res;
829 }
830 
831 static unsigned long __init unflatten_dt_node(unsigned long mem,
832 					      unsigned long *p,
833 					      struct device_node *dad,
834 					      struct device_node ***allnextpp,
835 					      unsigned long fpsize)
836 {
837 	struct device_node *np;
838 	struct property *pp, **prev_pp = NULL;
839 	char *pathp;
840 	u32 tag;
841 	unsigned int l, allocl;
842 	int has_name = 0;
843 	int new_format = 0;
844 
845 	tag = *((u32 *)(*p));
846 	if (tag != OF_DT_BEGIN_NODE) {
847 		printk("Weird tag at start of node: %x\n", tag);
848 		return mem;
849 	}
850 	*p += 4;
851 	pathp = (char *)*p;
852 	l = allocl = strlen(pathp) + 1;
853 	*p = _ALIGN(*p + l, 4);
854 
855 	/* Version 0x10 has a more compact unit name here instead of the full
856 	 * path.  We accumulate the full path size using "fpsize" and rebuild
857 	 * it later.  We detect this because the first character of the name
858 	 * is not '/'.
859 	 */
860 	if ((*pathp) != '/') {
861 		new_format = 1;
862 		if (fpsize == 0) {
863 			/* Root node: special case.  fpsize accounts for the path
864 			 * plus the terminating zero.  The root node only has '/',
865 			 * so fpsize should be 2, but we want to avoid giving
866 			 * first-level nodes a double '/', so we use fpsize 1 here.
867 			 */
868 			fpsize = 1;
869 			allocl = 2;
870 		} else {
871 			/* account for the '/' plus the path size minus the
872 			 * terminating zero, which is exactly what 'l' holds
873 			 */
874 			fpsize += l;
875 			allocl = fpsize;
876 		}
877 	}
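
	/*
	 * Example (illustrative only, assuming the 0x10 format): the root
	 * node's name is empty, its child is just "cpus" and the grandchild
	 * "PowerPC,970@0"; the full path "/cpus/PowerPC,970@0" is rebuilt
	 * below from the accumulated fpsize.
	 */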
878 
879 
880 	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
881 				__alignof__(struct device_node));
882 	if (allnextpp) {
883 		memset(np, 0, sizeof(*np));
884 		np->full_name = ((char*)np) + sizeof(struct device_node);
885 		if (new_format) {
886 			char *p = np->full_name;
887 			/* rebuild full path for new format */
888 			if (dad && dad->parent) {
889 				strcpy(p, dad->full_name);
890 #ifdef DEBUG
891 				if ((strlen(p) + l + 1) != allocl) {
892 					DBG("%s: p: %d, l: %d, a: %d\n",
893 					    pathp, strlen(p), l, allocl);
894 				}
895 #endif
896 				p += strlen(p);
897 			}
898 			*(p++) = '/';
899 			memcpy(p, pathp, l);
900 		} else
901 			memcpy(np->full_name, pathp, l);
902 		prev_pp = &np->properties;
903 		**allnextpp = np;
904 		*allnextpp = &np->allnext;
905 		if (dad != NULL) {
906 			np->parent = dad;
907 			/* we temporarily use the next field as `last_child' */
908 			if (dad->next == 0)
909 				dad->child = np;
910 			else
911 				dad->next->sibling = np;
912 			dad->next = np;
913 		}
914 		kref_init(&np->kref);
915 	}
916 	while(1) {
917 		u32 sz, noff;
918 		char *pname;
919 
920 		tag = *((u32 *)(*p));
921 		if (tag == OF_DT_NOP) {
922 			*p += 4;
923 			continue;
924 		}
925 		if (tag != OF_DT_PROP)
926 			break;
927 		*p += 4;
928 		sz = *((u32 *)(*p));
929 		noff = *((u32 *)((*p) + 4));
930 		*p += 8;
931 		if (initial_boot_params->version < 0x10)
932 			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);
933 
934 		pname = find_flat_dt_string(noff);
935 		if (pname == NULL) {
936 			printk("Can't find property name in list !\n");
937 			break;
938 		}
939 		if (strcmp(pname, "name") == 0)
940 			has_name = 1;
941 		l = strlen(pname) + 1;
942 		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
943 					__alignof__(struct property));
944 		if (allnextpp) {
945 			if (strcmp(pname, "linux,phandle") == 0) {
946 				np->node = *((u32 *)*p);
947 				if (np->linux_phandle == 0)
948 					np->linux_phandle = np->node;
949 			}
950 			if (strcmp(pname, "ibm,phandle") == 0)
951 				np->linux_phandle = *((u32 *)*p);
952 			pp->name = pname;
953 			pp->length = sz;
954 			pp->value = (void *)*p;
955 			*prev_pp = pp;
956 			prev_pp = &pp->next;
957 		}
958 		*p = _ALIGN((*p) + sz, 4);
959 	}
960 	/* with version 0x10 we may not have the name property; recreate
961 	 * it here from the unit name if it is absent
962 	 */
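	/* e.g. (illustrative) a unit name of "ethernet@f00" yields the name "ethernet" */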
963 	if (!has_name) {
964 		char *p = pathp, *ps = pathp, *pa = NULL;
965 		int sz;
966 
967 		while (*p) {
968 			if ((*p) == '@')
969 				pa = p;
970 			if ((*p) == '/')
971 				ps = p + 1;
972 			p++;
973 		}
974 		if (pa < ps)
975 			pa = p;
976 		sz = (pa - ps) + 1;
977 		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
978 					__alignof__(struct property));
979 		if (allnextpp) {
980 			pp->name = "name";
981 			pp->length = sz;
982 			pp->value = (unsigned char *)(pp + 1);
983 			*prev_pp = pp;
984 			prev_pp = &pp->next;
985 			memcpy(pp->value, ps, sz - 1);
986 			((char *)pp->value)[sz - 1] = 0;
987 			DBG("fixed up name for %s -> %s\n", pathp, pp->value);
988 		}
989 	}
990 	if (allnextpp) {
991 		*prev_pp = NULL;
992 		np->name = get_property(np, "name", NULL);
993 		np->type = get_property(np, "device_type", NULL);
994 
995 		if (!np->name)
996 			np->name = "<NULL>";
997 		if (!np->type)
998 			np->type = "<NULL>";
999 	}
1000 	while (tag == OF_DT_BEGIN_NODE) {
1001 		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
1002 		tag = *((u32 *)(*p));
1003 	}
1004 	if (tag != OF_DT_END_NODE) {
1005 		printk("Weird tag at end of node: %x\n", tag);
1006 		return mem;
1007 	}
1008 	*p += 4;
1009 	return mem;
1010 }
1011 
1012 
1013 /**
1014  * unflattens the device-tree passed by the firmware, creating the
1015  * tree of struct device_node. It also fills the "name" and "type"
1016  * pointers of the nodes so the normal device-tree walking functions
1017  * can be used (this used to be done by finish_device_tree)
1018  */
1019 void __init unflatten_device_tree(void)
1020 {
1021 	unsigned long start, mem, size;
1022 	struct device_node **allnextp = &allnodes;
1023 	char *p = NULL;
1024 	int l = 0;
1025 
1026 	DBG(" -> unflatten_device_tree()\n");
1027 
1028 	/* First pass, scan for size */
1029 	start = ((unsigned long)initial_boot_params) +
1030 		initial_boot_params->off_dt_struct;
1031 	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
1032 	size = (size | 3) + 1;
1033 
1034 	DBG("  size is %lx, allocating...\n", size);
1035 
1036 	/* Allocate memory for the expanded device tree */
1037 	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
1038 	if (!mem) {
1039 		DBG("Couldn't allocate memory with lmb_alloc()!\n");
1040 		panic("Couldn't allocate memory with lmb_alloc()!\n");
1041 	}
1042 	mem = (unsigned long) __va(mem);
1043 
1044 	((u32 *)mem)[size / 4] = 0xdeadbeef;
1045 
1046 	DBG("  unflattening %lx...\n", mem);
1047 
1048 	/* Second pass, do actual unflattening */
1049 	start = ((unsigned long)initial_boot_params) +
1050 		initial_boot_params->off_dt_struct;
1051 	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
1052 	if (*((u32 *)start) != OF_DT_END)
1053 		printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
1054 	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
1055 		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
1056 		       ((u32 *)mem)[size / 4] );
1057 	*allnextp = NULL;
1058 
1059 	/* Get pointer to OF "/chosen" node for use everywhere */
1060 	of_chosen = of_find_node_by_path("/chosen");
1061 	if (of_chosen == NULL)
1062 		of_chosen = of_find_node_by_path("/chosen@0");
1063 
1064 	/* Retrieve command line */
1065 	if (of_chosen != NULL) {
1066 		p = (char *)get_property(of_chosen, "bootargs", &l);
1067 		if (p != NULL && l > 0)
1068 			strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
1069 	}
1070 #ifdef CONFIG_CMDLINE
1071 	if (l == 0 || (l == 1 && (*p) == 0))
1072 		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1073 #endif /* CONFIG_CMDLINE */
1074 
1075 	DBG("Command line is: %s\n", cmd_line);
1076 
1077 	DBG(" <- unflatten_device_tree()\n");
1078 }
1079 
1080 
1081 static int __init early_init_dt_scan_cpus(unsigned long node,
1082 					  const char *uname, int depth, void *data)
1083 {
1084 	u32 *prop;
1085 	unsigned long size;
1086 	char *type = of_get_flat_dt_prop(node, "device_type", &size);
1087 
1088 	/* We are scanning "cpu" nodes only */
1089 	if (type == NULL || strcmp(type, "cpu") != 0)
1090 		return 0;
1091 
1092 	boot_cpuid = 0;
1093 	boot_cpuid_phys = 0;
1094 	if (initial_boot_params && initial_boot_params->version >= 2) {
1095 		/* version 2 of the kexec param format adds the phys cpuid
1096 		 * of booted proc.
1097 		 */
1098 		boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
1099 	} else {
1100 		/* Check if it's the boot CPU; if so, set its hw index now */
1101 		if (of_get_flat_dt_prop(node,
1102 					"linux,boot-cpu", NULL) != NULL) {
1103 			prop = of_get_flat_dt_prop(node, "reg", NULL);
1104 			if (prop != NULL)
1105 				boot_cpuid_phys = *prop;
1106 		}
1107 	}
1108 	set_hard_smp_processor_id(0, boot_cpuid_phys);
1109 
1110 #ifdef CONFIG_ALTIVEC
1111 	/* Check if we have a VMX unit and, if so, update the CPU features */
1112 	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
1113 	if (prop && (*prop) > 0) {
1114 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1115 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1116 	}
1117 
1118 	/* Same goes for Apple's "altivec" property */
1119 	prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
1120 	if (prop) {
1121 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1122 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1123 	}
1124 #endif /* CONFIG_ALTIVEC */
1125 
1126 #ifdef CONFIG_PPC_PSERIES
1127 	/*
1128 	 * Check for an SMT capable CPU and set the CPU feature. We do
1129 	 * this by looking at the size of the ibm,ppc-interrupt-server#s
1130 	 * property
1131 	 */
1132 	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
1133 				       &size);
1134 	cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
1135 	if (prop && ((size / sizeof(u32)) > 1))
1136 		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
1137 #endif
1138 
1139 	return 0;
1140 }
1141 
1142 static int __init early_init_dt_scan_chosen(unsigned long node,
1143 					    const char *uname, int depth, void *data)
1144 {
1145 	u32 *prop;
1146 	unsigned long *lprop;
1147 
1148 	DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1149 
1150 	if (depth != 1 ||
1151 	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
1152 		return 0;
1153 
1154 	/* get platform type */
1155 	prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
1156 	if (prop == NULL)
1157 		return 0;
1158 #ifdef CONFIG_PPC_MULTIPLATFORM
1159 	_machine = *prop;
1160 #endif
1161 
1162 #ifdef CONFIG_PPC64
1163 	/* check if iommu is forced on or off */
1164 	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
1165 		iommu_is_off = 1;
1166 	if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
1167 		iommu_force_on = 1;
1168 #endif
1169 
1170  	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
1171  	if (lprop)
1172  		memory_limit = *lprop;
1173 
1174 #ifdef CONFIG_PPC64
1175  	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
1176  	if (lprop)
1177  		tce_alloc_start = *lprop;
1178  	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
1179  	if (lprop)
1180  		tce_alloc_end = *lprop;
1181 #endif
1182 
1183 #ifdef CONFIG_PPC_RTAS
1184 	/* To help early debugging via the front panel, we retrieve a minimal
1185 	 * set of RTAS information now, if available
1186 	 */
1187 	{
1188 		u64 *basep, *entryp;
1189 
1190 		basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
1191 		entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
1192 		prop = of_get_flat_dt_prop(node, "linux,rtas-size", NULL);
1193 		if (basep && entryp && prop) {
1194 			rtas.base = *basep;
1195 			rtas.entry = *entryp;
1196 			rtas.size = *prop;
1197 		}
1198 	}
1199 #endif /* CONFIG_PPC_RTAS */
1200 
1201 	/* break now */
1202 	return 1;
1203 }
1204 
1205 static int __init early_init_dt_scan_root(unsigned long node,
1206 					  const char *uname, int depth, void *data)
1207 {
1208 	u32 *prop;
1209 
1210 	if (depth != 0)
1211 		return 0;
1212 
1213 	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
1214 	dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1215 	DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1216 
1217 	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
1218 	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1219 	DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1220 
1221 	/* break now */
1222 	return 1;
1223 }
1224 
1225 static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1226 {
1227 	cell_t *p = *cellp;
1228 	unsigned long r;
1229 
1230 	/* Skip leading cells that won't fit in an unsigned long */
1231 	while (s > sizeof(unsigned long) / 4) {
1232 		p++;
1233 		s--;
1234 	}
1235 	r = *p++;
1236 #ifdef CONFIG_PPC64
1237 	if (s > 1) {
1238 		r <<= 32;
1239 		r |= *(p++);
1240 		s--;
1241 	}
1242 #endif
1243 
1244 	*cellp = p;
1245 	return r;
1246 }
1247 
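/*
 * Illustrative example (not in the original): with dt_root_addr_cells == 2
 * on a 64-bit kernel, the two cells 0x00000001 0x80000000 are combined by
 * dt_mem_next_cell() into the single value 0x180000000.
 */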
1248 
1249 static int __init early_init_dt_scan_memory(unsigned long node,
1250 					    const char *uname, int depth, void *data)
1251 {
1252 	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
1253 	cell_t *reg, *endp;
1254 	unsigned long l;
1255 
1256 	/* We are scanning "memory" nodes only */
1257 	if (type == NULL) {
1258 		/*
1259 		 * The longtrail doesn't have a device_type on the
1260 		 * /memory node, so look for the node called /memory@0.
1261 		 */
1262 		if (depth != 1 || strcmp(uname, "memory@0") != 0)
1263 			return 0;
1264 	} else if (strcmp(type, "memory") != 0)
1265 		return 0;
1266 
1267 	reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
1268 	if (reg == NULL)
1269 		return 0;
1270 
1271 	endp = reg + (l / sizeof(cell_t));
1272 
1273 	DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
1274 	    uname, l, reg[0], reg[1], reg[2], reg[3]);
1275 
1276 	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1277 		unsigned long base, size;
1278 
1279 		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1280 		size = dt_mem_next_cell(dt_root_size_cells, &reg);
1281 
1282 		if (size == 0)
1283 			continue;
1284 		DBG(" - %lx ,  %lx\n", base, size);
1285 #ifdef CONFIG_PPC64
1286 		if (iommu_is_off) {
1287 			if (base >= 0x80000000ul)
1288 				continue;
1289 			if ((base + size) > 0x80000000ul)
1290 				size = 0x80000000ul - base;
1291 		}
1292 #endif
1293 		lmb_add(base, size);
1294 	}
1295 	return 0;
1296 }
1297 
1298 static void __init early_reserve_mem(void)
1299 {
1300 	unsigned long base, size;
1301 	unsigned long *reserve_map;
1302 
1303 	reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
1304 					initial_boot_params->off_mem_rsvmap);
1305 	while (1) {
1306 		base = *(reserve_map++);
1307 		size = *(reserve_map++);
1308 		if (size == 0)
1309 			break;
1310 		DBG("reserving: %lx -> %lx\n", base, size);
1311 		lmb_reserve(base, size);
1312 	}
1313 
1314 #if 0
1315 	DBG("memory reserved, lmbs :\n");
1316       	lmb_dump_all();
1317 #endif
1318 }
1319 
1320 void __init early_init_devtree(void *params)
1321 {
1322 	DBG(" -> early_init_devtree()\n");
1323 
1324 	/* Setup flat device-tree pointer */
1325 	initial_boot_params = params;
1326 
1327 	/* Retrieve various information from the /chosen node of the
1328 	 * device-tree, including the platform type, initrd location and
1329 	 * size, TCE reserve, and more ...
1330 	 */
1331 	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
1332 
1333 	/* Scan memory nodes and rebuild LMBs */
1334 	lmb_init();
1335 	of_scan_flat_dt(early_init_dt_scan_root, NULL);
1336 	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
1337 	lmb_enforce_memory_limit(memory_limit);
1338 	lmb_analyze();
1339 
1340 	DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
1341 
1342 	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
1343 	lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
1344 #ifdef CONFIG_CRASH_DUMP
1345 	lmb_reserve(0, KDUMP_RESERVE_LIMIT);
1346 #endif
1347 	early_reserve_mem();
1348 
1349 	DBG("Scanning CPUs ...\n");
1350 
1351 	/* Retrieve CPU-related information from the flat tree
1352 	 * (altivec support, boot CPU ID, ...)
1353 	 */
1354 	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
1355 
1356 	DBG(" <- early_init_devtree()\n");
1357 }
1358 
1359 #undef printk
1360 
1361 int
1362 prom_n_addr_cells(struct device_node* np)
1363 {
1364 	int* ip;
1365 	do {
1366 		if (np->parent)
1367 			np = np->parent;
1368 		ip = (int *) get_property(np, "#address-cells", NULL);
1369 		if (ip != NULL)
1370 			return *ip;
1371 	} while (np->parent);
1372 	/* No #address-cells property for the root node, default to 1 */
1373 	return 1;
1374 }
1375 EXPORT_SYMBOL(prom_n_addr_cells);
1376 
1377 int
1378 prom_n_size_cells(struct device_node* np)
1379 {
1380 	int* ip;
1381 	do {
1382 		if (np->parent)
1383 			np = np->parent;
1384 		ip = (int *) get_property(np, "#size-cells", NULL);
1385 		if (ip != NULL)
1386 			return *ip;
1387 	} while (np->parent);
1388 	/* No #size-cells property for the root node, default to 1 */
1389 	return 1;
1390 }
1391 EXPORT_SYMBOL(prom_n_size_cells);
1392 
1393 /**
1394  * Work out the sense (active-low level / active-high edge)
1395  * of each interrupt from the device tree.
1396  */
1397 void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1398 {
1399 	struct device_node *np;
1400 	int i, j;
1401 
1402 	/* default to level-triggered */
1403 	memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);
1404 
1405 	for (np = allnodes; np != 0; np = np->allnext) {
1406 		for (j = 0; j < np->n_intrs; j++) {
1407 			i = np->intrs[j].line;
1408 			if (i >= off && i < max)
1409 				senses[i-off] = np->intrs[j].sense;
1410 		}
1411 	}
1412 }
1413 
1414 /**
1415  * Construct and return a list of the device_nodes with a given name.
1416  */
1417 struct device_node *find_devices(const char *name)
1418 {
1419 	struct device_node *head, **prevp, *np;
1420 
1421 	prevp = &head;
1422 	for (np = allnodes; np != 0; np = np->allnext) {
1423 		if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1424 			*prevp = np;
1425 			prevp = &np->next;
1426 		}
1427 	}
1428 	*prevp = NULL;
1429 	return head;
1430 }
1431 EXPORT_SYMBOL(find_devices);
1432 
1433 /**
1434  * Construct and return a list of the device_nodes with a given type.
1435  */
1436 struct device_node *find_type_devices(const char *type)
1437 {
1438 	struct device_node *head, **prevp, *np;
1439 
1440 	prevp = &head;
1441 	for (np = allnodes; np != 0; np = np->allnext) {
1442 		if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1443 			*prevp = np;
1444 			prevp = &np->next;
1445 		}
1446 	}
1447 	*prevp = NULL;
1448 	return head;
1449 }
1450 EXPORT_SYMBOL(find_type_devices);
1451 
1452 /**
1453  * Returns all nodes linked together
1454  */
1455 struct device_node *find_all_nodes(void)
1456 {
1457 	struct device_node *head, **prevp, *np;
1458 
1459 	prevp = &head;
1460 	for (np = allnodes; np != 0; np = np->allnext) {
1461 		*prevp = np;
1462 		prevp = &np->next;
1463 	}
1464 	*prevp = NULL;
1465 	return head;
1466 }
1467 EXPORT_SYMBOL(find_all_nodes);
1468 
1469 /** Checks if the given "compat" string matches one of the strings in
1470  * the device's "compatible" property
1471  */
1472 int device_is_compatible(struct device_node *device, const char *compat)
1473 {
1474 	const char* cp;
1475 	int cplen, l;
1476 
1477 	cp = (char *) get_property(device, "compatible", &cplen);
1478 	if (cp == NULL)
1479 		return 0;
1480 	while (cplen > 0) {
1481 		if (strncasecmp(cp, compat, strlen(compat)) == 0)
1482 			return 1;
1483 		l = strlen(cp) + 1;
1484 		cp += l;
1485 		cplen -= l;
1486 	}
1487 
1488 	return 0;
1489 }
1490 EXPORT_SYMBOL(device_is_compatible);
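
/*
 * Usage sketch (illustrative only): finish_node_interrupts() above uses this
 * to spot a cascaded 8259, e.g.
 *
 *	if (ic && device_is_compatible(ic, "chrp,iic"))
 *		... treat irq[0] as an ISA interrupt number ...
 */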
1491 
1492 
1493 /**
1494  * Indicates whether the root node has a given value in its
1495  * compatible property.
1496  */
1497 int machine_is_compatible(const char *compat)
1498 {
1499 	struct device_node *root;
1500 	int rc = 0;
1501 
1502 	root = of_find_node_by_path("/");
1503 	if (root) {
1504 		rc = device_is_compatible(root, compat);
1505 		of_node_put(root);
1506 	}
1507 	return rc;
1508 }
1509 EXPORT_SYMBOL(machine_is_compatible);
1510 
1511 /**
1512  * Construct and return a list of the device_nodes with a given type
1513  * and compatible property.
1514  */
1515 struct device_node *find_compatible_devices(const char *type,
1516 					    const char *compat)
1517 {
1518 	struct device_node *head, **prevp, *np;
1519 
1520 	prevp = &head;
1521 	for (np = allnodes; np != 0; np = np->allnext) {
1522 		if (type != NULL
1523 		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1524 			continue;
1525 		if (device_is_compatible(np, compat)) {
1526 			*prevp = np;
1527 			prevp = &np->next;
1528 		}
1529 	}
1530 	*prevp = NULL;
1531 	return head;
1532 }
1533 EXPORT_SYMBOL(find_compatible_devices);
1534 
1535 /**
1536  * Find the device_node with a given full_name.
1537  */
1538 struct device_node *find_path_device(const char *path)
1539 {
1540 	struct device_node *np;
1541 
1542 	for (np = allnodes; np != 0; np = np->allnext)
1543 		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1544 			return np;
1545 	return NULL;
1546 }
1547 EXPORT_SYMBOL(find_path_device);
1548 
1549 /*******
1550  *
1551  * New implementation of the OF "find" APIs; these return a refcounted
1552  * object, so call of_node_put() when done.  The device tree and list
1553  * are protected by a rw_lock.
1554  *
1555  * Note that property management will need some locking as well,
1556  * this isn't dealt with yet.
1557  *
1558  *******/
1559 
1560 /**
1561  *	of_find_node_by_name - Find a node by its "name" property
1562  *	@from:	The node to start searching from, or NULL; the node
1563  *		you pass will not be searched, only the ones after
1564  *		it will.  Typically you pass what the previous call
1565  *		returned.  of_node_put() will be called on it.
1566  *	@name:	The name string to match against
1567  *
1568  *	Returns a node pointer with refcount incremented, use
1569  *	of_node_put() on it when done.
1570  */
1571 struct device_node *of_find_node_by_name(struct device_node *from,
1572 	const char *name)
1573 {
1574 	struct device_node *np;
1575 
1576 	read_lock(&devtree_lock);
1577 	np = from ? from->allnext : allnodes;
1578 	for (; np != 0; np = np->allnext)
1579 		if (np->name != 0 && strcasecmp(np->name, name) == 0
1580 		    && of_node_get(np))
1581 			break;
1582 	if (from)
1583 		of_node_put(from);
1584 	read_unlock(&devtree_lock);
1585 	return np;
1586 }
1587 EXPORT_SYMBOL(of_find_node_by_name);
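
/*
 * Iteration sketch (illustrative, not part of the original file): since the
 * previous node is dropped with of_node_put() on every call, walking all
 * matches is simply
 *
 *	struct device_node *np = NULL;
 *
 *	while ((np = of_find_node_by_name(np, "ethernet")) != NULL) {
 *		... use np ...
 *	}
 */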
1588 
1589 /**
1590  *	of_find_node_by_type - Find a node by its "device_type" property
1591  *	@from:	The node to start searching from, or NULL; the node
1592  *		you pass will not be searched, only the ones after
1593  *		it will.  Typically you pass what the previous call
1594  *		returned.  of_node_put() will be called on it.
1595  *	@type:	The type string to match against
1596  *
1597  *	Returns a node pointer with refcount incremented, use
1598  *	of_node_put() on it when done.
1599  */
1600 struct device_node *of_find_node_by_type(struct device_node *from,
1601 	const char *type)
1602 {
1603 	struct device_node *np;
1604 
1605 	read_lock(&devtree_lock);
1606 	np = from ? from->allnext : allnodes;
1607 	for (; np != 0; np = np->allnext)
1608 		if (np->type != 0 && strcasecmp(np->type, type) == 0
1609 		    && of_node_get(np))
1610 			break;
1611 	if (from)
1612 		of_node_put(from);
1613 	read_unlock(&devtree_lock);
1614 	return np;
1615 }
1616 EXPORT_SYMBOL(of_find_node_by_type);
1617 
1618 /**
1619  *	of_find_compatible_node - Find a node based on type and one of the
1620  *                                tokens in its "compatible" property
1621  *	@from:		The node to start searching from, or NULL; the node
1622  *			you pass will not be searched, only the ones after
1623  *			it will.  Typically you pass what the previous call
1624  *			returned.  of_node_put() will be called on it.
1625  *	@type:		The type string to match "device_type" or NULL to ignore
1626  *	@compatible:	The string to match to one of the tokens in the device
1627  *			"compatible" list.
1628  *
1629  *	Returns a node pointer with refcount incremented, use
1630  *	of_node_put() on it when done.
1631  */
1632 struct device_node *of_find_compatible_node(struct device_node *from,
1633 	const char *type, const char *compatible)
1634 {
1635 	struct device_node *np;
1636 
1637 	read_lock(&devtree_lock);
1638 	np = from ? from->allnext : allnodes;
1639 	for (; np != 0; np = np->allnext) {
1640 		if (type != NULL
1641 		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1642 			continue;
1643 		if (device_is_compatible(np, compatible) && of_node_get(np))
1644 			break;
1645 	}
1646 	if (from)
1647 		of_node_put(from);
1648 	read_unlock(&devtree_lock);
1649 	return np;
1650 }
1651 EXPORT_SYMBOL(of_find_compatible_node);
1652 
1653 /**
1654  *	of_find_node_by_path - Find a node matching a full OF path
1655  *	@path:	The full path to match
1656  *
1657  *	Returns a node pointer with refcount incremented, use
1658  *	of_node_put() on it when done.
1659  */
1660 struct device_node *of_find_node_by_path(const char *path)
1661 {
1662 	struct device_node *np = allnodes;
1663 
1664 	read_lock(&devtree_lock);
1665 	for (; np != 0; np = np->allnext) {
1666 		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1667 		    && of_node_get(np))
1668 			break;
1669 	}
1670 	read_unlock(&devtree_lock);
1671 	return np;
1672 }
1673 EXPORT_SYMBOL(of_find_node_by_path);
1674 
1675 /**
1676  *	of_find_node_by_phandle - Find a node given a phandle
1677  *	@handle:	phandle of the node to find
1678  *
1679  *	Returns a node pointer with refcount incremented, use
1680  *	of_node_put() on it when done.
1681  */
1682 struct device_node *of_find_node_by_phandle(phandle handle)
1683 {
1684 	struct device_node *np;
1685 
1686 	read_lock(&devtree_lock);
1687 	for (np = allnodes; np != 0; np = np->allnext)
1688 		if (np->linux_phandle == handle)
1689 			break;
1690 	if (np)
1691 		of_node_get(np);
1692 	read_unlock(&devtree_lock);
1693 	return np;
1694 }
1695 EXPORT_SYMBOL(of_find_node_by_phandle);
1696 
1697 /**
1698  *	of_find_all_nodes - Get next node in global list
1699  *	@prev:	Previous node or NULL to start iteration
1700  *		of_node_put() will be called on it
1701  *
1702  *	Returns a node pointer with refcount incremented, use
1703  *	of_node_put() on it when done.
1704  */
1705 struct device_node *of_find_all_nodes(struct device_node *prev)
1706 {
1707 	struct device_node *np;
1708 
1709 	read_lock(&devtree_lock);
1710 	np = prev ? prev->allnext : allnodes;
1711 	for (; np != 0; np = np->allnext)
1712 		if (of_node_get(np))
1713 			break;
1714 	if (prev)
1715 		of_node_put(prev);
1716 	read_unlock(&devtree_lock);
1717 	return np;
1718 }
1719 EXPORT_SYMBOL(of_find_all_nodes);
1720 
1721 /**
1722  *	of_get_parent - Get a node's parent if any
1723  *	@node:	Node to get parent
1724  *
1725  *	Returns a node pointer with refcount incremented, use
1726  *	of_node_put() on it when done.
1727  */
1728 struct device_node *of_get_parent(const struct device_node *node)
1729 {
1730 	struct device_node *np;
1731 
1732 	if (!node)
1733 		return NULL;
1734 
1735 	read_lock(&devtree_lock);
1736 	np = of_node_get(node->parent);
1737 	read_unlock(&devtree_lock);
1738 	return np;
1739 }
1740 EXPORT_SYMBOL(of_get_parent);
1741 
1742 /**
1743  *	of_get_next_child - Iterate over a node's children
1744  *	@node:	parent node
1745  *	@prev:	previous child of the parent node, or NULL to get first
1746  *
1747  *	Returns a node pointer with refcount incremented, use
1748  *	of_node_put() on it when done.
1749  */
1750 struct device_node *of_get_next_child(const struct device_node *node,
1751 	struct device_node *prev)
1752 {
1753 	struct device_node *next;
1754 
1755 	read_lock(&devtree_lock);
1756 	next = prev ? prev->sibling : node->child;
1757 	for (; next != 0; next = next->sibling)
1758 		if (of_node_get(next))
1759 			break;
1760 	if (prev)
1761 		of_node_put(prev);
1762 	read_unlock(&devtree_lock);
1763 	return next;
1764 }
1765 EXPORT_SYMBOL(of_get_next_child);
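
/*
 * Iteration sketch (illustrative only): walking the children of "parent"
 * with the refcounting handled by of_get_next_child() itself.
 *
 *	struct device_node *child = NULL;
 *
 *	while ((child = of_get_next_child(parent, child)) != NULL) {
 *		... use child ...
 *	}
 */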
1766 
1767 /**
1768  *	of_node_get - Increment refcount of a node
1769  *	@node:	Node to inc refcount, NULL is supported to
1770  *		simplify writing of callers
1771  *
1772  *	Returns node.
1773  */
1774 struct device_node *of_node_get(struct device_node *node)
1775 {
1776 	if (node)
1777 		kref_get(&node->kref);
1778 	return node;
1779 }
1780 EXPORT_SYMBOL(of_node_get);
1781 
1782 static inline struct device_node * kref_to_device_node(struct kref *kref)
1783 {
1784 	return container_of(kref, struct device_node, kref);
1785 }
1786 
1787 /**
1788  *	of_node_release - release a dynamically allocated node
1789  *	@kref:  kref element of the node to be released
1790  *
1791  *	In of_node_put() this function is passed to kref_put()
1792  *	as the destructor.
1793  */
1794 static void of_node_release(struct kref *kref)
1795 {
1796 	struct device_node *node = kref_to_device_node(kref);
1797 	struct property *prop = node->properties;
1798 
1799 	if (!OF_IS_DYNAMIC(node))
1800 		return;
1801 	while (prop) {
1802 		struct property *next = prop->next;
1803 		kfree(prop->name);
1804 		kfree(prop->value);
1805 		kfree(prop);
1806 		prop = next;
1807 	}
1808 	kfree(node->intrs);
1809 	kfree(node->addrs);
1810 	kfree(node->full_name);
1811 	kfree(node->data);
1812 	kfree(node);
1813 }
1814 
1815 /**
1816  *	of_node_put - Decrement refcount of a node
1817  *	@node:	Node to dec refcount, NULL is supported to
1818  *		simplify writing of callers
1819  *
1820  */
1821 void of_node_put(struct device_node *node)
1822 {
1823 	if (node)
1824 		kref_put(&node->kref, of_node_release);
1825 }
1826 EXPORT_SYMBOL(of_node_put);
1827 
1828 /*
1829  * Plug a device node into the tree and global list.
1830  */
1831 void of_attach_node(struct device_node *np)
1832 {
1833 	write_lock(&devtree_lock);
1834 	np->sibling = np->parent->child;
1835 	np->allnext = allnodes;
1836 	np->parent->child = np;
1837 	allnodes = np;
1838 	write_unlock(&devtree_lock);
1839 }
1840 
1841 /*
1842  * "Unplug" a node from the device tree.  The caller must hold
1843  * a reference to the node.  The memory associated with the node
1844  * is not freed until its refcount goes to zero.
1845  */
1846 void of_detach_node(const struct device_node *np)
1847 {
1848 	struct device_node *parent;
1849 
1850 	write_lock(&devtree_lock);
1851 
1852 	parent = np->parent;
1853 
1854 	if (allnodes == np)
1855 		allnodes = np->allnext;
1856 	else {
1857 		struct device_node *prev;
1858 		for (prev = allnodes;
1859 		     prev->allnext != np;
1860 		     prev = prev->allnext)
1861 			;
1862 		prev->allnext = np->allnext;
1863 	}
1864 
1865 	if (parent->child == np)
1866 		parent->child = np->sibling;
1867 	else {
1868 		struct device_node *prevsib;
1869 		for (prevsib = np->parent->child;
1870 		     prevsib->sibling != np;
1871 		     prevsib = prevsib->sibling)
1872 			;
1873 		prevsib->sibling = np->sibling;
1874 	}
1875 
1876 	write_unlock(&devtree_lock);
1877 }
1878 
1879 #ifdef CONFIG_PPC_PSERIES
1880 /*
1881  * Fix up the uninitialized fields in a new device node:
1882  * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1883  *
1884  * A lot of boot-time code is duplicated here, because functions such
1885  * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1886  * slab allocator.
1887  *
1888  * This should probably be split up into smaller chunks.
1889  */
1890 
1891 static int of_finish_dynamic_node(struct device_node *node,
1892 				  unsigned long *unused1, int unused2,
1893 				  int unused3, int unused4)
1894 {
1895 	struct device_node *parent = of_get_parent(node);
1896 	int err = 0;
1897 	phandle *ibm_phandle;
1898 
1899 	node->name = get_property(node, "name", NULL);
1900 	node->type = get_property(node, "device_type", NULL);
1901 
1902 	if (!parent) {
1903 		err = -ENODEV;
1904 		goto out;
1905 	}
1906 
1907 	/* We don't support that function on PowerMac, at least
1908 	 * not yet
1909 	 */
1910 	if (_machine == PLATFORM_POWERMAC)
1911 		return -ENODEV;
1912 
1913 	/* fix up new node's linux_phandle field */
1914 	if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL)))
1915 		node->linux_phandle = *ibm_phandle;
1916 
1917 out:
1918 	of_node_put(parent);
1919 	return err;
1920 }
1921 
1922 static int prom_reconfig_notifier(struct notifier_block *nb,
1923 				  unsigned long action, void *node)
1924 {
1925 	int err;
1926 
1927 	switch (action) {
1928 	case PSERIES_RECONFIG_ADD:
1929 		err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
1930 		if (err < 0) {
1931 			printk(KERN_ERR "finish_node returned %d\n", err);
1932 			err = NOTIFY_BAD;
1933 		}
1934 		break;
1935 	default:
1936 		err = NOTIFY_DONE;
1937 		break;
1938 	}
1939 	return err;
1940 }
1941 
1942 static struct notifier_block prom_reconfig_nb = {
1943 	.notifier_call = prom_reconfig_notifier,
1944 	.priority = 10, /* This one needs to run first */
1945 };
1946 
1947 static int __init prom_reconfig_setup(void)
1948 {
1949 	return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
1950 }
1951 __initcall(prom_reconfig_setup);
1952 #endif
1953 
1954 /*
1955  * Find a property with a given name for a given node
1956  * and return the value.
1957  */
1958 unsigned char *get_property(struct device_node *np, const char *name,
1959 			    int *lenp)
1960 {
1961 	struct property *pp;
1962 
1963 	for (pp = np->properties; pp != 0; pp = pp->next)
1964 		if (strcmp(pp->name, name) == 0) {
1965 			if (lenp != 0)
1966 				*lenp = pp->length;
1967 			return pp->value;
1968 		}
1969 	return NULL;
1970 }
1971 EXPORT_SYMBOL(get_property);
1972 
1973 /*
1974  * Add a property to a node
1975  */
1976 int prom_add_property(struct device_node* np, struct property* prop)
1977 {
1978 	struct property **next;
1979 
1980 	prop->next = NULL;
1981 	write_lock(&devtree_lock);
1982 	next = &np->properties;
1983 	while (*next) {
1984 		if (strcmp(prop->name, (*next)->name) == 0) {
1985 			/* duplicate ! don't insert it */
1986 			write_unlock(&devtree_lock);
1987 			return -1;
1988 		}
1989 		next = &(*next)->next;
1990 	}
1991 	*next = prop;
1992 	write_unlock(&devtree_lock);
1993 
1994 #ifdef CONFIG_PROC_DEVICETREE
1995 	/* try to add to proc as well if it was initialized */
1996 	if (np->pde)
1997 		proc_device_tree_add_prop(np->pde, prop);
1998 #endif /* CONFIG_PROC_DEVICETREE */
1999 
2000 	return 0;
2001 }
2002 
2003 /* I quickly hacked that one, check against spec ! */
2004 static inline unsigned long
2005 bus_space_to_resource_flags(unsigned int bus_space)
2006 {
2007 	u8 space = (bus_space >> 24) & 0xf;
2008 	if (space == 0)
2009 		space = 0x02;
2010 	if (space == 0x02)
2011 		return IORESOURCE_MEM;
2012 	else if (space == 0x01)
2013 		return IORESOURCE_IO;
2014 	else {
2015 		printk(KERN_WARNING "prom.c: bus_space_to_resource_flags(), space: %x\n",
2016 		    	bus_space);
2017 		return 0;
2018 	}
2019 }
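
/*
 * Example decode (illustrative): for the OF PCI phys.hi word 0x82000010
 * (ss = 10b, 32-bit memory space) this returns IORESOURCE_MEM; for
 * 0x81000014 (ss = 01b, I/O space) it returns IORESOURCE_IO.
 */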
2020 
2021 #ifdef CONFIG_PCI
2022 static struct resource *find_parent_pci_resource(struct pci_dev* pdev,
2023 						 struct address_range *range)
2024 {
2025 	unsigned long mask;
2026 	int i;
2027 
2028 	/* Check this one */
2029 	mask = bus_space_to_resource_flags(range->space);
2030 	for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2031 		if ((pdev->resource[i].flags & mask) == mask &&
2032 			pdev->resource[i].start <= range->address &&
2033 			pdev->resource[i].end > range->address) {
2034 				if ((range->address + range->size - 1) > pdev->resource[i].end) {
2035 					/* Add better message */
2036 					printk(KERN_WARNING "PCI/OF resource overlap !\n");
2037 					return NULL;
2038 				}
2039 				break;
2040 			}
2041 	}
2042 	if (i == DEVICE_COUNT_RESOURCE)
2043 		return NULL;
2044 	return &pdev->resource[i];
2045 }
2046 
2047 /*
2048  * Request an OF device resource. Currently handles children of PCI devices,
2049  * or other nodes attached to the root node. Ultimately, put some
2050  * link to resources in the OF node.
2051  */
2052 struct resource *request_OF_resource(struct device_node* node, int index,
2053 				     const char* name_postfix)
2054 {
2055 	struct pci_dev* pcidev;
2056 	u8 pci_bus, pci_devfn;
2057 	unsigned long iomask;
2058 	struct device_node* nd;
2059 	struct resource* parent;
2060 	struct resource *res = NULL;
2061 	int nlen, plen;
2062 
2063 	if (index >= node->n_addrs)
2064 		goto fail;
2065 
2066 	/* Sanity check on bus space */
2067 	iomask = bus_space_to_resource_flags(node->addrs[index].space);
2068 	if (iomask & IORESOURCE_MEM)
2069 		parent = &iomem_resource;
2070 	else if (iomask & IORESOURCE_IO)
2071 		parent = &ioport_resource;
2072 	else
2073 		goto fail;
2074 
2075 	/* Find a PCI parent if any */
2076 	nd = node;
2077 	pcidev = NULL;
2078 	while (nd) {
2079 		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2080 			pcidev = pci_find_slot(pci_bus, pci_devfn);
2081 		if (pcidev) break;
2082 		nd = nd->parent;
2083 	}
2084 	if (pcidev)
2085 		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2086 	if (!parent) {
2087 		printk(KERN_WARNING "request_OF_resource(%s), parent not found\n",
2088 			node->name);
2089 		goto fail;
2090 	}
2091 
2092 	res = __request_region(parent, node->addrs[index].address,
2093 			       node->addrs[index].size, NULL);
2094 	if (!res)
2095 		goto fail;
2096 	nlen = strlen(node->name);
2097 	plen = name_postfix ? strlen(name_postfix) : 0;
2098 	res->name = (const char *)kmalloc(nlen+plen+1, GFP_KERNEL);
2099 	if (res->name) {
2100 		strcpy((char *)res->name, node->name);
2101 		if (plen)
2102 			strcpy((char *)res->name+nlen, name_postfix);
2103 	}
2104 	return res;
2105 fail:
2106 	return NULL;
2107 }
2108 EXPORT_SYMBOL(request_OF_resource);
2109 
2110 int release_OF_resource(struct device_node *node, int index)
2111 {
2112 	struct pci_dev* pcidev;
2113 	u8 pci_bus, pci_devfn;
2114 	unsigned long iomask, start, end;
2115 	struct device_node* nd;
2116 	struct resource* parent;
2117 	struct resource *res = NULL;
2118 
2119 	if (index >= node->n_addrs)
2120 		return -EINVAL;
2121 
2122 	/* Sanity check on bus space */
2123 	iomask = bus_space_to_resource_flags(node->addrs[index].space);
2124 	if (iomask & IORESOURCE_MEM)
2125 		parent = &iomem_resource;
2126 	else if (iomask & IORESOURCE_IO)
2127 		parent = &ioport_resource;
2128 	else
2129 		return -EINVAL;
2130 
2131 	/* Find a PCI parent if any */
2132 	nd = node;
2133 	pcidev = NULL;
2134 	while(nd) {
2135 		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2136 			pcidev = pci_find_slot(pci_bus, pci_devfn);
2137 		if (pcidev) break;
2138 		nd = nd->parent;
2139 	}
2140 	if (pcidev)
2141 		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2142 	if (!parent) {
2143 		printk(KERN_WARNING "release_OF_resource(%s), parent not found\n",
2144 			node->name);
2145 		return -ENODEV;
2146 	}
2147 
2148 	/* Find our resource within the parent and its children */
2149 	res = parent->child;
2150 	start = node->addrs[index].address;
2151 	end = start + node->addrs[index].size - 1;
2152 	while (res) {
2153 		if (res->start == start && res->end == end &&
2154 		    (res->flags & IORESOURCE_BUSY))
2155 		    	break;
2156 		if (res->start <= start && res->end >= end)
2157 			res = res->child;
2158 		else
2159 			res = res->sibling;
2160 	}
2161 	if (!res)
2162 		return -ENODEV;
2163 
2164 	if (res->name) {
2165 		kfree(res->name);
2166 		res->name = NULL;
2167 	}
2168 	release_resource(res);
2169 	kfree(res);
2170 
2171 	return 0;
2172 }
2173 EXPORT_SYMBOL(release_OF_resource);
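
/*
 * Usage sketch (illustrative, not from the original file; the " (sample)"
 * postfix is hypothetical): claim the first address range of a node and
 * release it again.
 *
 *	struct resource *r = request_OF_resource(np, 0, " (sample)");
 *
 *	if (r != NULL) {
 *		... use the range ...
 *		release_OF_resource(np, 0);
 *	}
 */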
2174 #endif /* CONFIG_PCI */
2175