xref: /openbmc/linux/arch/powerpc/kernel/prom.c (revision 458148c0)
1 /*
2  * Procedures for creating, accessing and interpreting the device tree.
3  *
4  * Paul Mackerras	August 1996.
5  * Copyright (C) 1996-2005 Paul Mackerras.
6  *
7  *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8  *    {engebret|bergner}@us.ibm.com
9  *
10  *      This program is free software; you can redistribute it and/or
11  *      modify it under the terms of the GNU General Public License
12  *      as published by the Free Software Foundation; either version
13  *      2 of the License, or (at your option) any later version.
14  */
15 
16 #undef DEBUG
17 
18 #include <stdarg.h>
19 #include <linux/config.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/init.h>
23 #include <linux/threads.h>
24 #include <linux/spinlock.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/stringify.h>
28 #include <linux/delay.h>
29 #include <linux/initrd.h>
30 #include <linux/bitops.h>
31 #include <linux/module.h>
32 #include <linux/kexec.h>
33 #include <linux/debugfs.h>
34 
35 #include <asm/prom.h>
36 #include <asm/rtas.h>
37 #include <asm/lmb.h>
38 #include <asm/page.h>
39 #include <asm/processor.h>
40 #include <asm/irq.h>
41 #include <asm/io.h>
42 #include <asm/kdump.h>
43 #include <asm/smp.h>
44 #include <asm/system.h>
45 #include <asm/mmu.h>
46 #include <asm/pgtable.h>
47 #include <asm/pci.h>
48 #include <asm/iommu.h>
49 #include <asm/btext.h>
50 #include <asm/sections.h>
51 #include <asm/machdep.h>
52 #include <asm/pSeries_reconfig.h>
53 #include <asm/pci-bridge.h>
54 #include <asm/kexec.h>
55 
56 #ifdef DEBUG
57 #define DBG(fmt...) printk(KERN_ERR fmt)
58 #else
59 #define DBG(fmt...)
60 #endif
61 
62 
63 static int __initdata dt_root_addr_cells;
64 static int __initdata dt_root_size_cells;
65 
66 #ifdef CONFIG_PPC64
67 int __initdata iommu_is_off;
68 int __initdata iommu_force_on;
69 unsigned long tce_alloc_start, tce_alloc_end;
70 #endif
71 
72 typedef u32 cell_t;
73 
74 #if 0
75 static struct boot_param_header *initial_boot_params __initdata;
76 #else
77 struct boot_param_header *initial_boot_params;
78 #endif
79 
80 static struct device_node *allnodes = NULL;
81 
82 /* use when traversing tree through the allnext, child, sibling,
83  * or parent members of struct device_node.
84  */
85 static DEFINE_RWLOCK(devtree_lock);
86 
87 /* exported to the outside world */
88 struct device_node *of_chosen;
89 
90 struct device_node *dflt_interrupt_controller;
91 int num_interrupt_controllers;
92 
93 /*
94  * Wrapper for allocating memory for various data that needs to be
95  * attached to device nodes as they are processed at boot or when
96  * added to the device tree later (e.g. DLPAR).  At boot there is
97  * already a region reserved so we just increment *mem_start by size;
98  * otherwise we call kmalloc.
99  */
100 static void * prom_alloc(unsigned long size, unsigned long *mem_start)
101 {
102 	unsigned long tmp;
103 
104 	if (!mem_start)
105 		return kmalloc(size, GFP_KERNEL);
106 
107 	tmp = *mem_start;
108 	*mem_start += size;
109 	return (void *)tmp;
110 }
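
/*
 * Illustrative usage sketch (not real kernel code; the variable names are
 * invented).  At boot, callers pass a cursor into the pre-reserved region
 * and prom_alloc() just bumps it; later (e.g. DLPAR) they pass NULL and
 * get kmalloc'ed memory instead:
 *
 *	unsigned long mem = reserved_region_start;	/* hypothetical cursor */
 *	struct interrupt_info *ii;
 *
 *	ii = prom_alloc(sizeof(*ii), &mem);	/* boot: advances mem */
 *	ii = prom_alloc(sizeof(*ii), NULL);	/* runtime: kmalloc(GFP_KERNEL) */
 */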
111 
112 /*
113  * Find the device_node with a given phandle.
114  */
115 static struct device_node * find_phandle(phandle ph)
116 {
117 	struct device_node *np;
118 
119 	for (np = allnodes; np != 0; np = np->allnext)
120 		if (np->linux_phandle == ph)
121 			return np;
122 	return NULL;
123 }
124 
125 /*
126  * Find the interrupt parent of a node.
127  */
128 static struct device_node * __devinit intr_parent(struct device_node *p)
129 {
130 	phandle *parp;
131 
132 	parp = (phandle *) get_property(p, "interrupt-parent", NULL);
133 	if (parp == NULL)
134 		return p->parent;
135 	p = find_phandle(*parp);
136 	if (p != NULL)
137 		return p;
138 	/*
139 	 * On a powermac booted with BootX, we don't get to know the
140 	 * phandles for any nodes, so find_phandle will return NULL.
141 	 * Fortunately these machines only have one interrupt controller
142 	 * so there isn't in fact any ambiguity.  -- paulus
143 	 */
144 	if (num_interrupt_controllers == 1)
145 		p = dflt_interrupt_controller;
146 	return p;
147 }
148 
149 /*
150  * Find out the size of each entry of the interrupts property
151  * for a node.
152  */
153 int __devinit prom_n_intr_cells(struct device_node *np)
154 {
155 	struct device_node *p;
156 	unsigned int *icp;
157 
158 	for (p = np; (p = intr_parent(p)) != NULL; ) {
159 		icp = (unsigned int *)
160 			get_property(p, "#interrupt-cells", NULL);
161 		if (icp != NULL)
162 			return *icp;
163 		if (get_property(p, "interrupt-controller", NULL) != NULL
164 		    || get_property(p, "interrupt-map", NULL) != NULL) {
165 			printk("oops, node %s doesn't have #interrupt-cells\n",
166 			       p->full_name);
167 			return 1;
168 		}
169 	}
170 #ifdef DEBUG_IRQ
171 	printk("prom_n_intr_cells failed for %s\n", np->full_name);
172 #endif
173 	return 1;
174 }
175 
176 /*
177  * Map an interrupt from a device up to the platform interrupt
178  * descriptor.
179  */
180 static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
181 				   struct device_node *np, unsigned int *ints,
182 				   int nintrc)
183 {
184 	struct device_node *p, *ipar;
185 	unsigned int *imap, *imask, *ip;
186 	int i, imaplen, match;
187 	int newintrc = 0, newaddrc = 0;
188 	unsigned int *reg;
189 	int naddrc;
190 
191 	reg = (unsigned int *) get_property(np, "reg", NULL);
192 	naddrc = prom_n_addr_cells(np);
193 	p = intr_parent(np);
194 	while (p != NULL) {
195 		if (get_property(p, "interrupt-controller", NULL) != NULL)
196 			/* this node is an interrupt controller, stop here */
197 			break;
198 		imap = (unsigned int *)
199 			get_property(p, "interrupt-map", &imaplen);
200 		if (imap == NULL) {
201 			p = intr_parent(p);
202 			continue;
203 		}
204 		imask = (unsigned int *)
205 			get_property(p, "interrupt-map-mask", NULL);
206 		if (imask == NULL) {
207 			printk("oops, %s has interrupt-map but no mask\n",
208 			       p->full_name);
209 			return 0;
210 		}
211 		imaplen /= sizeof(unsigned int);
212 		match = 0;
213 		ipar = NULL;
214 		while (imaplen > 0 && !match) {
215 			/* check the child-interrupt field */
216 			match = 1;
217 			for (i = 0; i < naddrc && match; ++i)
218 				match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
219 			for (; i < naddrc + nintrc && match; ++i)
220 				match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
221 			imap += naddrc + nintrc;
222 			imaplen -= naddrc + nintrc;
223 			/* grab the interrupt parent */
224 			ipar = find_phandle((phandle) *imap++);
225 			--imaplen;
226 			if (ipar == NULL && num_interrupt_controllers == 1)
227 				/* cope with BootX not giving us phandles */
228 				ipar = dflt_interrupt_controller;
229 			if (ipar == NULL) {
230 				printk("oops, no int parent %x in map of %s\n",
231 				       imap[-1], p->full_name);
232 				return 0;
233 			}
234 			/* find the parent's # addr and intr cells */
235 			ip = (unsigned int *)
236 				get_property(ipar, "#interrupt-cells", NULL);
237 			if (ip == NULL) {
238 				printk("oops, no #interrupt-cells on %s\n",
239 				       ipar->full_name);
240 				return 0;
241 			}
242 			newintrc = *ip;
243 			ip = (unsigned int *)
244 				get_property(ipar, "#address-cells", NULL);
245 			newaddrc = (ip == NULL)? 0: *ip;
246 			imap += newaddrc + newintrc;
247 			imaplen -= newaddrc + newintrc;
248 		}
249 		if (imaplen < 0) {
250 			printk("oops, error decoding int-map on %s, len=%d\n",
251 			       p->full_name, imaplen);
252 			return 0;
253 		}
254 		if (!match) {
255 #ifdef DEBUG_IRQ
256 			printk("oops, no match in %s int-map for %s\n",
257 			       p->full_name, np->full_name);
258 #endif
259 			return 0;
260 		}
261 		p = ipar;
262 		naddrc = newaddrc;
263 		nintrc = newintrc;
264 		ints = imap - nintrc;
265 		reg = ints - naddrc;
266 	}
267 	if (p == NULL) {
268 #ifdef DEBUG_IRQ
269 		printk("hmmm, int tree for %s doesn't have ctrler\n",
270 		       np->full_name);
271 #endif
272 		return 0;
273 	}
274 	*irq = ints;
275 	*ictrler = p;
276 	return nintrc;
277 }
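
/*
 * Illustrative example of what map_interrupt() walks (values invented).
 * A PCI bridge node might carry:
 *
 *	#address-cells = <3>;
 *	#interrupt-cells = <1>;
 *	interrupt-map-mask = <0xf800 0 0 7>;
 *	interrupt-map = <0x0800 0 0 1   &mpic 0x1a 1>;
 *
 * Assuming the MPIC parent has #address-cells = <0> and
 * #interrupt-cells = <2>, each map entry is: child unit address (3 cells),
 * child interrupt (1 cell), parent phandle, then the parent interrupt
 * spec (2 cells).  A child whose masked "reg" and interrupt match the
 * first four cells is routed to that parent using the remaining cells.
 */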
278 
279 static unsigned char map_isa_senses[4] = {
280 	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
281 	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
282 	IRQ_SENSE_EDGE  | IRQ_POLARITY_NEGATIVE,
283 	IRQ_SENSE_EDGE  | IRQ_POLARITY_POSITIVE
284 };
285 
286 static unsigned char map_mpic_senses[4] = {
287 	IRQ_SENSE_EDGE  | IRQ_POLARITY_POSITIVE,
288 	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
289 	/* 2 seems to be used for the 8259 cascade... */
290 	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
291 	IRQ_SENSE_EDGE  | IRQ_POLARITY_NEGATIVE,
292 };
293 
294 static int __devinit finish_node_interrupts(struct device_node *np,
295 					    unsigned long *mem_start,
296 					    int measure_only)
297 {
298 	unsigned int *ints;
299 	int intlen, intrcells, intrcount;
300 	int i, j, n, sense;
301 	unsigned int *irq, virq;
302 	struct device_node *ic;
303 	int trace = 0;
304 
305 	//#define TRACE(fmt...) do { if (trace) { printk(fmt); mdelay(1000); } } while(0)
306 #define TRACE(fmt...)
307 
308 	if (!strcmp(np->name, "smu-doorbell"))
309 		trace = 1;
310 
311 	TRACE("Finishing SMU doorbell ! num_interrupt_controllers = %d\n",
312 	      num_interrupt_controllers);
313 
314 	if (num_interrupt_controllers == 0) {
315 		/*
316 		 * Old machines just have a list of interrupt numbers
317 		 * and no interrupt-controller nodes.
318 		 */
319 		ints = (unsigned int *) get_property(np, "AAPL,interrupts",
320 						     &intlen);
321 		/* XXX old interpret_pci_props looked in parent too */
322 		/* XXX old interpret_macio_props looked for interrupts
323 		   before AAPL,interrupts */
324 		if (ints == NULL)
325 			ints = (unsigned int *) get_property(np, "interrupts",
326 							     &intlen);
327 		if (ints == NULL)
328 			return 0;
329 
330 		np->n_intrs = intlen / sizeof(unsigned int);
331 		np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
332 				       mem_start);
333 		if (!np->intrs)
334 			return -ENOMEM;
335 		if (measure_only)
336 			return 0;
337 
338 		for (i = 0; i < np->n_intrs; ++i) {
339 			np->intrs[i].line = *ints++;
340 			np->intrs[i].sense = IRQ_SENSE_LEVEL
341 				| IRQ_POLARITY_NEGATIVE;
342 		}
343 		return 0;
344 	}
345 
346 	ints = (unsigned int *) get_property(np, "interrupts", &intlen);
347 	TRACE("ints=%p, intlen=%d\n", ints, intlen);
348 	if (ints == NULL)
349 		return 0;
350 	intrcells = prom_n_intr_cells(np);
351 	intlen /= intrcells * sizeof(unsigned int);
352 	TRACE("intrcells=%d, new intlen=%d\n", intrcells, intlen);
353 	np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
354 	if (!np->intrs)
355 		return -ENOMEM;
356 
357 	if (measure_only)
358 		return 0;
359 
360 	intrcount = 0;
361 	for (i = 0; i < intlen; ++i, ints += intrcells) {
362 		n = map_interrupt(&irq, &ic, np, ints, intrcells);
363 		TRACE("map, irq=%d, ic=%p, n=%d\n", irq, ic, n);
364 		if (n <= 0)
365 			continue;
366 
367 		/* don't map IRQ numbers under a cascaded 8259 controller */
368 		if (ic && device_is_compatible(ic, "chrp,iic")) {
369 			np->intrs[intrcount].line = irq[0];
370 			sense = (n > 1)? (irq[1] & 3): 3;
371 			np->intrs[intrcount].sense = map_isa_senses[sense];
372 		} else {
373 			virq = virt_irq_create_mapping(irq[0]);
374 			TRACE("virq=%d\n", virq);
375 #ifdef CONFIG_PPC64
376 			if (virq == NO_IRQ) {
377 				printk(KERN_CRIT "Could not allocate interrupt"
378 				       " number for %s\n", np->full_name);
379 				continue;
380 			}
381 #endif
382 			np->intrs[intrcount].line = irq_offset_up(virq);
383 			sense = (n > 1)? (irq[1] & 3): 1;
384 
385 			/* Apple uses the bits in there differently; let's
386 			 * only keep the real sense bit on Macs
387 			 */
388 			if (machine_is(powermac))
389 				sense &= 0x1;
390 			np->intrs[intrcount].sense = map_mpic_senses[sense];
391 		}
392 
393 #ifdef CONFIG_PPC64
394 		/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
395 		if (machine_is(powermac) && ic && ic->parent) {
396 			char *name = get_property(ic->parent, "name", NULL);
397 			if (name && !strcmp(name, "u3"))
398 				np->intrs[intrcount].line += 128;
399 			else if (!(name && (!strcmp(name, "mac-io") ||
400 					    !strcmp(name, "u4"))))
401 				/* ignore other cascaded controllers, such as
402 				   the k2-sata-root */
403 				break;
404 		}
405 #endif /* CONFIG_PPC64 */
406 		if (n > 2) {
407 			printk("hmmm, got %d intr cells for %s:", n,
408 			       np->full_name);
409 			for (j = 0; j < n; ++j)
410 				printk(" %d", irq[j]);
411 			printk("\n");
412 		}
413 		++intrcount;
414 	}
415 	np->n_intrs = intrcount;
416 
417 	return 0;
418 }
419 
420 static int __devinit finish_node(struct device_node *np,
421 				 unsigned long *mem_start,
422 				 int measure_only)
423 {
424 	struct device_node *child;
425 	int rc = 0;
426 
427 	rc = finish_node_interrupts(np, mem_start, measure_only);
428 	if (rc)
429 		goto out;
430 
431 	for (child = np->child; child != NULL; child = child->sibling) {
432 		rc = finish_node(child, mem_start, measure_only);
433 		if (rc)
434 			goto out;
435 	}
436 out:
437 	return rc;
438 }
439 
440 static void __init scan_interrupt_controllers(void)
441 {
442 	struct device_node *np;
443 	int n = 0;
444 	char *name, *ic;
445 	int iclen;
446 
447 	for (np = allnodes; np != NULL; np = np->allnext) {
448 		ic = get_property(np, "interrupt-controller", &iclen);
449 		name = get_property(np, "name", NULL);
450 		/* checking iclen makes sure we don't get a false
451 		   match on /chosen.interrupt_controller */
452 		if ((name != NULL
453 		     && strcmp(name, "interrupt-controller") == 0)
454 		    || (ic != NULL && iclen == 0
455 			&& strcmp(name, "AppleKiwi"))) {
456 			if (n == 0)
457 				dflt_interrupt_controller = np;
458 			++n;
459 		}
460 	}
461 	num_interrupt_controllers = n;
462 }
463 
464 /**
465  * finish_device_tree is called once things are running normally
466  * (i.e. with text and data mapped to the address they were linked at).
467  * It traverses the device tree and fills in some of the additional
468  * fields in each node, such as {n_}addrs and {n_}intrs; the virtual
469  * interrupt mapping is also initialized at this point.
470  */
471 void __init finish_device_tree(void)
472 {
473 	unsigned long start, end, size = 0;
474 
475 	DBG(" -> finish_device_tree\n");
476 
477 #ifdef CONFIG_PPC64
478 	/* Initialize virtual IRQ map */
479 	virt_irq_init();
480 #endif
481 	scan_interrupt_controllers();
482 
483 	/*
484 	 * Finish the device tree (pre-parsing some properties, etc.).
485 	 * We do this in two passes: one with "measure_only" set, which
486 	 * only measures the amount of memory needed, after which we can
487 	 * allocate that memory and call finish_node again. However,
488 	 * we must be careful, as most routines will fail nowadays when
489 	 * prom_alloc() returns 0, so we must make sure our first pass
490 	 * doesn't start at 0. We pre-initialize size to 16 for that
491 	 * reason and then subtract those additional 16 bytes.
492 	 */
493 	size = 16;
494 	finish_node(allnodes, &size, 1);
495 	size -= 16;
496 
497 	if (0 == size)
498 		end = start = 0;
499 	else
500 		end = start = (unsigned long)__va(lmb_alloc(size, 128));
501 
502 	finish_node(allnodes, &end, 0);
503 	BUG_ON(end != start + size);
504 
505 	DBG(" <- finish_device_tree\n");
506 }
507 
508 static inline char *find_flat_dt_string(u32 offset)
509 {
510 	return ((char *)initial_boot_params) +
511 		initial_boot_params->off_dt_strings + offset;
512 }
513 
514 /**
515  * This function is used to scan the flattened device tree; it is
516  * used to extract memory information at boot, before we can
517  * unflatten the tree.
518  */
519 int __init of_scan_flat_dt(int (*it)(unsigned long node,
520 				     const char *uname, int depth,
521 				     void *data),
522 			   void *data)
523 {
524 	unsigned long p = ((unsigned long)initial_boot_params) +
525 		initial_boot_params->off_dt_struct;
526 	int rc = 0;
527 	int depth = -1;
528 
529 	do {
530 		u32 tag = *((u32 *)p);
531 		char *pathp;
532 
533 		p += 4;
534 		if (tag == OF_DT_END_NODE) {
535 			depth --;
536 			continue;
537 		}
538 		if (tag == OF_DT_NOP)
539 			continue;
540 		if (tag == OF_DT_END)
541 			break;
542 		if (tag == OF_DT_PROP) {
543 			u32 sz = *((u32 *)p);
544 			p += 8;
545 			if (initial_boot_params->version < 0x10)
546 				p = _ALIGN(p, sz >= 8 ? 8 : 4);
547 			p += sz;
548 			p = _ALIGN(p, 4);
549 			continue;
550 		}
551 		if (tag != OF_DT_BEGIN_NODE) {
552 			printk(KERN_WARNING "Invalid tag %x scanning flattened"
553 			       " device tree !\n", tag);
554 			return -EINVAL;
555 		}
556 		depth++;
557 		pathp = (char *)p;
558 		p = _ALIGN(p + strlen(pathp) + 1, 4);
559 		if ((*pathp) == '/') {
560 			char *lp, *np;
561 			for (lp = NULL, np = pathp; *np; np++)
562 				if ((*np) == '/')
563 					lp = np+1;
564 			if (lp != NULL)
565 				pathp = lp;
566 		}
567 		rc = it(p, pathp, depth, data);
568 		if (rc != 0)
569 			break;
570 	} while(1);
571 
572 	return rc;
573 }
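
/*
 * Minimal usage sketch (illustrative only; my_scan_cb is a made-up name),
 * in the style of the early_init_dt_scan_*() callbacks below:
 *
 *	static int __init my_scan_cb(unsigned long node, const char *uname,
 *				     int depth, void *data)
 *	{
 *		char *type = of_get_flat_dt_prop(node, "device_type", NULL);
 *
 *		if (type == NULL || strcmp(type, "cpu") != 0)
 *			return 0;	/* keep scanning */
 *		return 1;		/* non-zero return stops the scan */
 *	}
 *
 *	of_scan_flat_dt(my_scan_cb, NULL);
 */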
574 
575 unsigned long __init of_get_flat_dt_root(void)
576 {
577 	unsigned long p = ((unsigned long)initial_boot_params) +
578 		initial_boot_params->off_dt_struct;
579 
580 	while(*((u32 *)p) == OF_DT_NOP)
581 		p += 4;
582 	BUG_ON (*((u32 *)p) != OF_DT_BEGIN_NODE);
583 	p += 4;
584 	return _ALIGN(p + strlen((char *)p) + 1, 4);
585 }
586 
587 /**
588  * This function can be used within an of_scan_flat_dt() callback to
589  * get access to properties.
590  */
591 void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
592 				 unsigned long *size)
593 {
594 	unsigned long p = node;
595 
596 	do {
597 		u32 tag = *((u32 *)p);
598 		u32 sz, noff;
599 		const char *nstr;
600 
601 		p += 4;
602 		if (tag == OF_DT_NOP)
603 			continue;
604 		if (tag != OF_DT_PROP)
605 			return NULL;
606 
607 		sz = *((u32 *)p);
608 		noff = *((u32 *)(p + 4));
609 		p += 8;
610 		if (initial_boot_params->version < 0x10)
611 			p = _ALIGN(p, sz >= 8 ? 8 : 4);
612 
613 		nstr = find_flat_dt_string(noff);
614 		if (nstr == NULL) {
615 			printk(KERN_WARNING "Can't find property index"
616 			       " name !\n");
617 			return NULL;
618 		}
619 		if (strcmp(name, nstr) == 0) {
620 			if (size)
621 				*size = sz;
622 			return (void *)p;
623 		}
624 		p += sz;
625 		p = _ALIGN(p, 4);
626 	} while(1);
627 }
628 
629 int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
630 {
631 	const char* cp;
632 	unsigned long cplen, l;
633 
634 	cp = of_get_flat_dt_prop(node, "compatible", &cplen);
635 	if (cp == NULL)
636 		return 0;
637 	while (cplen > 0) {
638 		if (strncasecmp(cp, compat, strlen(compat)) == 0)
639 			return 1;
640 		l = strlen(cp) + 1;
641 		cp += l;
642 		cplen -= l;
643 	}
644 
645 	return 0;
646 }
647 
648 static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
649 				       unsigned long align)
650 {
651 	void *res;
652 
653 	*mem = _ALIGN(*mem, align);
654 	res = (void *)*mem;
655 	*mem += size;
656 
657 	return res;
658 }
659 
660 static unsigned long __init unflatten_dt_node(unsigned long mem,
661 					      unsigned long *p,
662 					      struct device_node *dad,
663 					      struct device_node ***allnextpp,
664 					      unsigned long fpsize)
665 {
666 	struct device_node *np;
667 	struct property *pp, **prev_pp = NULL;
668 	char *pathp;
669 	u32 tag;
670 	unsigned int l, allocl;
671 	int has_name = 0;
672 	int new_format = 0;
673 
674 	tag = *((u32 *)(*p));
675 	if (tag != OF_DT_BEGIN_NODE) {
676 		printk("Weird tag at start of node: %x\n", tag);
677 		return mem;
678 	}
679 	*p += 4;
680 	pathp = (char *)*p;
681 	l = allocl = strlen(pathp) + 1;
682 	*p = _ALIGN(*p + l, 4);
683 
684 	/* Version 0x10 has a more compact unit name here instead of the full
685 	 * path. We accumulate the full path size using "fpsize" and rebuild
686 	 * it later. We detect this because the first character of the name is
687 	 * not '/'.
688 	 */
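	/*
	 * Illustrative example: with the compact (v0x10) format the blob
	 * carries only the unit name, e.g. "pci@1"; the full path, e.g.
	 * "/ht@0/pci@1", is rebuilt further down from the parent's
	 * already-rebuilt full_name.
	 */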
689 	if ((*pathp) != '/') {
690 		new_format = 1;
691 		if (fpsize == 0) {
692 			/* Root node: special case. fpsize accounts for the path
693 			 * plus the terminating zero. The root node only has '/',
694 			 * so fpsize should be 2, but we want to avoid first-level
695 			 * nodes having two leading '/', so we use fpsize 1 here.
696 			 */
697 			fpsize = 1;
698 			allocl = 2;
699 		} else {
700 			/* account for '/' and path size minus terminal 0
701 			 * already in 'l'
702 			 */
703 			fpsize += l;
704 			allocl = fpsize;
705 		}
706 	}
707 
708 
709 	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
710 				__alignof__(struct device_node));
711 	if (allnextpp) {
712 		memset(np, 0, sizeof(*np));
713 		np->full_name = ((char*)np) + sizeof(struct device_node);
714 		if (new_format) {
715 			char *p = np->full_name;
716 			/* rebuild full path for new format */
717 			if (dad && dad->parent) {
718 				strcpy(p, dad->full_name);
719 #ifdef DEBUG
720 				if ((strlen(p) + l + 1) != allocl) {
721 					DBG("%s: p: %d, l: %d, a: %d\n",
722 					    pathp, (int)strlen(p), l, allocl);
723 				}
724 #endif
725 				p += strlen(p);
726 			}
727 			*(p++) = '/';
728 			memcpy(p, pathp, l);
729 		} else
730 			memcpy(np->full_name, pathp, l);
731 		prev_pp = &np->properties;
732 		**allnextpp = np;
733 		*allnextpp = &np->allnext;
734 		if (dad != NULL) {
735 			np->parent = dad;
736 			/* we temporarily use the next field as `last_child'*/
737 			if (dad->next == 0)
738 				dad->child = np;
739 			else
740 				dad->next->sibling = np;
741 			dad->next = np;
742 		}
743 		kref_init(&np->kref);
744 	}
745 	while(1) {
746 		u32 sz, noff;
747 		char *pname;
748 
749 		tag = *((u32 *)(*p));
750 		if (tag == OF_DT_NOP) {
751 			*p += 4;
752 			continue;
753 		}
754 		if (tag != OF_DT_PROP)
755 			break;
756 		*p += 4;
757 		sz = *((u32 *)(*p));
758 		noff = *((u32 *)((*p) + 4));
759 		*p += 8;
760 		if (initial_boot_params->version < 0x10)
761 			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);
762 
763 		pname = find_flat_dt_string(noff);
764 		if (pname == NULL) {
765 			printk("Can't find property name in list !\n");
766 			break;
767 		}
768 		if (strcmp(pname, "name") == 0)
769 			has_name = 1;
770 		l = strlen(pname) + 1;
771 		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
772 					__alignof__(struct property));
773 		if (allnextpp) {
774 			if (strcmp(pname, "linux,phandle") == 0) {
775 				np->node = *((u32 *)*p);
776 				if (np->linux_phandle == 0)
777 					np->linux_phandle = np->node;
778 			}
779 			if (strcmp(pname, "ibm,phandle") == 0)
780 				np->linux_phandle = *((u32 *)*p);
781 			pp->name = pname;
782 			pp->length = sz;
783 			pp->value = (void *)*p;
784 			*prev_pp = pp;
785 			prev_pp = &pp->next;
786 		}
787 		*p = _ALIGN((*p) + sz, 4);
788 	}
789 	/* with version 0x10 we may not have the name property, recreate
790 	 * it here from the unit name if absent
791 	 */
792 	if (!has_name) {
793 		char *p = pathp, *ps = pathp, *pa = NULL;
794 		int sz;
795 
796 		while (*p) {
797 			if ((*p) == '@')
798 				pa = p;
799 			if ((*p) == '/')
800 				ps = p + 1;
801 			p++;
802 		}
803 		if (pa < ps)
804 			pa = p;
805 		sz = (pa - ps) + 1;
806 		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
807 					__alignof__(struct property));
808 		if (allnextpp) {
809 			pp->name = "name";
810 			pp->length = sz;
811 			pp->value = (unsigned char *)(pp + 1);
812 			*prev_pp = pp;
813 			prev_pp = &pp->next;
814 			memcpy(pp->value, ps, sz - 1);
815 			((char *)pp->value)[sz - 1] = 0;
816 			DBG("fixed up name for %s -> %s\n", pathp, pp->value);
817 		}
818 	}
819 	if (allnextpp) {
820 		*prev_pp = NULL;
821 		np->name = get_property(np, "name", NULL);
822 		np->type = get_property(np, "device_type", NULL);
823 
824 		if (!np->name)
825 			np->name = "<NULL>";
826 		if (!np->type)
827 			np->type = "<NULL>";
828 	}
829 	while (tag == OF_DT_BEGIN_NODE) {
830 		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
831 		tag = *((u32 *)(*p));
832 	}
833 	if (tag != OF_DT_END_NODE) {
834 		printk("Weird tag at end of node: %x\n", tag);
835 		return mem;
836 	}
837 	*p += 4;
838 	return mem;
839 }
840 
841 static int __init early_parse_mem(char *p)
842 {
843 	if (!p)
844 		return 1;
845 
846 	memory_limit = PAGE_ALIGN(memparse(p, &p));
847 	DBG("memory limit = 0x%lx\n", memory_limit);
848 
849 	return 0;
850 }
851 early_param("mem", early_parse_mem);
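
/*
 * For example, booting with "mem=512M" caps the usable memory at 512MB;
 * the parsed value is rounded up to a page boundary by PAGE_ALIGN above.
 */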
852 
853 /*
854  * The device tree may be allocated beyond our memory limit, or inside the
855  * crash kernel region for kdump. If so, move it out now.
856  */
857 static void move_device_tree(void)
858 {
859 	unsigned long start, size;
860 	void *p;
861 
862 	DBG("-> move_device_tree\n");
863 
864 	start = __pa(initial_boot_params);
865 	size = initial_boot_params->totalsize;
866 
867 	if ((memory_limit && (start + size) > memory_limit) ||
868 			overlaps_crashkernel(start, size)) {
869 		p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size));
870 		memcpy(p, initial_boot_params, size);
871 		initial_boot_params = (struct boot_param_header *)p;
872 		DBG("Moved device tree to 0x%p\n", p);
873 	}
874 
875 	DBG("<- move_device_tree\n");
876 }
877 
878 /**
879  * Unflattens the device tree passed by the firmware, creating the
880  * tree of struct device_node. It also fills in the "name" and "type"
881  * pointers of the nodes so the normal device-tree walking functions
882  * can be used (this used to be done by finish_device_tree)
883  */
884 void __init unflatten_device_tree(void)
885 {
886 	unsigned long start, mem, size;
887 	struct device_node **allnextp = &allnodes;
888 
889 	DBG(" -> unflatten_device_tree()\n");
890 
891 	/* First pass, scan for size */
892 	start = ((unsigned long)initial_boot_params) +
893 		initial_boot_params->off_dt_struct;
894 	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
895 	size = (size | 3) + 1;
896 
897 	DBG("  size is %lx, allocating...\n", size);
898 
899 	/* Allocate memory for the expanded device tree */
900 	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
901 	mem = (unsigned long) __va(mem);
902 
903 	((u32 *)mem)[size / 4] = 0xdeadbeef;
904 
905 	DBG("  unflattening %lx...\n", mem);
906 
907 	/* Second pass, do actual unflattening */
908 	start = ((unsigned long)initial_boot_params) +
909 		initial_boot_params->off_dt_struct;
910 	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
911 	if (*((u32 *)start) != OF_DT_END)
912 		printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
913 	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
914 		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
915 		       ((u32 *)mem)[size / 4] );
916 	*allnextp = NULL;
917 
918 	/* Get pointer to OF "/chosen" node for use everywhere */
919 	of_chosen = of_find_node_by_path("/chosen");
920 	if (of_chosen == NULL)
921 		of_chosen = of_find_node_by_path("/chosen@0");
922 
923 	DBG(" <- unflatten_device_tree()\n");
924 }
925 
926 /*
927  * ibm,pa-features is a per-cpu property that contains a string of
928  * attribute descriptors, each of which has a 2-byte header plus up
929  * to 254 bytes worth of processor attribute bits.  The first header
930  * byte specifies the number of bytes following the header.
931  * The second header byte is an "attribute-specifier" type, of which
932  * zero is the only currently-defined value.
933  * Implementation:  Pass in the byte and bit offset for the feature
934  * that we are interested in.  The function will return -1 if the
935  * pa-features property is missing, or a 1/0 to indicate if the feature
936  * is supported/not supported.  Note that the bit numbers are
937  * big-endian to match the definition in PAPR.
938  */
939 static struct ibm_pa_feature {
940 	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
941 	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
942 	unsigned char	pabyte;		/* byte number in ibm,pa-features */
943 	unsigned char	pabit;		/* bit number (big-endian) */
944 	unsigned char	invert;		/* if 1, pa bit set => clear feature */
945 } ibm_pa_features[] __initdata = {
946 	{0, PPC_FEATURE_HAS_MMU,	0, 0, 0},
947 	{0, PPC_FEATURE_HAS_FPU,	0, 1, 0},
948 	{CPU_FTR_SLB, 0,		0, 2, 0},
949 	{CPU_FTR_CTRL, 0,		0, 3, 0},
950 	{CPU_FTR_NOEXECUTE, 0,		0, 6, 0},
951 	{CPU_FTR_NODSISRALIGN, 0,	1, 1, 1},
952 #if 0
953 	/* put this back once we know how to test if firmware does 64k IO */
954 	{CPU_FTR_CI_LARGE_PAGE, 0,	1, 2, 0},
955 #endif
956 };
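
/*
 * Illustrative example (property contents invented): an ibm,pa-features
 * value of
 *
 *	0x06 0x00  0xf6 0x3f 0xc7 0x00 0x80 0x00
 *
 * is a 2-byte header (6 attribute bytes, type 0) followed by the
 * attribute bytes.  For the CPU_FTR_SLB entry above (byte 0, big-endian
 * bit 2) the test is (0xf6 >> (7 - 2)) & 1 == 1, so the feature is kept.
 */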
957 
958 static void __init check_cpu_pa_features(unsigned long node)
959 {
960 	unsigned char *pa_ftrs;
961 	unsigned long len, tablelen, i, bit;
962 
963 	pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
964 	if (pa_ftrs == NULL)
965 		return;
966 
967 	/* find descriptor with type == 0 */
968 	for (;;) {
969 		if (tablelen < 3)
970 			return;
971 		len = 2 + pa_ftrs[0];
972 		if (tablelen < len)
973 			return;		/* descriptor 0 not found */
974 		if (pa_ftrs[1] == 0)
975 			break;
976 		tablelen -= len;
977 		pa_ftrs += len;
978 	}
979 
980 	/* loop over bits we know about */
981 	for (i = 0; i < ARRAY_SIZE(ibm_pa_features); ++i) {
982 		struct ibm_pa_feature *fp = &ibm_pa_features[i];
983 
984 		if (fp->pabyte >= pa_ftrs[0])
985 			continue;
986 		bit = (pa_ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
987 		if (bit ^ fp->invert) {
988 			cur_cpu_spec->cpu_features |= fp->cpu_features;
989 			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
990 		} else {
991 			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
992 			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
993 		}
994 	}
995 }
996 
997 static int __init early_init_dt_scan_cpus(unsigned long node,
998 					  const char *uname, int depth,
999 					  void *data)
1000 {
1001 	static int logical_cpuid = 0;
1002 	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
1003 #ifdef CONFIG_ALTIVEC
1004 	u32 *prop;
1005 #endif
1006 	u32 *intserv;
1007 	int i, nthreads;
1008 	unsigned long len;
1009 	int found = 0;
1010 
1011 	/* We are scanning "cpu" nodes only */
1012 	if (type == NULL || strcmp(type, "cpu") != 0)
1013 		return 0;
1014 
1015 	/* Get physical cpuid */
1016 	intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
1017 	if (intserv) {
1018 		nthreads = len / sizeof(int);
1019 	} else {
1020 		intserv = of_get_flat_dt_prop(node, "reg", NULL);
1021 		nthreads = 1;
1022 	}
1023 
1024 	/*
1025 	 * Now see if any of these threads match our boot cpu.
1026 	 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
1027 	 */
1028 	for (i = 0; i < nthreads; i++) {
1029 		/*
1030 		 * version 2 of the kexec param format adds the phys cpuid of
1031 		 * booted proc.
1032 		 */
1033 		if (initial_boot_params && initial_boot_params->version >= 2) {
1034 			if (intserv[i] ==
1035 					initial_boot_params->boot_cpuid_phys) {
1036 				found = 1;
1037 				break;
1038 			}
1039 		} else {
1040 			/*
1041 			 * Check if it's the boot cpu; if so, set its hw index now.
1042 			 * Unfortunately this format did not support booting
1043 			 * off secondary threads.
1044 			 */
1045 			if (of_get_flat_dt_prop(node,
1046 					"linux,boot-cpu", NULL) != NULL) {
1047 				found = 1;
1048 				break;
1049 			}
1050 		}
1051 
1052 #ifdef CONFIG_SMP
1053 		/* logical cpu id is always 0 on UP kernels */
1054 		logical_cpuid++;
1055 #endif
1056 	}
1057 
1058 	if (found) {
1059 		DBG("boot cpu: logical %d physical %d\n", logical_cpuid,
1060 			intserv[i]);
1061 		boot_cpuid = logical_cpuid;
1062 		set_hard_smp_processor_id(boot_cpuid, intserv[i]);
1063 	}
1064 
1065 #ifdef CONFIG_ALTIVEC
1066 	/* Check if we have a VMX and, if so, update the CPU features */
1067 	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
1068 	if (prop && (*prop) > 0) {
1069 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1070 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1071 	}
1072 
1073 	/* Same goes for Apple's "altivec" property */
1074 	prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
1075 	if (prop) {
1076 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1077 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1078 	}
1079 #endif /* CONFIG_ALTIVEC */
1080 
1081 	check_cpu_pa_features(node);
1082 
1083 #ifdef CONFIG_PPC_PSERIES
1084 	if (nthreads > 1)
1085 		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
1086 	else
1087 		cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
1088 #endif
1089 
1090 	return 0;
1091 }
1092 
1093 static int __init early_init_dt_scan_chosen(unsigned long node,
1094 					    const char *uname, int depth, void *data)
1095 {
1096 	unsigned long *lprop;
1097 	unsigned long l;
1098 	char *p;
1099 
1100 	DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1101 
1102 	if (depth != 1 ||
1103 	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
1104 		return 0;
1105 
1106 #ifdef CONFIG_PPC64
1107 	/* check if iommu is forced on or off */
1108 	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
1109 		iommu_is_off = 1;
1110 	if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
1111 		iommu_force_on = 1;
1112 #endif
1113 
1114 	/* mem=x on the command line is the preferred mechanism */
1115  	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
1116  	if (lprop)
1117  		memory_limit = *lprop;
1118 
1119 #ifdef CONFIG_PPC64
1120  	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
1121  	if (lprop)
1122  		tce_alloc_start = *lprop;
1123  	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
1124  	if (lprop)
1125  		tce_alloc_end = *lprop;
1126 #endif
1127 
1128 #ifdef CONFIG_KEXEC
1129        lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
1130        if (lprop)
1131                crashk_res.start = *lprop;
1132 
1133        lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
1134        if (lprop)
1135                crashk_res.end = crashk_res.start + *lprop - 1;
1136 #endif
1137 
1138 	/* Retrieve the command line */
1139  	p = of_get_flat_dt_prop(node, "bootargs", &l);
1140 	if (p != NULL && l > 0)
1141 		strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
1142 
1143 #ifdef CONFIG_CMDLINE
1144 	if (l == 0 || (l == 1 && (*p) == 0))
1145 		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1146 #endif /* CONFIG_CMDLINE */
1147 
1148 	DBG("Command line is: %s\n", cmd_line);
1149 
1150 	/* break now */
1151 	return 1;
1152 }
1153 
1154 static int __init early_init_dt_scan_root(unsigned long node,
1155 					  const char *uname, int depth, void *data)
1156 {
1157 	u32 *prop;
1158 
1159 	if (depth != 0)
1160 		return 0;
1161 
1162 	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
1163 	dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1164 	DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1165 
1166 	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
1167 	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1168 	DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1169 
1170 	/* break now */
1171 	return 1;
1172 }
1173 
1174 static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1175 {
1176 	cell_t *p = *cellp;
1177 	unsigned long r;
1178 
1179 	/* Ignore cells beyond what fits in an unsigned long */
1180 	while (s > sizeof(unsigned long) / 4) {
1181 		p++;
1182 		s--;
1183 	}
1184 	r = *p++;
1185 #ifdef CONFIG_PPC64
1186 	if (s > 1) {
1187 		r <<= 32;
1188 		r |= *(p++);
1189 		s--;
1190 	}
1191 #endif
1192 
1193 	*cellp = p;
1194 	return r;
1195 }
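
/*
 * Illustrative example: with dt_root_addr_cells == 2, the cells
 * { 0x00000001, 0x80000000 } combine on ppc64 into the 64-bit value
 * 0x180000000; on ppc32 only the low cell (0x80000000) is kept, since
 * the leading cells that don't fit in an unsigned long are skipped.
 */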
1196 
1197 
1198 static int __init early_init_dt_scan_memory(unsigned long node,
1199 					    const char *uname, int depth, void *data)
1200 {
1201 	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
1202 	cell_t *reg, *endp;
1203 	unsigned long l;
1204 
1205 	/* We are scanning "memory" nodes only */
1206 	if (type == NULL) {
1207 		/*
1208 		 * The longtrail doesn't have a device_type on the
1209 		 * /memory node, so look for the node called /memory@0.
1210 		 */
1211 		if (depth != 1 || strcmp(uname, "memory@0") != 0)
1212 			return 0;
1213 	} else if (strcmp(type, "memory") != 0)
1214 		return 0;
1215 
1216 	reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
1217 	if (reg == NULL)
1218 		reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
1219 	if (reg == NULL)
1220 		return 0;
1221 
1222 	endp = reg + (l / sizeof(cell_t));
1223 
1224 	DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
1225 	    uname, l, reg[0], reg[1], reg[2], reg[3]);
1226 
1227 	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1228 		unsigned long base, size;
1229 
1230 		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1231 		size = dt_mem_next_cell(dt_root_size_cells, &reg);
1232 
1233 		if (size == 0)
1234 			continue;
1235 		DBG(" - %lx ,  %lx\n", base, size);
1236 #ifdef CONFIG_PPC64
1237 		if (iommu_is_off) {
1238 			if (base >= 0x80000000ul)
1239 				continue;
1240 			if ((base + size) > 0x80000000ul)
1241 				size = 0x80000000ul - base;
1242 		}
1243 #endif
1244 		lmb_add(base, size);
1245 	}
1246 	return 0;
1247 }
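
/*
 * Illustrative example (values invented): with the root node reporting
 * #address-cells = <2> and #size-cells = <1>, a node such as
 *
 *	memory@0 {
 *		device_type = "memory";
 *		reg = <0x0 0x00000000 0x40000000>;
 *	};
 *
 * is parsed above into base = 0 and size = 0x40000000 (1GB), which is
 * then handed to lmb_add().
 */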
1248 
1249 static void __init early_reserve_mem(void)
1250 {
1251 	u64 base, size;
1252 	u64 *reserve_map;
1253 	unsigned long self_base;
1254 	unsigned long self_size;
1255 
1256 	reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
1257 					initial_boot_params->off_mem_rsvmap);
1258 
1259 	/* before we do anything, let's reserve the dt blob */
1260 	self_base = __pa((unsigned long)initial_boot_params);
1261 	self_size = initial_boot_params->totalsize;
1262 	lmb_reserve(self_base, self_size);
1263 
1264 #ifdef CONFIG_PPC32
1265 	/*
1266 	 * Handle the case where we might be booting from an old kexec
1267 	 * image that set up the mem_rsvmap as pairs of 32-bit values
1268 	 */
1269 	if (*reserve_map > 0xffffffffull) {
1270 		u32 base_32, size_32;
1271 		u32 *reserve_map_32 = (u32 *)reserve_map;
1272 
1273 		while (1) {
1274 			base_32 = *(reserve_map_32++);
1275 			size_32 = *(reserve_map_32++);
1276 			if (size_32 == 0)
1277 				break;
1278 			/* skip if the reservation is for the blob */
1279 			if (base_32 == self_base && size_32 == self_size)
1280 				continue;
1281 			DBG("reserving: %x -> %x\n", base_32, size_32);
1282 			lmb_reserve(base_32, size_32);
1283 		}
1284 		return;
1285 	}
1286 #endif
1287 	while (1) {
1288 		base = *(reserve_map++);
1289 		size = *(reserve_map++);
1290 		if (size == 0)
1291 			break;
1292 		/* skip if the reservation is for the blob */
1293 		if (base == self_base && size == self_size)
1294 			continue;
1295 		DBG("reserving: %llx -> %llx\n", base, size);
1296 		lmb_reserve(base, size);
1297 	}
1298 
1299 #if 0
1300 	DBG("memory reserved, lmbs :\n");
1301       	lmb_dump_all();
1302 #endif
1303 }
1304 
1305 void __init early_init_devtree(void *params)
1306 {
1307 	DBG(" -> early_init_devtree()\n");
1308 
1309 	/* Setup flat device-tree pointer */
1310 	initial_boot_params = params;
1311 
1312 #ifdef CONFIG_PPC_RTAS
1313 	/* Some machines might need RTAS info for debugging; grab it now. */
1314 	of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
1315 #endif
1316 
1317 	/* Retrieve various bits of information from the /chosen node of the
1318 	 * device-tree, including the platform type, initrd location and
1319 	 * size, TCE reserve, and more ...
1320 	 */
1321 	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
1322 
1323 	/* Scan memory nodes and rebuild LMBs */
1324 	lmb_init();
1325 	of_scan_flat_dt(early_init_dt_scan_root, NULL);
1326 	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
1327 
1328 	/* Save command line for /proc/cmdline and then parse parameters */
1329 	strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
1330 	parse_early_param();
1331 
1332 	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
1333 	lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
1334 	reserve_kdump_trampoline();
1335 	reserve_crashkernel();
1336 	early_reserve_mem();
1337 
1338 	lmb_enforce_memory_limit(memory_limit);
1339 	lmb_analyze();
1340 
1341 	DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
1342 
1343 	/* We may need to relocate the flat tree; do it now.
1344 	 * FIXME .. and the initrd too? */
1345 	move_device_tree();
1346 
1347 	DBG("Scanning CPUs ...\n");
1348 
1349 	/* Retrieve CPU-related information from the flat tree
1350 	 * (altivec support, boot CPU ID, ...)
1351 	 */
1352 	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
1353 
1354 	DBG(" <- early_init_devtree()\n");
1355 }
1356 
1357 #undef printk
1358 
1359 int
1360 prom_n_addr_cells(struct device_node* np)
1361 {
1362 	int* ip;
1363 	do {
1364 		if (np->parent)
1365 			np = np->parent;
1366 		ip = (int *) get_property(np, "#address-cells", NULL);
1367 		if (ip != NULL)
1368 			return *ip;
1369 	} while (np->parent);
1370 	/* No #address-cells property for the root node, default to 1 */
1371 	return 1;
1372 }
1373 EXPORT_SYMBOL(prom_n_addr_cells);
1374 
1375 int
1376 prom_n_size_cells(struct device_node* np)
1377 {
1378 	int* ip;
1379 	do {
1380 		if (np->parent)
1381 			np = np->parent;
1382 		ip = (int *) get_property(np, "#size-cells", NULL);
1383 		if (ip != NULL)
1384 			return *ip;
1385 	} while (np->parent);
1386 	/* No #size-cells property for the root node, default to 1 */
1387 	return 1;
1388 }
1389 EXPORT_SYMBOL(prom_n_size_cells);
1390 
1391 /**
1392  * Work out the sense (active-low level / active-high edge)
1393  * of each interrupt from the device tree.
1394  */
1395 void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1396 {
1397 	struct device_node *np;
1398 	int i, j;
1399 
1400 	/* default to level-triggered */
1401 	memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);
1402 
1403 	for (np = allnodes; np != 0; np = np->allnext) {
1404 		for (j = 0; j < np->n_intrs; j++) {
1405 			i = np->intrs[j].line;
1406 			if (i >= off && i < max)
1407 				senses[i-off] = np->intrs[j].sense;
1408 		}
1409 	}
1410 }
1411 
1412 /**
1413  * Construct and return a list of the device_nodes with a given name.
1414  */
1415 struct device_node *find_devices(const char *name)
1416 {
1417 	struct device_node *head, **prevp, *np;
1418 
1419 	prevp = &head;
1420 	for (np = allnodes; np != 0; np = np->allnext) {
1421 		if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1422 			*prevp = np;
1423 			prevp = &np->next;
1424 		}
1425 	}
1426 	*prevp = NULL;
1427 	return head;
1428 }
1429 EXPORT_SYMBOL(find_devices);
1430 
1431 /**
1432  * Construct and return a list of the device_nodes with a given type.
1433  */
1434 struct device_node *find_type_devices(const char *type)
1435 {
1436 	struct device_node *head, **prevp, *np;
1437 
1438 	prevp = &head;
1439 	for (np = allnodes; np != 0; np = np->allnext) {
1440 		if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1441 			*prevp = np;
1442 			prevp = &np->next;
1443 		}
1444 	}
1445 	*prevp = NULL;
1446 	return head;
1447 }
1448 EXPORT_SYMBOL(find_type_devices);
1449 
1450 /**
1451  * Returns all nodes linked together
1452  */
1453 struct device_node *find_all_nodes(void)
1454 {
1455 	struct device_node *head, **prevp, *np;
1456 
1457 	prevp = &head;
1458 	for (np = allnodes; np != 0; np = np->allnext) {
1459 		*prevp = np;
1460 		prevp = &np->next;
1461 	}
1462 	*prevp = NULL;
1463 	return head;
1464 }
1465 EXPORT_SYMBOL(find_all_nodes);
1466 
1467 /** Checks if the given "compat" string matches one of the strings in
1468  * the device's "compatible" property
1469  */
1470 int device_is_compatible(struct device_node *device, const char *compat)
1471 {
1472 	const char* cp;
1473 	int cplen, l;
1474 
1475 	cp = (char *) get_property(device, "compatible", &cplen);
1476 	if (cp == NULL)
1477 		return 0;
1478 	while (cplen > 0) {
1479 		if (strncasecmp(cp, compat, strlen(compat)) == 0)
1480 			return 1;
1481 		l = strlen(cp) + 1;
1482 		cp += l;
1483 		cplen -= l;
1484 	}
1485 
1486 	return 0;
1487 }
1488 EXPORT_SYMBOL(device_is_compatible);
1489 
1490 
1491 /**
1492  * Indicates whether the root node has a given value in its
1493  * compatible property.
1494  */
1495 int machine_is_compatible(const char *compat)
1496 {
1497 	struct device_node *root;
1498 	int rc = 0;
1499 
1500 	root = of_find_node_by_path("/");
1501 	if (root) {
1502 		rc = device_is_compatible(root, compat);
1503 		of_node_put(root);
1504 	}
1505 	return rc;
1506 }
1507 EXPORT_SYMBOL(machine_is_compatible);
1508 
1509 /**
1510  * Construct and return a list of the device_nodes with a given type
1511  * and compatible property.
1512  */
1513 struct device_node *find_compatible_devices(const char *type,
1514 					    const char *compat)
1515 {
1516 	struct device_node *head, **prevp, *np;
1517 
1518 	prevp = &head;
1519 	for (np = allnodes; np != 0; np = np->allnext) {
1520 		if (type != NULL
1521 		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1522 			continue;
1523 		if (device_is_compatible(np, compat)) {
1524 			*prevp = np;
1525 			prevp = &np->next;
1526 		}
1527 	}
1528 	*prevp = NULL;
1529 	return head;
1530 }
1531 EXPORT_SYMBOL(find_compatible_devices);
1532 
1533 /**
1534  * Find the device_node with a given full_name.
1535  */
1536 struct device_node *find_path_device(const char *path)
1537 {
1538 	struct device_node *np;
1539 
1540 	for (np = allnodes; np != 0; np = np->allnext)
1541 		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1542 			return np;
1543 	return NULL;
1544 }
1545 EXPORT_SYMBOL(find_path_device);
1546 
1547 /*******
1548  *
1549  * New implementation of the OF "find" APIs: they return a refcounted
1550  * object; call of_node_put() when done.  The device tree and list
1551  * are protected by an rwlock.
1552  *
1553  * Note that property management will need some locking as well;
1554  * this isn't dealt with yet.
1555  *
1556  *******/
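
/*
 * Typical usage sketch for these iterators (illustrative only;
 * do_something() is a made-up callee):
 *
 *	struct device_node *np = NULL;
 *
 *	while ((np = of_find_node_by_type(np, "cpu")) != NULL)
 *		do_something(np);	/* previous node is put by the iterator */
 *
 * A node still held when such a loop exits early (e.g. via break) must
 * be released explicitly with of_node_put().
 */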
1557 
1558 /**
1559  *	of_find_node_by_name - Find a node by its "name" property
1560  *	@from:	The node to start searching from or NULL, the node
1561  *		you pass will not be searched, only the next one
1562  *		will; typically, you pass what the previous call
1563  *		returned. of_node_put() will be called on it
1564  *	@name:	The name string to match against
1565  *
1566  *	Returns a node pointer with refcount incremented, use
1567  *	of_node_put() on it when done.
1568  */
1569 struct device_node *of_find_node_by_name(struct device_node *from,
1570 	const char *name)
1571 {
1572 	struct device_node *np;
1573 
1574 	read_lock(&devtree_lock);
1575 	np = from ? from->allnext : allnodes;
1576 	for (; np != NULL; np = np->allnext)
1577 		if (np->name != NULL && strcasecmp(np->name, name) == 0
1578 		    && of_node_get(np))
1579 			break;
1580 	if (from)
1581 		of_node_put(from);
1582 	read_unlock(&devtree_lock);
1583 	return np;
1584 }
1585 EXPORT_SYMBOL(of_find_node_by_name);
1586 
1587 /**
1588  *	of_find_node_by_type - Find a node by its "device_type" property
1589  *	@from:	The node to start searching from or NULL, the node
1590  *		you pass will not be searched, only the next one
1591  *		will; typically, you pass what the previous call
1592  *		returned. of_node_put() will be called on it
1593  *	@type:	The type string to match against
1594  *
1595  *	Returns a node pointer with refcount incremented, use
1596  *	of_node_put() on it when done.
1597  */
1598 struct device_node *of_find_node_by_type(struct device_node *from,
1599 	const char *type)
1600 {
1601 	struct device_node *np;
1602 
1603 	read_lock(&devtree_lock);
1604 	np = from ? from->allnext : allnodes;
1605 	for (; np != 0; np = np->allnext)
1606 		if (np->type != 0 && strcasecmp(np->type, type) == 0
1607 		    && of_node_get(np))
1608 			break;
1609 	if (from)
1610 		of_node_put(from);
1611 	read_unlock(&devtree_lock);
1612 	return np;
1613 }
1614 EXPORT_SYMBOL(of_find_node_by_type);
1615 
1616 /**
1617  *	of_find_compatible_node - Find a node based on type and one of the
1618  *                                tokens in its "compatible" property
1619  *	@from:		The node to start searching from or NULL, the node
1620  *			you pass will not be searched, only the next one
1621  *			will; typically, you pass what the previous call
1622  *			returned. of_node_put() will be called on it
1623  *	@type:		The type string to match "device_type" or NULL to ignore
1624  *	@compatible:	The string to match to one of the tokens in the device
1625  *			"compatible" list.
1626  *
1627  *	Returns a node pointer with refcount incremented, use
1628  *	of_node_put() on it when done.
1629  */
1630 struct device_node *of_find_compatible_node(struct device_node *from,
1631 	const char *type, const char *compatible)
1632 {
1633 	struct device_node *np;
1634 
1635 	read_lock(&devtree_lock);
1636 	np = from ? from->allnext : allnodes;
1637 	for (; np != 0; np = np->allnext) {
1638 		if (type != NULL
1639 		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1640 			continue;
1641 		if (device_is_compatible(np, compatible) && of_node_get(np))
1642 			break;
1643 	}
1644 	if (from)
1645 		of_node_put(from);
1646 	read_unlock(&devtree_lock);
1647 	return np;
1648 }
1649 EXPORT_SYMBOL(of_find_compatible_node);
1650 
1651 /**
1652  *	of_find_node_by_path - Find a node matching a full OF path
1653  *	@path:	The full path to match
1654  *
1655  *	Returns a node pointer with refcount incremented, use
1656  *	of_node_put() on it when done.
1657  */
1658 struct device_node *of_find_node_by_path(const char *path)
1659 {
1660 	struct device_node *np = allnodes;
1661 
1662 	read_lock(&devtree_lock);
1663 	for (; np != 0; np = np->allnext) {
1664 		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1665 		    && of_node_get(np))
1666 			break;
1667 	}
1668 	read_unlock(&devtree_lock);
1669 	return np;
1670 }
1671 EXPORT_SYMBOL(of_find_node_by_path);
1672 
1673 /**
1674  *	of_find_node_by_phandle - Find a node given a phandle
1675  *	@handle:	phandle of the node to find
1676  *
1677  *	Returns a node pointer with refcount incremented, use
1678  *	of_node_put() on it when done.
1679  */
1680 struct device_node *of_find_node_by_phandle(phandle handle)
1681 {
1682 	struct device_node *np;
1683 
1684 	read_lock(&devtree_lock);
1685 	for (np = allnodes; np != 0; np = np->allnext)
1686 		if (np->linux_phandle == handle)
1687 			break;
1688 	if (np)
1689 		of_node_get(np);
1690 	read_unlock(&devtree_lock);
1691 	return np;
1692 }
1693 EXPORT_SYMBOL(of_find_node_by_phandle);
1694 
1695 /**
1696  *	of_find_all_nodes - Get next node in global list
1697  *	@prev:	Previous node or NULL to start iteration
1698  *		of_node_put() will be called on it
1699  *
1700  *	Returns a node pointer with refcount incremented, use
1701  *	of_node_put() on it when done.
1702  */
1703 struct device_node *of_find_all_nodes(struct device_node *prev)
1704 {
1705 	struct device_node *np;
1706 
1707 	read_lock(&devtree_lock);
1708 	np = prev ? prev->allnext : allnodes;
1709 	for (; np != 0; np = np->allnext)
1710 		if (of_node_get(np))
1711 			break;
1712 	if (prev)
1713 		of_node_put(prev);
1714 	read_unlock(&devtree_lock);
1715 	return np;
1716 }
1717 EXPORT_SYMBOL(of_find_all_nodes);
1718 
1719 /**
1720  *	of_get_parent - Get a node's parent if any
1721  *	@node:	Node to get parent
1722  *
1723  *	Returns a node pointer with refcount incremented, use
1724  *	of_node_put() on it when done.
1725  */
1726 struct device_node *of_get_parent(const struct device_node *node)
1727 {
1728 	struct device_node *np;
1729 
1730 	if (!node)
1731 		return NULL;
1732 
1733 	read_lock(&devtree_lock);
1734 	np = of_node_get(node->parent);
1735 	read_unlock(&devtree_lock);
1736 	return np;
1737 }
1738 EXPORT_SYMBOL(of_get_parent);
1739 
1740 /**
1741  *	of_get_next_child - Iterate over a node's children
1742  *	@node:	parent node
1743  *	@prev:	previous child of the parent node, or NULL to get first
1744  *
1745  *	Returns a node pointer with refcount incremented, use
1746  *	of_node_put() on it when done.
1747  */
1748 struct device_node *of_get_next_child(const struct device_node *node,
1749 	struct device_node *prev)
1750 {
1751 	struct device_node *next;
1752 
1753 	read_lock(&devtree_lock);
1754 	next = prev ? prev->sibling : node->child;
1755 	for (; next != 0; next = next->sibling)
1756 		if (of_node_get(next))
1757 			break;
1758 	if (prev)
1759 		of_node_put(prev);
1760 	read_unlock(&devtree_lock);
1761 	return next;
1762 }
1763 EXPORT_SYMBOL(of_get_next_child);
1764 
1765 /**
1766  *	of_node_get - Increment refcount of a node
1767  *	@node:	Node to inc refcount, NULL is supported to
1768  *		simplify writing of callers
1769  *
1770  *	Returns node.
1771  */
1772 struct device_node *of_node_get(struct device_node *node)
1773 {
1774 	if (node)
1775 		kref_get(&node->kref);
1776 	return node;
1777 }
1778 EXPORT_SYMBOL(of_node_get);
1779 
1780 static inline struct device_node * kref_to_device_node(struct kref *kref)
1781 {
1782 	return container_of(kref, struct device_node, kref);
1783 }
1784 
1785 /**
1786  *	of_node_release - release a dynamically allocated node
1787  *	@kref:  kref element of the node to be released
1788  *
1789  *	In of_node_put() this function is passed to kref_put()
1790  *	as the destructor.
1791  */
1792 static void of_node_release(struct kref *kref)
1793 {
1794 	struct device_node *node = kref_to_device_node(kref);
1795 	struct property *prop = node->properties;
1796 
1797 	if (!OF_IS_DYNAMIC(node))
1798 		return;
1799 	while (prop) {
1800 		struct property *next = prop->next;
1801 		kfree(prop->name);
1802 		kfree(prop->value);
1803 		kfree(prop);
1804 		prop = next;
1805 
1806 		if (!prop) {
1807 			prop = node->deadprops;
1808 			node->deadprops = NULL;
1809 		}
1810 	}
1811 	kfree(node->intrs);
1812 	kfree(node->full_name);
1813 	kfree(node->data);
1814 	kfree(node);
1815 }
1816 
1817 /**
1818  *	of_node_put - Decrement refcount of a node
1819  *	@node:	Node to dec refcount, NULL is supported to
1820  *		simplify writing of callers
1821  *
1822  */
1823 void of_node_put(struct device_node *node)
1824 {
1825 	if (node)
1826 		kref_put(&node->kref, of_node_release);
1827 }
1828 EXPORT_SYMBOL(of_node_put);
1829 
1830 /*
1831  * Plug a device node into the tree and global list.
1832  */
1833 void of_attach_node(struct device_node *np)
1834 {
1835 	write_lock(&devtree_lock);
1836 	np->sibling = np->parent->child;
1837 	np->allnext = allnodes;
1838 	np->parent->child = np;
1839 	allnodes = np;
1840 	write_unlock(&devtree_lock);
1841 }
1842 
1843 /*
1844  * "Unplug" a node from the device tree.  The caller must hold
1845  * a reference to the node.  The memory associated with the node
1846  * is not freed until its refcount goes to zero.
1847  */
1848 void of_detach_node(const struct device_node *np)
1849 {
1850 	struct device_node *parent;
1851 
1852 	write_lock(&devtree_lock);
1853 
1854 	parent = np->parent;
1855 
1856 	if (allnodes == np)
1857 		allnodes = np->allnext;
1858 	else {
1859 		struct device_node *prev;
1860 		for (prev = allnodes;
1861 		     prev->allnext != np;
1862 		     prev = prev->allnext)
1863 			;
1864 		prev->allnext = np->allnext;
1865 	}
1866 
1867 	if (parent->child == np)
1868 		parent->child = np->sibling;
1869 	else {
1870 		struct device_node *prevsib;
1871 		for (prevsib = np->parent->child;
1872 		     prevsib->sibling != np;
1873 		     prevsib = prevsib->sibling)
1874 			;
1875 		prevsib->sibling = np->sibling;
1876 	}
1877 
1878 	write_unlock(&devtree_lock);
1879 }
1880 
1881 #ifdef CONFIG_PPC_PSERIES
1882 /*
1883  * Fix up the uninitialized fields in a new device node:
1884  * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1885  *
1886  * A lot of boot-time code is duplicated here, because functions such
1887  * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1888  * slab allocator.
1889  *
1890  * This should probably be split up into smaller chunks.
1891  */
1892 
1893 static int of_finish_dynamic_node(struct device_node *node)
1894 {
1895 	struct device_node *parent = of_get_parent(node);
1896 	int err = 0;
1897 	phandle *ibm_phandle;
1898 
1899 	node->name = get_property(node, "name", NULL);
1900 	node->type = get_property(node, "device_type", NULL);
1901 
1902 	if (!parent) {
1903 		err = -ENODEV;
1904 		goto out;
1905 	}
1906 
1907 	/* We don't support that function on PowerMac, at least
1908 	 * not yet
1909 	 */
1910 	if (machine_is(powermac))
1911 		return -ENODEV;
1912 
1913 	/* fix up new node's linux_phandle field */
1914 	if ((ibm_phandle = (unsigned int *)get_property(node,
1915 							"ibm,phandle", NULL)))
1916 		node->linux_phandle = *ibm_phandle;
1917 
1918 out:
1919 	of_node_put(parent);
1920 	return err;
1921 }
1922 
1923 static int prom_reconfig_notifier(struct notifier_block *nb,
1924 				  unsigned long action, void *node)
1925 {
1926 	int err;
1927 
1928 	switch (action) {
1929 	case PSERIES_RECONFIG_ADD:
1930 		err = of_finish_dynamic_node(node);
1931 		if (!err)
1932 			finish_node(node, NULL, 0);
1933 		if (err < 0) {
1934 			printk(KERN_ERR "finish_node returned %d\n", err);
1935 			err = NOTIFY_BAD;
1936 		}
1937 		break;
1938 	default:
1939 		err = NOTIFY_DONE;
1940 		break;
1941 	}
1942 	return err;
1943 }
1944 
1945 static struct notifier_block prom_reconfig_nb = {
1946 	.notifier_call = prom_reconfig_notifier,
1947 	.priority = 10, /* This one needs to run first */
1948 };
1949 
1950 static int __init prom_reconfig_setup(void)
1951 {
1952 	return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
1953 }
1954 __initcall(prom_reconfig_setup);
1955 #endif
1956 
1957 struct property *of_find_property(struct device_node *np, const char *name,
1958 				  int *lenp)
1959 {
1960 	struct property *pp;
1961 
1962 	read_lock(&devtree_lock);
1963 	for (pp = np->properties; pp != 0; pp = pp->next)
1964 		if (strcmp(pp->name, name) == 0) {
1965 			if (lenp != 0)
1966 				*lenp = pp->length;
1967 			break;
1968 		}
1969 	read_unlock(&devtree_lock);
1970 
1971 	return pp;
1972 }
1973 
1974 /*
1975  * Find a property with a given name for a given node
1976  * and return the value.
1977  */
1978 unsigned char *get_property(struct device_node *np, const char *name,
1979 			    int *lenp)
1980 {
1981 	struct property *pp = of_find_property(np,name,lenp);
1982 	return pp ? pp->value : NULL;
1983 }
1984 EXPORT_SYMBOL(get_property);
1985 
1986 /*
1987  * Add a property to a node
1988  */
1989 int prom_add_property(struct device_node* np, struct property* prop)
1990 {
1991 	struct property **next;
1992 
1993 	prop->next = NULL;
1994 	write_lock(&devtree_lock);
1995 	next = &np->properties;
1996 	while (*next) {
1997 		if (strcmp(prop->name, (*next)->name) == 0) {
1998 			/* duplicate ! don't insert it */
1999 			write_unlock(&devtree_lock);
2000 			return -1;
2001 		}
2002 		next = &(*next)->next;
2003 	}
2004 	*next = prop;
2005 	write_unlock(&devtree_lock);
2006 
2007 #ifdef CONFIG_PROC_DEVICETREE
2008 	/* try to add to proc as well if it was initialized */
2009 	if (np->pde)
2010 		proc_device_tree_add_prop(np->pde, prop);
2011 #endif /* CONFIG_PROC_DEVICETREE */
2012 
2013 	return 0;
2014 }
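
/*
 * Hedged usage sketch (names invented): the property passed in must stay
 * allocated for the lifetime of the node, e.g.
 *
 *	static struct property my_prop = {
 *		.name	= "linux,my-example",
 *		.length	= sizeof("hello"),
 *		.value	= "hello",
 *	};
 *
 *	prom_add_property(np, &my_prop);
 */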
2015 
2016 /*
2017  * Remove a property from a node.  Note that we don't actually
2018  * remove it, since we have given out who-knows-how-many pointers
2019  * to the data using get-property.  Instead we just move the property
2020  * to the "dead properties" list, so it won't be found any more.
2021  */
2022 int prom_remove_property(struct device_node *np, struct property *prop)
2023 {
2024 	struct property **next;
2025 	int found = 0;
2026 
2027 	write_lock(&devtree_lock);
2028 	next = &np->properties;
2029 	while (*next) {
2030 		if (*next == prop) {
2031 			/* found the node */
2032 			*next = prop->next;
2033 			prop->next = np->deadprops;
2034 			np->deadprops = prop;
2035 			found = 1;
2036 			break;
2037 		}
2038 		next = &(*next)->next;
2039 	}
2040 	write_unlock(&devtree_lock);
2041 
2042 	if (!found)
2043 		return -ENODEV;
2044 
2045 #ifdef CONFIG_PROC_DEVICETREE
2046 	/* try to remove the proc node as well */
2047 	if (np->pde)
2048 		proc_device_tree_remove_prop(np->pde, prop);
2049 #endif /* CONFIG_PROC_DEVICETREE */
2050 
2051 	return 0;
2052 }
2053 
2054 /*
2055  * Update a property in a node.  Note that we don't actually
2056  * remove it, since we have given out who-knows-how-many pointers
2057  * to the data using get-property.  Instead we just move the property
2058  * to the "dead properties" list, and add the new property to the
2059  * property list
2060  */
2061 int prom_update_property(struct device_node *np,
2062 			 struct property *newprop,
2063 			 struct property *oldprop)
2064 {
2065 	struct property **next;
2066 	int found = 0;
2067 
2068 	write_lock(&devtree_lock);
2069 	next = &np->properties;
2070 	while (*next) {
2071 		if (*next == oldprop) {
2072 			/* found the node */
2073 			newprop->next = oldprop->next;
2074 			*next = newprop;
2075 			oldprop->next = np->deadprops;
2076 			np->deadprops = oldprop;
2077 			found = 1;
2078 			break;
2079 		}
2080 		next = &(*next)->next;
2081 	}
2082 	write_unlock(&devtree_lock);
2083 
2084 	if (!found)
2085 		return -ENODEV;
2086 
2087 #ifdef CONFIG_PROC_DEVICETREE
2088 	/* try to add to proc as well if it was initialized */
2089 	if (np->pde)
2090 		proc_device_tree_update_prop(np->pde, newprop, oldprop);
2091 #endif /* CONFIG_PROC_DEVICETREE */
2092 
2093 	return 0;
2094 }
2095 
2096 
2097 /* Find the device node for a given logical cpu number; also returns the cpu
2098  * local thread number (index in ibm,ppc-interrupt-server#s) if relevant and
2099  * asked for (non-NULL thread pointer).
2100  */
2101 struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
2102 {
2103 	int hardid;
2104 	struct device_node *np;
2105 
2106 	hardid = get_hard_smp_processor_id(cpu);
2107 
2108 	for_each_node_by_type(np, "cpu") {
2109 		u32 *intserv;
2110 		unsigned int plen, t;
2111 
2112 		/* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
2113 		 * fall back to the "reg" property and assume no threads
2114 		 */
2115 		intserv = (u32 *)get_property(np, "ibm,ppc-interrupt-server#s",
2116 					      &plen);
2117 		if (intserv == NULL) {
2118 			u32 *reg = (u32 *)get_property(np, "reg", NULL);
2119 			if (reg == NULL)
2120 				continue;
2121 			if (*reg == hardid) {
2122 				if (thread)
2123 					*thread = 0;
2124 				return np;
2125 			}
2126 		} else {
2127 			plen /= sizeof(u32);
2128 			for (t = 0; t < plen; t++) {
2129 				if (hardid == intserv[t]) {
2130 					if (thread)
2131 						*thread = t;
2132 					return np;
2133 				}
2134 			}
2135 		}
2136 	}
2137 	return NULL;
2138 }
2139 
2140 #ifdef DEBUG
2141 static struct debugfs_blob_wrapper flat_dt_blob;
2142 
2143 static int __init export_flat_device_tree(void)
2144 {
2145 	struct dentry *d;
2146 
2147 	d = debugfs_create_dir("powerpc", NULL);
2148 	if (!d)
2149 		return 1;
2150 
2151 	flat_dt_blob.data = initial_boot_params;
2152 	flat_dt_blob.size = initial_boot_params->totalsize;
2153 
2154 	d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
2155 				d, &flat_dt_blob);
2156 	if (!d)
2157 		return 1;
2158 
2159 	return 0;
2160 }
2161 __initcall(export_flat_device_tree);
2162 #endif
2163