xref: /openbmc/linux/arch/powerpc/kernel/prom.c (revision 54525552)
/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *    {engebret|bergner}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/debugfs.h>
#include <linux/irq.h>
#include <linux/memblock.h>

#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/mmu.h>
#include <asm/paca.h>
#include <asm/pgtable.h>
#include <asm/pci.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
#include <asm/pci-bridge.h>
#include <asm/phyp_dump.h>
#include <asm/kexec.h>
#include <mm/mmu_decl.h>

#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_PPC64
int __initdata iommu_is_off;
int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;
u64 ppc64_rma_size;
#endif
static phys_addr_t first_memblock_size;

static int __init early_parse_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = PAGE_ALIGN(memparse(p, &p));
	DBG("memory limit = 0x%llx\n", (unsigned long long)memory_limit);

	return 0;
}
early_param("mem", early_parse_mem);
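
/*
 * Illustrative sketch (not from the original source): with a hypothetical
 * "mem=512M" boot argument, memparse() returns 0x20000000 and PAGE_ALIGN()
 * leaves it unchanged, so memory_limit caps usable RAM at 512MB.  A bare
 * "mem=4096" is read as 4096 bytes and rounded up to a full page.
 */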

/**
 * move_device_tree - move tree to an unused area, if needed.
 *
 * The device tree may be allocated beyond our memory limit, or inside the
 * crash kernel region for kdump. If so, move it out of the way.
 */
static void __init move_device_tree(void)
{
	unsigned long start, size;
	void *p;

	DBG("-> move_device_tree\n");

	start = __pa(initial_boot_params);
	size = be32_to_cpu(initial_boot_params->totalsize);

	if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
			overlaps_crashkernel(start, size)) {
		p = __va(memblock_alloc(size, PAGE_SIZE));
		memcpy(p, initial_boot_params, size);
		initial_boot_params = (struct boot_param_header *)p;
		DBG("Moved device tree to 0x%p\n", p);
	}

	DBG("<- move_device_tree\n");
}

/*
 * ibm,pa-features is a per-cpu property that contains a string of
 * attribute descriptors, each of which has a 2 byte header plus up
 * to 254 bytes worth of processor attribute bits.  First header
 * byte specifies the number of bytes following the header.
 * Second header byte is an "attribute-specifier" type, of which
 * zero is the only currently-defined value.
 * Implementation:  Pass in the byte and bit offset for the feature
 * that we are interested in.  The function will return -1 if the
 * pa-features property is missing, or a 1/0 to indicate if the feature
 * is supported/not supported.  Note that the bit numbers are
 * big-endian to match the definition in PAPR.
 */
static struct ibm_pa_feature {
	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
	unsigned long	mmu_features;	/* MMU_FTR_xxx bit */
	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
	unsigned char	pabyte;		/* byte number in ibm,pa-features */
	unsigned char	pabit;		/* bit number (big-endian) */
	unsigned char	invert;		/* if 1, pa bit set => clear feature */
} ibm_pa_features[] __initdata = {
	{0, 0, PPC_FEATURE_HAS_MMU,	0, 0, 0},
	{0, 0, PPC_FEATURE_HAS_FPU,	0, 1, 0},
	{0, MMU_FTR_SLB, 0,		0, 2, 0},
	{CPU_FTR_CTRL, 0, 0,		0, 3, 0},
	{CPU_FTR_NOEXECUTE, 0, 0,	0, 6, 0},
	{CPU_FTR_NODSISRALIGN, 0, 0,	1, 1, 1},
	{0, MMU_FTR_CI_LARGE_PAGE, 0,	1, 2, 0},
	{CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 5, 0, 0},
};

static void __init scan_features(unsigned long node, unsigned char *ftrs,
				 unsigned long tablelen,
				 struct ibm_pa_feature *fp,
				 unsigned long ft_size)
{
	unsigned long i, len, bit;

	/* find descriptor with type == 0 */
	for (;;) {
		if (tablelen < 3)
			return;
		len = 2 + ftrs[0];
		if (tablelen < len)
			return;		/* descriptor 0 not found */
		if (ftrs[1] == 0)
			break;
		tablelen -= len;
		ftrs += len;
	}

	/* loop over bits we know about */
	for (i = 0; i < ft_size; ++i, ++fp) {
		if (fp->pabyte >= ftrs[0])
			continue;
		bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
		if (bit ^ fp->invert) {
			cur_cpu_spec->cpu_features |= fp->cpu_features;
			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
			cur_cpu_spec->mmu_features |= fp->mmu_features;
		} else {
			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
			cur_cpu_spec->mmu_features &= ~fp->mmu_features;
		}
	}
}
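
/*
 * Worked example (hypothetical property contents, for illustration only):
 * an ibm,pa-features value of { 0x02, 0x00, 0xc0, 0x80 } is a single
 * descriptor with length 2, type 0 and attribute bytes 0xc0, 0x80.
 * Byte 0 bits 0 and 1 (masks 0x80 and 0x40) are set, so scan_features()
 * keeps PPC_FEATURE_HAS_MMU and PPC_FEATURE_HAS_FPU; byte 1 bit 1 is
 * clear, and because that table entry has invert == 1, the clear bit
 * *sets* CPU_FTR_NODSISRALIGN.
 */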

static void __init check_cpu_pa_features(unsigned long node)
{
	unsigned char *pa_ftrs;
	unsigned long tablelen;

	pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
	if (pa_ftrs == NULL)
		return;

	scan_features(node, pa_ftrs, tablelen,
		      ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
}

#ifdef CONFIG_PPC_STD_MMU_64
static void __init check_cpu_slb_size(unsigned long node)
{
	u32 *slb_size_ptr;

	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
	if (slb_size_ptr != NULL) {
		mmu_slb_size = *slb_size_ptr;
		return;
	}
	slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
	if (slb_size_ptr != NULL) {
		mmu_slb_size = *slb_size_ptr;
	}
}
#else
#define check_cpu_slb_size(node) do { } while(0)
#endif
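
/*
 * For illustration (hypothetical device-tree fragment): a cpu node with
 * either spelling of the property,
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		slb-size = <32>;	// or: ibm,slb-size = <32>;
 *	};
 *
 * makes check_cpu_slb_size() set mmu_slb_size to 32; "slb-size" takes
 * precedence when both are present.
 */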

static struct feature_property {
	const char *name;
	u32 min_value;
	unsigned long cpu_feature;
	unsigned long cpu_user_ftr;
} feature_properties[] __initdata = {
#ifdef CONFIG_ALTIVEC
	{"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
	{"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	/* Yes, this _really_ is ibm,vmx == 2 to enable VSX */
	{"ibm,vmx", 2, CPU_FTR_VSX, PPC_FEATURE_HAS_VSX},
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC64
	{"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
	{"ibm,purr", 1, CPU_FTR_PURR, 0},
	{"ibm,spurr", 1, CPU_FTR_SPURR, 0},
#endif /* CONFIG_PPC64 */
};
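
/*
 * Illustrative sketch: given a hypothetical cpu node containing
 *
 *	ibm,vmx = <2>;
 *	ibm,dfp = <1>;
 *
 * check_cpu_feature_properties() below matches "ibm,vmx" against both
 * table rows (2 >= 1 and 2 >= 2), enabling Altivec and VSX, and matches
 * "ibm,dfp" once, setting PPC_FEATURE_HAS_DFP (assuming the relevant
 * CONFIG options are enabled).
 */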

#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
static inline void identical_pvr_fixup(unsigned long node)
{
	unsigned int pvr;
	char *model = of_get_flat_dt_prop(node, "model", NULL);

	/*
	 * Since 440GR(x)/440EP(x) processors have the same pvr,
	 * we check the "model" property and set bit 28 in the cur_cpu_spec
	 * pvr for the EP(x) processor version. This bit is always 0 in
	 * the "real" pvr. Then we call identify_cpu again with
	 * the new logical pvr to enable FPU support.
	 */
	if (model && strstr(model, "440EP")) {
		pvr = cur_cpu_spec->pvr_value | 0x8;
		identify_cpu(0, pvr);
		DBG("Using logical pvr %x for %s\n", pvr, model);
	}
}
#else
#define identical_pvr_fixup(node) do { } while(0)
#endif

static void __init check_cpu_feature_properties(unsigned long node)
{
	unsigned long i;
	struct feature_property *fp = feature_properties;
	const u32 *prop;

	for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) {
		prop = of_get_flat_dt_prop(node, fp->name, NULL);
		if (prop && *prop >= fp->min_value) {
			cur_cpu_spec->cpu_features |= fp->cpu_feature;
			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
		}
	}
}

static int __init early_init_dt_scan_cpus(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const u32 *prop;
	const u32 *intserv;
	int i, nthreads;
	unsigned long len;
	int found = -1;
	int found_thread = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Get physical cpuid */
	intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
	if (intserv) {
		nthreads = len / sizeof(int);
	} else {
		intserv = of_get_flat_dt_prop(node, "reg", NULL);
		nthreads = 1;
	}

	/*
	 * Now see if any of these threads match our boot cpu.
	 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
	 */
	for (i = 0; i < nthreads; i++) {
		/*
		 * Version 2 of the kexec param format adds the physical
		 * cpuid of the booted processor.
		 */
		if (initial_boot_params->version >= 2) {
			if (intserv[i] == initial_boot_params->boot_cpuid_phys) {
				found = boot_cpu_count;
				found_thread = i;
			}
		} else {
			/*
			 * Check if it's the boot-cpu and set its hw index
			 * now; unfortunately this format did not support
			 * booting from secondary threads.
			 */
			if (of_get_flat_dt_prop(node,
					"linux,boot-cpu", NULL) != NULL)
				found = boot_cpu_count;
		}
#ifdef CONFIG_SMP
		/* logical cpu id is always 0 on UP kernels */
		boot_cpu_count++;
#endif
	}

	if (found >= 0) {
		DBG("boot cpu: logical %d physical %d\n", found,
			intserv[found_thread]);
		boot_cpuid = found;
		set_hard_smp_processor_id(found, intserv[found_thread]);

		/*
		 * PAPR defines "logical" PVR values for cpus that
		 * meet various levels of the architecture:
		 * 0x0f000001	Architecture version 2.04
		 * 0x0f000002	Architecture version 2.05
		 * If the cpu-version property in the cpu node contains
		 * such a value, we call identify_cpu again with the
		 * logical PVR value in order to use the cpu feature
		 * bits appropriate for the architecture level.
		 *
		 * A POWER6 partition in "POWER6 architected" mode
		 * uses the 0x0f000002 PVR value; in POWER5+ mode
		 * it uses 0x0f000001.
		 */
		prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
		if (prop && (*prop & 0xff000000) == 0x0f000000)
			identify_cpu(0, *prop);

		identical_pvr_fixup(node);
	}

	check_cpu_feature_properties(node);
	check_cpu_pa_features(node);
	check_cpu_slb_size(node);

#ifdef CONFIG_PPC_PSERIES
	if (nthreads > 1)
		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
	else
		cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
#endif

	return 0;
}
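
/*
 * Hypothetical flat-tree fragment for the scan above (illustration only):
 *
 *	PowerPC,POWER6@0 {
 *		device_type = "cpu";
 *		ibm,ppc-interrupt-server#s = <0 1>;
 *		cpu-version = <0x0f000002>;	// logical PVR, arch 2.05
 *	};
 *
 * Two threads are counted; the one matching boot_cpuid_phys becomes the
 * boot cpu, and the 0x0f000002 value triggers the identify_cpu() call.
 */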

int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname,
					 int depth, void *data)
{
	unsigned long *lprop;

	/* Use common scan routine to determine if this is the chosen node */
	if (early_init_dt_scan_chosen(node, uname, depth, data) == 0)
		return 0;

#ifdef CONFIG_PPC64
	/* check if iommu is forced on or off */
	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
		iommu_is_off = 1;
	if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
		iommu_force_on = 1;
#endif

	/* mem=x on the command line is the preferred mechanism */
	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
	if (lprop)
		memory_limit = *lprop;

#ifdef CONFIG_PPC64
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
	if (lprop)
		tce_alloc_start = *lprop;
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
	if (lprop)
		tce_alloc_end = *lprop;
#endif

#ifdef CONFIG_KEXEC
	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
	if (lprop)
		crashk_res.start = *lprop;

	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
	if (lprop)
		crashk_res.end = crashk_res.start + *lprop - 1;
#endif

	/* break now */
	return 1;
}
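
/*
 * Sketch of a /chosen node consumed above (hypothetical values):
 *
 *	chosen {
 *		linux,memory-limit = <0x0 0x40000000>;		// 1GB cap
 *		linux,crashkernel-base = <0x0 0x02000000>;
 *		linux,crashkernel-size = <0x0 0x02000000>;
 *	};
 *
 * Each property is read through an unsigned long pointer, so the cell
 * layout has to match the kernel's word size (two cells per value on
 * ppc64, one on ppc32).
 */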

#ifdef CONFIG_PPC_PSERIES
/*
 * Interpret the ibm,dynamic-memory property in the
 * /ibm,dynamic-reconfiguration-memory node.
 * This contains a list of memory blocks along with NUMA affinity
 * information.
 */
static int __init early_init_dt_scan_drconf_memory(unsigned long node)
{
	__be32 *dm, *ls, *usm;
	unsigned long l, n, flags;
	u64 base, size, memblock_size;
	unsigned int is_kexec_kdump = 0, rngs;

	ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
	if (ls == NULL || l < dt_root_size_cells * sizeof(__be32))
		return 0;
	memblock_size = dt_mem_next_cell(dt_root_size_cells, &ls);

	dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
	if (dm == NULL || l < sizeof(__be32))
		return 0;

	n = *dm++;	/* number of entries */
	if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(__be32))
		return 0;

	/* check if this is a kexec/kdump kernel. */
	usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory",
						 &l);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		base = dt_mem_next_cell(dt_root_addr_cells, &dm);
		flags = dm[3];
		/* skip DRC index, pad, assoc. list index, flags */
		dm += 4;
		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((flags & 0x80) || !(flags & 0x8))
			continue;
		size = memblock_size;
		rngs = 1;
		if (is_kexec_kdump) {
			/*
			 * For each memblock in ibm,dynamic-memory, the
			 * corresponding entry in the linux,drconf-usable-memory
			 * property contains a counter followed by that many
			 * (base, size) pairs.  Read the counter from
			 * linux,drconf-usable-memory.
			 */
			rngs = dt_mem_next_cell(dt_root_size_cells, &usm);
			if (!rngs) /* there are no (base, size) pairs */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = dt_mem_next_cell(dt_root_addr_cells,
							 &usm);
				size = dt_mem_next_cell(dt_root_size_cells,
							 &usm);
			}
			if (iommu_is_off) {
				if (base >= 0x80000000ul)
					continue;
				if ((base + size) > 0x80000000ul)
					size = 0x80000000ul - base;
			}
			memblock_add(base, size);
		} while (--rngs);
	}
	memblock_dump_all();
	return 0;
}
#else
#define early_init_dt_scan_drconf_memory(node)	0
#endif /* CONFIG_PPC_PSERIES */
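
/*
 * Layout sketch of ibm,dynamic-memory as parsed above (cell counts for a
 * hypothetical 2-cell address; the format itself is defined by PAPR):
 *
 *	<N>					// number of entries
 *	<base-hi base-lo>			// dt_root_addr_cells of base
 *	<drc-index> <pad> <assoc-idx> <flags>	// 4 more cells per entry
 *	...					// N entries in total
 *
 * Entries with the reserved flag (0x80) set, or the assigned flag (0x8)
 * clear, are skipped.
 */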

static int __init early_init_dt_scan_memory_ppc(unsigned long node,
						const char *uname,
						int depth, void *data)
{
	if (depth == 1 &&
	    strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
		return early_init_dt_scan_drconf_memory(node);

	return early_init_dt_scan_memory(node, uname, depth, data);
}

void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
#ifdef CONFIG_PPC64
	if (iommu_is_off) {
		if (base >= 0x80000000ul)
			return;
		if ((base + size) > 0x80000000ul)
			size = 0x80000000ul - base;
	}
#endif
	/* Keep track of the beginning of memory -and- the size of
	 * the very first block in the device-tree, as it represents
	 * the RMA on ppc64 server processors.
	 */
	if (base < memstart_addr) {
		memstart_addr = base;
		first_memblock_size = size;
	}

	/* Add the chunk to the MEMBLOCK list */
	memblock_add(base, size);
}

void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	return __va(memblock_alloc(size, align));
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init early_init_dt_setup_initrd_arch(unsigned long start,
		unsigned long end)
{
	initrd_start = (unsigned long)__va(start);
	initrd_end = (unsigned long)__va(end);
	initrd_below_start_ok = 1;
}
#endif

static void __init early_reserve_mem(void)
{
	u64 base, size;
	u64 *reserve_map;
	unsigned long self_base;
	unsigned long self_size;

	reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
					initial_boot_params->off_mem_rsvmap);

	/* before we do anything, let's reserve the dt blob */
	self_base = __pa((unsigned long)initial_boot_params);
	self_size = initial_boot_params->totalsize;
	memblock_reserve(self_base, self_size);

#ifdef CONFIG_BLK_DEV_INITRD
	/* then reserve the initrd, if any */
	if (initrd_start && (initrd_end > initrd_start))
		memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
#endif /* CONFIG_BLK_DEV_INITRD */

#ifdef CONFIG_PPC32
	/*
	 * Handle the case where we might be booting from an old kexec
	 * image that set up the mem_rsvmap as pairs of 32-bit values
	 */
	if (*reserve_map > 0xffffffffull) {
		u32 base_32, size_32;
		u32 *reserve_map_32 = (u32 *)reserve_map;

		while (1) {
			base_32 = *(reserve_map_32++);
			size_32 = *(reserve_map_32++);
			if (size_32 == 0)
				break;
			/* skip if the reservation is for the blob */
			if (base_32 == self_base && size_32 == self_size)
				continue;
			DBG("reserving: %x -> %x\n", base_32, size_32);
			memblock_reserve(base_32, size_32);
		}
		return;
	}
#endif
	while (1) {
		base = *(reserve_map++);
		size = *(reserve_map++);
		if (size == 0)
			break;
		DBG("reserving: %llx -> %llx\n", base, size);
		memblock_reserve(base, size);
	}
}
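
/*
 * Reserve-map sketch (the pair format comes from the flattened device
 * tree blob; values are hypothetical): 64-bit (base, size) pairs ended
 * by a zero size,
 *
 *	0x0000000002000000 0x0000000000100000	// reserve 1MB at 32MB
 *	0x0000000000000000 0x0000000000000000	// terminator
 *
 * On ppc32 a valid base can never exceed 0xffffffff, so a first u64
 * above that is taken as two packed 32-bit entries from an old kexec
 * image and the map is re-read as u32 pairs.
 */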

#ifdef CONFIG_PHYP_DUMP
/**
 * phyp_dump_calculate_reserve_size() - reserve variable boot area, 5% or arg
 *
 * Function to find the largest size we need to reserve
 * during the early boot process.
 *
 * It either returns the boot-time parameter, if one was given, or the
 * larger of PHYP_DUMP_RMR_END and 5% of total memory rounded down to a
 * multiple of 256MB.
 */
static inline unsigned long phyp_dump_calculate_reserve_size(void)
{
	unsigned long tmp;

	if (phyp_dump_info->reserve_bootvar)
		return phyp_dump_info->reserve_bootvar;

	/* divide by 20 to get 5% of value */
	tmp = memblock_end_of_DRAM();
	do_div(tmp, 20);

	/* round it down to a multiple of 256MB */
	tmp = tmp & ~0x0FFFFFFFUL;

	return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END);
}
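
/*
 * Worked example (hypothetical machine): with 64GB of RAM,
 * memblock_end_of_DRAM() is 0x1000000000; dividing by 20 gives
 * 0xCCCCCCCC (about 3.2GB) and masking with ~0x0FFFFFFF rounds down
 * to 0xC0000000 (3GB, a multiple of 256MB), which is returned as long
 * as it exceeds PHYP_DUMP_RMR_END.
 */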

/**
 * phyp_dump_reserve_mem() - reserve all not-yet-dumped memory
 *
 * This routine may reserve memory regions in the kernel only
 * if the system is supported and a dump was taken in the last
 * boot instance, or if the hardware is supported and the
 * scratch area needs to be set up. In other instances it returns
 * without reserving anything. The memory in case of dump being
 * active is freed when the dump is collected (by userland tools).
 */
static void __init phyp_dump_reserve_mem(void)
{
	unsigned long base, size;
	unsigned long variable_reserve_size;

	if (!phyp_dump_info->phyp_dump_configured) {
		printk(KERN_ERR "Phyp-dump not supported on this hardware\n");
		return;
	}

	if (!phyp_dump_info->phyp_dump_at_boot) {
		printk(KERN_INFO "Phyp-dump disabled at boot time\n");
		return;
	}

	variable_reserve_size = phyp_dump_calculate_reserve_size();

	if (phyp_dump_info->phyp_dump_is_active) {
		/* Reserve *everything* above the RMR; the area is freed by
		 * userland tools. */
		base = variable_reserve_size;
		size = memblock_end_of_DRAM() - base;

		/* XXX crashed_ram_end is wrong, since it may be beyond
		 * the memory_limit, it will need to be adjusted. */
		memblock_reserve(base, size);

		phyp_dump_info->init_reserve_start = base;
		phyp_dump_info->init_reserve_size = size;
	} else {
		size = phyp_dump_info->cpu_state_size +
			phyp_dump_info->hpte_region_size +
			variable_reserve_size;
		base = memblock_end_of_DRAM() - size;
		memblock_reserve(base, size);
		phyp_dump_info->init_reserve_start = base;
		phyp_dump_info->init_reserve_size = size;
	}
}
#else
static inline void __init phyp_dump_reserve_mem(void) {}
#endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */

void __init early_init_devtree(void *params)
{
	phys_addr_t limit;

	DBG(" -> early_init_devtree(%p)\n", params);

	/* Setup flat device-tree pointer */
	initial_boot_params = params;

#ifdef CONFIG_PPC_RTAS
	/* Some machines might need RTAS info for debugging, grab it now. */
	of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
#endif

#ifdef CONFIG_PHYP_DUMP
	/* scan tree to see if dump occurred during last boot */
	of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
#endif

	/* Retrieve various pieces of information from the /chosen node of
	 * the device-tree, including the platform type, initrd location
	 * and size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen_ppc, NULL);

	/* Scan memory nodes and rebuild MEMBLOCKs */
	memblock_init();

	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
	setup_initial_memory_limit(memstart_addr, first_memblock_size);

	/* Save command line for /proc/cmdline and then parse parameters */
	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
	parse_early_param();

	/* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
	memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
	/* If relocatable, reserve first 32k for interrupt vectors etc. */
	if (PHYSICAL_START > MEMORY_START)
		memblock_reserve(MEMORY_START, 0x8000);
	reserve_kdump_trampoline();
	reserve_crashkernel();
	early_reserve_mem();
	phyp_dump_reserve_mem();

	limit = memory_limit;
	if (! limit) {
		phys_addr_t memsize;

		/* Ensure that total memory size is page-aligned, because
		 * otherwise mark_bootmem() gets upset. */
		memblock_analyze();
		memsize = memblock_phys_mem_size();
		if ((memsize & PAGE_MASK) != memsize)
			limit = memsize & PAGE_MASK;
	}
	memblock_enforce_memory_limit(limit);

	memblock_analyze();
	memblock_dump_all();

	DBG("Phys. mem: %llx\n", memblock_phys_mem_size());

	/* We may need to relocate the flat tree, do it now.
	 * FIXME .. and the initrd too? */
	move_device_tree();

	allocate_pacas();

	DBG("Scanning CPUs ...\n");

	/* Retrieve CPU-related information from the flat tree
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);

	DBG(" <- early_init_devtree()\n");
}

/*******
 *
 * New implementation of the OF "find" APIs, return a refcounted
 * object, call of_node_put() when done.  The device tree and list
 * are protected by a rw_lock.
 *
 * Note that property management will need some locking as well,
 * this isn't dealt with yet.
 *
 *******/

/**
 *	of_find_next_cache_node - Find a node's subsidiary cache
 *	@np:	node of type "cpu" or "cache"
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.  Caller should hold a reference
 *	to np.
 */
struct device_node *of_find_next_cache_node(struct device_node *np)
{
	struct device_node *child;
	const phandle *handle;

	handle = of_get_property(np, "l2-cache", NULL);
	if (!handle)
		handle = of_get_property(np, "next-level-cache", NULL);

	if (handle)
		return of_find_node_by_phandle(*handle);

	/* OF on pmac has nodes instead of properties named "l2-cache"
	 * beneath CPU nodes.
	 */
	if (!strcmp(np->type, "cpu"))
		for_each_child_of_node(np, child)
			if (!strcmp(child->type, "cache"))
				return child;

	return NULL;
}
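
/*
 * Usage sketch (illustrative only, error handling elided): walk the
 * cache hierarchy from a cpu node down to the last cache level,
 * dropping each reference once the next level has been looked up.
 *
 *	struct device_node *cache = of_find_next_cache_node(cpu_node);
 *
 *	while (cache) {
 *		struct device_node *next = of_find_next_cache_node(cache);
 *		of_node_put(cache);
 *		cache = next;
 *	}
 */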

#ifdef CONFIG_PPC_PSERIES
/*
 * Fix up the uninitialized fields in a new device node:
 * name, type and pci-specific fields
 */

static int of_finish_dynamic_node(struct device_node *node)
{
	struct device_node *parent = of_get_parent(node);
	int err = 0;
	const phandle *ibm_phandle;

	node->name = of_get_property(node, "name", NULL);
	node->type = of_get_property(node, "device_type", NULL);

	if (!node->name)
		node->name = "<NULL>";
	if (!node->type)
		node->type = "<NULL>";

	if (!parent) {
		err = -ENODEV;
		goto out;
	}

	/* We don't support that function on PowerMac, at least
	 * not yet
	 */
	if (machine_is(powermac)) {
		/* drop the parent reference taken above before bailing out */
		err = -ENODEV;
		goto out;
	}

	/* fix up new node's phandle field */
	if ((ibm_phandle = of_get_property(node, "ibm,phandle", NULL)))
		node->phandle = *ibm_phandle;

out:
	of_node_put(parent);
	return err;
}

static int prom_reconfig_notifier(struct notifier_block *nb,
				  unsigned long action, void *node)
{
	int err;

	switch (action) {
	case PSERIES_RECONFIG_ADD:
		err = of_finish_dynamic_node(node);
		if (err < 0) {
			printk(KERN_ERR "finish_node returned %d\n", err);
			err = NOTIFY_BAD;
		}
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block prom_reconfig_nb = {
	.notifier_call = prom_reconfig_notifier,
	.priority = 10, /* This one needs to run first */
};

static int __init prom_reconfig_setup(void)
{
	return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
}
__initcall(prom_reconfig_setup);
#endif

/* Find the device node for a given logical cpu number; also return the cpu's
 * local thread number (index in ibm,ppc-interrupt-server#s) if relevant and
 * asked for (thread non-NULL).
 */
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
	int hardid;
	struct device_node *np;

	hardid = get_hard_smp_processor_id(cpu);

	for_each_node_by_type(np, "cpu") {
		const u32 *intserv;
		unsigned int plen, t;

		/* Check for ibm,ppc-interrupt-server#s. If it doesn't exist,
		 * fall back to the "reg" property and assume no threads.
		 */
		intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
				&plen);
		if (intserv == NULL) {
			const u32 *reg = of_get_property(np, "reg", NULL);
			if (reg == NULL)
				continue;
			if (*reg == hardid) {
				if (thread)
					*thread = 0;
				return np;
			}
		} else {
			plen /= sizeof(u32);
			for (t = 0; t < plen; t++) {
				if (hardid == intserv[t]) {
					if (thread)
						*thread = t;
					return np;
				}
			}
		}
	}
	return NULL;
}
EXPORT_SYMBOL(of_get_cpu_node);
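
/*
 * Usage sketch (illustrative only): resolve the device node and thread
 * index for logical cpu 0, then drop the reference taken by the lookup.
 *
 *	unsigned int thread;
 *	struct device_node *np = of_get_cpu_node(0, &thread);
 *
 *	if (np) {
 *		pr_info("cpu0 is thread %u of %s\n", thread, np->full_name);
 *		of_node_put(np);
 *	}
 */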

#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
static struct debugfs_blob_wrapper flat_dt_blob;

static int __init export_flat_device_tree(void)
{
	struct dentry *d;

	flat_dt_blob.data = initial_boot_params;
	flat_dt_blob.size = initial_boot_params->totalsize;

	d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
				powerpc_debugfs_root, &flat_dt_blob);
	if (!d)
		return 1;

	return 0;
}
__initcall(export_flat_device_tree);
#endif