xref: /openbmc/linux/arch/powerpc/kernel/prom_init.c (revision b732539e)
1 /*
2  * Procedures for interfacing to Open Firmware.
3  *
4  * Paul Mackerras	August 1996.
5  * Copyright (C) 1996-2005 Paul Mackerras.
6  *
7  *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8  *    {engebret|bergner}@us.ibm.com
9  *
10  *      This program is free software; you can redistribute it and/or
11  *      modify it under the terms of the GNU General Public License
12  *      as published by the Free Software Foundation; either version
13  *      2 of the License, or (at your option) any later version.
14  */
15 
16 #undef DEBUG_PROM
17 
18 /* we cannot use FORTIFY as it brings in new symbols */
19 #define __NO_FORTIFY
20 
21 #include <stdarg.h>
22 #include <linux/kernel.h>
23 #include <linux/string.h>
24 #include <linux/init.h>
25 #include <linux/threads.h>
26 #include <linux/spinlock.h>
27 #include <linux/types.h>
28 #include <linux/pci.h>
29 #include <linux/proc_fs.h>
30 #include <linux/stringify.h>
31 #include <linux/delay.h>
32 #include <linux/initrd.h>
33 #include <linux/bitops.h>
34 #include <asm/prom.h>
35 #include <asm/rtas.h>
36 #include <asm/page.h>
37 #include <asm/processor.h>
38 #include <asm/irq.h>
39 #include <asm/io.h>
40 #include <asm/smp.h>
41 #include <asm/mmu.h>
42 #include <asm/pgtable.h>
43 #include <asm/iommu.h>
44 #include <asm/btext.h>
45 #include <asm/sections.h>
46 #include <asm/machdep.h>
47 #include <asm/opal.h>
48 #include <asm/asm-prototypes.h>
49 
50 #include <linux/linux_logo.h>
51 
52 /*
53  * Eventually bump that one up
54  */
55 #define DEVTREE_CHUNK_SIZE	0x100000
56 
57 /*
58  * This is the size of the local memory reserve map that gets copied
59  * into the boot params passed to the kernel. That size is totally
60  * flexible as the kernel just reads the list until it encounters an
61  * entry with size 0, so it can be changed without breaking binary
62  * compatibility
63  */
64 #define MEM_RESERVE_MAP_SIZE	8
65 
66 /*
67  * prom_init() is called very early on, before the kernel text
68  * and data have been mapped to KERNELBASE.  At this point the code
69  * is running at whatever address it has been loaded at.
70  * On ppc32 we compile with -mrelocatable, which means that references
71  * to extern and static variables get relocated automatically.
72  * ppc64 objects are always relocatable, we just need to relocate the
73  * TOC.
74  *
75  * Because OF may have mapped I/O devices into the area starting at
76  * KERNELBASE, particularly on CHRP machines, we can't safely call
77  * OF once the kernel has been mapped to KERNELBASE.  Therefore all
78  * OF calls must be done within prom_init().
79  *
80  * ADDR is used in calls to call_prom.  The 4th and following
81  * arguments to call_prom should be 32-bit values.
82  * On ppc64, 64 bit values are truncated to 32 bits (and
83  * fortunately don't get interpreted as two arguments).
84  */
85 #define ADDR(x)		(u32)(unsigned long)(x)
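
/*
 * For example, prom_print() below hands a string literal to OF as a 32-bit
 * cell by wrapping it in ADDR():
 *
 *	call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
 */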
86 
87 #ifdef CONFIG_PPC64
88 #define OF_WORKAROUNDS	0
89 #else
90 #define OF_WORKAROUNDS	of_workarounds
91 int of_workarounds;
92 #endif
93 
94 #define OF_WA_CLAIM	1	/* do phys/virt claim separately, then map */
95 #define OF_WA_LONGTRAIL	2	/* work around longtrail bugs */
96 
97 #define PROM_BUG() do {						\
98         prom_printf("kernel BUG at %s line 0x%x!\n",		\
99 		    __FILE__, __LINE__);			\
100         __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR);	\
101 } while (0)
102 
103 #ifdef DEBUG_PROM
104 #define prom_debug(x...)	prom_printf(x)
105 #else
106 #define prom_debug(x...)
107 #endif
108 
109 
110 typedef u32 prom_arg_t;
111 
112 struct prom_args {
113         __be32 service;
114         __be32 nargs;
115         __be32 nret;
116         __be32 args[10];
117 };
118 
119 struct prom_t {
120 	ihandle root;
121 	phandle chosen;
122 	int cpu;
123 	ihandle stdout;
124 	ihandle mmumap;
125 	ihandle memory;
126 };
127 
128 struct mem_map_entry {
129 	__be64	base;
130 	__be64	size;
131 };
132 
133 typedef __be32 cell_t;
134 
135 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
136 		    unsigned long r6, unsigned long r7, unsigned long r8,
137 		    unsigned long r9);
138 
139 #ifdef CONFIG_PPC64
140 extern int enter_prom(struct prom_args *args, unsigned long entry);
141 #else
142 static inline int enter_prom(struct prom_args *args, unsigned long entry)
143 {
144 	return ((int (*)(struct prom_args *))entry)(args);
145 }
146 #endif
147 
148 extern void copy_and_flush(unsigned long dest, unsigned long src,
149 			   unsigned long size, unsigned long offset);
150 
151 /* prom structure */
152 static struct prom_t __initdata prom;
153 
154 static unsigned long prom_entry __initdata;
155 
156 #define PROM_SCRATCH_SIZE 256
157 
158 static char __initdata of_stdout_device[256];
159 static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
160 
161 static unsigned long __initdata dt_header_start;
162 static unsigned long __initdata dt_struct_start, dt_struct_end;
163 static unsigned long __initdata dt_string_start, dt_string_end;
164 
165 static unsigned long __initdata prom_initrd_start, prom_initrd_end;
166 
167 #ifdef CONFIG_PPC64
168 static int __initdata prom_iommu_force_on;
169 static int __initdata prom_iommu_off;
170 static unsigned long __initdata prom_tce_alloc_start;
171 static unsigned long __initdata prom_tce_alloc_end;
172 #endif
173 
174 static bool prom_radix_disable __initdata = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
175 
176 struct platform_support {
177 	bool hash_mmu;
178 	bool radix_mmu;
179 	bool radix_gtse;
180 	bool xive;
181 };
182 
183 /* Platform codes are now obsolete in the kernel. They are only used within
184  * this file and will ultimately go away too. Feel free to change them if you
185  * need to; they are not shared with anything outside of this file anymore.
186  */
187 #define PLATFORM_PSERIES	0x0100
188 #define PLATFORM_PSERIES_LPAR	0x0101
189 #define PLATFORM_LPAR		0x0001
190 #define PLATFORM_POWERMAC	0x0400
191 #define PLATFORM_GENERIC	0x0500
192 #define PLATFORM_OPAL		0x0600
193 
194 static int __initdata of_platform;
195 
196 static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
197 
198 static unsigned long __initdata prom_memory_limit;
199 
200 static unsigned long __initdata alloc_top;
201 static unsigned long __initdata alloc_top_high;
202 static unsigned long __initdata alloc_bottom;
203 static unsigned long __initdata rmo_top;
204 static unsigned long __initdata ram_top;
205 
206 static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
207 static int __initdata mem_reserve_cnt;
208 
209 static cell_t __initdata regbuf[1024];
210 
211 static bool rtas_has_query_cpu_stopped;
212 
213 
214 /*
215  * Error results ... some OF calls will return "-1" on error, some
216  * will return 0, some will return either. To simplify, here are
217  * macros to use with any ihandle or phandle return value to check if
218  * it is valid
219  */
220 
221 #define PROM_ERROR		(-1u)
222 #define PHANDLE_VALID(p)	((p) != 0 && (p) != PROM_ERROR)
223 #define IHANDLE_VALID(i)	((i) != 0 && (i) != PROM_ERROR)
224 
225 
226 /* This is the one and *ONLY* place where we actually call open
227  * firmware.
228  */
229 
230 static int __init call_prom(const char *service, int nargs, int nret, ...)
231 {
232 	int i;
233 	struct prom_args args;
234 	va_list list;
235 
236 	args.service = cpu_to_be32(ADDR(service));
237 	args.nargs = cpu_to_be32(nargs);
238 	args.nret = cpu_to_be32(nret);
239 
240 	va_start(list, nret);
241 	for (i = 0; i < nargs; i++)
242 		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
243 	va_end(list);
244 
245 	for (i = 0; i < nret; i++)
246 		args.args[nargs+i] = 0;
247 
248 	if (enter_prom(&args, prom_entry) < 0)
249 		return PROM_ERROR;
250 
251 	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
252 }
253 
254 static int __init call_prom_ret(const char *service, int nargs, int nret,
255 				prom_arg_t *rets, ...)
256 {
257 	int i;
258 	struct prom_args args;
259 	va_list list;
260 
261 	args.service = cpu_to_be32(ADDR(service));
262 	args.nargs = cpu_to_be32(nargs);
263 	args.nret = cpu_to_be32(nret);
264 
265 	va_start(list, rets);
266 	for (i = 0; i < nargs; i++)
267 		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
268 	va_end(list);
269 
270 	for (i = 0; i < nret; i++)
271 		args.args[nargs+i] = 0;
272 
273 	if (enter_prom(&args, prom_entry) < 0)
274 		return PROM_ERROR;
275 
276 	if (rets != NULL)
277 		for (i = 1; i < nret; ++i)
278 			rets[i-1] = be32_to_cpu(args.args[nargs+i]);
279 
280 	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
281 }
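
/*
 * Rough sketch of what a call looks like on the wire: for
 * prom_getprop(node, "name", buf, len) the argument buffer handed to OF is
 *
 *	service = ADDR("getprop"), nargs = 4, nret = 1,
 *	args[0..3] = { node, ADDR("name"), buf, len },
 *	args[4]    = return value slot (property length, or -1 on error)
 *
 * and call_prom() returns be32_to_cpu(args[nargs]).
 */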
282 
283 
284 static void __init prom_print(const char *msg)
285 {
286 	const char *p, *q;
287 
288 	if (prom.stdout == 0)
289 		return;
290 
291 	for (p = msg; *p != 0; p = q) {
292 		for (q = p; *q != 0 && *q != '\n'; ++q)
293 			;
294 		if (q > p)
295 			call_prom("write", 3, 1, prom.stdout, p, q - p);
296 		if (*q == 0)
297 			break;
298 		++q;
299 		call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
300 	}
301 }
302 
303 
304 static void __init prom_print_hex(unsigned long val)
305 {
306 	int i, nibbles = sizeof(val)*2;
307 	char buf[sizeof(val)*2+1];
308 
309 	for (i = nibbles-1;  i >= 0;  i--) {
310 		buf[i] = (val & 0xf) + '0';
311 		if (buf[i] > '9')
312 			buf[i] += ('a'-'0'-10);
313 		val >>= 4;
314 	}
315 	buf[nibbles] = '\0';
316 	call_prom("write", 3, 1, prom.stdout, buf, nibbles);
317 }
318 
319 /* max number of decimal digits in an unsigned long */
320 #define UL_DIGITS 21
321 static void __init prom_print_dec(unsigned long val)
322 {
323 	int i, size;
324 	char buf[UL_DIGITS+1];
325 
326 	for (i = UL_DIGITS-1; i >= 0;  i--) {
327 		buf[i] = (val % 10) + '0';
328 		val = val/10;
329 		if (val == 0)
330 			break;
331 	}
332 	/* shift stuff down */
333 	size = UL_DIGITS - i;
334 	call_prom("write", 3, 1, prom.stdout, buf+i, size);
335 }
336 
337 static void __init prom_printf(const char *format, ...)
338 {
339 	const char *p, *q, *s;
340 	va_list args;
341 	unsigned long v;
342 	long vs;
343 
344 	va_start(args, format);
345 	for (p = format; *p != 0; p = q) {
346 		for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
347 			;
348 		if (q > p)
349 			call_prom("write", 3, 1, prom.stdout, p, q - p);
350 		if (*q == 0)
351 			break;
352 		if (*q == '\n') {
353 			++q;
354 			call_prom("write", 3, 1, prom.stdout,
355 				  ADDR("\r\n"), 2);
356 			continue;
357 		}
358 		++q;
359 		if (*q == 0)
360 			break;
361 		switch (*q) {
362 		case 's':
363 			++q;
364 			s = va_arg(args, const char *);
365 			prom_print(s);
366 			break;
367 		case 'x':
368 			++q;
369 			v = va_arg(args, unsigned long);
370 			prom_print_hex(v);
371 			break;
372 		case 'd':
373 			++q;
374 			vs = va_arg(args, int);
375 			if (vs < 0) {
376 				prom_print("-");
377 				vs = -vs;
378 			}
379 			prom_print_dec(vs);
380 			break;
381 		case 'l':
382 			++q;
383 			if (*q == 0)
384 				break;
385 			else if (*q == 'x') {
386 				++q;
387 				v = va_arg(args, unsigned long);
388 				prom_print_hex(v);
389 			} else if (*q == 'u') { /* '%lu' */
390 				++q;
391 				v = va_arg(args, unsigned long);
392 				prom_print_dec(v);
393 			} else if (*q == 'd') { /* %ld */
394 				++q;
395 				vs = va_arg(args, long);
396 				if (vs < 0) {
397 					prom_print("-");
398 					vs = -vs;
399 				}
400 				prom_print_dec(vs);
401 			}
402 			break;
403 		}
404 	}
405 	va_end(args);
406 }
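
/*
 * Note: this minimal printf only understands %s, %x, %d, %lx, %lu and %ld
 * (%x takes an unsigned long, and plain %u is not handled), and it
 * translates '\n' into "\r\n" for the OF console.
 */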
407 
408 
409 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
410 				unsigned long align)
411 {
412 
413 	if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
414 		/*
415 		 * Old OF requires we claim physical and virtual separately
416 		 * and then map explicitly (assuming virtual mode)
417 		 */
418 		int ret;
419 		prom_arg_t result;
420 
421 		ret = call_prom_ret("call-method", 5, 2, &result,
422 				    ADDR("claim"), prom.memory,
423 				    align, size, virt);
424 		if (ret != 0 || result == -1)
425 			return -1;
426 		ret = call_prom_ret("call-method", 5, 2, &result,
427 				    ADDR("claim"), prom.mmumap,
428 				    align, size, virt);
429 		if (ret != 0) {
430 			call_prom("call-method", 4, 1, ADDR("release"),
431 				  prom.memory, size, virt);
432 			return -1;
433 		}
434 		/* the 0x12 is M (coherence) + PP == read/write */
435 		call_prom("call-method", 6, 1,
436 			  ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
437 		return virt;
438 	}
439 	return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
440 			 (prom_arg_t)align);
441 }
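
/*
 * Usage note: the allocators below (alloc_up()/alloc_down()) always call
 * prom_claim(base, size, 0); on failure the result compares equal to
 * PROM_ERROR, which is what the callers check for (along with 0).
 */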
442 
443 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
444 {
445 	prom_print(reason);
446 	/* Do not call exit because it clears the screen on pmac;
447 	 * it also causes some sort of double-fault on early pmacs */
448 	if (of_platform == PLATFORM_POWERMAC)
449 		asm("trap\n");
450 
451 	/* ToDo: should put up an SRC here on pSeries */
452 	call_prom("exit", 0, 0);
453 
454 	for (;;)			/* should never get here */
455 		;
456 }
457 
458 
459 static int __init prom_next_node(phandle *nodep)
460 {
461 	phandle node;
462 
463 	if ((node = *nodep) != 0
464 	    && (*nodep = call_prom("child", 1, 1, node)) != 0)
465 		return 1;
466 	if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
467 		return 1;
468 	for (;;) {
469 		if ((node = call_prom("parent", 1, 1, node)) == 0)
470 			return 0;
471 		if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
472 			return 1;
473 	}
474 }
475 
476 static inline int prom_getprop(phandle node, const char *pname,
477 			       void *value, size_t valuelen)
478 {
479 	return call_prom("getprop", 4, 1, node, ADDR(pname),
480 			 (u32)(unsigned long) value, (u32) valuelen);
481 }
482 
483 static inline int prom_getproplen(phandle node, const char *pname)
484 {
485 	return call_prom("getproplen", 2, 1, node, ADDR(pname));
486 }
487 
488 static void add_string(char **str, const char *q)
489 {
490 	char *p = *str;
491 
492 	while (*q)
493 		*p++ = *q++;
494 	*p++ = ' ';
495 	*str = p;
496 }
497 
498 static char *tohex(unsigned int x)
499 {
500 	static char digits[] = "0123456789abcdef";
501 	static char result[9];
502 	int i;
503 
504 	result[8] = 0;
505 	i = 8;
506 	do {
507 		--i;
508 		result[i] = digits[x & 0xf];
509 		x >>= 4;
510 	} while (x != 0 && i > 0);
511 	return &result[i];
512 }
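
/*
 * For example, tohex(0x2a) returns "2a".  The result lives in a single
 * static buffer, so it must be consumed (as add_string() does) before the
 * next call.
 */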
513 
514 static int __init prom_setprop(phandle node, const char *nodename,
515 			       const char *pname, void *value, size_t valuelen)
516 {
517 	char cmd[256], *p;
518 
519 	if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
520 		return call_prom("setprop", 4, 1, node, ADDR(pname),
521 				 (u32)(unsigned long) value, (u32) valuelen);
522 
523 	/* gah... setprop doesn't work on longtrail, have to use interpret */
524 	p = cmd;
525 	add_string(&p, "dev");
526 	add_string(&p, nodename);
527 	add_string(&p, tohex((u32)(unsigned long) value));
528 	add_string(&p, tohex(valuelen));
529 	add_string(&p, tohex(ADDR(pname)));
530 	add_string(&p, tohex(strlen(pname)));
531 	add_string(&p, "property");
532 	*p = 0;
533 	return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
534 }
535 
536 /* We can't use the standard versions because of relocation headaches. */
537 #define isxdigit(c)	(('0' <= (c) && (c) <= '9') \
538 			 || ('a' <= (c) && (c) <= 'f') \
539 			 || ('A' <= (c) && (c) <= 'F'))
540 
541 #define isdigit(c)	('0' <= (c) && (c) <= '9')
542 #define islower(c)	('a' <= (c) && (c) <= 'z')
543 #define toupper(c)	(islower(c) ? ((c) - 'a' + 'A') : (c))
544 
545 static unsigned long prom_strtoul(const char *cp, const char **endp)
546 {
547 	unsigned long result = 0, base = 10, value;
548 
549 	if (*cp == '0') {
550 		base = 8;
551 		cp++;
552 		if (toupper(*cp) == 'X') {
553 			cp++;
554 			base = 16;
555 		}
556 	}
557 
558 	while (isxdigit(*cp) &&
559 	       (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
560 		result = result * base + value;
561 		cp++;
562 	}
563 
564 	if (endp)
565 		*endp = cp;
566 
567 	return result;
568 }
569 
570 static unsigned long prom_memparse(const char *ptr, const char **retptr)
571 {
572 	unsigned long ret = prom_strtoul(ptr, retptr);
573 	int shift = 0;
574 
575 	/*
576 	 * We can't use a switch here because GCC *may* generate a
577 	 * jump table which won't work, because we're not running at
578 	 * the address we're linked at.
579 	 */
580 	if ('G' == **retptr || 'g' == **retptr)
581 		shift = 30;
582 
583 	if ('M' == **retptr || 'm' == **retptr)
584 		shift = 20;
585 
586 	if ('K' == **retptr || 'k' == **retptr)
587 		shift = 10;
588 
589 	if (shift) {
590 		ret <<= shift;
591 		(*retptr)++;
592 	}
593 
594 	return ret;
595 }
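
/*
 * For example, a "mem=768M" command line option makes
 * prom_memparse("768M", ...) return 768 << 20 == 0x30000000 and leaves
 * *retptr pointing just past the 'M'.
 */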
596 
597 /*
598  * Early parsing of the command line passed to the kernel, used for
599  * "mem=x" and the options that affect the iommu
600  */
601 static void __init early_cmdline_parse(void)
602 {
603 	const char *opt;
604 
605 	char *p;
606 	int l = 0;
607 
608 	prom_cmd_line[0] = 0;
609 	p = prom_cmd_line;
610 	if ((long)prom.chosen > 0)
611 		l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
612 #ifdef CONFIG_CMDLINE
613 	if (l <= 0 || p[0] == '\0') /* dbl check */
614 		strlcpy(prom_cmd_line,
615 			CONFIG_CMDLINE, sizeof(prom_cmd_line));
616 #endif /* CONFIG_CMDLINE */
617 	prom_printf("command line: %s\n", prom_cmd_line);
618 
619 #ifdef CONFIG_PPC64
620 	opt = strstr(prom_cmd_line, "iommu=");
621 	if (opt) {
622 		prom_printf("iommu opt is: %s\n", opt);
623 		opt += 6;
624 		while (*opt && *opt == ' ')
625 			opt++;
626 		if (!strncmp(opt, "off", 3))
627 			prom_iommu_off = 1;
628 		else if (!strncmp(opt, "force", 5))
629 			prom_iommu_force_on = 1;
630 	}
631 #endif
632 	opt = strstr(prom_cmd_line, "mem=");
633 	if (opt) {
634 		opt += 4;
635 		prom_memory_limit = prom_memparse(opt, (const char **)&opt);
636 #ifdef CONFIG_PPC64
637 		/* Align to 16 MB == size of ppc64 large page */
638 		prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
639 #endif
640 	}
641 
642 	opt = strstr(prom_cmd_line, "disable_radix");
643 	if (opt) {
644 		opt += 13;
645 		if (*opt && *opt == '=') {
646 			bool val;
647 
648 			if (kstrtobool(++opt, &val))
649 				prom_radix_disable = false;
650 			else
651 				prom_radix_disable = val;
652 		} else
653 			prom_radix_disable = true;
654 	}
655 	if (prom_radix_disable)
656 		prom_debug("Radix disabled from cmdline\n");
657 }
658 
659 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
660 /*
661  * The architecture vector has an array of PVR mask/value pairs,
662  * followed by # option vectors - 1, followed by the option vectors.
663  *
664  * See prom.h for the definition of the bits specified in the
665  * architecture vector.
666  */
667 
668 /* Firmware expects the value to be n - 1, where n is the # of vectors */
669 #define NUM_VECTORS(n)		((n) - 1)
670 
671 /*
672  * Firmware expects 1 + n - 2, where n is the length of the option vector in
673  * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
674  */
675 #define VECTOR_LENGTH(n)	(1 + (n) - 2)
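
/*
 * For instance, option_vector1 below is a 3-byte packed struct, so its
 * length byte is VECTOR_LENGTH(3) == 2, and with six option vectors
 * .num_vectors is NUM_VECTORS(6) == 5.
 */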
676 
677 struct option_vector1 {
678 	u8 byte1;
679 	u8 arch_versions;
680 	u8 arch_versions3;
681 } __packed;
682 
683 struct option_vector2 {
684 	u8 byte1;
685 	__be16 reserved;
686 	__be32 real_base;
687 	__be32 real_size;
688 	__be32 virt_base;
689 	__be32 virt_size;
690 	__be32 load_base;
691 	__be32 min_rma;
692 	__be32 min_load;
693 	u8 min_rma_percent;
694 	u8 max_pft_size;
695 } __packed;
696 
697 struct option_vector3 {
698 	u8 byte1;
699 	u8 byte2;
700 } __packed;
701 
702 struct option_vector4 {
703 	u8 byte1;
704 	u8 min_vp_cap;
705 } __packed;
706 
707 struct option_vector5 {
708 	u8 byte1;
709 	u8 byte2;
710 	u8 byte3;
711 	u8 cmo;
712 	u8 associativity;
713 	u8 bin_opts;
714 	u8 micro_checkpoint;
715 	u8 reserved0;
716 	__be32 max_cpus;
717 	__be16 papr_level;
718 	__be16 reserved1;
719 	u8 platform_facilities;
720 	u8 reserved2;
721 	__be16 reserved3;
722 	u8 subprocessors;
723 	u8 byte22;
724 	u8 intarch;
725 	u8 mmu;
726 	u8 hash_ext;
727 	u8 radix_ext;
728 } __packed;
729 
730 struct option_vector6 {
731 	u8 reserved;
732 	u8 secondary_pteg;
733 	u8 os_name;
734 } __packed;
735 
736 struct ibm_arch_vec {
737 	struct { u32 mask, val; } pvrs[12];
738 
739 	u8 num_vectors;
740 
741 	u8 vec1_len;
742 	struct option_vector1 vec1;
743 
744 	u8 vec2_len;
745 	struct option_vector2 vec2;
746 
747 	u8 vec3_len;
748 	struct option_vector3 vec3;
749 
750 	u8 vec4_len;
751 	struct option_vector4 vec4;
752 
753 	u8 vec5_len;
754 	struct option_vector5 vec5;
755 
756 	u8 vec6_len;
757 	struct option_vector6 vec6;
758 } __packed;
759 
760 struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
761 	.pvrs = {
762 		{
763 			.mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
764 			.val  = cpu_to_be32(0x003a0000),
765 		},
766 		{
767 			.mask = cpu_to_be32(0xffff0000), /* POWER6 */
768 			.val  = cpu_to_be32(0x003e0000),
769 		},
770 		{
771 			.mask = cpu_to_be32(0xffff0000), /* POWER7 */
772 			.val  = cpu_to_be32(0x003f0000),
773 		},
774 		{
775 			.mask = cpu_to_be32(0xffff0000), /* POWER8E */
776 			.val  = cpu_to_be32(0x004b0000),
777 		},
778 		{
779 			.mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
780 			.val  = cpu_to_be32(0x004c0000),
781 		},
782 		{
783 			.mask = cpu_to_be32(0xffff0000), /* POWER8 */
784 			.val  = cpu_to_be32(0x004d0000),
785 		},
786 		{
787 			.mask = cpu_to_be32(0xffff0000), /* POWER9 */
788 			.val  = cpu_to_be32(0x004e0000),
789 		},
790 		{
791 			.mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
792 			.val  = cpu_to_be32(0x0f000005),
793 		},
794 		{
795 			.mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
796 			.val  = cpu_to_be32(0x0f000004),
797 		},
798 		{
799 			.mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
800 			.val  = cpu_to_be32(0x0f000003),
801 		},
802 		{
803 			.mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
804 			.val  = cpu_to_be32(0x0f000002),
805 		},
806 		{
807 			.mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
808 			.val  = cpu_to_be32(0x0f000001),
809 		},
810 	},
811 
812 	.num_vectors = NUM_VECTORS(6),
813 
814 	.vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
815 	.vec1 = {
816 		.byte1 = 0,
817 		.arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
818 				 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
819 		.arch_versions3 = OV1_PPC_3_00,
820 	},
821 
822 	.vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
823 	/* option vector 2: Open Firmware options supported */
824 	.vec2 = {
825 		.byte1 = OV2_REAL_MODE,
826 		.reserved = 0,
827 		.real_base = cpu_to_be32(0xffffffff),
828 		.real_size = cpu_to_be32(0xffffffff),
829 		.virt_base = cpu_to_be32(0xffffffff),
830 		.virt_size = cpu_to_be32(0xffffffff),
831 		.load_base = cpu_to_be32(0xffffffff),
832 		.min_rma = cpu_to_be32(512),		/* 512MB min RMA */
833 		.min_load = cpu_to_be32(0xffffffff),	/* full client load */
834 		.min_rma_percent = 0,	/* min RMA percentage of total RAM */
835 		.max_pft_size = 48,	/* max log_2(hash table size) */
836 	},
837 
838 	.vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
839 	/* option vector 3: processor options supported */
840 	.vec3 = {
841 		.byte1 = 0,			/* don't ignore, don't halt */
842 		.byte2 = OV3_FP | OV3_VMX | OV3_DFP,
843 	},
844 
845 	.vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
846 	/* option vector 4: IBM PAPR implementation */
847 	.vec4 = {
848 		.byte1 = 0,			/* don't halt */
849 		.min_vp_cap = OV4_MIN_ENT_CAP,	/* minimum VP entitled capacity */
850 	},
851 
852 	.vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
853 	/* option vector 5: PAPR/OF options */
854 	.vec5 = {
855 		.byte1 = 0,				/* don't ignore, don't halt */
856 		.byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
857 		OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
858 #ifdef CONFIG_PCI_MSI
859 		/* PCIe/MSI support.  Without MSI full PCIe is not supported */
860 		OV5_FEAT(OV5_MSI),
861 #else
862 		0,
863 #endif
864 		.byte3 = 0,
865 		.cmo =
866 #ifdef CONFIG_PPC_SMLPAR
867 		OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
868 #else
869 		0,
870 #endif
871 		.associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
872 		.bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
873 		.micro_checkpoint = 0,
874 		.reserved0 = 0,
875 		.max_cpus = cpu_to_be32(NR_CPUS),	/* number of cores supported */
876 		.papr_level = 0,
877 		.reserved1 = 0,
878 		.platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
879 		.reserved2 = 0,
880 		.reserved3 = 0,
881 		.subprocessors = 1,
882 		.byte22 = OV5_FEAT(OV5_DRMEM_V2),
883 		.intarch = 0,
884 		.mmu = 0,
885 		.hash_ext = 0,
886 		.radix_ext = 0,
887 	},
888 
889 	/* option vector 6: IBM PAPR hints */
890 	.vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
891 	.vec6 = {
892 		.reserved = 0,
893 		.secondary_pteg = 0,
894 		.os_name = OV6_LINUX,
895 	},
896 };
897 
898 /* Old method - ELF header with PT_NOTE sections only works on BE */
899 #ifdef __BIG_ENDIAN__
900 static struct fake_elf {
901 	Elf32_Ehdr	elfhdr;
902 	Elf32_Phdr	phdr[2];
903 	struct chrpnote {
904 		u32	namesz;
905 		u32	descsz;
906 		u32	type;
907 		char	name[8];	/* "PowerPC" */
908 		struct chrpdesc {
909 			u32	real_mode;
910 			u32	real_base;
911 			u32	real_size;
912 			u32	virt_base;
913 			u32	virt_size;
914 			u32	load_base;
915 		} chrpdesc;
916 	} chrpnote;
917 	struct rpanote {
918 		u32	namesz;
919 		u32	descsz;
920 		u32	type;
921 		char	name[24];	/* "IBM,RPA-Client-Config" */
922 		struct rpadesc {
923 			u32	lpar_affinity;
924 			u32	min_rmo_size;
925 			u32	min_rmo_percent;
926 			u32	max_pft_size;
927 			u32	splpar;
928 			u32	min_load;
929 			u32	new_mem_def;
930 			u32	ignore_me;
931 		} rpadesc;
932 	} rpanote;
933 } fake_elf = {
934 	.elfhdr = {
935 		.e_ident = { 0x7f, 'E', 'L', 'F',
936 			     ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
937 		.e_type = ET_EXEC,	/* yeah right */
938 		.e_machine = EM_PPC,
939 		.e_version = EV_CURRENT,
940 		.e_phoff = offsetof(struct fake_elf, phdr),
941 		.e_phentsize = sizeof(Elf32_Phdr),
942 		.e_phnum = 2
943 	},
944 	.phdr = {
945 		[0] = {
946 			.p_type = PT_NOTE,
947 			.p_offset = offsetof(struct fake_elf, chrpnote),
948 			.p_filesz = sizeof(struct chrpnote)
949 		}, [1] = {
950 			.p_type = PT_NOTE,
951 			.p_offset = offsetof(struct fake_elf, rpanote),
952 			.p_filesz = sizeof(struct rpanote)
953 		}
954 	},
955 	.chrpnote = {
956 		.namesz = sizeof("PowerPC"),
957 		.descsz = sizeof(struct chrpdesc),
958 		.type = 0x1275,
959 		.name = "PowerPC",
960 		.chrpdesc = {
961 			.real_mode = ~0U,	/* ~0 means "don't care" */
962 			.real_base = ~0U,
963 			.real_size = ~0U,
964 			.virt_base = ~0U,
965 			.virt_size = ~0U,
966 			.load_base = ~0U
967 		},
968 	},
969 	.rpanote = {
970 		.namesz = sizeof("IBM,RPA-Client-Config"),
971 		.descsz = sizeof(struct rpadesc),
972 		.type = 0x12759999,
973 		.name = "IBM,RPA-Client-Config",
974 		.rpadesc = {
975 			.lpar_affinity = 0,
976 			.min_rmo_size = 64,	/* in megabytes */
977 			.min_rmo_percent = 0,
978 			.max_pft_size = 48,	/* 2^48 bytes max PFT size */
979 			.splpar = 1,
980 			.min_load = ~0U,
981 			.new_mem_def = 0
982 		}
983 	}
984 };
985 #endif /* __BIG_ENDIAN__ */
986 
987 static int __init prom_count_smt_threads(void)
988 {
989 	phandle node;
990 	char type[64];
991 	unsigned int plen;
992 
993 	/* Pick up the first CPU node we can find */
994 	for (node = 0; prom_next_node(&node); ) {
995 		type[0] = 0;
996 		prom_getprop(node, "device_type", type, sizeof(type));
997 
998 		if (strcmp(type, "cpu"))
999 			continue;
1000 		/*
1001 		 * There is an entry for each smt thread, each entry being
1002 		 * 4 bytes long.  All cpus should have the same number of
1003 		 * smt threads, so return after finding the first.
1004 		 */
1005 		plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
1006 		if (plen == PROM_ERROR)
1007 			break;
1008 		plen >>= 2;
1009 		prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
1010 
1011 		/* Sanity check */
1012 		if (plen < 1 || plen > 64) {
1013 			prom_printf("Threads per core %lu out of bounds, assuming 1\n",
1014 				    (unsigned long)plen);
1015 			return 1;
1016 		}
1017 		return plen;
1018 	}
1019 	prom_debug("No threads found, assuming 1 per core\n");
1020 
1021 	return 1;
1022 
1023 }
1024 
1025 static void __init prom_parse_mmu_model(u8 val,
1026 					struct platform_support *support)
1027 {
1028 	switch (val) {
1029 	case OV5_FEAT(OV5_MMU_DYNAMIC):
1030 	case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
1031 		prom_debug("MMU - either supported\n");
1032 		support->radix_mmu = !prom_radix_disable;
1033 		support->hash_mmu = true;
1034 		break;
1035 	case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
1036 		prom_debug("MMU - radix only\n");
1037 		if (prom_radix_disable) {
1038 			/*
1039 			 * If we __have__ to do radix, we're better off ignoring
1040 			 * the command line rather than not booting.
1041 			 */
1042 			prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1043 		}
1044 		support->radix_mmu = true;
1045 		break;
1046 	case OV5_FEAT(OV5_MMU_HASH):
1047 		prom_debug("MMU - hash only\n");
1048 		support->hash_mmu = true;
1049 		break;
1050 	default:
1051 		prom_debug("Unknown mmu support option: 0x%x\n", val);
1052 		break;
1053 	}
1054 }
1055 
1056 static void __init prom_parse_xive_model(u8 val,
1057 					 struct platform_support *support)
1058 {
1059 	switch (val) {
1060 	case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
1061 		prom_debug("XIVE - either mode supported\n");
1062 		support->xive = true;
1063 		break;
1064 	case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
1065 		prom_debug("XIVE - exploitation mode supported\n");
1066 		support->xive = true;
1067 		break;
1068 	case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
1069 		prom_debug("XIVE - legacy mode supported\n");
1070 		break;
1071 	default:
1072 		prom_debug("Unknown xive support option: 0x%x\n", val);
1073 		break;
1074 	}
1075 }
1076 
1077 static void __init prom_parse_platform_support(u8 index, u8 val,
1078 					       struct platform_support *support)
1079 {
1080 	switch (index) {
1081 	case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
1082 		prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
1083 		break;
1084 	case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
1085 		if (val & OV5_FEAT(OV5_RADIX_GTSE)) {
1086 			prom_debug("Radix - GTSE supported\n");
1087 			support->radix_gtse = true;
1088 		}
1089 		break;
1090 	case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
1091 		prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
1092 				      support);
1093 		break;
1094 	}
1095 }
1096 
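/*
 * "ibm,arch-vec-5-platform-support" is a sequence of byte pairs: an index
 * into option vector 5 followed by the feature bits the platform supports
 * at that index.  The function below walks the property two bytes at a
 * time and feeds each pair to prom_parse_platform_support() above.
 */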
1097 static void __init prom_check_platform_support(void)
1098 {
1099 	struct platform_support supported = {
1100 		.hash_mmu = false,
1101 		.radix_mmu = false,
1102 		.radix_gtse = false,
1103 		.xive = false
1104 	};
1105 	int prop_len = prom_getproplen(prom.chosen,
1106 				       "ibm,arch-vec-5-platform-support");
1107 	if (prop_len > 1) {
1108 		int i;
1109 		u8 vec[prop_len];
1110 		prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1111 			   prop_len);
1112 		prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
1113 			     &vec, sizeof(vec));
1114 		for (i = 0; i < prop_len; i += 2) {
1115 			prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
1116 								  , vec[i]
1117 								  , vec[i + 1]);
1118 			prom_parse_platform_support(vec[i], vec[i + 1],
1119 						    &supported);
1120 		}
1121 	}
1122 
1123 	if (supported.radix_mmu && supported.radix_gtse &&
1124 	    IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
1125 		/* Radix preferred - but we require GTSE for now */
1126 		prom_debug("Asking for radix with GTSE\n");
1127 		ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
1128 		ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE);
1129 	} else if (supported.hash_mmu) {
1130 		/* Default to hash mmu (if we can) */
1131 		prom_debug("Asking for hash\n");
1132 		ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
1133 	} else {
1134 		/* We're probably on a legacy hypervisor */
1135 		prom_debug("Assuming legacy hash support\n");
1136 	}
1137 
1138 	if (supported.xive) {
1139 		prom_debug("Asking for XIVE\n");
1140 		ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
1141 	}
1142 }
1143 
1144 static void __init prom_send_capabilities(void)
1145 {
1146 	ihandle root;
1147 	prom_arg_t ret;
1148 	u32 cores;
1149 
1150 	/* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1151 	prom_check_platform_support();
1152 
1153 	root = call_prom("open", 1, 1, ADDR("/"));
1154 	if (root != 0) {
1155 		/* We need to tell the FW about the number of cores we support.
1156 		 *
1157 		 * To do that, we count the number of threads on the first core
1158 		 * (we assume this is the same for all cores) and use it to
1159 		 * divide NR_CPUS.
1160 		 */
1161 
1162 		cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
1163 		prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
1164 			    (unsigned long)cores, (unsigned long)NR_CPUS);
1165 
1166 		ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
1167 
1168 		/* try calling the ibm,client-architecture-support method */
1169 		prom_printf("Calling ibm,client-architecture-support...");
1170 		if (call_prom_ret("call-method", 3, 2, &ret,
1171 				  ADDR("ibm,client-architecture-support"),
1172 				  root,
1173 				  ADDR(&ibm_architecture_vec)) == 0) {
1174 			/* the call exists... */
1175 			if (ret)
1176 				prom_printf("\nWARNING: ibm,client-architecture"
1177 					    "-support call FAILED!\n");
1178 			call_prom("close", 1, 0, root);
1179 			prom_printf(" done\n");
1180 			return;
1181 		}
1182 		call_prom("close", 1, 0, root);
1183 		prom_printf(" not implemented\n");
1184 	}
1185 
1186 #ifdef __BIG_ENDIAN__
1187 	{
1188 		ihandle elfloader;
1189 
1190 		/* no ibm,client-architecture-support call, try the old way */
1191 		elfloader = call_prom("open", 1, 1,
1192 				      ADDR("/packages/elf-loader"));
1193 		if (elfloader == 0) {
1194 			prom_printf("couldn't open /packages/elf-loader\n");
1195 			return;
1196 		}
1197 		call_prom("call-method", 3, 1, ADDR("process-elf-header"),
1198 			  elfloader, ADDR(&fake_elf));
1199 		call_prom("close", 1, 0, elfloader);
1200 	}
1201 #endif /* __BIG_ENDIAN__ */
1202 }
1203 #endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1204 
1205 /*
1206  * Memory allocation strategy... our layout is normally:
1207  *
1208  *  at 14Mb or more we have vmlinux, then a gap and initrd.  In some
1209  *  rare cases, initrd might end up being before the kernel though.
1210  *  We assume this won't overwrite the final kernel at 0; we have no
1211  *  provision to handle that in this version, but it should hopefully
1212  *  never happen.
1213  *
1214  *  alloc_top is set to the top of RMO, eventually shrink down if the
1215  *  TCEs overlap
1216  *
1217  *  alloc_bottom is set to the top of kernel/initrd
1218  *
1219  *  from there, allocations are done this way: rtas is allocated
1220  *  topmost, and the device-tree is allocated from the bottom. We try
1221  *  to grow the device-tree allocation as we progress. If we can't,
1222  *  then we fail, we don't currently have a facility to restart
1223  *  elsewhere, but that shouldn't be necessary.
1224  *
1225  *  Note that calls to reserve_mem have to be done explicitly, memory
1226  *  allocated with either alloc_up or alloc_down isn't automatically
1227  *  reserved.
1228  */
1229 
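/*
 * A rough picture of the resulting layout (low to high addresses):
 *
 *	0 .. alloc_bottom	kernel image (and initrd when it sits in the RMO)
 *	alloc_bottom ..		device-tree and other alloc_up() allocations
 *	.. alloc_top		RTAS and other alloc_down() allocations
 *	alloc_top == rmo_top	top of the RMO (capped at 768MB in prom_init_mem())
 *	alloc_top_high		real top of RAM, used for the TCE tables
 */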
1230 
1231 /*
1232  * Allocates memory in the RMO upward from the kernel/initrd
1233  *
1234  * When align is 0, this is a special case, it means to allocate in place
1235  * at the current location of alloc_bottom or fail (that is basically
1236  * extending the previous allocation). Used for the device-tree flattening
1237  */
1238 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
1239 {
1240 	unsigned long base = alloc_bottom;
1241 	unsigned long addr = 0;
1242 
1243 	if (align)
1244 		base = _ALIGN_UP(base, align);
1245 	prom_debug("alloc_up(%x, %x)\n", size, align);
1246 	if (ram_top == 0)
1247 		prom_panic("alloc_up() called with mem not initialized\n");
1248 
1249 	if (align)
1250 		base = _ALIGN_UP(alloc_bottom, align);
1251 	else
1252 		base = alloc_bottom;
1253 
1254 	for(; (base + size) <= alloc_top;
1255 	    base = _ALIGN_UP(base + 0x100000, align)) {
1256 		prom_debug("    trying: 0x%x\n\r", base);
1257 		addr = (unsigned long)prom_claim(base, size, 0);
1258 		if (addr != PROM_ERROR && addr != 0)
1259 			break;
1260 		addr = 0;
1261 		if (align == 0)
1262 			break;
1263 	}
1264 	if (addr == 0)
1265 		return 0;
1266 	alloc_bottom = addr + size;
1267 
1268 	prom_debug(" -> %x\n", addr);
1269 	prom_debug("  alloc_bottom : %x\n", alloc_bottom);
1270 	prom_debug("  alloc_top    : %x\n", alloc_top);
1271 	prom_debug("  alloc_top_hi : %x\n", alloc_top_high);
1272 	prom_debug("  rmo_top      : %x\n", rmo_top);
1273 	prom_debug("  ram_top      : %x\n", ram_top);
1274 
1275 	return addr;
1276 }
1277 
1278 /*
1279  * Allocates memory downward, either from top of RMO, or if highmem
1280  * is set, from the top of RAM.  Note that this one doesn't handle
1281  * failures.  It does claim memory if highmem is not set.
1282  */
1283 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1284 				       int highmem)
1285 {
1286 	unsigned long base, addr = 0;
1287 
1288 	prom_debug("alloc_down(%x, %x, %s)\n", size, align,
1289 		   highmem ? "(high)" : "(low)");
1290 	if (ram_top == 0)
1291 		prom_panic("alloc_down() called with mem not initialized\n");
1292 
1293 	if (highmem) {
1294 		/* Carve out storage for the TCE table. */
1295 		addr = _ALIGN_DOWN(alloc_top_high - size, align);
1296 		if (addr <= alloc_bottom)
1297 			return 0;
1298 		/* Will we bump into the RMO? If yes, check that we
1299 		 * didn't overlap existing allocations there; if we did,
1300 		 * we are dead, we must be the first in town!
1301 		 */
1302 		if (addr < rmo_top) {
1303 			/* Good, we are first */
1304 			if (alloc_top == rmo_top)
1305 				alloc_top = rmo_top = addr;
1306 			else
1307 				return 0;
1308 		}
1309 		alloc_top_high = addr;
1310 		goto bail;
1311 	}
1312 
1313 	base = _ALIGN_DOWN(alloc_top - size, align);
1314 	for (; base > alloc_bottom;
1315 	     base = _ALIGN_DOWN(base - 0x100000, align))  {
1316 		prom_debug("    trying: 0x%x\n\r", base);
1317 		addr = (unsigned long)prom_claim(base, size, 0);
1318 		if (addr != PROM_ERROR && addr != 0)
1319 			break;
1320 		addr = 0;
1321 	}
1322 	if (addr == 0)
1323 		return 0;
1324 	alloc_top = addr;
1325 
1326  bail:
1327 	prom_debug(" -> %x\n", addr);
1328 	prom_debug("  alloc_bottom : %x\n", alloc_bottom);
1329 	prom_debug("  alloc_top    : %x\n", alloc_top);
1330 	prom_debug("  alloc_top_hi : %x\n", alloc_top_high);
1331 	prom_debug("  rmo_top      : %x\n", rmo_top);
1332 	prom_debug("  ram_top      : %x\n", ram_top);
1333 
1334 	return addr;
1335 }
1336 
1337 /*
1338  * Parse a "reg" cell
1339  */
1340 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1341 {
1342 	cell_t *p = *cellp;
1343 	unsigned long r = 0;
1344 
1345 	/* Ignore more than 2 cells */
1346 	while (s > sizeof(unsigned long) / 4) {
1347 		p++;
1348 		s--;
1349 	}
1350 	r = be32_to_cpu(*p++);
1351 #ifdef CONFIG_PPC64
1352 	if (s > 1) {
1353 		r <<= 32;
1354 		r |= be32_to_cpu(*(p++));
1355 	}
1356 #endif
1357 	*cellp = p;
1358 	return r;
1359 }
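
/*
 * For example, with two address cells on ppc64 the pair of cells
 * { 0x00000001, 0x00000000 } decodes to 0x100000000; on ppc32 only the
 * least significant cell is kept.
 */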
1360 
1361 /*
1362  * Very dumb function for adding to the memory reserve list, but
1363  * we don't need anything smarter at this point
1364  *
1365  * XXX Eventually check for collisions.  They should NEVER happen.
1366  * If problems seem to show up, it would be a good start to track
1367  * them down.
1368  */
1369 static void __init reserve_mem(u64 base, u64 size)
1370 {
1371 	u64 top = base + size;
1372 	unsigned long cnt = mem_reserve_cnt;
1373 
1374 	if (size == 0)
1375 		return;
1376 
1377 	/* We need to always keep one empty entry so that we
1378 	 * have our terminator with "size" set to 0 since we are
1379 	 * dumb and just copy this entire array to the boot params
1380 	 */
1381 	base = _ALIGN_DOWN(base, PAGE_SIZE);
1382 	top = _ALIGN_UP(top, PAGE_SIZE);
1383 	size = top - base;
1384 
1385 	if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1386 		prom_panic("Memory reserve map exhausted !\n");
1387 	mem_reserve_map[cnt].base = cpu_to_be64(base);
1388 	mem_reserve_map[cnt].size = cpu_to_be64(size);
1389 	mem_reserve_cnt = cnt + 1;
1390 }
1391 
1392 /*
1393  * Initialize the memory allocation mechanism, parse "memory" nodes and
1394  * obtain that way the top of memory and RMO to set up our local allocator
1395  */
1396 static void __init prom_init_mem(void)
1397 {
1398 	phandle node;
1399 	char *path, type[64];
1400 	unsigned int plen;
1401 	cell_t *p, *endp;
1402 	__be32 val;
1403 	u32 rac, rsc;
1404 
1405 	/*
1406 	 * We iterate the memory nodes to find
1407 	 * 1) top of RMO (first node)
1408 	 * 2) top of memory
1409 	 */
1410 	val = cpu_to_be32(2);
1411 	prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1412 	rac = be32_to_cpu(val);
1413 	val = cpu_to_be32(1);
1414 	prom_getprop(prom.root, "#size-cells", &val, sizeof(val));
1415 	rsc = be32_to_cpu(val);
1416 	prom_debug("root_addr_cells: %x\n", rac);
1417 	prom_debug("root_size_cells: %x\n", rsc);
1418 
1419 	prom_debug("scanning memory:\n");
1420 	path = prom_scratch;
1421 
1422 	for (node = 0; prom_next_node(&node); ) {
1423 		type[0] = 0;
1424 		prom_getprop(node, "device_type", type, sizeof(type));
1425 
1426 		if (type[0] == 0) {
1427 			/*
1428 			 * CHRP Longtrail machines have no device_type
1429 			 * on the memory node, so check the name instead...
1430 			 */
1431 			prom_getprop(node, "name", type, sizeof(type));
1432 		}
1433 		if (strcmp(type, "memory"))
1434 			continue;
1435 
1436 		plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1437 		if (plen > sizeof(regbuf)) {
1438 			prom_printf("memory node too large for buffer !\n");
1439 			plen = sizeof(regbuf);
1440 		}
1441 		p = regbuf;
1442 		endp = p + (plen / sizeof(cell_t));
1443 
1444 #ifdef DEBUG_PROM
1445 		memset(path, 0, PROM_SCRATCH_SIZE);
1446 		call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1447 		prom_debug("  node %s :\n", path);
1448 #endif /* DEBUG_PROM */
1449 
1450 		while ((endp - p) >= (rac + rsc)) {
1451 			unsigned long base, size;
1452 
1453 			base = prom_next_cell(rac, &p);
1454 			size = prom_next_cell(rsc, &p);
1455 
1456 			if (size == 0)
1457 				continue;
1458 			prom_debug("    %x %x\n", base, size);
1459 			if (base == 0 && (of_platform & PLATFORM_LPAR))
1460 				rmo_top = size;
1461 			if ((base + size) > ram_top)
1462 				ram_top = base + size;
1463 		}
1464 	}
1465 
1466 	alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1467 
1468 	/*
1469 	 * If prom_memory_limit is set we reduce the upper limits *except* for
1470 	 * alloc_top_high. This must be the real top of RAM so we can put
1471 	 * TCEs up there.
1472 	 */
1473 
1474 	alloc_top_high = ram_top;
1475 
1476 	if (prom_memory_limit) {
1477 		if (prom_memory_limit <= alloc_bottom) {
1478 			prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
1479 				prom_memory_limit);
1480 			prom_memory_limit = 0;
1481 		} else if (prom_memory_limit >= ram_top) {
1482 			prom_printf("Ignoring mem=%x >= ram_top.\n",
1483 				prom_memory_limit);
1484 			prom_memory_limit = 0;
1485 		} else {
1486 			ram_top = prom_memory_limit;
1487 			rmo_top = min(rmo_top, prom_memory_limit);
1488 		}
1489 	}
1490 
1491 	/*
1492 	 * Setup our top alloc point, that is top of RMO or top of
1493 	 * segment 0 when running non-LPAR.
1494 	 * Some RS64 machines have buggy firmware where claims up at
1495 	 * 1GB fail.  Cap at 768MB as a workaround.
1496 	 * Since 768MB is plenty of room, and we need to cap to something
1497 	 * reasonable on 32-bit, cap at 768MB on all machines.
1498 	 */
1499 	if (!rmo_top)
1500 		rmo_top = ram_top;
1501 	rmo_top = min(0x30000000ul, rmo_top);
1502 	alloc_top = rmo_top;
1503 	alloc_top_high = ram_top;
1504 
1505 	/*
1506 	 * Check if we have an initrd after the kernel but still inside
1507 	 * the RMO.  If we do, move our bottom point to after it.
1508 	 */
1509 	if (prom_initrd_start &&
1510 	    prom_initrd_start < rmo_top &&
1511 	    prom_initrd_end > alloc_bottom)
1512 		alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1513 
1514 	prom_printf("memory layout at init:\n");
1515 	prom_printf("  memory_limit : %x (16 MB aligned)\n", prom_memory_limit);
1516 	prom_printf("  alloc_bottom : %x\n", alloc_bottom);
1517 	prom_printf("  alloc_top    : %x\n", alloc_top);
1518 	prom_printf("  alloc_top_hi : %x\n", alloc_top_high);
1519 	prom_printf("  rmo_top      : %x\n", rmo_top);
1520 	prom_printf("  ram_top      : %x\n", ram_top);
1521 }
1522 
1523 static void __init prom_close_stdin(void)
1524 {
1525 	__be32 val;
1526 	ihandle stdin;
1527 
1528 	if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1529 		stdin = be32_to_cpu(val);
1530 		call_prom("close", 1, 0, stdin);
1531 	}
1532 }
1533 
1534 #ifdef CONFIG_PPC_POWERNV
1535 
1536 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1537 static u64 __initdata prom_opal_base;
1538 static u64 __initdata prom_opal_entry;
1539 #endif
1540 
1541 /*
1542  * Allocate room for and instantiate OPAL
1543  */
1544 static void __init prom_instantiate_opal(void)
1545 {
1546 	phandle opal_node;
1547 	ihandle opal_inst;
1548 	u64 base, entry;
1549 	u64 size = 0, align = 0x10000;
1550 	__be64 val64;
1551 	u32 rets[2];
1552 
1553 	prom_debug("prom_instantiate_opal: start...\n");
1554 
1555 	opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal"));
1556 	prom_debug("opal_node: %x\n", opal_node);
1557 	if (!PHANDLE_VALID(opal_node))
1558 		return;
1559 
1560 	val64 = 0;
1561 	prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64));
1562 	size = be64_to_cpu(val64);
1563 	if (size == 0)
1564 		return;
1565 	val64 = 0;
1566 	prom_getprop(opal_node, "opal-runtime-alignment", &val64,sizeof(val64));
1567 	align = be64_to_cpu(val64);
1568 
1569 	base = alloc_down(size, align, 0);
1570 	if (base == 0) {
1571 		prom_printf("OPAL allocation failed !\n");
1572 		return;
1573 	}
1574 
1575 	opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal"));
1576 	if (!IHANDLE_VALID(opal_inst)) {
1577 		prom_printf("opening opal package failed (%x)\n", opal_inst);
1578 		return;
1579 	}
1580 
1581 	prom_printf("instantiating opal at 0x%x...", base);
1582 
1583 	if (call_prom_ret("call-method", 4, 3, rets,
1584 			  ADDR("load-opal-runtime"),
1585 			  opal_inst,
1586 			  base >> 32, base & 0xffffffff) != 0
1587 	    || (rets[0] == 0 && rets[1] == 0)) {
1588 		prom_printf(" failed\n");
1589 		return;
1590 	}
1591 	entry = (((u64)rets[0]) << 32) | rets[1];
1592 
1593 	prom_printf(" done\n");
1594 
1595 	reserve_mem(base, size);
1596 
1597 	prom_debug("opal base     = 0x%x\n", base);
1598 	prom_debug("opal align    = 0x%x\n", align);
1599 	prom_debug("opal entry    = 0x%x\n", entry);
1600 	prom_debug("opal size     = 0x%x\n", (long)size);
1601 
1602 	prom_setprop(opal_node, "/ibm,opal", "opal-base-address",
1603 		     &base, sizeof(base));
1604 	prom_setprop(opal_node, "/ibm,opal", "opal-entry-address",
1605 		     &entry, sizeof(entry));
1606 
1607 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1608 	prom_opal_base = base;
1609 	prom_opal_entry = entry;
1610 #endif
1611 	prom_debug("prom_instantiate_opal: end...\n");
1612 }
1613 
1614 #endif /* CONFIG_PPC_POWERNV */
1615 
1616 /*
1617  * Allocate room for and instantiate RTAS
1618  */
1619 static void __init prom_instantiate_rtas(void)
1620 {
1621 	phandle rtas_node;
1622 	ihandle rtas_inst;
1623 	u32 base, entry = 0;
1624 	__be32 val;
1625 	u32 size = 0;
1626 
1627 	prom_debug("prom_instantiate_rtas: start...\n");
1628 
1629 	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1630 	prom_debug("rtas_node: %x\n", rtas_node);
1631 	if (!PHANDLE_VALID(rtas_node))
1632 		return;
1633 
1634 	val = 0;
1635 	prom_getprop(rtas_node, "rtas-size", &val, sizeof(val));
1636 	size = be32_to_cpu(val);
1637 	if (size == 0)
1638 		return;
1639 
1640 	base = alloc_down(size, PAGE_SIZE, 0);
1641 	if (base == 0)
1642 		prom_panic("Could not allocate memory for RTAS\n");
1643 
1644 	rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1645 	if (!IHANDLE_VALID(rtas_inst)) {
1646 		prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1647 		return;
1648 	}
1649 
1650 	prom_printf("instantiating rtas at 0x%x...", base);
1651 
1652 	if (call_prom_ret("call-method", 3, 2, &entry,
1653 			  ADDR("instantiate-rtas"),
1654 			  rtas_inst, base) != 0
1655 	    || entry == 0) {
1656 		prom_printf(" failed\n");
1657 		return;
1658 	}
1659 	prom_printf(" done\n");
1660 
1661 	reserve_mem(base, size);
1662 
1663 	val = cpu_to_be32(base);
1664 	prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1665 		     &val, sizeof(val));
1666 	val = cpu_to_be32(entry);
1667 	prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1668 		     &val, sizeof(val));
1669 
1670 	/* Check if it supports "query-cpu-stopped-state" */
1671 	if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1672 			 &val, sizeof(val)) != PROM_ERROR)
1673 		rtas_has_query_cpu_stopped = true;
1674 
1675 	prom_debug("rtas base     = 0x%x\n", base);
1676 	prom_debug("rtas entry    = 0x%x\n", entry);
1677 	prom_debug("rtas size     = 0x%x\n", (long)size);
1678 
1679 	prom_debug("prom_instantiate_rtas: end...\n");
1680 }
1681 
1682 #ifdef CONFIG_PPC64
1683 /*
1684  * Allocate room for and instantiate Stored Measurement Log (SML)
1685  */
1686 static void __init prom_instantiate_sml(void)
1687 {
1688 	phandle ibmvtpm_node;
1689 	ihandle ibmvtpm_inst;
1690 	u32 entry = 0, size = 0, succ = 0;
1691 	u64 base;
1692 	__be32 val;
1693 
1694 	prom_debug("prom_instantiate_sml: start...\n");
1695 
1696 	ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1697 	prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1698 	if (!PHANDLE_VALID(ibmvtpm_node))
1699 		return;
1700 
1701 	ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1702 	if (!IHANDLE_VALID(ibmvtpm_inst)) {
1703 		prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1704 		return;
1705 	}
1706 
1707 	if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
1708 			 &val, sizeof(val)) != PROM_ERROR) {
1709 		if (call_prom_ret("call-method", 2, 2, &succ,
1710 				  ADDR("reformat-sml-to-efi-alignment"),
1711 				  ibmvtpm_inst) != 0 || succ == 0) {
1712 			prom_printf("Reformat SML to EFI alignment failed\n");
1713 			return;
1714 		}
1715 
1716 		if (call_prom_ret("call-method", 2, 2, &size,
1717 				  ADDR("sml-get-allocated-size"),
1718 				  ibmvtpm_inst) != 0 || size == 0) {
1719 			prom_printf("SML get allocated size failed\n");
1720 			return;
1721 		}
1722 	} else {
1723 		if (call_prom_ret("call-method", 2, 2, &size,
1724 				  ADDR("sml-get-handover-size"),
1725 				  ibmvtpm_inst) != 0 || size == 0) {
1726 			prom_printf("SML get handover size failed\n");
1727 			return;
1728 		}
1729 	}
1730 
1731 	base = alloc_down(size, PAGE_SIZE, 0);
1732 	if (base == 0)
1733 		prom_panic("Could not allocate memory for sml\n");
1734 
1735 	prom_printf("instantiating sml at 0x%x...", base);
1736 
1737 	memset((void *)base, 0, size);
1738 
1739 	if (call_prom_ret("call-method", 4, 2, &entry,
1740 			  ADDR("sml-handover"),
1741 			  ibmvtpm_inst, size, base) != 0 || entry == 0) {
1742 		prom_printf("SML handover failed\n");
1743 		return;
1744 	}
1745 	prom_printf(" done\n");
1746 
1747 	reserve_mem(base, size);
1748 
1749 	prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
1750 		     &base, sizeof(base));
1751 	prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
1752 		     &size, sizeof(size));
1753 
1754 	prom_debug("sml base     = 0x%x\n", base);
1755 	prom_debug("sml size     = 0x%x\n", (long)size);
1756 
1757 	prom_debug("prom_instantiate_sml: end...\n");
1758 }
1759 
1760 /*
1761  * Allocate room for and initialize TCE tables
1762  */
1763 #ifdef __BIG_ENDIAN__
1764 static void __init prom_initialize_tce_table(void)
1765 {
1766 	phandle node;
1767 	ihandle phb_node;
1768 	char compatible[64], type[64], model[64];
1769 	char *path = prom_scratch;
1770 	u64 base, align;
1771 	u32 minalign, minsize;
1772 	u64 tce_entry, *tce_entryp;
1773 	u64 local_alloc_top, local_alloc_bottom;
1774 	u64 i;
1775 
1776 	if (prom_iommu_off)
1777 		return;
1778 
1779 	prom_debug("starting prom_initialize_tce_table\n");
1780 
1781 	/* Cache current top of allocs so we reserve a single block */
1782 	local_alloc_top = alloc_top_high;
1783 	local_alloc_bottom = local_alloc_top;
1784 
1785 	/* Search all nodes looking for PHBs. */
1786 	for (node = 0; prom_next_node(&node); ) {
1787 		compatible[0] = 0;
1788 		type[0] = 0;
1789 		model[0] = 0;
1790 		prom_getprop(node, "compatible",
1791 			     compatible, sizeof(compatible));
1792 		prom_getprop(node, "device_type", type, sizeof(type));
1793 		prom_getprop(node, "model", model, sizeof(model));
1794 
1795 		if ((type[0] == 0) || (strstr(type, "pci") == NULL))
1796 			continue;
1797 
1798 		/* Keep the old logic intact to avoid regression. */
1799 		if (compatible[0] != 0) {
1800 			if ((strstr(compatible, "python") == NULL) &&
1801 			    (strstr(compatible, "Speedwagon") == NULL) &&
1802 			    (strstr(compatible, "Winnipeg") == NULL))
1803 				continue;
1804 		} else if (model[0] != 0) {
1805 			if ((strstr(model, "ython") == NULL) &&
1806 			    (strstr(model, "peedwagon") == NULL) &&
1807 			    (strstr(model, "innipeg") == NULL))
1808 				continue;
1809 		}
1810 
1811 		if (prom_getprop(node, "tce-table-minalign", &minalign,
1812 				 sizeof(minalign)) == PROM_ERROR)
1813 			minalign = 0;
1814 		if (prom_getprop(node, "tce-table-minsize", &minsize,
1815 				 sizeof(minsize)) == PROM_ERROR)
1816 			minsize = 4UL << 20;
1817 
1818 		/*
1819 		 * Even though we read what OF wants, we just set the table
1820 		 * size to 4 MB.  This is enough to map 2GB of PCI DMA space.
1821 		 * By doing this, we avoid the pitfalls of trying to DMA to
1822 		 * MMIO space and the DMA alias hole.
1823 		 */
1824 		minsize = 4UL << 20;
1825 
1826 		/* Align to the greater of the align or size */
1827 		align = max(minalign, minsize);
1828 		base = alloc_down(minsize, align, 1);
1829 		if (base == 0)
1830 			prom_panic("ERROR, cannot find space for TCE table.\n");
1831 		if (base < local_alloc_bottom)
1832 			local_alloc_bottom = base;
1833 
1834 		/* It seems OF doesn't null-terminate the path :-( */
1835 		memset(path, 0, PROM_SCRATCH_SIZE);
1836 		/* Call OF to setup the TCE hardware */
1837 		if (call_prom("package-to-path", 3, 1, node,
1838 			      path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1839 			prom_printf("package-to-path failed\n");
1840 		}
1841 
1842 		/* Save away the TCE table attributes for later use. */
1843 		prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
1844 		prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
1845 
1846 		prom_debug("TCE table: %s\n", path);
1847 		prom_debug("\tnode = 0x%x\n", node);
1848 		prom_debug("\tbase = 0x%x\n", base);
1849 		prom_debug("\tsize = 0x%x\n", minsize);
1850 
1851 		/* Initialize the table to have a one-to-one mapping
1852 		 * over the allocated size.
1853 		 */
1854 		tce_entryp = (u64 *)base;
1855 		for (i = 0; i < (minsize >> 3); tce_entryp++, i++) {
1856 			tce_entry = (i << PAGE_SHIFT);
1857 			tce_entry |= 0x3;
1858 			*tce_entryp = tce_entry;
1859 		}
1860 
1861 		prom_printf("opening PHB %s", path);
1862 		phb_node = call_prom("open", 1, 1, path);
1863 		if (phb_node == 0)
1864 			prom_printf("... failed\n");
1865 		else
1866 			prom_printf("... done\n");
1867 
1868 		call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1869 			  phb_node, -1, minsize,
1870 			  (u32) base, (u32) (base >> 32));
1871 		call_prom("close", 1, 0, phb_node);
1872 	}
1873 
1874 	reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1875 
1876 	/* These are only really needed if there is a memory limit in
1877 	 * effect, but we don't know that yet, so export them always. */
1878 	prom_tce_alloc_start = local_alloc_bottom;
1879 	prom_tce_alloc_end = local_alloc_top;
1880 
1881 	/* Flag the first invalid entry */
1882 	prom_debug("ending prom_initialize_tce_table\n");
1883 }
1884 #endif /* __BIG_ENDIAN__ */
1885 #endif /* CONFIG_PPC64 */
1886 
1887 /*
1888  * With CHRP SMP we need to use the OF to start the other processors.
1889  * We can't wait until smp_boot_cpus (the OF is trashed by then)
1890  * so we have to put the processors into a holding pattern controlled
1891  * by the kernel (not OF) before we destroy the OF.
1892  *
1893  * This uses a chunk of low memory, puts some holding pattern
1894  * code there and sends the other processors off to there until
1895  * smp_boot_cpus tells them to do something.  The holding pattern
1896  * checks that address until its cpu # appears there; when it does,
1897  * that cpu jumps to __secondary_start().  smp_boot_cpus() takes care
1898  * of setting those values.
1899  *
1900  * We also use physical address 0x4 here to tell when a cpu
1901  * is in its holding pattern code.
1902  *
1903  * -- Cort
1904  */
1905 /*
1906  * We want to reference the copy of __secondary_hold_* in the
1907  * 0 - 0x100 address range
1908  */
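/*
 * That copy is made by the copy_and_flush(0, kbase, 0x100, 0) call in
 * prom_init(), so masking an address with 0xff picks out the low-memory
 * copy of the symbol.
 */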
1909 #define LOW_ADDR(x)	(((unsigned long) &(x)) & 0xff)
1910 
1911 static void __init prom_hold_cpus(void)
1912 {
1913 	unsigned long i;
1914 	phandle node;
1915 	char type[64];
1916 	unsigned long *spinloop
1917 		= (void *) LOW_ADDR(__secondary_hold_spinloop);
1918 	unsigned long *acknowledge
1919 		= (void *) LOW_ADDR(__secondary_hold_acknowledge);
1920 	unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1921 
1922 	/*
1923 	 * On pseries, if RTAS supports "query-cpu-stopped-state",
1924 	 * we skip this stage, the CPUs will be started by the
1925 	 * kernel using RTAS.
1926 	 */
1927 	if ((of_platform == PLATFORM_PSERIES ||
1928 	     of_platform == PLATFORM_PSERIES_LPAR) &&
1929 	    rtas_has_query_cpu_stopped) {
1930 		prom_printf("prom_hold_cpus: skipped\n");
1931 		return;
1932 	}
1933 
1934 	prom_debug("prom_hold_cpus: start...\n");
1935 	prom_debug("    1) spinloop       = 0x%x\n", (unsigned long)spinloop);
1936 	prom_debug("    1) *spinloop      = 0x%x\n", *spinloop);
1937 	prom_debug("    1) acknowledge    = 0x%x\n",
1938 		   (unsigned long)acknowledge);
1939 	prom_debug("    1) *acknowledge   = 0x%x\n", *acknowledge);
1940 	prom_debug("    1) secondary_hold = 0x%x\n", secondary_hold);
1941 
1942 	/* Set the common spinloop variable, so all of the secondary cpus
1943 	 * will block when they are awakened from their OF spinloop.
1944 	 * This must occur for both SMP and non-SMP kernels, since OF will
1945 	 * be trashed when we move the kernel.
1946 	 */
1947 	*spinloop = 0;
1948 
1949 	/* look for cpus */
1950 	for (node = 0; prom_next_node(&node); ) {
1951 		unsigned int cpu_no;
1952 		__be32 reg;
1953 
1954 		type[0] = 0;
1955 		prom_getprop(node, "device_type", type, sizeof(type));
1956 		if (strcmp(type, "cpu") != 0)
1957 			continue;
1958 
1959 		/* Skip non-configured cpus. */
1960 		if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1961 			if (strcmp(type, "okay") != 0)
1962 				continue;
1963 
1964 		reg = cpu_to_be32(-1); /* make sparse happy */
1965 		prom_getprop(node, "reg", &reg, sizeof(reg));
1966 		cpu_no = be32_to_cpu(reg);
1967 
1968 		prom_debug("cpu hw idx   = %lu\n", cpu_no);
1969 
1970 		/* Init the acknowledge var which will be reset by
1971 		 * the secondary cpu when it awakens from its OF
1972 		 * spinloop.
1973 		 */
1974 		*acknowledge = (unsigned long)-1;
1975 
1976 		if (cpu_no != prom.cpu) {
1977 			/* Primary Thread of non-boot cpu or any thread */
1978 			prom_printf("starting cpu hw idx %lu... ", cpu_no);
1979 			call_prom("start-cpu", 3, 0, node,
1980 				  secondary_hold, cpu_no);
1981 
1982 			for (i = 0; (i < 100000000) &&
1983 			     (*acknowledge == ((unsigned long)-1)); i++ )
1984 				mb();
1985 
1986 			if (*acknowledge == cpu_no)
1987 				prom_printf("done\n");
1988 			else
1989 				prom_printf("failed: %x\n", *acknowledge);
1990 		}
1991 #ifdef CONFIG_SMP
1992 		else
1993 			prom_printf("boot cpu hw idx %lu\n", cpu_no);
1994 #endif /* CONFIG_SMP */
1995 	}
1996 
1997 	prom_debug("prom_hold_cpus: end...\n");
1998 }
1999 
2000 
2001 static void __init prom_init_client_services(unsigned long pp)
2002 {
2003 	/* Get a handle to the prom entry point before anything else */
2004 	prom_entry = pp;
2005 
2006 	/* get a handle for the stdout device */
2007 	prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
2008 	if (!PHANDLE_VALID(prom.chosen))
2009 		prom_panic("cannot find chosen"); /* msg won't be printed :( */
2010 
2011 	/* get device tree root */
2012 	prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
2013 	if (!PHANDLE_VALID(prom.root))
2014 		prom_panic("cannot find device tree root"); /* msg won't be printed :( */
2015 
2016 	prom.mmumap = 0;
2017 }
2018 
2019 #ifdef CONFIG_PPC32
2020 /*
2021  * For really old powermacs, we need to map things we claim.
2022  * For that, we need the ihandle of the mmu.
2023  * Also, on the longtrail, we need to work around other bugs.
2024  */
2025 static void __init prom_find_mmu(void)
2026 {
2027 	phandle oprom;
2028 	char version[64];
2029 
2030 	oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
2031 	if (!PHANDLE_VALID(oprom))
2032 		return;
2033 	if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
2034 		return;
2035 	version[sizeof(version) - 1] = 0;
2036 	/* XXX might need to add other versions here */
2037 	if (strcmp(version, "Open Firmware, 1.0.5") == 0)
2038 		of_workarounds = OF_WA_CLAIM;
2039 	else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
2040 		of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
2041 		call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
2042 	} else
2043 		return;
2044 	prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
2045 	prom_getprop(prom.chosen, "mmu", &prom.mmumap,
2046 		     sizeof(prom.mmumap));
2047 	prom.mmumap = be32_to_cpu(prom.mmumap);
2048 	if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
2049 		of_workarounds &= ~OF_WA_CLAIM;		/* hmmm */
2050 }
2051 #else
2052 #define prom_find_mmu()
2053 #endif
2054 
2055 static void __init prom_init_stdout(void)
2056 {
2057 	char *path = of_stdout_device;
2058 	char type[16];
2059 	phandle stdout_node;
2060 	__be32 val;
2061 
2062 	if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
2063 		prom_panic("cannot find stdout");
2064 
2065 	prom.stdout = be32_to_cpu(val);
2066 
2067 	/* Get the full OF pathname of the stdout device */
2068 	memset(path, 0, 256);
2069 	call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
2070 	prom_printf("OF stdout device is: %s\n", of_stdout_device);
2071 	prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
2072 		     path, strlen(path) + 1);
2073 
2074 	/* instance-to-package fails on PA-Semi */
2075 	stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
2076 	if (stdout_node != PROM_ERROR) {
2077 		val = cpu_to_be32(stdout_node);
2078 		prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
2079 			     &val, sizeof(val));
2080 
2081 		/* If it's a display, note it */
2082 		memset(type, 0, sizeof(type));
2083 		prom_getprop(stdout_node, "device_type", type, sizeof(type));
2084 		if (strcmp(type, "display") == 0)
2085 			prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2086 	}
2087 }
2088 
2089 static int __init prom_find_machine_type(void)
2090 {
2091 	char compat[256];
2092 	int len, i = 0;
2093 #ifdef CONFIG_PPC64
2094 	phandle rtas;
2095 	int x;
2096 #endif
2097 
2098 	/* Look for a PowerMac or a Cell */
2099 	len = prom_getprop(prom.root, "compatible",
2100 			   compat, sizeof(compat)-1);
2101 	if (len > 0) {
2102 		compat[len] = 0;
2103 		while (i < len) {
2104 			char *p = &compat[i];
2105 			int sl = strlen(p);
2106 			if (sl == 0)
2107 				break;
2108 			if (strstr(p, "Power Macintosh") ||
2109 			    strstr(p, "MacRISC"))
2110 				return PLATFORM_POWERMAC;
2111 #ifdef CONFIG_PPC64
2112 			/* We must make sure we don't detect the IBM Cell
2113 			 * blades as pSeries due to some firmware issues,
2114 			 * so we do it here.
2115 			 */
2116 			if (strstr(p, "IBM,CBEA") ||
2117 			    strstr(p, "IBM,CPBW-1.0"))
2118 				return PLATFORM_GENERIC;
2119 #endif /* CONFIG_PPC64 */
2120 			i += sl + 1;
2121 		}
2122 	}
2123 #ifdef CONFIG_PPC64
2124 	/* Try to detect OPAL */
2125 	if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal"))))
2126 		return PLATFORM_OPAL;
2127 
2128 	/* Try to figure out if it's an IBM pSeries or any other
2129 	 * PAPR-compliant platform. We assume it is if:
2130 	 *  - /device_type is "chrp" (please, do NOT use that for future
2131 	 *    non-IBM designs!)
2132 	 *  - it has /rtas
2133 	 */
2134 	len = prom_getprop(prom.root, "device_type",
2135 			   compat, sizeof(compat)-1);
2136 	if (len <= 0)
2137 		return PLATFORM_GENERIC;
2138 	if (strcmp(compat, "chrp"))
2139 		return PLATFORM_GENERIC;
2140 
2141 	/* Default to pSeries. We need to know if we are running LPAR */
2142 	rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2143 	if (!PHANDLE_VALID(rtas))
2144 		return PLATFORM_GENERIC;
2145 	x = prom_getproplen(rtas, "ibm,hypertas-functions");
2146 	if (x != PROM_ERROR) {
2147 		prom_debug("Hypertas detected, assuming LPAR !\n");
2148 		return PLATFORM_PSERIES_LPAR;
2149 	}
2150 	return PLATFORM_PSERIES;
2151 #else
2152 	return PLATFORM_GENERIC;
2153 #endif
2154 }
2155 
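/*
 * Set palette entry i of the given display instance via OF's "color!"
 * method.  Note the b, g, r order in which the color components are
 * handed to call_prom() here.
 */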
2156 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2157 {
2158 	return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2159 }
2160 
2161 /*
2162  * If we have a display that we don't know how to drive,
2163  * we will want to try to execute OF's open method for it
2164  * later.  However, OF will probably fall over if we do that after
2165  * we've taken over the MMU.
2166  * So we check whether we will need to open the display,
2167  * and if so, open it now.
2168  */
2169 static void __init prom_check_displays(void)
2170 {
2171 	char type[16], *path;
2172 	phandle node;
2173 	ihandle ih;
2174 	int i;
2175 
2176 	static unsigned char default_colors[] = {
2177 		0x00, 0x00, 0x00,
2178 		0x00, 0x00, 0xaa,
2179 		0x00, 0xaa, 0x00,
2180 		0x00, 0xaa, 0xaa,
2181 		0xaa, 0x00, 0x00,
2182 		0xaa, 0x00, 0xaa,
2183 		0xaa, 0xaa, 0x00,
2184 		0xaa, 0xaa, 0xaa,
2185 		0x55, 0x55, 0x55,
2186 		0x55, 0x55, 0xff,
2187 		0x55, 0xff, 0x55,
2188 		0x55, 0xff, 0xff,
2189 		0xff, 0x55, 0x55,
2190 		0xff, 0x55, 0xff,
2191 		0xff, 0xff, 0x55,
2192 		0xff, 0xff, 0xff
2193 	};
2194 	const unsigned char *clut;
2195 
2196 	prom_debug("Looking for displays\n");
2197 	for (node = 0; prom_next_node(&node); ) {
2198 		memset(type, 0, sizeof(type));
2199 		prom_getprop(node, "device_type", type, sizeof(type));
2200 		if (strcmp(type, "display") != 0)
2201 			continue;
2202 
2203 		/* It seems OF doesn't null-terminate the path :-( */
2204 		path = prom_scratch;
2205 		memset(path, 0, PROM_SCRATCH_SIZE);
2206 
2207 		/*
2208 		 * leave some room at the end of the path for appending extra
2209 		 * arguments
2210 		 */
2211 		if (call_prom("package-to-path", 3, 1, node, path,
2212 			      PROM_SCRATCH_SIZE-10) == PROM_ERROR)
2213 			continue;
2214 		prom_printf("found display   : %s, opening... ", path);
2215 
2216 		ih = call_prom("open", 1, 1, path);
2217 		if (ih == 0) {
2218 			prom_printf("failed\n");
2219 			continue;
2220 		}
2221 
2222 		/* Success */
2223 		prom_printf("done\n");
2224 		prom_setprop(node, path, "linux,opened", NULL, 0);
2225 
2226 		/* Set up a usable color table when the appropriate
2227 		 * method is available.  Should update this to use set-colors. */
2228 		clut = default_colors;
2229 		for (i = 0; i < 16; i++, clut += 3)
2230 			if (prom_set_color(ih, i, clut[0], clut[1],
2231 					   clut[2]) != 0)
2232 				break;
2233 
2234 #ifdef CONFIG_LOGO_LINUX_CLUT224
2235 		clut = PTRRELOC(logo_linux_clut224.clut);
2236 		for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2237 			if (prom_set_color(ih, i + 32, clut[0], clut[1],
2238 					   clut[2]) != 0)
2239 				break;
2240 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
2241 
2242 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2243 		if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2244 		    PROM_ERROR) {
2245 			u32 width, height, pitch, addr;
2246 
2247 			prom_printf("Setting btext !\n");
2248 			prom_getprop(node, "width", &width, 4);
2249 			prom_getprop(node, "height", &height, 4);
2250 			prom_getprop(node, "linebytes", &pitch, 4);
2251 			prom_getprop(node, "address", &addr, 4);
2252 			prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2253 				    width, height, pitch, addr);
2254 			btext_setup_display(width, height, 8, pitch, addr);
2255 		}
2256 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2257 	}
2258 }
2259 
2260 
2261 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
2262 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2263 			      unsigned long needed, unsigned long align)
2264 {
2265 	void *ret;
2266 
2267 	*mem_start = _ALIGN(*mem_start, align);
2268 	while ((*mem_start + needed) > *mem_end) {
2269 		unsigned long room, chunk;
2270 
2271 		prom_debug("Chunk exhausted, claiming more at %x...\n",
2272 			   alloc_bottom);
2273 		room = alloc_top - alloc_bottom;
2274 		if (room > DEVTREE_CHUNK_SIZE)
2275 			room = DEVTREE_CHUNK_SIZE;
2276 		if (room < PAGE_SIZE)
2277 			prom_panic("No memory for flatten_device_tree "
2278 				   "(no room)\n");
2279 		chunk = alloc_up(room, 0);
2280 		if (chunk == 0)
2281 			prom_panic("No memory for flatten_device_tree "
2282 				   "(claim failed)\n");
2283 		*mem_end = chunk + room;
2284 	}
2285 
2286 	ret = (void *)*mem_start;
2287 	*mem_start += needed;
2288 
2289 	return ret;
2290 }
2291 
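/*
 * Append one 32-bit big-endian cell (an FDT token or value) to the blob
 * being built, growing the current chunk via make_room() as needed.
 */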
2292 #define dt_push_token(token, mem_start, mem_end) do { 			\
2293 		void *room = make_room(mem_start, mem_end, 4, 4);	\
2294 		*(__be32 *)room = cpu_to_be32(token);			\
2295 	} while(0)
2296 
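/*
 * Look up a property name in the strings block built so far and return
 * its offset, or 0 if it has not been emitted yet.  Offset 0 can never
 * be a real match since the block starts with a 4-byte hole.
 */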
2297 static unsigned long __init dt_find_string(char *str)
2298 {
2299 	char *s, *os;
2300 
2301 	s = os = (char *)dt_string_start;
2302 	s += 4;
2303 	while (s <  (char *)dt_string_end) {
2304 		if (strcmp(s, str) == 0)
2305 			return s - os;
2306 		s += strlen(s) + 1;
2307 	}
2308 	return 0;
2309 }
2310 
2311 /*
2312  * The Open Firmware 1275 specification states properties must be 31 bytes or
2313  * less; however, not all firmwares obey this.  Make it 64 bytes to be safe.
2314  */
2315 #define MAX_PROPERTY_NAME 64
2316 
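/*
 * Recursively walk the tree and add every property name we have not
 * seen before to the strings block.  "name" is skipped here, just as
 * it is when the structure block is built below.
 */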
2317 static void __init scan_dt_build_strings(phandle node,
2318 					 unsigned long *mem_start,
2319 					 unsigned long *mem_end)
2320 {
2321 	char *prev_name, *namep, *sstart;
2322 	unsigned long soff;
2323 	phandle child;
2324 
2325 	sstart =  (char *)dt_string_start;
2326 
2327 	/* get and store all property names */
2328 	prev_name = "";
2329 	for (;;) {
2330 		/* 64 is max len of name including nul. */
2331 		namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2332 		if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2333 			/* No more properties: unwind alloc */
2334 			*mem_start = (unsigned long)namep;
2335 			break;
2336 		}
2337 
2338  		/* skip "name" */
2339  		if (strcmp(namep, "name") == 0) {
2340  			*mem_start = (unsigned long)namep;
2341  			prev_name = "name";
2342  			continue;
2343  		}
2344 		/* get/create string entry */
2345 		soff = dt_find_string(namep);
2346 		if (soff != 0) {
2347 			*mem_start = (unsigned long)namep;
2348 			namep = sstart + soff;
2349 		} else {
2350 			/* New string: keep it, trimming the reservation to its length */
2351 			*mem_start = (unsigned long)namep + strlen(namep) + 1;
2352 			dt_string_end = *mem_start;
2353 		}
2354 		prev_name = namep;
2355 	}
2356 
2357 	/* do all our children */
2358 	child = call_prom("child", 1, 1, node);
2359 	while (child != 0) {
2360 		scan_dt_build_strings(child, mem_start, mem_end);
2361 		child = call_prom("peer", 1, 1, child);
2362 	}
2363 }
2364 
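/*
 * Recursively emit the structure block for one node: OF_DT_BEGIN_NODE,
 * the unit name, one OF_DT_PROP record (length, string offset, value)
 * per property, then the children, then OF_DT_END_NODE.
 */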
2365 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2366 					unsigned long *mem_end)
2367 {
2368 	phandle child;
2369 	char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2370 	unsigned long soff;
2371 	unsigned char *valp;
2372 	static char pname[MAX_PROPERTY_NAME];
2373 	int l, room, has_phandle = 0;
2374 
2375 	dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2376 
2377 	/* get the node's full name */
2378 	namep = (char *)*mem_start;
2379 	room = *mem_end - *mem_start;
2380 	if (room > 255)
2381 		room = 255;
2382 	l = call_prom("package-to-path", 3, 1, node, namep, room);
2383 	if (l >= 0) {
2384 		/* Didn't fit?  Get more room. */
2385 		if (l >= room) {
2386 			if (l >= *mem_end - *mem_start)
2387 				namep = make_room(mem_start, mem_end, l+1, 1);
2388 			call_prom("package-to-path", 3, 1, node, namep, l);
2389 		}
2390 		namep[l] = '\0';
2391 
2392 		/* Fixup an Apple bug where they have bogus \0 chars in the
2393 		 * middle of the path in some properties, and extract
2394 		 * the unit name (everything after the last '/').
2395 		 */
2396 		for (lp = p = namep, ep = namep + l; p < ep; p++) {
2397 			if (*p == '/')
2398 				lp = namep;
2399 			else if (*p != 0)
2400 				*lp++ = *p;
2401 		}
2402 		*lp = 0;
2403 		*mem_start = _ALIGN((unsigned long)lp + 1, 4);
2404 	}
2405 
2406 	/* get it again for debugging */
2407 	path = prom_scratch;
2408 	memset(path, 0, PROM_SCRATCH_SIZE);
2409 	call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
2410 
2411 	/* get and store all properties */
2412 	prev_name = "";
2413 	sstart = (char *)dt_string_start;
2414 	for (;;) {
2415 		if (call_prom("nextprop", 3, 1, node, prev_name,
2416 			      pname) != 1)
2417 			break;
2418 
2419  		/* skip "name" */
2420  		if (strcmp(pname, "name") == 0) {
2421  			prev_name = "name";
2422  			continue;
2423  		}
2424 
2425 		/* find string offset */
2426 		soff = dt_find_string(pname);
2427 		if (soff == 0) {
2428 			prom_printf("WARNING: Can't find string index for"
2429 				    " <%s>, node %s\n", pname, path);
2430 			break;
2431 		}
2432 		prev_name = sstart + soff;
2433 
2434 		/* get length */
2435 		l = call_prom("getproplen", 2, 1, node, pname);
2436 
2437 		/* sanity checks */
2438 		if (l == PROM_ERROR)
2439 			continue;
2440 
2441 		/* push property head */
2442 		dt_push_token(OF_DT_PROP, mem_start, mem_end);
2443 		dt_push_token(l, mem_start, mem_end);
2444 		dt_push_token(soff, mem_start, mem_end);
2445 
2446 		/* push property content */
2447 		valp = make_room(mem_start, mem_end, l, 4);
2448 		call_prom("getprop", 4, 1, node, pname, valp, l);
2449 		*mem_start = _ALIGN(*mem_start, 4);
2450 
2451 		if (!strcmp(pname, "phandle"))
2452 			has_phandle = 1;
2453 	}
2454 
2455 	/* Add a "linux,phandle" property if no "phandle" property already
2456 	 * existed (can happen with OPAL)
2457 	 */
2458 	if (!has_phandle) {
2459 		soff = dt_find_string("linux,phandle");
2460 		if (soff == 0)
2461 			prom_printf("WARNING: Can't find string index for"
2462 				    " <linux,phandle>, node %s\n", path);
2463 		else {
2464 			dt_push_token(OF_DT_PROP, mem_start, mem_end);
2465 			dt_push_token(4, mem_start, mem_end);
2466 			dt_push_token(soff, mem_start, mem_end);
2467 			valp = make_room(mem_start, mem_end, 4, 4);
2468 			*(__be32 *)valp = cpu_to_be32(node);
2469 		}
2470 	}
2471 
2472 	/* do all our children */
2473 	child = call_prom("child", 1, 1, node);
2474 	while (child != 0) {
2475 		scan_dt_build_struct(child, mem_start, mem_end);
2476 		child = call_prom("peer", 1, 1, child);
2477 	}
2478 
2479 	dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2480 }
2481 
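/*
 * Build the flattened device-tree blob in memory claimed from OF.  The
 * layout is: boot_param_header, memory reserve map, then the strings
 * block and the structure block, each of the latter two starting on a
 * page boundary.  The header offsets and sizes are filled in once both
 * scans know how big the blocks turned out to be.
 */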
2482 static void __init flatten_device_tree(void)
2483 {
2484 	phandle root;
2485 	unsigned long mem_start, mem_end, room;
2486 	struct boot_param_header *hdr;
2487 	char *namep;
2488 	u64 *rsvmap;
2489 
2490 	/*
2491 	 * Check how much room we have between alloc top & bottom (+/- a
2492 	 * few pages), crop to 1MB, as this is our "chunk" size
2493 	 */
2494 	room = alloc_top - alloc_bottom - 0x4000;
2495 	if (room > DEVTREE_CHUNK_SIZE)
2496 		room = DEVTREE_CHUNK_SIZE;
2497 	prom_debug("starting device tree allocs at %x\n", alloc_bottom);
2498 
2499 	/* Now try to claim that */
2500 	mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2501 	if (mem_start == 0)
2502 		prom_panic("Can't allocate initial device-tree chunk\n");
2503 	mem_end = mem_start + room;
2504 
2505 	/* Get root of tree */
2506 	root = call_prom("peer", 1, 1, (phandle)0);
2507 	if (root == (phandle)0)
2508 		prom_panic ("couldn't get device tree root\n");
2509 
2510 	/* Build header and make room for mem rsv map */
2511 	mem_start = _ALIGN(mem_start, 4);
2512 	hdr = make_room(&mem_start, &mem_end,
2513 			sizeof(struct boot_param_header), 4);
2514 	dt_header_start = (unsigned long)hdr;
2515 	rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2516 
2517 	/* Start of strings */
2518 	mem_start = PAGE_ALIGN(mem_start);
2519 	dt_string_start = mem_start;
2520 	mem_start += 4; /* hole */
2521 
2522 	/* Add "linux,phandle" in there, we'll need it */
2523 	namep = make_room(&mem_start, &mem_end, 16, 1);
2524 	strcpy(namep, "linux,phandle");
2525 	mem_start = (unsigned long)namep + strlen(namep) + 1;
2526 
2527 	/* Build string array */
2528 	prom_printf("Building dt strings...\n");
2529 	scan_dt_build_strings(root, &mem_start, &mem_end);
2530 	dt_string_end = mem_start;
2531 
2532 	/* Build structure */
2533 	mem_start = PAGE_ALIGN(mem_start);
2534 	dt_struct_start = mem_start;
2535 	prom_printf("Building dt structure...\n");
2536 	scan_dt_build_struct(root, &mem_start, &mem_end);
2537 	dt_push_token(OF_DT_END, &mem_start, &mem_end);
2538 	dt_struct_end = PAGE_ALIGN(mem_start);
2539 
2540 	/* Finish header */
2541 	hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2542 	hdr->magic = cpu_to_be32(OF_DT_HEADER);
2543 	hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2544 	hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2545 	hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2546 	hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2547 	hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2548 	hdr->version = cpu_to_be32(OF_DT_VERSION);
2549 	/* Version 16 is not backward compatible */
2550 	hdr->last_comp_version = cpu_to_be32(0x10);
2551 
2552 	/* Copy the reserve map in */
2553 	memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2554 
2555 #ifdef DEBUG_PROM
2556 	{
2557 		int i;
2558 		prom_printf("reserved memory map:\n");
2559 		for (i = 0; i < mem_reserve_cnt; i++)
2560 			prom_printf("  %x - %x\n",
2561 				    be64_to_cpu(mem_reserve_map[i].base),
2562 				    be64_to_cpu(mem_reserve_map[i].size));
2563 	}
2564 #endif
2565 	/* Bump mem_reserve_cnt to cause further reservations to fail
2566 	 * since it's too late.
2567 	 */
2568 	mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2569 
2570 	prom_printf("Device tree strings 0x%x -> 0x%x\n",
2571 		    dt_string_start, dt_string_end);
2572 	prom_printf("Device tree struct  0x%x -> 0x%x\n",
2573 		    dt_struct_start, dt_struct_end);
2574 }
2575 
2576 #ifdef CONFIG_PPC_MAPLE
2577 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2578  * The values are bad, and it doesn't even have the right number of cells. */
2579 static void __init fixup_device_tree_maple(void)
2580 {
2581 	phandle isa;
2582 	u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2583 	u32 isa_ranges[6];
2584 	char *name;
2585 
2586 	name = "/ht@0/isa@4";
2587 	isa = call_prom("finddevice", 1, 1, ADDR(name));
2588 	if (!PHANDLE_VALID(isa)) {
2589 		name = "/ht@0/isa@6";
2590 		isa = call_prom("finddevice", 1, 1, ADDR(name));
2591 		rloc = 0x01003000; /* IO space; PCI device = 6 */
2592 	}
2593 	if (!PHANDLE_VALID(isa))
2594 		return;
2595 
2596 	if (prom_getproplen(isa, "ranges") != 12)
2597 		return;
2598 	if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2599 		== PROM_ERROR)
2600 		return;
2601 
2602 	if (isa_ranges[0] != 0x1 ||
2603 		isa_ranges[1] != 0xf4000000 ||
2604 		isa_ranges[2] != 0x00010000)
2605 		return;
2606 
2607 	prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2608 
2609 	isa_ranges[0] = 0x1;
2610 	isa_ranges[1] = 0x0;
2611 	isa_ranges[2] = rloc;
2612 	isa_ranges[3] = 0x0;
2613 	isa_ranges[4] = 0x0;
2614 	isa_ranges[5] = 0x00010000;
2615 	prom_setprop(isa, name, "ranges",
2616 			isa_ranges, sizeof(isa_ranges));
2617 }
2618 
2619 #define CPC925_MC_START		0xf8000000
2620 #define CPC925_MC_LENGTH	0x1000000
2621 /* The values for memory-controller don't have the right number of cells */
2622 static void __init fixup_device_tree_maple_memory_controller(void)
2623 {
2624 	phandle mc;
2625 	u32 mc_reg[4];
2626 	char *name = "/hostbridge@f8000000";
2627 	u32 ac, sc;
2628 
2629 	mc = call_prom("finddevice", 1, 1, ADDR(name));
2630 	if (!PHANDLE_VALID(mc))
2631 		return;
2632 
2633 	if (prom_getproplen(mc, "reg") != 8)
2634 		return;
2635 
2636 	prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2637 	prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2638 	if ((ac != 2) || (sc != 2))
2639 		return;
2640 
2641 	if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2642 		return;
2643 
2644 	if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2645 		return;
2646 
2647 	prom_printf("Fixing up bogus hostbridge on Maple...\n");
2648 
2649 	mc_reg[0] = 0x0;
2650 	mc_reg[1] = CPC925_MC_START;
2651 	mc_reg[2] = 0x0;
2652 	mc_reg[3] = CPC925_MC_LENGTH;
2653 	prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2654 }
2655 #else
2656 #define fixup_device_tree_maple()
2657 #define fixup_device_tree_maple_memory_controller()
2658 #endif
2659 
2660 #ifdef CONFIG_PPC_CHRP
2661 /*
2662  * Pegasos and BriQ lack the "ranges" property in the isa node
2663  * Pegasos needs decimal IRQ 14/15, not hexadecimal
2664  * Pegasos has the IDE configured in legacy mode, but advertised as native
2665  */
2666 static void __init fixup_device_tree_chrp(void)
2667 {
2668 	phandle ph;
2669 	u32 prop[6];
2670 	u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2671 	char *name;
2672 	int rc;
2673 
2674 	name = "/pci@80000000/isa@c";
2675 	ph = call_prom("finddevice", 1, 1, ADDR(name));
2676 	if (!PHANDLE_VALID(ph)) {
2677 		name = "/pci@ff500000/isa@6";
2678 		ph = call_prom("finddevice", 1, 1, ADDR(name));
2679 		rloc = 0x01003000; /* IO space; PCI device = 6 */
2680 	}
2681 	if (PHANDLE_VALID(ph)) {
2682 		rc = prom_getproplen(ph, "ranges");
2683 		if (rc == 0 || rc == PROM_ERROR) {
2684 			prom_printf("Fixing up missing ISA range on Pegasos...\n");
2685 
2686 			prop[0] = 0x1;
2687 			prop[1] = 0x0;
2688 			prop[2] = rloc;
2689 			prop[3] = 0x0;
2690 			prop[4] = 0x0;
2691 			prop[5] = 0x00010000;
2692 			prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2693 		}
2694 	}
2695 
2696 	name = "/pci@80000000/ide@C,1";
2697 	ph = call_prom("finddevice", 1, 1, ADDR(name));
2698 	if (PHANDLE_VALID(ph)) {
2699 		prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2700 		prop[0] = 14;
2701 		prop[1] = 0x0;
2702 		prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2703 		prom_printf("Fixing up IDE class-code on Pegasos...\n");
2704 		rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2705 		if (rc == sizeof(u32)) {
2706 			prop[0] &= ~0x5;
2707 			prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2708 		}
2709 	}
2710 }
2711 #else
2712 #define fixup_device_tree_chrp()
2713 #endif
2714 
2715 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2716 static void __init fixup_device_tree_pmac(void)
2717 {
2718 	phandle u3, i2c, mpic;
2719 	u32 u3_rev;
2720 	u32 interrupts[2];
2721 	u32 parent;
2722 
2723 	/* Some G5s have a missing interrupt definition; fix it up here */
2724 	u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2725 	if (!PHANDLE_VALID(u3))
2726 		return;
2727 	i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2728 	if (!PHANDLE_VALID(i2c))
2729 		return;
2730 	mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2731 	if (!PHANDLE_VALID(mpic))
2732 		return;
2733 
2734 	/* check if proper rev of u3 */
2735 	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2736 	    == PROM_ERROR)
2737 		return;
2738 	if (u3_rev < 0x35 || u3_rev > 0x39)
2739 		return;
2740 	/* does it need fixup? */
2741 	if (prom_getproplen(i2c, "interrupts") > 0)
2742 		return;
2743 
2744 	prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2745 
2746 	/* the interrupt on this revision of u3 is number 0 and level-triggered */
2747 	interrupts[0] = 0;
2748 	interrupts[1] = 1;
2749 	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2750 		     &interrupts, sizeof(interrupts));
2751 	parent = (u32)mpic;
2752 	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2753 		     &parent, sizeof(parent));
2754 }
2755 #else
2756 #define fixup_device_tree_pmac()
2757 #endif
2758 
2759 #ifdef CONFIG_PPC_EFIKA
2760 /*
2761  * The MPC5200 FEC driver requires a phy-handle property to tell it how
2762  * to talk to the phy.  If the phy-handle property is missing, then this
2763  * function is called to add the appropriate nodes and link the phy to the
2764  * ethernet node.
2765  */
2766 static void __init fixup_device_tree_efika_add_phy(void)
2767 {
2768 	u32 node;
2769 	char prop[64];
2770 	int rv;
2771 
2772 	/* Check if /builtin/ethernet exists - bail if it doesn't */
2773 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2774 	if (!PHANDLE_VALID(node))
2775 		return;
2776 
2777 	/* Check if the phy-handle property exists - bail if it does */
2778 	rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2779 	if (!rv)
2780 		return;
2781 
2782 	/*
2783 	 * At this point the ethernet device doesn't have a phy described.
2784 	 * Now we need to add the missing phy node and linkage
2785 	 */
2786 
2787 	/* Check for an MDIO bus node - if missing then create one */
2788 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
2789 	if (!PHANDLE_VALID(node)) {
2790 		prom_printf("Adding Ethernet MDIO node\n");
2791 		call_prom("interpret", 1, 1,
2792 			" s\" /builtin\" find-device"
2793 			" new-device"
2794 				" 1 encode-int s\" #address-cells\" property"
2795 				" 0 encode-int s\" #size-cells\" property"
2796 				" s\" mdio\" device-name"
2797 				" s\" fsl,mpc5200b-mdio\" encode-string"
2798 				" s\" compatible\" property"
2799 				" 0xf0003000 0x400 reg"
2800 				" 0x2 encode-int"
2801 				" 0x5 encode-int encode+"
2802 				" 0x3 encode-int encode+"
2803 				" s\" interrupts\" property"
2804 			" finish-device");
2805 	}
2806 
2807 	/* Check for a PHY device node - if missing then create one and
2808 	 * give its phandle to the ethernet node */
2809 	node = call_prom("finddevice", 1, 1,
2810 			 ADDR("/builtin/mdio/ethernet-phy"));
2811 	if (!PHANDLE_VALID(node)) {
2812 		prom_printf("Adding Ethernet PHY node\n");
2813 		call_prom("interpret", 1, 1,
2814 			" s\" /builtin/mdio\" find-device"
2815 			" new-device"
2816 				" s\" ethernet-phy\" device-name"
2817 				" 0x10 encode-int s\" reg\" property"
2818 				" my-self"
2819 				" ihandle>phandle"
2820 			" finish-device"
2821 			" s\" /builtin/ethernet\" find-device"
2822 				" encode-int"
2823 				" s\" phy-handle\" property"
2824 			" device-end");
2825 	}
2826 }
2827 
2828 static void __init fixup_device_tree_efika(void)
2829 {
2830 	int sound_irq[3] = { 2, 2, 0 };
2831 	int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
2832 				3,4,0, 3,5,0, 3,6,0, 3,7,0,
2833 				3,8,0, 3,9,0, 3,10,0, 3,11,0,
2834 				3,12,0, 3,13,0, 3,14,0, 3,15,0 };
2835 	u32 node;
2836 	char prop[64];
2837 	int rv, len;
2838 
2839 	/* Check if we're really running on an EFIKA */
2840 	node = call_prom("finddevice", 1, 1, ADDR("/"));
2841 	if (!PHANDLE_VALID(node))
2842 		return;
2843 
2844 	rv = prom_getprop(node, "model", prop, sizeof(prop));
2845 	if (rv == PROM_ERROR)
2846 		return;
2847 	if (strcmp(prop, "EFIKA5K2"))
2848 		return;
2849 
2850 	prom_printf("Applying EFIKA device tree fixups\n");
2851 
2852 	/* Claiming to be 'chrp' is death */
2853 	node = call_prom("finddevice", 1, 1, ADDR("/"));
2854 	rv = prom_getprop(node, "device_type", prop, sizeof(prop));
2855 	if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
2856 		prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
2857 
2858 	/* CODEGEN,description is exposed in /proc/cpuinfo, so
2859 	 * fix that too */
2860 	rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
2861 	if (rv != PROM_ERROR && (strstr(prop, "CHRP")))
2862 		prom_setprop(node, "/", "CODEGEN,description",
2863 			     "Efika 5200B PowerPC System",
2864 			     sizeof("Efika 5200B PowerPC System"));
2865 
2866 	/* Fixup bestcomm interrupts property */
2867 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
2868 	if (PHANDLE_VALID(node)) {
2869 		len = prom_getproplen(node, "interrupts");
2870 		if (len == 12) {
2871 			prom_printf("Fixing bestcomm interrupts property\n");
2872 			prom_setprop(node, "/builtin/bestcom", "interrupts",
2873 				     bcomm_irq, sizeof(bcomm_irq));
2874 		}
2875 	}
2876 
2877 	/* Fixup sound interrupts property */
2878 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
2879 	if (PHANDLE_VALID(node)) {
2880 		rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
2881 		if (rv == PROM_ERROR) {
2882 			prom_printf("Adding sound interrupts property\n");
2883 			prom_setprop(node, "/builtin/sound", "interrupts",
2884 				     sound_irq, sizeof(sound_irq));
2885 		}
2886 	}
2887 
2888 	/* Make sure ethernet phy-handle property exists */
2889 	fixup_device_tree_efika_add_phy();
2890 }
2891 #else
2892 #define fixup_device_tree_efika()
2893 #endif
2894 
2895 #ifdef CONFIG_PPC_PASEMI_NEMO
2896 /*
2897  * The CFE firmware supplied on Nemo is broken in several ways; the
2898  * biggest problem is that it reassigns ISA interrupts to unused MPIC ints.
2899  * Add an interrupt-controller property for the io-bridge to use,
2900  * and correct the ints so we can attach them to an irq_domain.
2901  */
2902 static void __init fixup_device_tree_pasemi(void)
2903 {
2904 	u32 interrupts[2], parent, rval, val = 0;
2905 	char *name, *pci_name;
2906 	phandle iob, node;
2907 
2908 	/* Find the root pci node */
2909 	name = "/pxp@0,e0000000";
2910 	iob = call_prom("finddevice", 1, 1, ADDR(name));
2911 	if (!PHANDLE_VALID(iob))
2912 		return;
2913 
2914 	/* check if interrupt-controller node set yet */
2915 	if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
2916 		return;
2917 
2918 	prom_printf("adding interrupt-controller property for SB600...\n");
2919 
2920 	prom_setprop(iob, name, "interrupt-controller", &val, 0);
2921 
2922 	pci_name = "/pxp@0,e0000000/pci@11";
2923 	node = call_prom("finddevice", 1, 1, ADDR(pci_name));
2924 	parent = ADDR(iob);
2925 
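	/*
	 * Walk the remaining nodes and remap the interrupt numbers CFE
	 * assigned (212-222): 212-215 become 9-12, 216-220 become 3-7,
	 * 221 becomes 14 and 222 becomes 8, pointing interrupt-parent at
	 * the node flagged above as an interrupt-controller.
	 */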
2926 	for (; prom_next_node(&node); ) {
2927 		/* scan each node for one with an interrupt */
2928 		if (!PHANDLE_VALID(node))
2929 			continue;
2930 
2931 		rval = prom_getproplen(node, "interrupts");
2932 		if (rval == 0 || rval == PROM_ERROR)
2933 			continue;
2934 
2935 		prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
2936 		if ((interrupts[0] < 212) || (interrupts[0] > 222))
2937 			continue;
2938 
2939 		/* found a node, update both interrupts and interrupt-parent */
2940 		if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
2941 			interrupts[0] -= 203;
2942 		if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
2943 			interrupts[0] -= 213;
2944 		if (interrupts[0] == 221)
2945 			interrupts[0] = 14;
2946 		if (interrupts[0] == 222)
2947 			interrupts[0] = 8;
2948 
2949 		prom_setprop(node, pci_name, "interrupts", interrupts,
2950 					sizeof(interrupts));
2951 		prom_setprop(node, pci_name, "interrupt-parent", &parent,
2952 					sizeof(parent));
2953 	}
2954 
2955 	/*
2956 	 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
2957 	 * so that generic isa-bridge code can add the SB600 and its on-board
2958 	 * peripherals.
2959 	 */
2960 	name = "/pxp@0,e0000000/io-bridge@0";
2961 	iob = call_prom("finddevice", 1, 1, ADDR(name));
2962 	if (!PHANDLE_VALID(iob))
2963 		return;
2964 
2965 	/* device_type is already set, just change it. */
2966 
2967 	prom_printf("Changing device_type of SB600 node...\n");
2968 
2969 	prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
2970 }
2971 #else	/* !CONFIG_PPC_PASEMI_NEMO */
2972 static inline void fixup_device_tree_pasemi(void) { }
2973 #endif
2974 
2975 static void __init fixup_device_tree(void)
2976 {
2977 	fixup_device_tree_maple();
2978 	fixup_device_tree_maple_memory_controller();
2979 	fixup_device_tree_chrp();
2980 	fixup_device_tree_pmac();
2981 	fixup_device_tree_efika();
2982 	fixup_device_tree_pasemi();
2983 }
2984 
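/*
 * Work out which hardware cpu we are booting on: /chosen/cpu holds an
 * instance handle, which we convert to a package whose "reg" property
 * gives the hardware cpu number.
 */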
2985 static void __init prom_find_boot_cpu(void)
2986 {
2987 	__be32 rval;
2988 	ihandle prom_cpu;
2989 	phandle cpu_pkg;
2990 
2991 	rval = 0;
2992 	if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
2993 		return;
2994 	prom_cpu = be32_to_cpu(rval);
2995 
2996 	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
2997 
2998 	if (!PHANDLE_VALID(cpu_pkg))
2999 		return;
3000 
3001 	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
3002 	prom.cpu = be32_to_cpu(rval);
3003 
3004 	prom_debug("Booting CPU hw index = %lu\n", prom.cpu);
3005 }
3006 
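/*
 * If the boot loader handed us an initrd (start in r3, size in r4),
 * record its location in /chosen and reserve the memory so nothing
 * else gets allocated on top of it.
 */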
3007 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
3008 {
3009 #ifdef CONFIG_BLK_DEV_INITRD
3010 	if (r3 && r4 && r4 != 0xdeadbeef) {
3011 		__be64 val;
3012 
3013 		prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
3014 		prom_initrd_end = prom_initrd_start + r4;
3015 
3016 		val = cpu_to_be64(prom_initrd_start);
3017 		prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
3018 			     &val, sizeof(val));
3019 		val = cpu_to_be64(prom_initrd_end);
3020 		prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
3021 			     &val, sizeof(val));
3022 
3023 		reserve_mem(prom_initrd_start,
3024 			    prom_initrd_end - prom_initrd_start);
3025 
3026 		prom_debug("initrd_start=0x%x\n", prom_initrd_start);
3027 		prom_debug("initrd_end=0x%x\n", prom_initrd_end);
3028 	}
3029 #endif /* CONFIG_BLK_DEV_INITRD */
3030 }
3031 
3032 #ifdef CONFIG_PPC64
3033 #ifdef CONFIG_RELOCATABLE
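/*
 * A CONFIG_RELOCATABLE kernel is relocated, TOC included, by the
 * kernel's own relocation code before prom_init() runs, so these
 * helpers have nothing to do here.
 */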
3034 static void reloc_toc(void)
3035 {
3036 }
3037 
3038 static void unreloc_toc(void)
3039 {
3040 }
3041 #else
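/*
 * Without CONFIG_RELOCATABLE we may be executing at an address other
 * than the one we were linked at, so on entry add the load offset to
 * every TOC entry prom_init uses (the __prom_init_toc_start ..
 * __prom_init_toc_end range), and subtract it again before handing
 * control back to the rest of the kernel.
 */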
3042 static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
3043 {
3044 	unsigned long i;
3045 	unsigned long *toc_entry;
3046 
3047 	/* Get the start of the TOC by using r2 directly. */
3048 	asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
3049 
3050 	for (i = 0; i < nr_entries; i++) {
3051 		*toc_entry = *toc_entry + offset;
3052 		toc_entry++;
3053 	}
3054 }
3055 
3056 static void reloc_toc(void)
3057 {
3058 	unsigned long offset = reloc_offset();
3059 	unsigned long nr_entries =
3060 		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3061 
3062 	__reloc_toc(offset, nr_entries);
3063 
3064 	mb();
3065 }
3066 
3067 static void unreloc_toc(void)
3068 {
3069 	unsigned long offset = reloc_offset();
3070 	unsigned long nr_entries =
3071 		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3072 
3073 	mb();
3074 
3075 	__reloc_toc(-offset, nr_entries);
3076 }
3077 #endif
3078 #endif
3079 
3080 /*
3081  * We enter here early on, when the Open Firmware prom is still
3082  * handling exceptions and managing the MMU hash table for us.
3083  */
3084 
3085 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3086 			       unsigned long pp,
3087 			       unsigned long r6, unsigned long r7,
3088 			       unsigned long kbase)
3089 {
3090 	unsigned long hdr;
3091 
3092 #ifdef CONFIG_PPC32
3093 	unsigned long offset = reloc_offset();
3094 	reloc_got2(offset);
3095 #else
3096 	reloc_toc();
3097 #endif
3098 
3099 	/*
3100 	 * First zero the BSS
3101 	 */
3102 	memset(&__bss_start, 0, __bss_stop - __bss_start);
3103 
3104 	/*
3105 	 * Init interface to Open Firmware, get some node references,
3106 	 * like /chosen
3107 	 */
3108 	prom_init_client_services(pp);
3109 
3110 	/*
3111 	 * See if this OF is old enough that we need to do explicit maps
3112 	 * and other workarounds
3113 	 */
3114 	prom_find_mmu();
3115 
3116 	/*
3117 	 * Init prom stdout device
3118 	 */
3119 	prom_init_stdout();
3120 
3121 	prom_printf("Preparing to boot %s", linux_banner);
3122 
3123 	/*
3124 	 * Get default machine type. At this point, we do not differentiate
3125 	 * between pSeries SMP and pSeries LPAR
3126 	 */
3127 	of_platform = prom_find_machine_type();
3128 	prom_printf("Detected machine type: %x\n", of_platform);
3129 
3130 #ifndef CONFIG_NONSTATIC_KERNEL
3131 	/* Bail if this is a kdump kernel. */
3132 	if (PHYSICAL_START > 0)
3133 		prom_panic("Error: You can't boot a kdump kernel from OF!\n");
3134 #endif
3135 
3136 	/*
3137 	 * Check for an initrd
3138 	 */
3139 	prom_check_initrd(r3, r4);
3140 
3141 	/*
3142 	 * Do early parsing of command line
3143 	 */
3144 	early_cmdline_parse();
3145 
3146 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
3147 	/*
3148 	 * On pSeries, inform the firmware about our capabilities
3149 	 */
3150 	if (of_platform == PLATFORM_PSERIES ||
3151 	    of_platform == PLATFORM_PSERIES_LPAR)
3152 		prom_send_capabilities();
3153 #endif
3154 
3155 	/*
3156 	 * Copy the CPU hold code
3157 	 */
3158 	if (of_platform != PLATFORM_POWERMAC)
3159 		copy_and_flush(0, kbase, 0x100, 0);
3160 
3161 	/*
3162 	 * Initialize memory management within prom_init
3163 	 */
3164 	prom_init_mem();
3165 
3166 	/*
3167 	 * Determine which cpu is actually running right _now_
3168 	 */
3169 	prom_find_boot_cpu();
3170 
3171 	/*
3172 	 * Initialize display devices
3173 	 */
3174 	prom_check_displays();
3175 
3176 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
3177 	/*
3178 	 * Initialize the IOMMU (TCE tables) on pSeries. Do that before anything
3179 	 * else that uses the allocator, as we need to make sure we get the top
3180 	 * of memory available to us here...
3181 	 */
3182 	if (of_platform == PLATFORM_PSERIES)
3183 		prom_initialize_tce_table();
3184 #endif
3185 
3186 	/*
3187 	 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3188 	 * have a usable RTAS implementation.
3189 	 */
3190 	if (of_platform != PLATFORM_POWERMAC &&
3191 	    of_platform != PLATFORM_OPAL)
3192 		prom_instantiate_rtas();
3193 
3194 #ifdef CONFIG_PPC_POWERNV
3195 	if (of_platform == PLATFORM_OPAL)
3196 		prom_instantiate_opal();
3197 #endif /* CONFIG_PPC_POWERNV */
3198 
3199 #ifdef CONFIG_PPC64
3200 	/* instantiate sml */
3201 	prom_instantiate_sml();
3202 #endif
3203 
3204 	/*
3205 	 * On non-powermacs, put all CPUs in spin-loops.
3206 	 *
3207 	 * PowerMacs use a different mechanism to spin CPUs.
3208 	 *
3209 	 * (This must be done after instantiating RTAS.)
3210 	 */
3211 	if (of_platform != PLATFORM_POWERMAC &&
3212 	    of_platform != PLATFORM_OPAL)
3213 		prom_hold_cpus();
3214 
3215 	/*
3216 	 * Fill in some information for use by the kernel later on
3217 	 */
3218 	if (prom_memory_limit) {
3219 		__be64 val = cpu_to_be64(prom_memory_limit);
3220 		prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3221 			     &val, sizeof(val));
3222 	}
3223 #ifdef CONFIG_PPC64
3224 	if (prom_iommu_off)
3225 		prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3226 			     NULL, 0);
3227 
3228 	if (prom_iommu_force_on)
3229 		prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3230 			     NULL, 0);
3231 
3232 	if (prom_tce_alloc_start) {
3233 		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3234 			     &prom_tce_alloc_start,
3235 			     sizeof(prom_tce_alloc_start));
3236 		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3237 			     &prom_tce_alloc_end,
3238 			     sizeof(prom_tce_alloc_end));
3239 	}
3240 #endif
3241 
3242 	/*
3243 	 * Fixup any known bugs in the device-tree
3244 	 */
3245 	fixup_device_tree();
3246 
3247 	/*
3248 	 * Now finally create the flattened device-tree
3249 	 */
3250 	prom_printf("copying OF device tree...\n");
3251 	flatten_device_tree();
3252 
3253 	/*
3254 	 * Close stdin in case it is USB and still active on IBM machines.
3255 	 * Unfortunately quiesce crashes on some powermacs if we have
3256 	 * closed stdin already (in particular the powerbook 101), and it
3257 	 * appears that the OPAL version of OFW doesn't like it either.
3258 	 */
3259 	if (of_platform != PLATFORM_POWERMAC &&
3260 	    of_platform != PLATFORM_OPAL)
3261 		prom_close_stdin();
3262 
3263 	/*
3264 	 * Call OF "quiesce" method to shut down pending DMA's from
3265 	 * devices etc...
3266 	 */
3267 	prom_printf("Quiescing Open Firmware ...\n");
3268 	call_prom("quiesce", 0, 0);
3269 
3270 	/*
3271 	 * And finally, call the kernel passing it the flattened device
3272 	 * tree and NULL as r5, thus triggering the new entry point which
3273 	 * is common to us and kexec
3274 	 */
3275 	hdr = dt_header_start;
3276 
3277 	/* Don't print anything after quiesce under OPAL; it crashes OFW */
3278 	if (of_platform != PLATFORM_OPAL) {
3279 		prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
3280 		prom_debug("->dt_header_start=0x%x\n", hdr);
3281 	}
3282 
3283 #ifdef CONFIG_PPC32
3284 	reloc_got2(-offset);
3285 #else
3286 	unreloc_toc();
3287 #endif
3288 
3289 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
3290 	/* OPAL early debug gets the OPAL base & entry in r8 and r9 */
3291 	__start(hdr, kbase, 0, 0, 0,
3292 		prom_opal_base, prom_opal_entry);
3293 #else
3294 	__start(hdr, kbase, 0, 0, 0, 0, 0);
3295 #endif
3296 
3297 	return 0;
3298 }
3299