xref: /openbmc/linux/arch/powerpc/kernel/prom_init.c (revision be709d48)
1 /*
2  * Procedures for interfacing to Open Firmware.
3  *
4  * Paul Mackerras	August 1996.
5  * Copyright (C) 1996-2005 Paul Mackerras.
6  *
7  *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8  *    {engebret|bergner}@us.ibm.com
9  *
10  *      This program is free software; you can redistribute it and/or
11  *      modify it under the terms of the GNU General Public License
12  *      as published by the Free Software Foundation; either version
13  *      2 of the License, or (at your option) any later version.
14  */
15 
16 #undef DEBUG_PROM
17 
18 /* we cannot use FORTIFY as it brings in new symbols */
19 #define __NO_FORTIFY
20 
21 #include <stdarg.h>
22 #include <linux/kernel.h>
23 #include <linux/string.h>
24 #include <linux/init.h>
25 #include <linux/threads.h>
26 #include <linux/spinlock.h>
27 #include <linux/types.h>
28 #include <linux/pci.h>
29 #include <linux/proc_fs.h>
30 #include <linux/delay.h>
31 #include <linux/initrd.h>
32 #include <linux/bitops.h>
33 #include <asm/prom.h>
34 #include <asm/rtas.h>
35 #include <asm/page.h>
36 #include <asm/processor.h>
37 #include <asm/irq.h>
38 #include <asm/io.h>
39 #include <asm/smp.h>
40 #include <asm/mmu.h>
41 #include <asm/pgtable.h>
42 #include <asm/iommu.h>
43 #include <asm/btext.h>
44 #include <asm/sections.h>
45 #include <asm/machdep.h>
46 #include <asm/asm-prototypes.h>
47 
48 #include <linux/linux_logo.h>
49 
50 /* All of prom_init bss lives here */
51 #define __prombss __section(.bss.prominit)
52 
53 /*
54  * Eventually bump that one up
55  */
56 #define DEVTREE_CHUNK_SIZE	0x100000
57 
58 /*
59  * This is the size of the local memory reserve map that gets copied
60  * into the boot params passed to the kernel. That size is totally
61  * flexible as the kernel just reads the list until it encounters an
62  * entry with size 0, so it can be changed without breaking binary
63  * compatibility
64  */
65 #define MEM_RESERVE_MAP_SIZE	8
66 
67 /*
68  * prom_init() is called very early on, before the kernel text
69  * and data have been mapped to KERNELBASE.  At this point the code
70  * is running at whatever address it has been loaded at.
71  * On ppc32 we compile with -mrelocatable, which means that references
72  * to extern and static variables get relocated automatically.
73  * ppc64 objects are always relocatable, we just need to relocate the
74  * TOC.
75  *
76  * Because OF may have mapped I/O devices into the area starting at
77  * KERNELBASE, particularly on CHRP machines, we can't safely call
78  * OF once the kernel has been mapped to KERNELBASE.  Therefore all
79  * OF calls must be done within prom_init().
80  *
81  * ADDR is used in calls to call_prom.  The 4th and following
82  * arguments to call_prom should be 32-bit values.
83  * On ppc64, 64 bit values are truncated to 32 bits (and
84  * fortunately don't get interpreted as two arguments).
85  */
86 #define ADDR(x)		(u32)(unsigned long)(x)
87 
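/*
 * For illustration, a typical use of ADDR() is passing a constant string
 * (or a buffer pointer) as a 32-bit cell to call_prom(), e.g.
 *
 *	call_prom("finddevice", 1, 1, ADDR("/chosen"));
 *
 * as done in prom_init_client_services() further down.
 */
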
88 #ifdef CONFIG_PPC64
89 #define OF_WORKAROUNDS	0
90 #else
91 #define OF_WORKAROUNDS	of_workarounds
92 static int of_workarounds __prombss;
93 #endif
94 
95 #define OF_WA_CLAIM	1	/* do phys/virt claim separately, then map */
96 #define OF_WA_LONGTRAIL	2	/* work around longtrail bugs */
97 
98 #define PROM_BUG() do {						\
99         prom_printf("kernel BUG at %s line 0x%x!\n",		\
100 		    __FILE__, __LINE__);			\
101         __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR);	\
102 } while (0)
103 
104 #ifdef DEBUG_PROM
105 #define prom_debug(x...)	prom_printf(x)
106 #else
107 #define prom_debug(x...)	do { } while (0)
108 #endif
109 
110 
111 typedef u32 prom_arg_t;
112 
113 struct prom_args {
114         __be32 service;
115         __be32 nargs;
116         __be32 nret;
117         __be32 args[10];
118 };
119 
120 struct prom_t {
121 	ihandle root;
122 	phandle chosen;
123 	int cpu;
124 	ihandle stdout;
125 	ihandle mmumap;
126 	ihandle memory;
127 };
128 
129 struct mem_map_entry {
130 	__be64	base;
131 	__be64	size;
132 };
133 
134 typedef __be32 cell_t;
135 
136 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
137 		    unsigned long r6, unsigned long r7, unsigned long r8,
138 		    unsigned long r9);
139 
140 #ifdef CONFIG_PPC64
141 extern int enter_prom(struct prom_args *args, unsigned long entry);
142 #else
143 static inline int enter_prom(struct prom_args *args, unsigned long entry)
144 {
145 	return ((int (*)(struct prom_args *))entry)(args);
146 }
147 #endif
148 
149 extern void copy_and_flush(unsigned long dest, unsigned long src,
150 			   unsigned long size, unsigned long offset);
151 
152 /* prom structure */
153 static struct prom_t __prombss prom;
154 
155 static unsigned long __prombss prom_entry;
156 
157 #define PROM_SCRATCH_SIZE 256
158 
159 static char __prombss of_stdout_device[256];
160 static char __prombss prom_scratch[PROM_SCRATCH_SIZE];
161 
162 static unsigned long __prombss dt_header_start;
163 static unsigned long __prombss dt_struct_start, dt_struct_end;
164 static unsigned long __prombss dt_string_start, dt_string_end;
165 
166 static unsigned long __prombss prom_initrd_start, prom_initrd_end;
167 
168 #ifdef CONFIG_PPC64
169 static int __prombss prom_iommu_force_on;
170 static int __prombss prom_iommu_off;
171 static unsigned long __prombss prom_tce_alloc_start;
172 static unsigned long __prombss prom_tce_alloc_end;
173 #endif
174 
175 #ifdef CONFIG_PPC_PSERIES
176 static bool __prombss prom_radix_disable;
177 #endif
178 
179 struct platform_support {
180 	bool hash_mmu;
181 	bool radix_mmu;
182 	bool radix_gtse;
183 	bool xive;
184 };
185 
186 /* Platform codes are now obsolete in the kernel. They are only used within
187  * this file and will ultimately go away too. Feel free to change them if
188  * you need to; they are not shared with anything outside of this file anymore.
189  */
190 #define PLATFORM_PSERIES	0x0100
191 #define PLATFORM_PSERIES_LPAR	0x0101
192 #define PLATFORM_LPAR		0x0001
193 #define PLATFORM_POWERMAC	0x0400
194 #define PLATFORM_GENERIC	0x0500
195 
196 static int __prombss of_platform;
197 
198 static char __prombss prom_cmd_line[COMMAND_LINE_SIZE];
199 
200 static unsigned long __prombss prom_memory_limit;
201 
202 static unsigned long __prombss alloc_top;
203 static unsigned long __prombss alloc_top_high;
204 static unsigned long __prombss alloc_bottom;
205 static unsigned long __prombss rmo_top;
206 static unsigned long __prombss ram_top;
207 
208 static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE];
209 static int __prombss mem_reserve_cnt;
210 
211 static cell_t __prombss regbuf[1024];
212 
213 static bool  __prombss rtas_has_query_cpu_stopped;
214 
215 
216 /*
217  * Error results ... some OF calls will return "-1" on error, some
218  * will return 0, some will return either. To simplify, here are
219  * macros to use with any ihandle or phandle return value to check if
220  * it is valid
221  */
222 
223 #define PROM_ERROR		(-1u)
224 #define PHANDLE_VALID(p)	((p) != 0 && (p) != PROM_ERROR)
225 #define IHANDLE_VALID(i)	((i) != 0 && (i) != PROM_ERROR)
226 
227 
228 /* This is the one and *ONLY* place where we actually call open
229  * firmware.
230  */
231 
232 static int __init call_prom(const char *service, int nargs, int nret, ...)
233 {
234 	int i;
235 	struct prom_args args;
236 	va_list list;
237 
238 	args.service = cpu_to_be32(ADDR(service));
239 	args.nargs = cpu_to_be32(nargs);
240 	args.nret = cpu_to_be32(nret);
241 
242 	va_start(list, nret);
243 	for (i = 0; i < nargs; i++)
244 		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
245 	va_end(list);
246 
247 	for (i = 0; i < nret; i++)
248 		args.args[nargs+i] = 0;
249 
250 	if (enter_prom(&args, prom_entry) < 0)
251 		return PROM_ERROR;
252 
253 	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
254 }
255 
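/*
 * Illustrative use of the wrapper above: reading a property goes through
 * the "getprop" service with four input cells and one return cell, e.g.
 *
 *	call_prom("getprop", 4, 1, node, ADDR("device_type"),
 *		  ADDR(type), (u32)sizeof(type));
 *
 * which is what the prom_getprop() helper below wraps. The first return
 * cell is what call_prom() itself returns (or PROM_ERROR if entering OF
 * failed).
 */
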
256 static int __init call_prom_ret(const char *service, int nargs, int nret,
257 				prom_arg_t *rets, ...)
258 {
259 	int i;
260 	struct prom_args args;
261 	va_list list;
262 
263 	args.service = cpu_to_be32(ADDR(service));
264 	args.nargs = cpu_to_be32(nargs);
265 	args.nret = cpu_to_be32(nret);
266 
267 	va_start(list, rets);
268 	for (i = 0; i < nargs; i++)
269 		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
270 	va_end(list);
271 
272 	for (i = 0; i < nret; i++)
273 		args.args[nargs+i] = 0;
274 
275 	if (enter_prom(&args, prom_entry) < 0)
276 		return PROM_ERROR;
277 
278 	if (rets != NULL)
279 		for (i = 1; i < nret; ++i)
280 			rets[i-1] = be32_to_cpu(args.args[nargs+i]);
281 
282 	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
283 }
284 
285 
286 static void __init prom_print(const char *msg)
287 {
288 	const char *p, *q;
289 
290 	if (prom.stdout == 0)
291 		return;
292 
293 	for (p = msg; *p != 0; p = q) {
294 		for (q = p; *q != 0 && *q != '\n'; ++q)
295 			;
296 		if (q > p)
297 			call_prom("write", 3, 1, prom.stdout, p, q - p);
298 		if (*q == 0)
299 			break;
300 		++q;
301 		call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
302 	}
303 }
304 
305 
306 /*
307  * Both prom_print_hex and prom_print_dec take an unsigned long as input so
308  * that we do not need __udivdi3 or __umoddi3 on 32-bit.
309  */
310 static void __init prom_print_hex(unsigned long val)
311 {
312 	int i, nibbles = sizeof(val)*2;
313 	char buf[sizeof(val)*2+1];
314 
315 	for (i = nibbles-1;  i >= 0;  i--) {
316 		buf[i] = (val & 0xf) + '0';
317 		if (buf[i] > '9')
318 			buf[i] += ('a'-'0'-10);
319 		val >>= 4;
320 	}
321 	buf[nibbles] = '\0';
322 	call_prom("write", 3, 1, prom.stdout, buf, nibbles);
323 }
324 
325 /* max number of decimal digits in an unsigned long */
326 #define UL_DIGITS 21
327 static void __init prom_print_dec(unsigned long val)
328 {
329 	int i, size;
330 	char buf[UL_DIGITS+1];
331 
332 	for (i = UL_DIGITS-1; i >= 0;  i--) {
333 		buf[i] = (val % 10) + '0';
334 		val = val/10;
335 		if (val == 0)
336 			break;
337 	}
338 	/* shift stuff down */
339 	size = UL_DIGITS - i;
340 	call_prom("write", 3, 1, prom.stdout, buf+i, size);
341 }
342 
343 __printf(1, 2)
344 static void __init prom_printf(const char *format, ...)
345 {
346 	const char *p, *q, *s;
347 	va_list args;
348 	unsigned long v;
349 	long vs;
350 	int n = 0;
351 
352 	va_start(args, format);
353 	for (p = format; *p != 0; p = q) {
354 		for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
355 			;
356 		if (q > p)
357 			call_prom("write", 3, 1, prom.stdout, p, q - p);
358 		if (*q == 0)
359 			break;
360 		if (*q == '\n') {
361 			++q;
362 			call_prom("write", 3, 1, prom.stdout,
363 				  ADDR("\r\n"), 2);
364 			continue;
365 		}
366 		++q;
367 		if (*q == 0)
368 			break;
369 		while (*q == 'l') {
370 			++q;
371 			++n;
372 		}
373 		switch (*q) {
374 		case 's':
375 			++q;
376 			s = va_arg(args, const char *);
377 			prom_print(s);
378 			break;
379 		case 'x':
380 			++q;
381 			switch (n) {
382 			case 0:
383 				v = va_arg(args, unsigned int);
384 				break;
385 			case 1:
386 				v = va_arg(args, unsigned long);
387 				break;
388 			case 2:
389 			default:
390 				v = va_arg(args, unsigned long long);
391 				break;
392 			}
393 			prom_print_hex(v);
394 			break;
395 		case 'u':
396 			++q;
397 			switch (n) {
398 			case 0:
399 				v = va_arg(args, unsigned int);
400 				break;
401 			case 1:
402 				v = va_arg(args, unsigned long);
403 				break;
404 			case 2:
405 			default:
406 				v = va_arg(args, unsigned long long);
407 				break;
408 			}
409 			prom_print_dec(v);
410 			break;
411 		case 'd':
412 			++q;
413 			switch (n) {
414 			case 0:
415 				vs = va_arg(args, int);
416 				break;
417 			case 1:
418 				vs = va_arg(args, long);
419 				break;
420 			case 2:
421 			default:
422 				vs = va_arg(args, long long);
423 				break;
424 			}
425 			if (vs < 0) {
426 				prom_print("-");
427 				vs = -vs;
428 			}
429 			prom_print_dec(vs);
430 			break;
431 		}
432 	}
433 	va_end(args);
434 }
435 
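/*
 * Note on the minimal format support above: prom_printf() only handles
 * %s, %x, %u and %d, optionally with one or two 'l' modifiers (e.g. %lx,
 * %llu); field widths and other conversions are not implemented, so
 * callers stick to these forms.
 */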
436 
437 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
438 				unsigned long align)
439 {
440 
441 	if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
442 		/*
443 		 * Old OF requires we claim physical and virtual separately
444 		 * and then map explicitly (assuming virtual mode)
445 		 */
446 		int ret;
447 		prom_arg_t result;
448 
449 		ret = call_prom_ret("call-method", 5, 2, &result,
450 				    ADDR("claim"), prom.memory,
451 				    align, size, virt);
452 		if (ret != 0 || result == -1)
453 			return -1;
454 		ret = call_prom_ret("call-method", 5, 2, &result,
455 				    ADDR("claim"), prom.mmumap,
456 				    align, size, virt);
457 		if (ret != 0) {
458 			call_prom("call-method", 4, 1, ADDR("release"),
459 				  prom.memory, size, virt);
460 			return -1;
461 		}
462 		/* the 0x12 is M (coherence) + PP == read/write */
463 		call_prom("call-method", 6, 1,
464 			  ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
465 		return virt;
466 	}
467 	return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
468 			 (prom_arg_t)align);
469 }
470 
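/*
 * Callers treat a prom_claim() return of 0 or PROM_ERROR as failure.
 * An align of 0 asks OF to claim exactly at the requested address, which
 * is how alloc_up()/alloc_down() below probe candidate bases, e.g.
 *
 *	addr = (unsigned long)prom_claim(base, size, 0);
 */
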
471 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
472 {
473 	prom_print(reason);
474 	/* Do not call exit because it clears the screen on pmac;
475 	 * it also causes some sort of double-fault on early pmacs */
476 	if (of_platform == PLATFORM_POWERMAC)
477 		asm("trap\n");
478 
479 	/* ToDo: should put up an SRC here on pSeries */
480 	call_prom("exit", 0, 0);
481 
482 	for (;;)			/* should never get here */
483 		;
484 }
485 
486 
487 static int __init prom_next_node(phandle *nodep)
488 {
489 	phandle node;
490 
491 	if ((node = *nodep) != 0
492 	    && (*nodep = call_prom("child", 1, 1, node)) != 0)
493 		return 1;
494 	if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
495 		return 1;
496 	for (;;) {
497 		if ((node = call_prom("parent", 1, 1, node)) == 0)
498 			return 0;
499 		if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
500 			return 1;
501 	}
502 }
503 
504 static inline int prom_getprop(phandle node, const char *pname,
505 			       void *value, size_t valuelen)
506 {
507 	return call_prom("getprop", 4, 1, node, ADDR(pname),
508 			 (u32)(unsigned long) value, (u32) valuelen);
509 }
510 
511 static inline int prom_getproplen(phandle node, const char *pname)
512 {
513 	return call_prom("getproplen", 2, 1, node, ADDR(pname));
514 }
515 
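/*
 * Typical usage of the helpers above, as seen throughout this file:
 *
 *	char type[64];
 *	type[0] = 0;
 *	prom_getprop(node, "device_type", type, sizeof(type));
 *
 * The return value is the property length, or PROM_ERROR if the property
 * does not exist (prom_getproplen() behaves the same way).
 */
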
516 static void add_string(char **str, const char *q)
517 {
518 	char *p = *str;
519 
520 	while (*q)
521 		*p++ = *q++;
522 	*p++ = ' ';
523 	*str = p;
524 }
525 
526 static char *tohex(unsigned int x)
527 {
528 	static const char digits[] __initconst = "0123456789abcdef";
529 	static char result[9] __prombss;
530 	int i;
531 
532 	result[8] = 0;
533 	i = 8;
534 	do {
535 		--i;
536 		result[i] = digits[x & 0xf];
537 		x >>= 4;
538 	} while (x != 0 && i > 0);
539 	return &result[i];
540 }
541 
542 static int __init prom_setprop(phandle node, const char *nodename,
543 			       const char *pname, void *value, size_t valuelen)
544 {
545 	char cmd[256], *p;
546 
547 	if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
548 		return call_prom("setprop", 4, 1, node, ADDR(pname),
549 				 (u32)(unsigned long) value, (u32) valuelen);
550 
551 	/* gah... setprop doesn't work on longtrail, have to use interpret */
552 	p = cmd;
553 	add_string(&p, "dev");
554 	add_string(&p, nodename);
555 	add_string(&p, tohex((u32)(unsigned long) value));
556 	add_string(&p, tohex(valuelen));
557 	add_string(&p, tohex(ADDR(pname)));
558 	add_string(&p, tohex(strlen(pname)));
559 	add_string(&p, "property");
560 	*p = 0;
561 	return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
562 }
563 
564 /* We can't use the standard versions because of relocation headaches. */
565 #define isxdigit(c)	(('0' <= (c) && (c) <= '9') \
566 			 || ('a' <= (c) && (c) <= 'f') \
567 			 || ('A' <= (c) && (c) <= 'F'))
568 
569 #define isdigit(c)	('0' <= (c) && (c) <= '9')
570 #define islower(c)	('a' <= (c) && (c) <= 'z')
571 #define toupper(c)	(islower(c) ? ((c) - 'a' + 'A') : (c))
572 
573 static unsigned long prom_strtoul(const char *cp, const char **endp)
574 {
575 	unsigned long result = 0, base = 10, value;
576 
577 	if (*cp == '0') {
578 		base = 8;
579 		cp++;
580 		if (toupper(*cp) == 'X') {
581 			cp++;
582 			base = 16;
583 		}
584 	}
585 
586 	while (isxdigit(*cp) &&
587 	       (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
588 		result = result * base + value;
589 		cp++;
590 	}
591 
592 	if (endp)
593 		*endp = cp;
594 
595 	return result;
596 }
597 
598 static unsigned long prom_memparse(const char *ptr, const char **retptr)
599 {
600 	unsigned long ret = prom_strtoul(ptr, retptr);
601 	int shift = 0;
602 
603 	/*
604 	 * We can't use a switch here because GCC *may* generate a
605 	 * jump table which won't work, because we're not running at
606 	 * the address we're linked at.
607 	 */
608 	if ('G' == **retptr || 'g' == **retptr)
609 		shift = 30;
610 
611 	if ('M' == **retptr || 'm' == **retptr)
612 		shift = 20;
613 
614 	if ('K' == **retptr || 'k' == **retptr)
615 		shift = 10;
616 
617 	if (shift) {
618 		ret <<= shift;
619 		(*retptr)++;
620 	}
621 
622 	return ret;
623 }
624 
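/*
 * For example, "mem=512M" is parsed by prom_memparse() into
 * 512 << 20 = 0x20000000; on ppc64 the caller below then rounds the
 * resulting limit up to a 16MB multiple.
 */
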
625 /*
626  * Early parsing of the command line passed to the kernel, used for
627  * "mem=x" and the options that affect the iommu
628  */
629 static void __init early_cmdline_parse(void)
630 {
631 	const char *opt;
632 
633 	char *p;
634 	int l __maybe_unused = 0;
635 
636 	prom_cmd_line[0] = 0;
637 	p = prom_cmd_line;
638 	if ((long)prom.chosen > 0)
639 		l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
640 #ifdef CONFIG_CMDLINE
641 	if (l <= 0 || p[0] == '\0') /* dbl check */
642 		strlcpy(prom_cmd_line,
643 			CONFIG_CMDLINE, sizeof(prom_cmd_line));
644 #endif /* CONFIG_CMDLINE */
645 	prom_printf("command line: %s\n", prom_cmd_line);
646 
647 #ifdef CONFIG_PPC64
648 	opt = strstr(prom_cmd_line, "iommu=");
649 	if (opt) {
650 		prom_printf("iommu opt is: %s\n", opt);
651 		opt += 6;
652 		while (*opt && *opt == ' ')
653 			opt++;
654 		if (!strncmp(opt, "off", 3))
655 			prom_iommu_off = 1;
656 		else if (!strncmp(opt, "force", 5))
657 			prom_iommu_force_on = 1;
658 	}
659 #endif
660 	opt = strstr(prom_cmd_line, "mem=");
661 	if (opt) {
662 		opt += 4;
663 		prom_memory_limit = prom_memparse(opt, (const char **)&opt);
664 #ifdef CONFIG_PPC64
665 		/* Align to 16 MB == size of ppc64 large page */
666 		prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
667 #endif
668 	}
669 
670 #ifdef CONFIG_PPC_PSERIES
671 	prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
672 	opt = strstr(prom_cmd_line, "disable_radix");
673 	if (opt) {
674 		opt += 13;
675 		if (*opt && *opt == '=') {
676 			bool val;
677 
678 			if (kstrtobool(++opt, &val))
679 				prom_radix_disable = false;
680 			else
681 				prom_radix_disable = val;
682 		} else
683 			prom_radix_disable = true;
684 	}
685 	if (prom_radix_disable)
686 		prom_debug("Radix disabled from cmdline\n");
687 #endif /* CONFIG_PPC_PSERIES */
688 }
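
/*
 * To illustrate, the options recognised above are "mem=<size>[KMG]",
 * "iommu=off" / "iommu=force" (ppc64 only) and, on pseries,
 * "disable_radix" either bare or as "disable_radix=<bool>".
 */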
689 
690 #ifdef CONFIG_PPC_PSERIES
691 /*
692  * The architecture vector has an array of PVR mask/value pairs,
693  * followed by # option vectors - 1, followed by the option vectors.
694  *
695  * See prom.h for the definition of the bits specified in the
696  * architecture vector.
697  */
698 
699 /* Firmware expects the value to be n - 1, where n is the # of vectors */
700 #define NUM_VECTORS(n)		((n) - 1)
701 
702 /*
703  * Firmware expects 1 + n - 2, where n is the length of the option vector in
704  * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
705  */
706 #define VECTOR_LENGTH(n)	(1 + (n) - 2)
707 
708 struct option_vector1 {
709 	u8 byte1;
710 	u8 arch_versions;
711 	u8 arch_versions3;
712 } __packed;
713 
714 struct option_vector2 {
715 	u8 byte1;
716 	__be16 reserved;
717 	__be32 real_base;
718 	__be32 real_size;
719 	__be32 virt_base;
720 	__be32 virt_size;
721 	__be32 load_base;
722 	__be32 min_rma;
723 	__be32 min_load;
724 	u8 min_rma_percent;
725 	u8 max_pft_size;
726 } __packed;
727 
728 struct option_vector3 {
729 	u8 byte1;
730 	u8 byte2;
731 } __packed;
732 
733 struct option_vector4 {
734 	u8 byte1;
735 	u8 min_vp_cap;
736 } __packed;
737 
738 struct option_vector5 {
739 	u8 byte1;
740 	u8 byte2;
741 	u8 byte3;
742 	u8 cmo;
743 	u8 associativity;
744 	u8 bin_opts;
745 	u8 micro_checkpoint;
746 	u8 reserved0;
747 	__be32 max_cpus;
748 	__be16 papr_level;
749 	__be16 reserved1;
750 	u8 platform_facilities;
751 	u8 reserved2;
752 	__be16 reserved3;
753 	u8 subprocessors;
754 	u8 byte22;
755 	u8 intarch;
756 	u8 mmu;
757 	u8 hash_ext;
758 	u8 radix_ext;
759 } __packed;
760 
761 struct option_vector6 {
762 	u8 reserved;
763 	u8 secondary_pteg;
764 	u8 os_name;
765 } __packed;
766 
767 struct ibm_arch_vec {
768 	struct { u32 mask, val; } pvrs[12];
769 
770 	u8 num_vectors;
771 
772 	u8 vec1_len;
773 	struct option_vector1 vec1;
774 
775 	u8 vec2_len;
776 	struct option_vector2 vec2;
777 
778 	u8 vec3_len;
779 	struct option_vector3 vec3;
780 
781 	u8 vec4_len;
782 	struct option_vector4 vec4;
783 
784 	u8 vec5_len;
785 	struct option_vector5 vec5;
786 
787 	u8 vec6_len;
788 	struct option_vector6 vec6;
789 } __packed;
790 
791 static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
792 	.pvrs = {
793 		{
794 			.mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
795 			.val  = cpu_to_be32(0x003a0000),
796 		},
797 		{
798 			.mask = cpu_to_be32(0xffff0000), /* POWER6 */
799 			.val  = cpu_to_be32(0x003e0000),
800 		},
801 		{
802 			.mask = cpu_to_be32(0xffff0000), /* POWER7 */
803 			.val  = cpu_to_be32(0x003f0000),
804 		},
805 		{
806 			.mask = cpu_to_be32(0xffff0000), /* POWER8E */
807 			.val  = cpu_to_be32(0x004b0000),
808 		},
809 		{
810 			.mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
811 			.val  = cpu_to_be32(0x004c0000),
812 		},
813 		{
814 			.mask = cpu_to_be32(0xffff0000), /* POWER8 */
815 			.val  = cpu_to_be32(0x004d0000),
816 		},
817 		{
818 			.mask = cpu_to_be32(0xffff0000), /* POWER9 */
819 			.val  = cpu_to_be32(0x004e0000),
820 		},
821 		{
822 			.mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
823 			.val  = cpu_to_be32(0x0f000005),
824 		},
825 		{
826 			.mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
827 			.val  = cpu_to_be32(0x0f000004),
828 		},
829 		{
830 			.mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
831 			.val  = cpu_to_be32(0x0f000003),
832 		},
833 		{
834 			.mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
835 			.val  = cpu_to_be32(0x0f000002),
836 		},
837 		{
838 			.mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
839 			.val  = cpu_to_be32(0x0f000001),
840 		},
841 	},
842 
843 	.num_vectors = NUM_VECTORS(6),
844 
845 	.vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
846 	.vec1 = {
847 		.byte1 = 0,
848 		.arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
849 				 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
850 		.arch_versions3 = OV1_PPC_3_00,
851 	},
852 
853 	.vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
854 	/* option vector 2: Open Firmware options supported */
855 	.vec2 = {
856 		.byte1 = OV2_REAL_MODE,
857 		.reserved = 0,
858 		.real_base = cpu_to_be32(0xffffffff),
859 		.real_size = cpu_to_be32(0xffffffff),
860 		.virt_base = cpu_to_be32(0xffffffff),
861 		.virt_size = cpu_to_be32(0xffffffff),
862 		.load_base = cpu_to_be32(0xffffffff),
863 		.min_rma = cpu_to_be32(512),		/* 512MB min RMA */
864 		.min_load = cpu_to_be32(0xffffffff),	/* full client load */
865 		.min_rma_percent = 0,	/* min RMA percentage of total RAM */
866 		.max_pft_size = 48,	/* max log_2(hash table size) */
867 	},
868 
869 	.vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
870 	/* option vector 3: processor options supported */
871 	.vec3 = {
872 		.byte1 = 0,			/* don't ignore, don't halt */
873 		.byte2 = OV3_FP | OV3_VMX | OV3_DFP,
874 	},
875 
876 	.vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
877 	/* option vector 4: IBM PAPR implementation */
878 	.vec4 = {
879 		.byte1 = 0,			/* don't halt */
880 		.min_vp_cap = OV4_MIN_ENT_CAP,	/* minimum VP entitled capacity */
881 	},
882 
883 	.vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
884 	/* option vector 5: PAPR/OF options */
885 	.vec5 = {
886 		.byte1 = 0,				/* don't ignore, don't halt */
887 		.byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
888 		OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
889 #ifdef CONFIG_PCI_MSI
890 		/* PCIe/MSI support.  Without MSI full PCIe is not supported */
891 		OV5_FEAT(OV5_MSI),
892 #else
893 		0,
894 #endif
895 		.byte3 = 0,
896 		.cmo =
897 #ifdef CONFIG_PPC_SMLPAR
898 		OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
899 #else
900 		0,
901 #endif
902 		.associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
903 		.bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
904 		.micro_checkpoint = 0,
905 		.reserved0 = 0,
906 		.max_cpus = cpu_to_be32(NR_CPUS),	/* number of cores supported */
907 		.papr_level = 0,
908 		.reserved1 = 0,
909 		.platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
910 		.reserved2 = 0,
911 		.reserved3 = 0,
912 		.subprocessors = 1,
913 		.byte22 = OV5_FEAT(OV5_DRMEM_V2),
914 		.intarch = 0,
915 		.mmu = 0,
916 		.hash_ext = 0,
917 		.radix_ext = 0,
918 	},
919 
920 	/* option vector 6: IBM PAPR hints */
921 	.vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
922 	.vec6 = {
923 		.reserved = 0,
924 		.secondary_pteg = 0,
925 		.os_name = OV6_LINUX,
926 	},
927 };
928 
929 static struct ibm_arch_vec __prombss ibm_architecture_vec  ____cacheline_aligned;
930 
931 /* Old method - ELF header with PT_NOTE sections only works on BE */
932 #ifdef __BIG_ENDIAN__
933 static const struct fake_elf {
934 	Elf32_Ehdr	elfhdr;
935 	Elf32_Phdr	phdr[2];
936 	struct chrpnote {
937 		u32	namesz;
938 		u32	descsz;
939 		u32	type;
940 		char	name[8];	/* "PowerPC" */
941 		struct chrpdesc {
942 			u32	real_mode;
943 			u32	real_base;
944 			u32	real_size;
945 			u32	virt_base;
946 			u32	virt_size;
947 			u32	load_base;
948 		} chrpdesc;
949 	} chrpnote;
950 	struct rpanote {
951 		u32	namesz;
952 		u32	descsz;
953 		u32	type;
954 		char	name[24];	/* "IBM,RPA-Client-Config" */
955 		struct rpadesc {
956 			u32	lpar_affinity;
957 			u32	min_rmo_size;
958 			u32	min_rmo_percent;
959 			u32	max_pft_size;
960 			u32	splpar;
961 			u32	min_load;
962 			u32	new_mem_def;
963 			u32	ignore_me;
964 		} rpadesc;
965 	} rpanote;
966 } fake_elf __initconst = {
967 	.elfhdr = {
968 		.e_ident = { 0x7f, 'E', 'L', 'F',
969 			     ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
970 		.e_type = ET_EXEC,	/* yeah right */
971 		.e_machine = EM_PPC,
972 		.e_version = EV_CURRENT,
973 		.e_phoff = offsetof(struct fake_elf, phdr),
974 		.e_phentsize = sizeof(Elf32_Phdr),
975 		.e_phnum = 2
976 	},
977 	.phdr = {
978 		[0] = {
979 			.p_type = PT_NOTE,
980 			.p_offset = offsetof(struct fake_elf, chrpnote),
981 			.p_filesz = sizeof(struct chrpnote)
982 		}, [1] = {
983 			.p_type = PT_NOTE,
984 			.p_offset = offsetof(struct fake_elf, rpanote),
985 			.p_filesz = sizeof(struct rpanote)
986 		}
987 	},
988 	.chrpnote = {
989 		.namesz = sizeof("PowerPC"),
990 		.descsz = sizeof(struct chrpdesc),
991 		.type = 0x1275,
992 		.name = "PowerPC",
993 		.chrpdesc = {
994 			.real_mode = ~0U,	/* ~0 means "don't care" */
995 			.real_base = ~0U,
996 			.real_size = ~0U,
997 			.virt_base = ~0U,
998 			.virt_size = ~0U,
999 			.load_base = ~0U
1000 		},
1001 	},
1002 	.rpanote = {
1003 		.namesz = sizeof("IBM,RPA-Client-Config"),
1004 		.descsz = sizeof(struct rpadesc),
1005 		.type = 0x12759999,
1006 		.name = "IBM,RPA-Client-Config",
1007 		.rpadesc = {
1008 			.lpar_affinity = 0,
1009 			.min_rmo_size = 64,	/* in megabytes */
1010 			.min_rmo_percent = 0,
1011 			.max_pft_size = 48,	/* 2^48 bytes max PFT size */
1012 			.splpar = 1,
1013 			.min_load = ~0U,
1014 			.new_mem_def = 0
1015 		}
1016 	}
1017 };
1018 #endif /* __BIG_ENDIAN__ */
1019 
1020 static int __init prom_count_smt_threads(void)
1021 {
1022 	phandle node;
1023 	char type[64];
1024 	unsigned int plen;
1025 
1026 	/* Pick up the first CPU node we can find */
1027 	for (node = 0; prom_next_node(&node); ) {
1028 		type[0] = 0;
1029 		prom_getprop(node, "device_type", type, sizeof(type));
1030 
1031 		if (strcmp(type, "cpu"))
1032 			continue;
1033 		/*
1034 		 * There is an entry for each smt thread, each entry being
1035 		 * 4 bytes long.  All cpus should have the same number of
1036 		 * smt threads, so return after finding the first.
1037 		 */
1038 		plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
1039 		if (plen == PROM_ERROR)
1040 			break;
1041 		plen >>= 2;
1042 		prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
1043 
1044 		/* Sanity check */
1045 		if (plen < 1 || plen > 64) {
1046 			prom_printf("Threads per core %lu out of bounds, assuming 1\n",
1047 				    (unsigned long)plen);
1048 			return 1;
1049 		}
1050 		return plen;
1051 	}
1052 	prom_debug("No threads found, assuming 1 per core\n");
1053 
1054 	return 1;
1055 
1056 }
1057 
1058 static void __init prom_parse_mmu_model(u8 val,
1059 					struct platform_support *support)
1060 {
1061 	switch (val) {
1062 	case OV5_FEAT(OV5_MMU_DYNAMIC):
1063 	case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
1064 		prom_debug("MMU - either supported\n");
1065 		support->radix_mmu = !prom_radix_disable;
1066 		support->hash_mmu = true;
1067 		break;
1068 	case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
1069 		prom_debug("MMU - radix only\n");
1070 		if (prom_radix_disable) {
1071 			/*
1072 			 * If we __have__ to do radix, we're better off ignoring
1073 			 * the command line rather than not booting.
1074 			 */
1075 			prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1076 		}
1077 		support->radix_mmu = true;
1078 		break;
1079 	case OV5_FEAT(OV5_MMU_HASH):
1080 		prom_debug("MMU - hash only\n");
1081 		support->hash_mmu = true;
1082 		break;
1083 	default:
1084 		prom_debug("Unknown mmu support option: 0x%x\n", val);
1085 		break;
1086 	}
1087 }
1088 
1089 static void __init prom_parse_xive_model(u8 val,
1090 					 struct platform_support *support)
1091 {
1092 	switch (val) {
1093 	case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
1094 		prom_debug("XIVE - either mode supported\n");
1095 		support->xive = true;
1096 		break;
1097 	case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
1098 		prom_debug("XIVE - exploitation mode supported\n");
1099 		support->xive = true;
1100 		break;
1101 	case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
1102 		prom_debug("XIVE - legacy mode supported\n");
1103 		break;
1104 	default:
1105 		prom_debug("Unknown xive support option: 0x%x\n", val);
1106 		break;
1107 	}
1108 }
1109 
1110 static void __init prom_parse_platform_support(u8 index, u8 val,
1111 					       struct platform_support *support)
1112 {
1113 	switch (index) {
1114 	case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
1115 		prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
1116 		break;
1117 	case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
1118 		if (val & OV5_FEAT(OV5_RADIX_GTSE)) {
1119 			prom_debug("Radix - GTSE supported\n");
1120 			support->radix_gtse = true;
1121 		}
1122 		break;
1123 	case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
1124 		prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
1125 				      support);
1126 		break;
1127 	}
1128 }
1129 
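/*
 * The ibm,arch-vec-5-platform-support property parsed below is, as the
 * loop assumes, a list of (option vector 5 byte index, supported value)
 * pairs; e.g. the pair indexed by OV5_INDX(OV5_MMU_SUPPORT) describes
 * which MMU models the platform can provide.
 */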
1130 static void __init prom_check_platform_support(void)
1131 {
1132 	struct platform_support supported = {
1133 		.hash_mmu = false,
1134 		.radix_mmu = false,
1135 		.radix_gtse = false,
1136 		.xive = false
1137 	};
1138 	int prop_len = prom_getproplen(prom.chosen,
1139 				       "ibm,arch-vec-5-platform-support");
1140 
1141 	/* First copy the architecture vec template */
1142 	ibm_architecture_vec = ibm_architecture_vec_template;
1143 
1144 	if (prop_len > 1) {
1145 		int i;
1146 		u8 vec[8];
1147 		prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1148 			   prop_len);
1149 		if (prop_len > sizeof(vec))
1150 			prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
1151 				    prop_len);
1152 		prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
1153 			     &vec, sizeof(vec));
1154 		for (i = 0; i < sizeof(vec); i += 2) {
1155 			prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
1156 								  , vec[i]
1157 								  , vec[i + 1]);
1158 			prom_parse_platform_support(vec[i], vec[i + 1],
1159 						    &supported);
1160 		}
1161 	}
1162 
1163 	if (supported.radix_mmu && supported.radix_gtse &&
1164 	    IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
1165 		/* Radix preferred - but we require GTSE for now */
1166 		prom_debug("Asking for radix with GTSE\n");
1167 		ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
1168 		ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE);
1169 	} else if (supported.hash_mmu) {
1170 		/* Default to hash mmu (if we can) */
1171 		prom_debug("Asking for hash\n");
1172 		ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
1173 	} else {
1174 		/* We're probably on a legacy hypervisor */
1175 		prom_debug("Assuming legacy hash support\n");
1176 	}
1177 
1178 	if (supported.xive) {
1179 		prom_debug("Asking for XIVE\n");
1180 		ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
1181 	}
1182 }
1183 
1184 static void __init prom_send_capabilities(void)
1185 {
1186 	ihandle root;
1187 	prom_arg_t ret;
1188 	u32 cores;
1189 
1190 	/* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1191 	prom_check_platform_support();
1192 
1193 	root = call_prom("open", 1, 1, ADDR("/"));
1194 	if (root != 0) {
1195 		/* We need to tell the FW about the number of cores we support.
1196 		 *
1197 		 * To do that, we count the number of threads on the first core
1198 		 * (we assume this is the same for all cores) and use it to
1199 		 * divide NR_CPUS.
1200 		 */
1201 
1202 		cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
1203 		prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
1204 			    cores, NR_CPUS);
1205 
1206 		ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
1207 
1208 		/* try calling the ibm,client-architecture-support method */
1209 		prom_printf("Calling ibm,client-architecture-support...");
1210 		if (call_prom_ret("call-method", 3, 2, &ret,
1211 				  ADDR("ibm,client-architecture-support"),
1212 				  root,
1213 				  ADDR(&ibm_architecture_vec)) == 0) {
1214 			/* the call exists... */
1215 			if (ret)
1216 				prom_printf("\nWARNING: ibm,client-architecture"
1217 					    "-support call FAILED!\n");
1218 			call_prom("close", 1, 0, root);
1219 			prom_printf(" done\n");
1220 			return;
1221 		}
1222 		call_prom("close", 1, 0, root);
1223 		prom_printf(" not implemented\n");
1224 	}
1225 
1226 #ifdef __BIG_ENDIAN__
1227 	{
1228 		ihandle elfloader;
1229 
1230 		/* no ibm,client-architecture-support call, try the old way */
1231 		elfloader = call_prom("open", 1, 1,
1232 				      ADDR("/packages/elf-loader"));
1233 		if (elfloader == 0) {
1234 			prom_printf("couldn't open /packages/elf-loader\n");
1235 			return;
1236 		}
1237 		call_prom("call-method", 3, 1, ADDR("process-elf-header"),
1238 			  elfloader, ADDR(&fake_elf));
1239 		call_prom("close", 1, 0, elfloader);
1240 	}
1241 #endif /* __BIG_ENDIAN__ */
1242 }
1243 #endif /* CONFIG_PPC_PSERIES */
1244 
1245 /*
1246  * Memory allocation strategy... our layout is normally:
1247  *
1248  *  at 14MB or more we have vmlinux, then a gap and initrd.  In some
1249  *  rare cases, initrd might end up being before the kernel though.
1250  *  We assume this won't overwrite the final kernel at 0; we have no
1251  *  provision to handle that in this version, but it should hopefully
1252  *  never happen.
1253  *
1254  *  alloc_top is set to the top of RMO, and is eventually shrunk down
1255  *  if the TCEs overlap it.
1256  *
1257  *  alloc_bottom is set to the top of the kernel/initrd.
1258  *
1259  *  From there, allocations are done this way: rtas is allocated
1260  *  topmost, and the device-tree is allocated from the bottom. We try
1261  *  to grow the device-tree allocation as we progress. If we can't,
1262  *  then we fail; we don't currently have a facility to restart
1263  *  elsewhere, but that shouldn't be necessary.
1264  *
1265  *  Note that calls to reserve_mem have to be done explicitly, memory
1266  *  allocated with either alloc_up or alloc_down isn't automatically
1267  *  reserved.
1268  */
1269 
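/*
 * As a rough sketch of the strategy above (not an exact memory map),
 * with addresses growing upward:
 *
 *	ram_top / alloc_top_high   <- TCE tables carved out downward
 *	rmo_top / alloc_top        <- RTAS etc. allocated downward (alloc_down)
 *	alloc_bottom               -> flattened device-tree grows upward (alloc_up)
 *	0                          kernel (and usually initrd above it)
 */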
1270 
1271 /*
1272  * Allocates memory in the RMO upward from the kernel/initrd
1273  *
1274  * When align is 0, this is a special case: it means to allocate in place
1275  * at the current location of alloc_bottom or fail (that is, basically
1276  * extending the previous allocation). Used for the device-tree flattening.
1277  */
1278 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
1279 {
1280 	unsigned long base = alloc_bottom;
1281 	unsigned long addr = 0;
1282 
1283 	if (align)
1284 		base = _ALIGN_UP(base, align);
1285 	prom_debug("%s(%lx, %lx)\n", __func__, size, align);
1286 	if (ram_top == 0)
1287 		prom_panic("alloc_up() called with mem not initialized\n");
1288 
1289 	if (align)
1290 		base = _ALIGN_UP(alloc_bottom, align);
1291 	else
1292 		base = alloc_bottom;
1293 
1294 	for(; (base + size) <= alloc_top;
1295 	    base = _ALIGN_UP(base + 0x100000, align)) {
1296 		prom_debug("    trying: 0x%lx\n\r", base);
1297 		addr = (unsigned long)prom_claim(base, size, 0);
1298 		if (addr != PROM_ERROR && addr != 0)
1299 			break;
1300 		addr = 0;
1301 		if (align == 0)
1302 			break;
1303 	}
1304 	if (addr == 0)
1305 		return 0;
1306 	alloc_bottom = addr + size;
1307 
1308 	prom_debug(" -> %lx\n", addr);
1309 	prom_debug("  alloc_bottom : %lx\n", alloc_bottom);
1310 	prom_debug("  alloc_top    : %lx\n", alloc_top);
1311 	prom_debug("  alloc_top_hi : %lx\n", alloc_top_high);
1312 	prom_debug("  rmo_top      : %lx\n", rmo_top);
1313 	prom_debug("  ram_top      : %lx\n", ram_top);
1314 
1315 	return addr;
1316 }
1317 
1318 /*
1319  * Allocates memory downward, either from top of RMO, or if highmem
1320  * is set, from the top of RAM.  Note that this one doesn't handle
1321  * failures.  It does claim memory if highmem is not set.
1322  */
1323 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1324 				       int highmem)
1325 {
1326 	unsigned long base, addr = 0;
1327 
1328 	prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
1329 		   highmem ? "(high)" : "(low)");
1330 	if (ram_top == 0)
1331 		prom_panic("alloc_down() called with mem not initialized\n");
1332 
1333 	if (highmem) {
1334 		/* Carve out storage for the TCE table. */
1335 		addr = _ALIGN_DOWN(alloc_top_high - size, align);
1336 		if (addr <= alloc_bottom)
1337 			return 0;
1338 		/* Will we bump into the RMO? If yes, check that we
1339 		 * didn't overlap existing allocations there; if we did,
1340 		 * we are dead, we must be the first in town!
1341 		 */
1342 		if (addr < rmo_top) {
1343 			/* Good, we are first */
1344 			if (alloc_top == rmo_top)
1345 				alloc_top = rmo_top = addr;
1346 			else
1347 				return 0;
1348 		}
1349 		alloc_top_high = addr;
1350 		goto bail;
1351 	}
1352 
1353 	base = _ALIGN_DOWN(alloc_top - size, align);
1354 	for (; base > alloc_bottom;
1355 	     base = _ALIGN_DOWN(base - 0x100000, align))  {
1356 		prom_debug("    trying: 0x%lx\n\r", base);
1357 		addr = (unsigned long)prom_claim(base, size, 0);
1358 		if (addr != PROM_ERROR && addr != 0)
1359 			break;
1360 		addr = 0;
1361 	}
1362 	if (addr == 0)
1363 		return 0;
1364 	alloc_top = addr;
1365 
1366  bail:
1367 	prom_debug(" -> %lx\n", addr);
1368 	prom_debug("  alloc_bottom : %lx\n", alloc_bottom);
1369 	prom_debug("  alloc_top    : %lx\n", alloc_top);
1370 	prom_debug("  alloc_top_hi : %lx\n", alloc_top_high);
1371 	prom_debug("  rmo_top      : %lx\n", rmo_top);
1372 	prom_debug("  ram_top      : %lx\n", ram_top);
1373 
1374 	return addr;
1375 }
1376 
1377 /*
1378  * Parse a "reg" cell
1379  */
1380 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1381 {
1382 	cell_t *p = *cellp;
1383 	unsigned long r = 0;
1384 
1385 	/* Ignore more than 2 cells */
1386 	while (s > sizeof(unsigned long) / 4) {
1387 		p++;
1388 		s--;
1389 	}
1390 	r = be32_to_cpu(*p++);
1391 #ifdef CONFIG_PPC64
1392 	if (s > 1) {
1393 		r <<= 32;
1394 		r |= be32_to_cpu(*(p++));
1395 	}
1396 #endif
1397 	*cellp = p;
1398 	return r;
1399 }
1400 
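/*
 * For example, with #address-cells == 2 a 64-bit base address is
 * assembled from two consecutive big-endian cells; with a single cell
 * only 32 bits are read. Extra cells beyond what fits in an unsigned
 * long are skipped.
 */
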
1401 /*
1402  * Very dumb function for adding to the memory reserve list, but
1403  * we don't need anything smarter at this point
1404  *
1405  * XXX Eventually check for collisions.  They should NEVER happen.
1406  * If problems seem to show up, it would be a good start to track
1407  * them down.
1408  */
1409 static void __init reserve_mem(u64 base, u64 size)
1410 {
1411 	u64 top = base + size;
1412 	unsigned long cnt = mem_reserve_cnt;
1413 
1414 	if (size == 0)
1415 		return;
1416 
1417 	/* We need to always keep one empty entry so that we
1418 	 * have our terminator with "size" set to 0 since we are
1419 	 * dumb and just copy this entire array to the boot params
1420 	 */
1421 	base = _ALIGN_DOWN(base, PAGE_SIZE);
1422 	top = _ALIGN_UP(top, PAGE_SIZE);
1423 	size = top - base;
1424 
1425 	if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1426 		prom_panic("Memory reserve map exhausted !\n");
1427 	mem_reserve_map[cnt].base = cpu_to_be64(base);
1428 	mem_reserve_map[cnt].size = cpu_to_be64(size);
1429 	mem_reserve_cnt = cnt + 1;
1430 }
1431 
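/*
 * Typical usage, as in prom_instantiate_rtas() below: claim memory with
 * alloc_up()/alloc_down(), then call reserve_mem() on it so the region
 * ends up in the reserve map handed to the kernel.
 */
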
1432 /*
1433  * Initialize the memory allocation mechanism: parse the "memory" nodes to
1434  * obtain the top of memory and of the RMO, and set up our local allocator.
1435  */
1436 static void __init prom_init_mem(void)
1437 {
1438 	phandle node;
1439 #ifdef DEBUG_PROM
1440 	char *path;
1441 #endif
1442 	char type[64];
1443 	unsigned int plen;
1444 	cell_t *p, *endp;
1445 	__be32 val;
1446 	u32 rac, rsc;
1447 
1448 	/*
1449 	 * We iterate the memory nodes to find
1450 	 * 1) top of RMO (first node)
1451 	 * 2) top of memory
1452 	 */
1453 	val = cpu_to_be32(2);
1454 	prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1455 	rac = be32_to_cpu(val);
1456 	val = cpu_to_be32(1);
1457 	prom_getprop(prom.root, "#size-cells", &val, sizeof(val));
1458 	rsc = be32_to_cpu(val);
1459 	prom_debug("root_addr_cells: %x\n", rac);
1460 	prom_debug("root_size_cells: %x\n", rsc);
1461 
1462 	prom_debug("scanning memory:\n");
1463 #ifdef DEBUG_PROM
1464 	path = prom_scratch;
1465 #endif
1466 
1467 	for (node = 0; prom_next_node(&node); ) {
1468 		type[0] = 0;
1469 		prom_getprop(node, "device_type", type, sizeof(type));
1470 
1471 		if (type[0] == 0) {
1472 			/*
1473 			 * CHRP Longtrail machines have no device_type
1474 			 * on the memory node, so check the name instead...
1475 			 */
1476 			prom_getprop(node, "name", type, sizeof(type));
1477 		}
1478 		if (strcmp(type, "memory"))
1479 			continue;
1480 
1481 		plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1482 		if (plen > sizeof(regbuf)) {
1483 			prom_printf("memory node too large for buffer !\n");
1484 			plen = sizeof(regbuf);
1485 		}
1486 		p = regbuf;
1487 		endp = p + (plen / sizeof(cell_t));
1488 
1489 #ifdef DEBUG_PROM
1490 		memset(path, 0, PROM_SCRATCH_SIZE);
1491 		call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1492 		prom_debug("  node %s :\n", path);
1493 #endif /* DEBUG_PROM */
1494 
1495 		while ((endp - p) >= (rac + rsc)) {
1496 			unsigned long base, size;
1497 
1498 			base = prom_next_cell(rac, &p);
1499 			size = prom_next_cell(rsc, &p);
1500 
1501 			if (size == 0)
1502 				continue;
1503 			prom_debug("    %lx %lx\n", base, size);
1504 			if (base == 0 && (of_platform & PLATFORM_LPAR))
1505 				rmo_top = size;
1506 			if ((base + size) > ram_top)
1507 				ram_top = base + size;
1508 		}
1509 	}
1510 
1511 	alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1512 
1513 	/*
1514 	 * If prom_memory_limit is set we reduce the upper limits *except* for
1515 	 * alloc_top_high. This must be the real top of RAM so we can put
1516 	 * TCE's up there.
1517 	 */
1518 
1519 	alloc_top_high = ram_top;
1520 
1521 	if (prom_memory_limit) {
1522 		if (prom_memory_limit <= alloc_bottom) {
1523 			prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
1524 				    prom_memory_limit);
1525 			prom_memory_limit = 0;
1526 		} else if (prom_memory_limit >= ram_top) {
1527 			prom_printf("Ignoring mem=%lx >= ram_top.\n",
1528 				    prom_memory_limit);
1529 			prom_memory_limit = 0;
1530 		} else {
1531 			ram_top = prom_memory_limit;
1532 			rmo_top = min(rmo_top, prom_memory_limit);
1533 		}
1534 	}
1535 
1536 	/*
1537 	 * Set up our top alloc point, that is top of RMO or top of
1538 	 * segment 0 when running non-LPAR.
1539 	 * Some RS64 machines have buggy firmware where claims up at
1540 	 * 1GB fail.  Cap at 768MB as a workaround.
1541 	 * Since 768MB is plenty of room, and we need to cap to something
1542 	 * reasonable on 32-bit, cap at 768MB on all machines.
1543 	 */
1544 	if (!rmo_top)
1545 		rmo_top = ram_top;
1546 	rmo_top = min(0x30000000ul, rmo_top);
1547 	alloc_top = rmo_top;
1548 	alloc_top_high = ram_top;
1549 
1550 	/*
1551 	 * Check if we have an initrd after the kernel but still inside
1552 	 * the RMO.  If we do, move our bottom point to after it.
1553 	 */
1554 	if (prom_initrd_start &&
1555 	    prom_initrd_start < rmo_top &&
1556 	    prom_initrd_end > alloc_bottom)
1557 		alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1558 
1559 	prom_printf("memory layout at init:\n");
1560 	prom_printf("  memory_limit : %lx (16 MB aligned)\n",
1561 		    prom_memory_limit);
1562 	prom_printf("  alloc_bottom : %lx\n", alloc_bottom);
1563 	prom_printf("  alloc_top    : %lx\n", alloc_top);
1564 	prom_printf("  alloc_top_hi : %lx\n", alloc_top_high);
1565 	prom_printf("  rmo_top      : %lx\n", rmo_top);
1566 	prom_printf("  ram_top      : %lx\n", ram_top);
1567 }
1568 
1569 static void __init prom_close_stdin(void)
1570 {
1571 	__be32 val;
1572 	ihandle stdin;
1573 
1574 	if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1575 		stdin = be32_to_cpu(val);
1576 		call_prom("close", 1, 0, stdin);
1577 	}
1578 }
1579 
1580 /*
1581  * Allocate room for and instantiate RTAS
1582  */
1583 static void __init prom_instantiate_rtas(void)
1584 {
1585 	phandle rtas_node;
1586 	ihandle rtas_inst;
1587 	u32 base, entry = 0;
1588 	__be32 val;
1589 	u32 size = 0;
1590 
1591 	prom_debug("prom_instantiate_rtas: start...\n");
1592 
1593 	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1594 	prom_debug("rtas_node: %x\n", rtas_node);
1595 	if (!PHANDLE_VALID(rtas_node))
1596 		return;
1597 
1598 	val = 0;
1599 	prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
1600 	size = be32_to_cpu(val);
1601 	if (size == 0)
1602 		return;
1603 
1604 	base = alloc_down(size, PAGE_SIZE, 0);
1605 	if (base == 0)
1606 		prom_panic("Could not allocate memory for RTAS\n");
1607 
1608 	rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1609 	if (!IHANDLE_VALID(rtas_inst)) {
1610 		prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1611 		return;
1612 	}
1613 
1614 	prom_printf("instantiating rtas at 0x%x...", base);
1615 
1616 	if (call_prom_ret("call-method", 3, 2, &entry,
1617 			  ADDR("instantiate-rtas"),
1618 			  rtas_inst, base) != 0
1619 	    || entry == 0) {
1620 		prom_printf(" failed\n");
1621 		return;
1622 	}
1623 	prom_printf(" done\n");
1624 
1625 	reserve_mem(base, size);
1626 
1627 	val = cpu_to_be32(base);
1628 	prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1629 		     &val, sizeof(val));
1630 	val = cpu_to_be32(entry);
1631 	prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1632 		     &val, sizeof(val));
1633 
1634 	/* Check if it supports "query-cpu-stopped-state" */
1635 	if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1636 			 &val, sizeof(val)) != PROM_ERROR)
1637 		rtas_has_query_cpu_stopped = true;
1638 
1639 	prom_debug("rtas base     = 0x%x\n", base);
1640 	prom_debug("rtas entry    = 0x%x\n", entry);
1641 	prom_debug("rtas size     = 0x%x\n", size);
1642 
1643 	prom_debug("prom_instantiate_rtas: end...\n");
1644 }
1645 
1646 #ifdef CONFIG_PPC64
1647 /*
1648  * Allocate room for and instantiate Stored Measurement Log (SML)
1649  */
1650 static void __init prom_instantiate_sml(void)
1651 {
1652 	phandle ibmvtpm_node;
1653 	ihandle ibmvtpm_inst;
1654 	u32 entry = 0, size = 0, succ = 0;
1655 	u64 base;
1656 	__be32 val;
1657 
1658 	prom_debug("prom_instantiate_sml: start...\n");
1659 
1660 	ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1661 	prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1662 	if (!PHANDLE_VALID(ibmvtpm_node))
1663 		return;
1664 
1665 	ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1666 	if (!IHANDLE_VALID(ibmvtpm_inst)) {
1667 		prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1668 		return;
1669 	}
1670 
1671 	if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
1672 			 &val, sizeof(val)) != PROM_ERROR) {
1673 		if (call_prom_ret("call-method", 2, 2, &succ,
1674 				  ADDR("reformat-sml-to-efi-alignment"),
1675 				  ibmvtpm_inst) != 0 || succ == 0) {
1676 			prom_printf("Reformat SML to EFI alignment failed\n");
1677 			return;
1678 		}
1679 
1680 		if (call_prom_ret("call-method", 2, 2, &size,
1681 				  ADDR("sml-get-allocated-size"),
1682 				  ibmvtpm_inst) != 0 || size == 0) {
1683 			prom_printf("SML get allocated size failed\n");
1684 			return;
1685 		}
1686 	} else {
1687 		if (call_prom_ret("call-method", 2, 2, &size,
1688 				  ADDR("sml-get-handover-size"),
1689 				  ibmvtpm_inst) != 0 || size == 0) {
1690 			prom_printf("SML get handover size failed\n");
1691 			return;
1692 		}
1693 	}
1694 
1695 	base = alloc_down(size, PAGE_SIZE, 0);
1696 	if (base == 0)
1697 		prom_panic("Could not allocate memory for sml\n");
1698 
1699 	prom_printf("instantiating sml at 0x%llx...", base);
1700 
1701 	memset((void *)base, 0, size);
1702 
1703 	if (call_prom_ret("call-method", 4, 2, &entry,
1704 			  ADDR("sml-handover"),
1705 			  ibmvtpm_inst, size, base) != 0 || entry == 0) {
1706 		prom_printf("SML handover failed\n");
1707 		return;
1708 	}
1709 	prom_printf(" done\n");
1710 
1711 	reserve_mem(base, size);
1712 
1713 	prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
1714 		     &base, sizeof(base));
1715 	prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
1716 		     &size, sizeof(size));
1717 
1718 	prom_debug("sml base     = 0x%llx\n", base);
1719 	prom_debug("sml size     = 0x%x\n", size);
1720 
1721 	prom_debug("prom_instantiate_sml: end...\n");
1722 }
1723 
1724 /*
1725  * Allocate room for and initialize TCE tables
1726  */
1727 #ifdef __BIG_ENDIAN__
1728 static void __init prom_initialize_tce_table(void)
1729 {
1730 	phandle node;
1731 	ihandle phb_node;
1732 	char compatible[64], type[64], model[64];
1733 	char *path = prom_scratch;
1734 	u64 base, align;
1735 	u32 minalign, minsize;
1736 	u64 tce_entry, *tce_entryp;
1737 	u64 local_alloc_top, local_alloc_bottom;
1738 	u64 i;
1739 
1740 	if (prom_iommu_off)
1741 		return;
1742 
1743 	prom_debug("starting prom_initialize_tce_table\n");
1744 
1745 	/* Cache current top of allocs so we reserve a single block */
1746 	local_alloc_top = alloc_top_high;
1747 	local_alloc_bottom = local_alloc_top;
1748 
1749 	/* Search all nodes looking for PHBs. */
1750 	for (node = 0; prom_next_node(&node); ) {
1751 		compatible[0] = 0;
1752 		type[0] = 0;
1753 		model[0] = 0;
1754 		prom_getprop(node, "compatible",
1755 			     compatible, sizeof(compatible));
1756 		prom_getprop(node, "device_type", type, sizeof(type));
1757 		prom_getprop(node, "model", model, sizeof(model));
1758 
1759 		if ((type[0] == 0) || (strstr(type, "pci") == NULL))
1760 			continue;
1761 
1762 		/* Keep the old logic intact to avoid regression. */
1763 		if (compatible[0] != 0) {
1764 			if ((strstr(compatible, "python") == NULL) &&
1765 			    (strstr(compatible, "Speedwagon") == NULL) &&
1766 			    (strstr(compatible, "Winnipeg") == NULL))
1767 				continue;
1768 		} else if (model[0] != 0) {
1769 			if ((strstr(model, "ython") == NULL) &&
1770 			    (strstr(model, "peedwagon") == NULL) &&
1771 			    (strstr(model, "innipeg") == NULL))
1772 				continue;
1773 		}
1774 
1775 		if (prom_getprop(node, "tce-table-minalign", &minalign,
1776 				 sizeof(minalign)) == PROM_ERROR)
1777 			minalign = 0;
1778 		if (prom_getprop(node, "tce-table-minsize", &minsize,
1779 				 sizeof(minsize)) == PROM_ERROR)
1780 			minsize = 4UL << 20;
1781 
1782 		/*
1783 		 * Even though we read what OF wants, we just set the table
1784 		 * size to 4 MB.  This is enough to map 2GB of PCI DMA space.
1785 		 * By doing this, we avoid the pitfalls of trying to DMA to
1786 		 * MMIO space and the DMA alias hole.
1787 		 */
1788 		minsize = 4UL << 20;
1789 
1790 		/* Align to the greater of the align or size */
1791 		align = max(minalign, minsize);
1792 		base = alloc_down(minsize, align, 1);
1793 		if (base == 0)
1794 			prom_panic("ERROR, cannot find space for TCE table.\n");
1795 		if (base < local_alloc_bottom)
1796 			local_alloc_bottom = base;
1797 
1798 		/* It seems OF doesn't null-terminate the path :-( */
1799 		memset(path, 0, PROM_SCRATCH_SIZE);
1800 		/* Call OF to setup the TCE hardware */
1801 		if (call_prom("package-to-path", 3, 1, node,
1802 			      path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1803 			prom_printf("package-to-path failed\n");
1804 		}
1805 
1806 		/* Save away the TCE table attributes for later use. */
1807 		prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
1808 		prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
1809 
1810 		prom_debug("TCE table: %s\n", path);
1811 		prom_debug("\tnode = 0x%x\n", node);
1812 		prom_debug("\tbase = 0x%llx\n", base);
1813 		prom_debug("\tsize = 0x%x\n", minsize);
1814 
1815 		/* Initialize the table to have a one-to-one mapping
1816 		 * over the allocated size.
1817 		 */
1818 		tce_entryp = (u64 *)base;
1819 		for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1820 			tce_entry = (i << PAGE_SHIFT);
1821 			tce_entry |= 0x3;
1822 			*tce_entryp = tce_entry;
1823 		}
1824 
1825 		prom_printf("opening PHB %s", path);
1826 		phb_node = call_prom("open", 1, 1, path);
1827 		if (phb_node == 0)
1828 			prom_printf("... failed\n");
1829 		else
1830 			prom_printf("... done\n");
1831 
1832 		call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1833 			  phb_node, -1, minsize,
1834 			  (u32) base, (u32) (base >> 32));
1835 		call_prom("close", 1, 0, phb_node);
1836 	}
1837 
1838 	reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1839 
1840 	/* These are only really needed if there is a memory limit in
1841 	 * effect, but we don't know that yet, so export them always. */
1842 	prom_tce_alloc_start = local_alloc_bottom;
1843 	prom_tce_alloc_end = local_alloc_top;
1844 
1845 	/* Flag the first invalid entry */
1846 	prom_debug("ending prom_initialize_tce_table\n");
1847 }
1848 #endif /* __BIG_ENDIAN__ */
1849 #endif /* CONFIG_PPC64 */
1850 
1851 /*
1852  * With CHRP SMP we need to use the OF to start the other processors.
1853  * We can't wait until smp_boot_cpus (the OF is trashed by then)
1854  * so we have to put the processors into a holding pattern controlled
1855  * by the kernel (not OF) before we destroy the OF.
1856  *
1857  * This uses a chunk of low memory, puts some holding pattern
1858  * code there and sends the other processors off to there until
1859  * smp_boot_cpus tells them to do something.  The holding pattern
1860  * checks that address until its cpu # is there; when it is, that
1861  * cpu jumps to __secondary_start().  smp_boot_cpus() takes care
1862  * of setting those values.
1863  *
1864  * We also use physical address 0x4 here to tell when a cpu
1865  * is in its holding pattern code.
1866  *
1867  * -- Cort
1868  */
1869 /*
1870  * We want to reference the copy of __secondary_hold_* in the
1871  * 0 - 0x100 address range
1872  */
1873 #define LOW_ADDR(x)	(((unsigned long) &(x)) & 0xff)
1874 
1875 static void __init prom_hold_cpus(void)
1876 {
1877 	unsigned long i;
1878 	phandle node;
1879 	char type[64];
1880 	unsigned long *spinloop
1881 		= (void *) LOW_ADDR(__secondary_hold_spinloop);
1882 	unsigned long *acknowledge
1883 		= (void *) LOW_ADDR(__secondary_hold_acknowledge);
1884 	unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1885 
1886 	/*
1887 	 * On pseries, if RTAS supports "query-cpu-stopped-state",
1888 	 * we skip this stage, the CPUs will be started by the
1889 	 * kernel using RTAS.
1890 	 */
1891 	if ((of_platform == PLATFORM_PSERIES ||
1892 	     of_platform == PLATFORM_PSERIES_LPAR) &&
1893 	    rtas_has_query_cpu_stopped) {
1894 		prom_printf("prom_hold_cpus: skipped\n");
1895 		return;
1896 	}
1897 
1898 	prom_debug("prom_hold_cpus: start...\n");
1899 	prom_debug("    1) spinloop       = 0x%lx\n", (unsigned long)spinloop);
1900 	prom_debug("    1) *spinloop      = 0x%lx\n", *spinloop);
1901 	prom_debug("    1) acknowledge    = 0x%lx\n",
1902 		   (unsigned long)acknowledge);
1903 	prom_debug("    1) *acknowledge   = 0x%lx\n", *acknowledge);
1904 	prom_debug("    1) secondary_hold = 0x%lx\n", secondary_hold);
1905 
1906 	/* Set the common spinloop variable, so all of the secondary cpus
1907 	 * will block when they are awakened from their OF spinloop.
1908 	 * This must occur for both SMP and non SMP kernels, since OF will
1909 	 * be trashed when we move the kernel.
1910 	 */
1911 	*spinloop = 0;
1912 
1913 	/* look for cpus */
1914 	for (node = 0; prom_next_node(&node); ) {
1915 		unsigned int cpu_no;
1916 		__be32 reg;
1917 
1918 		type[0] = 0;
1919 		prom_getprop(node, "device_type", type, sizeof(type));
1920 		if (strcmp(type, "cpu") != 0)
1921 			continue;
1922 
1923 		/* Skip non-configured cpus. */
1924 		if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1925 			if (strcmp(type, "okay") != 0)
1926 				continue;
1927 
1928 		reg = cpu_to_be32(-1); /* make sparse happy */
1929 		prom_getprop(node, "reg", &reg, sizeof(reg));
1930 		cpu_no = be32_to_cpu(reg);
1931 
1932 		prom_debug("cpu hw idx   = %u\n", cpu_no);
1933 
1934 		/* Init the acknowledge var which will be reset by
1935 		 * the secondary cpu when it awakens from its OF
1936 		 * spinloop.
1937 		 */
1938 		*acknowledge = (unsigned long)-1;
1939 
1940 		if (cpu_no != prom.cpu) {
1941 			/* Primary Thread of non-boot cpu or any thread */
1942 			prom_printf("starting cpu hw idx %u... ", cpu_no);
1943 			call_prom("start-cpu", 3, 0, node,
1944 				  secondary_hold, cpu_no);
1945 
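			/* Spin (bounded) until the secondary overwrites *acknowledge with its cpu number */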
1946 			for (i = 0; (i < 100000000) &&
1947 			     (*acknowledge == ((unsigned long)-1)); i++ )
1948 				mb();
1949 
1950 			if (*acknowledge == cpu_no)
1951 				prom_printf("done\n");
1952 			else
1953 				prom_printf("failed: %lx\n", *acknowledge);
1954 		}
1955 #ifdef CONFIG_SMP
1956 		else
1957 			prom_printf("boot cpu hw idx %u\n", cpu_no);
1958 #endif /* CONFIG_SMP */
1959 	}
1960 
1961 	prom_debug("prom_hold_cpus: end...\n");
1962 }
1963 
1964 
1965 static void __init prom_init_client_services(unsigned long pp)
1966 {
1967 	/* Get a handle to the prom entry point before anything else */
1968 	prom_entry = pp;
1969 
1970 	/* get a handle for the stdout device */
1971 	prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
1972 	if (!PHANDLE_VALID(prom.chosen))
1973 		prom_panic("cannot find chosen"); /* msg won't be printed :( */
1974 
1975 	/* get device tree root */
1976 	prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
1977 	if (!PHANDLE_VALID(prom.root))
1978 		prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1979 
1980 	prom.mmumap = 0;
1981 }
1982 
1983 #ifdef CONFIG_PPC32
1984 /*
1985  * For really old powermacs, we need to map things we claim.
1986  * For that, we need the ihandle of the mmu.
1987  * Also, on the longtrail, we need to work around other bugs.
1988  */
1989 static void __init prom_find_mmu(void)
1990 {
1991 	phandle oprom;
1992 	char version[64];
1993 
1994 	oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
1995 	if (!PHANDLE_VALID(oprom))
1996 		return;
1997 	if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
1998 		return;
1999 	version[sizeof(version) - 1] = 0;
2000 	/* XXX might need to add other versions here */
2001 	if (strcmp(version, "Open Firmware, 1.0.5") == 0)
2002 		of_workarounds = OF_WA_CLAIM;
2003 	else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
2004 		of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
2005 		call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
2006 	} else
2007 		return;
2008 	prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
2009 	prom_getprop(prom.chosen, "mmu", &prom.mmumap,
2010 		     sizeof(prom.mmumap));
2011 	prom.mmumap = be32_to_cpu(prom.mmumap);
2012 	if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
2013 		of_workarounds &= ~OF_WA_CLAIM;		/* hmmm */
2014 }
2015 #else
2016 #define prom_find_mmu()
2017 #endif
2018 
2019 static void __init prom_init_stdout(void)
2020 {
2021 	char *path = of_stdout_device;
2022 	char type[16];
2023 	phandle stdout_node;
2024 	__be32 val;
2025 
2026 	if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
2027 		prom_panic("cannot find stdout");
2028 
2029 	prom.stdout = be32_to_cpu(val);
2030 
2031 	/* Get the full OF pathname of the stdout device */
2032 	memset(path, 0, 256);
2033 	call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
2034 	prom_printf("OF stdout device is: %s\n", of_stdout_device);
2035 	prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
2036 		     path, strlen(path) + 1);
2037 
2038 	/* instance-to-package fails on PA-Semi */
2039 	stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
2040 	if (stdout_node != PROM_ERROR) {
2041 		val = cpu_to_be32(stdout_node);
2042 
2043 		/* If it's a display, note it */
2044 		memset(type, 0, sizeof(type));
2045 		prom_getprop(stdout_node, "device_type", type, sizeof(type));
2046 		if (strcmp(type, "display") == 0)
2047 			prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2048 	}
2049 }
2050 
2051 static int __init prom_find_machine_type(void)
2052 {
2053 	char compat[256];
2054 	int len, i = 0;
2055 #ifdef CONFIG_PPC64
2056 	phandle rtas;
2057 	int x;
2058 #endif
2059 
2060 	/* Look for a PowerMac or a Cell */
2061 	len = prom_getprop(prom.root, "compatible",
2062 			   compat, sizeof(compat)-1);
2063 	if (len > 0) {
2064 		compat[len] = 0;
2065 		while (i < len) {
2066 			char *p = &compat[i];
2067 			int sl = strlen(p);
2068 			if (sl == 0)
2069 				break;
2070 			if (strstr(p, "Power Macintosh") ||
2071 			    strstr(p, "MacRISC"))
2072 				return PLATFORM_POWERMAC;
2073 #ifdef CONFIG_PPC64
2074 			/* We must make sure we don't detect the IBM Cell
2075 			 * blades as pSeries due to some firmware issues,
2076 			 * so we do it here.
2077 			 */
2078 			if (strstr(p, "IBM,CBEA") ||
2079 			    strstr(p, "IBM,CPBW-1.0"))
2080 				return PLATFORM_GENERIC;
2081 #endif /* CONFIG_PPC64 */
2082 			i += sl + 1;
2083 		}
2084 	}
2085 #ifdef CONFIG_PPC64
2086 	/* Try to figure out if it's an IBM pSeries or any other
2087 	 * PAPR-compliant platform. We assume it is if:
2088 	 *  - /device_type is "chrp" (please, do NOT use that for future
2089 	 *    non-IBM designs!)
2090 	 *  - it has /rtas
2091 	 */
2092 	len = prom_getprop(prom.root, "device_type",
2093 			   compat, sizeof(compat)-1);
2094 	if (len <= 0)
2095 		return PLATFORM_GENERIC;
2096 	if (strcmp(compat, "chrp"))
2097 		return PLATFORM_GENERIC;
2098 
2099 	/* Default to pSeries. We need to know if we are running LPAR */
2100 	rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2101 	if (!PHANDLE_VALID(rtas))
2102 		return PLATFORM_GENERIC;
2103 	x = prom_getproplen(rtas, "ibm,hypertas-functions");
2104 	if (x != PROM_ERROR) {
2105 		prom_debug("Hypertas detected, assuming LPAR !\n");
2106 		return PLATFORM_PSERIES_LPAR;
2107 	}
2108 	return PLATFORM_PSERIES;
2109 #else
2110 	return PLATFORM_GENERIC;
2111 #endif
2112 }
2113 
2114 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2115 {
2116 	return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2117 }
2118 
2119 /*
2120  * If we have a display that we don't know how to drive,
2121  * we will want to try to execute OF's open method for it
2122  * later.  However, OF will probably fall over if we do that
2123  * after we've taken over the MMU.
2124  * So we check whether we will need to open the display,
2125  * and if so, open it now.
2126  */
2127 static void __init prom_check_displays(void)
2128 {
2129 	char type[16], *path;
2130 	phandle node;
2131 	ihandle ih;
2132 	int i;
2133 
2134 	static const unsigned char default_colors[] __initconst = {
2135 		0x00, 0x00, 0x00,
2136 		0x00, 0x00, 0xaa,
2137 		0x00, 0xaa, 0x00,
2138 		0x00, 0xaa, 0xaa,
2139 		0xaa, 0x00, 0x00,
2140 		0xaa, 0x00, 0xaa,
2141 		0xaa, 0xaa, 0x00,
2142 		0xaa, 0xaa, 0xaa,
2143 		0x55, 0x55, 0x55,
2144 		0x55, 0x55, 0xff,
2145 		0x55, 0xff, 0x55,
2146 		0x55, 0xff, 0xff,
2147 		0xff, 0x55, 0x55,
2148 		0xff, 0x55, 0xff,
2149 		0xff, 0xff, 0x55,
2150 		0xff, 0xff, 0xff
2151 	};
2152 	const unsigned char *clut;
2153 
2154 	prom_debug("Looking for displays\n");
2155 	for (node = 0; prom_next_node(&node); ) {
2156 		memset(type, 0, sizeof(type));
2157 		prom_getprop(node, "device_type", type, sizeof(type));
2158 		if (strcmp(type, "display") != 0)
2159 			continue;
2160 
2161 		/* It seems OF doesn't null-terminate the path :-( */
2162 		path = prom_scratch;
2163 		memset(path, 0, PROM_SCRATCH_SIZE);
2164 
2165 		/*
2166 		 * leave some room at the end of the path for appending extra
2167 		 * arguments
2168 		 */
2169 		if (call_prom("package-to-path", 3, 1, node, path,
2170 			      PROM_SCRATCH_SIZE-10) == PROM_ERROR)
2171 			continue;
2172 		prom_printf("found display   : %s, opening... ", path);
2173 
2174 		ih = call_prom("open", 1, 1, path);
2175 		if (ih == 0) {
2176 			prom_printf("failed\n");
2177 			continue;
2178 		}
2179 
2180 		/* Success */
2181 		prom_printf("done\n");
2182 		prom_setprop(node, path, "linux,opened", NULL, 0);
2183 
2184 		/* Setup a usable color table when the appropriate
2185 		 * method is available. Should update this to set-colors */
2186 		clut = default_colors;
2187 		for (i = 0; i < 16; i++, clut += 3)
2188 			if (prom_set_color(ih, i, clut[0], clut[1],
2189 					   clut[2]) != 0)
2190 				break;
2191 
2192 #ifdef CONFIG_LOGO_LINUX_CLUT224
2193 		clut = PTRRELOC(logo_linux_clut224.clut);
2194 		for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2195 			if (prom_set_color(ih, i + 32, clut[0], clut[1],
2196 					   clut[2]) != 0)
2197 				break;
2198 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
2199 
2200 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2201 		if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2202 		    PROM_ERROR) {
2203 			u32 width, height, pitch, addr;
2204 
2205 			prom_printf("Setting btext !\n");
2206 			prom_getprop(node, "width", &width, 4);
2207 			prom_getprop(node, "height", &height, 4);
2208 			prom_getprop(node, "linebytes", &pitch, 4);
2209 			prom_getprop(node, "address", &addr, 4);
2210 			prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2211 				    width, height, pitch, addr);
2212 			btext_setup_display(width, height, 8, pitch, addr);
2213 		}
2214 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2215 	}
2216 }
2217 
2218 
2219 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
2220 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2221 			      unsigned long needed, unsigned long align)
2222 {
2223 	void *ret;
2224 
2225 	*mem_start = _ALIGN(*mem_start, align);
2226 	while ((*mem_start + needed) > *mem_end) {
2227 		unsigned long room, chunk;
2228 
2229 		prom_debug("Chunk exhausted, claiming more at %lx...\n",
2230 			   alloc_bottom);
2231 		room = alloc_top - alloc_bottom;
2232 		if (room > DEVTREE_CHUNK_SIZE)
2233 			room = DEVTREE_CHUNK_SIZE;
2234 		if (room < PAGE_SIZE)
2235 			prom_panic("No memory for flatten_device_tree "
2236 				   "(no room)\n");
2237 		chunk = alloc_up(room, 0);
2238 		if (chunk == 0)
2239 			prom_panic("No memory for flatten_device_tree "
2240 				   "(claim failed)\n");
2241 		*mem_end = chunk + room;
2242 	}
2243 
2244 	ret = (void *)*mem_start;
2245 	*mem_start += needed;
2246 
2247 	return ret;
2248 }
2249 
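/* Reserve one aligned 32-bit cell in the flattened-tree buffer and store the token big-endian */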
2250 #define dt_push_token(token, mem_start, mem_end) do { 			\
2251 		void *room = make_room(mem_start, mem_end, 4, 4);	\
2252 		*(__be32 *)room = cpu_to_be32(token);			\
2253 	} while(0)
2254 
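/*
 * Look up str in the strings block built so far.  Returns its offset from
 * dt_string_start, or 0 if absent (offset 0 is never a valid string thanks
 * to the 4-byte hole left at the start of the block).
 */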
2255 static unsigned long __init dt_find_string(char *str)
2256 {
2257 	char *s, *os;
2258 
2259 	s = os = (char *)dt_string_start;
2260 	s += 4;
2261 	while (s <  (char *)dt_string_end) {
2262 		if (strcmp(s, str) == 0)
2263 			return s - os;
2264 		s += strlen(s) + 1;
2265 	}
2266 	return 0;
2267 }
2268 
2269 /*
2270  * The Open Firmware 1275 specification states properties must be 31 bytes or
2271  * less; however, not all firmwares obey this. Make it 64 bytes to be safe.
2272  */
2273 #define MAX_PROPERTY_NAME 64
2274 
2275 static void __init scan_dt_build_strings(phandle node,
2276 					 unsigned long *mem_start,
2277 					 unsigned long *mem_end)
2278 {
2279 	char *prev_name, *namep, *sstart;
2280 	unsigned long soff;
2281 	phandle child;
2282 
2283 	sstart =  (char *)dt_string_start;
2284 
2285 	/* get and store all property names */
2286 	prev_name = "";
2287 	for (;;) {
2288 		/* 64 is max len of name including nul. */
2289 		namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2290 		if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2291 			/* No more properties: unwind alloc */
2292 			*mem_start = (unsigned long)namep;
2293 			break;
2294 		}
2295 
2296  		/* skip "name" */
2297  		if (strcmp(namep, "name") == 0) {
2298  			*mem_start = (unsigned long)namep;
2299  			prev_name = "name";
2300  			continue;
2301  		}
2302 		/* get/create string entry */
2303 		soff = dt_find_string(namep);
2304 		if (soff != 0) {
2305 			*mem_start = (unsigned long)namep;
2306 			namep = sstart + soff;
2307 		} else {
2308 			/* Trim off some if we can */
2309 			*mem_start = (unsigned long)namep + strlen(namep) + 1;
2310 			dt_string_end = *mem_start;
2311 		}
2312 		prev_name = namep;
2313 	}
2314 
2315 	/* do all our children */
2316 	child = call_prom("child", 1, 1, node);
2317 	while (child != 0) {
2318 		scan_dt_build_strings(child, mem_start, mem_end);
2319 		child = call_prom("peer", 1, 1, child);
2320 	}
2321 }
2322 
2323 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2324 					unsigned long *mem_end)
2325 {
2326 	phandle child;
2327 	char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2328 	unsigned long soff;
2329 	unsigned char *valp;
2330 	static char pname[MAX_PROPERTY_NAME] __prombss;
2331 	int l, room, has_phandle = 0;
2332 
2333 	dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2334 
2335 	/* get the node's full name */
2336 	namep = (char *)*mem_start;
2337 	room = *mem_end - *mem_start;
2338 	if (room > 255)
2339 		room = 255;
2340 	l = call_prom("package-to-path", 3, 1, node, namep, room);
2341 	if (l >= 0) {
2342 		/* Didn't fit?  Get more room. */
2343 		if (l >= room) {
2344 			if (l >= *mem_end - *mem_start)
2345 				namep = make_room(mem_start, mem_end, l+1, 1);
2346 			call_prom("package-to-path", 3, 1, node, namep, l);
2347 		}
2348 		namep[l] = '\0';
2349 
2350 		/* Fixup an Apple bug where they have bogus \0 chars in the
2351 		 * middle of the path in some properties, and extract
2352 		 * the unit name (everything after the last '/').
2353 		 */
2354 		for (lp = p = namep, ep = namep + l; p < ep; p++) {
2355 			if (*p == '/')
2356 				lp = namep;
2357 			else if (*p != 0)
2358 				*lp++ = *p;
2359 		}
2360 		*lp = 0;
2361 		*mem_start = _ALIGN((unsigned long)lp + 1, 4);
2362 	}
2363 
2364 	/* get it again for debugging */
2365 	path = prom_scratch;
2366 	memset(path, 0, PROM_SCRATCH_SIZE);
2367 	call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
2368 
2369 	/* get and store all properties */
2370 	prev_name = "";
2371 	sstart = (char *)dt_string_start;
2372 	for (;;) {
2373 		if (call_prom("nextprop", 3, 1, node, prev_name,
2374 			      pname) != 1)
2375 			break;
2376 
2377  		/* skip "name" */
2378  		if (strcmp(pname, "name") == 0) {
2379  			prev_name = "name";
2380  			continue;
2381  		}
2382 
2383 		/* find string offset */
2384 		soff = dt_find_string(pname);
2385 		if (soff == 0) {
2386 			prom_printf("WARNING: Can't find string index for"
2387 				    " <%s>, node %s\n", pname, path);
2388 			break;
2389 		}
2390 		prev_name = sstart + soff;
2391 
2392 		/* get length */
2393 		l = call_prom("getproplen", 2, 1, node, pname);
2394 
2395 		/* sanity checks */
2396 		if (l == PROM_ERROR)
2397 			continue;
2398 
2399 		/* push property head */
2400 		dt_push_token(OF_DT_PROP, mem_start, mem_end);
2401 		dt_push_token(l, mem_start, mem_end);
2402 		dt_push_token(soff, mem_start, mem_end);
2403 
2404 		/* push property content */
2405 		valp = make_room(mem_start, mem_end, l, 4);
2406 		call_prom("getprop", 4, 1, node, pname, valp, l);
2407 		*mem_start = _ALIGN(*mem_start, 4);
2408 
2409 		if (!strcmp(pname, "phandle"))
2410 			has_phandle = 1;
2411 	}
2412 
2413 	/* Add a "phandle" property if none already exist */
2414 	if (!has_phandle) {
2415 		soff = dt_find_string("phandle");
2416 		if (soff == 0)
2417 			prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path);
2418 		else {
2419 			dt_push_token(OF_DT_PROP, mem_start, mem_end);
2420 			dt_push_token(4, mem_start, mem_end);
2421 			dt_push_token(soff, mem_start, mem_end);
2422 			valp = make_room(mem_start, mem_end, 4, 4);
2423 			*(__be32 *)valp = cpu_to_be32(node);
2424 		}
2425 	}
2426 
2427 	/* do all our children */
2428 	child = call_prom("child", 1, 1, node);
2429 	while (child != 0) {
2430 		scan_dt_build_struct(child, mem_start, mem_end);
2431 		child = call_prom("peer", 1, 1, child);
2432 	}
2433 
2434 	dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2435 }
2436 
2437 static void __init flatten_device_tree(void)
2438 {
2439 	phandle root;
2440 	unsigned long mem_start, mem_end, room;
2441 	struct boot_param_header *hdr;
2442 	char *namep;
2443 	u64 *rsvmap;
2444 
2445 	/*
2446 	 * Check how much room we have between alloc top & bottom (+/- a
2447 	 * few pages), crop to 1MB, as this is our "chunk" size
2448 	 */
2449 	room = alloc_top - alloc_bottom - 0x4000;
2450 	if (room > DEVTREE_CHUNK_SIZE)
2451 		room = DEVTREE_CHUNK_SIZE;
2452 	prom_debug("starting device tree allocs at %lx\n", alloc_bottom);
2453 
2454 	/* Now try to claim that */
2455 	mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2456 	if (mem_start == 0)
2457 		prom_panic("Can't allocate initial device-tree chunk\n");
2458 	mem_end = mem_start + room;
2459 
2460 	/* Get root of tree */
2461 	root = call_prom("peer", 1, 1, (phandle)0);
2462 	if (root == (phandle)0)
2463 		prom_panic ("couldn't get device tree root\n");
2464 
2465 	/* Build header and make room for mem rsv map */
2466 	mem_start = _ALIGN(mem_start, 4);
2467 	hdr = make_room(&mem_start, &mem_end,
2468 			sizeof(struct boot_param_header), 4);
2469 	dt_header_start = (unsigned long)hdr;
2470 	rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2471 
2472 	/* Start of strings */
2473 	mem_start = PAGE_ALIGN(mem_start);
2474 	dt_string_start = mem_start;
2475 	mem_start += 4; /* hole */
2476 
2477 	/* Add "phandle" in there, we'll need it */
2478 	namep = make_room(&mem_start, &mem_end, 16, 1);
2479 	strcpy(namep, "phandle");
2480 	mem_start = (unsigned long)namep + strlen(namep) + 1;
2481 
2482 	/* Build string array */
2483 	prom_printf("Building dt strings...\n");
2484 	scan_dt_build_strings(root, &mem_start, &mem_end);
2485 	dt_string_end = mem_start;
2486 
2487 	/* Build structure */
2488 	mem_start = PAGE_ALIGN(mem_start);
2489 	dt_struct_start = mem_start;
2490 	prom_printf("Building dt structure...\n");
2491 	scan_dt_build_struct(root, &mem_start, &mem_end);
2492 	dt_push_token(OF_DT_END, &mem_start, &mem_end);
2493 	dt_struct_end = PAGE_ALIGN(mem_start);
2494 
2495 	/* Finish header */
2496 	hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2497 	hdr->magic = cpu_to_be32(OF_DT_HEADER);
2498 	hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2499 	hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2500 	hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2501 	hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2502 	hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2503 	hdr->version = cpu_to_be32(OF_DT_VERSION);
2504 	/* Version 16 is not backward compatible */
2505 	hdr->last_comp_version = cpu_to_be32(0x10);
2506 
2507 	/* Copy the reserve map in */
2508 	memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2509 
2510 #ifdef DEBUG_PROM
2511 	{
2512 		int i;
2513 		prom_printf("reserved memory map:\n");
2514 		for (i = 0; i < mem_reserve_cnt; i++)
2515 			prom_printf("  %llx - %llx\n",
2516 				    be64_to_cpu(mem_reserve_map[i].base),
2517 				    be64_to_cpu(mem_reserve_map[i].size));
2518 	}
2519 #endif
2520 	/* Bump mem_reserve_cnt to cause further reservations to fail
2521 	 * since it's too late.
2522 	 */
2523 	mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2524 
2525 	prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
2526 		    dt_string_start, dt_string_end);
2527 	prom_printf("Device tree struct  0x%lx -> 0x%lx\n",
2528 		    dt_struct_start, dt_struct_end);
2529 }
2530 
2531 #ifdef CONFIG_PPC_MAPLE
2532 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2533  * The values are bad, and it doesn't even have the right number of cells. */
2534 static void __init fixup_device_tree_maple(void)
2535 {
2536 	phandle isa;
2537 	u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2538 	u32 isa_ranges[6];
2539 	char *name;
2540 
2541 	name = "/ht@0/isa@4";
2542 	isa = call_prom("finddevice", 1, 1, ADDR(name));
2543 	if (!PHANDLE_VALID(isa)) {
2544 		name = "/ht@0/isa@6";
2545 		isa = call_prom("finddevice", 1, 1, ADDR(name));
2546 		rloc = 0x01003000; /* IO space; PCI device = 6 */
2547 	}
2548 	if (!PHANDLE_VALID(isa))
2549 		return;
2550 
2551 	if (prom_getproplen(isa, "ranges") != 12)
2552 		return;
2553 	if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2554 		== PROM_ERROR)
2555 		return;
2556 
2557 	if (isa_ranges[0] != 0x1 ||
2558 		isa_ranges[1] != 0xf4000000 ||
2559 		isa_ranges[2] != 0x00010000)
2560 		return;
2561 
2562 	prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2563 
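	/*
	 * Presumed 6-cell layout: 2-cell ISA child address (I/O space, offset 0),
	 * 3-cell PCI parent address (rloc selects I/O space and device),
	 * 1-cell size of 64kB.
	 */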
2564 	isa_ranges[0] = 0x1;
2565 	isa_ranges[1] = 0x0;
2566 	isa_ranges[2] = rloc;
2567 	isa_ranges[3] = 0x0;
2568 	isa_ranges[4] = 0x0;
2569 	isa_ranges[5] = 0x00010000;
2570 	prom_setprop(isa, name, "ranges",
2571 			isa_ranges, sizeof(isa_ranges));
2572 }
2573 
2574 #define CPC925_MC_START		0xf8000000
2575 #define CPC925_MC_LENGTH	0x1000000
2576 /* The values for the memory-controller don't have the right number of cells */
2577 static void __init fixup_device_tree_maple_memory_controller(void)
2578 {
2579 	phandle mc;
2580 	u32 mc_reg[4];
2581 	char *name = "/hostbridge@f8000000";
2582 	u32 ac, sc;
2583 
2584 	mc = call_prom("finddevice", 1, 1, ADDR(name));
2585 	if (!PHANDLE_VALID(mc))
2586 		return;
2587 
2588 	if (prom_getproplen(mc, "reg") != 8)
2589 		return;
2590 
2591 	prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2592 	prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2593 	if ((ac != 2) || (sc != 2))
2594 		return;
2595 
2596 	if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2597 		return;
2598 
2599 	if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2600 		return;
2601 
2602 	prom_printf("Fixing up bogus hostbridge on Maple...\n");
2603 
2604 	mc_reg[0] = 0x0;
2605 	mc_reg[1] = CPC925_MC_START;
2606 	mc_reg[2] = 0x0;
2607 	mc_reg[3] = CPC925_MC_LENGTH;
2608 	prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2609 }
2610 #else
2611 #define fixup_device_tree_maple()
2612 #define fixup_device_tree_maple_memory_controller()
2613 #endif
2614 
2615 #ifdef CONFIG_PPC_CHRP
2616 /*
2617  * Pegasos and BriQ lack the "ranges" property in the isa node
2618  * Pegasos needs decimal IRQ 14/15, not hexadecimal
2619  * Pegasos has the IDE configured in legacy mode, but advertised as native
2620  */
2621 static void __init fixup_device_tree_chrp(void)
2622 {
2623 	phandle ph;
2624 	u32 prop[6];
2625 	u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2626 	char *name;
2627 	int rc;
2628 
2629 	name = "/pci@80000000/isa@c";
2630 	ph = call_prom("finddevice", 1, 1, ADDR(name));
2631 	if (!PHANDLE_VALID(ph)) {
2632 		name = "/pci@ff500000/isa@6";
2633 		ph = call_prom("finddevice", 1, 1, ADDR(name));
2634 		rloc = 0x01003000; /* IO space; PCI device = 6 */
2635 	}
2636 	if (PHANDLE_VALID(ph)) {
2637 		rc = prom_getproplen(ph, "ranges");
2638 		if (rc == 0 || rc == PROM_ERROR) {
2639 			prom_printf("Fixing up missing ISA range on Pegasos...\n");
2640 
2641 			prop[0] = 0x1;
2642 			prop[1] = 0x0;
2643 			prop[2] = rloc;
2644 			prop[3] = 0x0;
2645 			prop[4] = 0x0;
2646 			prop[5] = 0x00010000;
2647 			prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2648 		}
2649 	}
2650 
2651 	name = "/pci@80000000/ide@C,1";
2652 	ph = call_prom("finddevice", 1, 1, ADDR(name));
2653 	if (PHANDLE_VALID(ph)) {
2654 		prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2655 		prop[0] = 14;
2656 		prop[1] = 0x0;
2657 		prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2658 		prom_printf("Fixing up IDE class-code on Pegasos...\n");
2659 		rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2660 		if (rc == sizeof(u32)) {
2661 			prop[0] &= ~0x5;
2662 			prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2663 		}
2664 	}
2665 }
2666 #else
2667 #define fixup_device_tree_chrp()
2668 #endif
2669 
2670 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2671 static void __init fixup_device_tree_pmac(void)
2672 {
2673 	phandle u3, i2c, mpic;
2674 	u32 u3_rev;
2675 	u32 interrupts[2];
2676 	u32 parent;
2677 
2678 	/* Some G5s have a missing interrupt definition, fix it up here */
2679 	u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2680 	if (!PHANDLE_VALID(u3))
2681 		return;
2682 	i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2683 	if (!PHANDLE_VALID(i2c))
2684 		return;
2685 	mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2686 	if (!PHANDLE_VALID(mpic))
2687 		return;
2688 
2689 	/* check if proper rev of u3 */
2690 	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2691 	    == PROM_ERROR)
2692 		return;
2693 	if (u3_rev < 0x35 || u3_rev > 0x39)
2694 		return;
2695 	/* does it need fixup ? */
2696 	if (prom_getproplen(i2c, "interrupts") > 0)
2697 		return;
2698 
2699 	prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2700 
2701 	/* interrupt on this revision of u3 is number 0 and level */
2702 	interrupts[0] = 0;
2703 	interrupts[1] = 1;
2704 	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2705 		     &interrupts, sizeof(interrupts));
2706 	parent = (u32)mpic;
2707 	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2708 		     &parent, sizeof(parent));
2709 }
2710 #else
2711 #define fixup_device_tree_pmac()
2712 #endif
2713 
2714 #ifdef CONFIG_PPC_EFIKA
2715 /*
2716  * The MPC5200 FEC driver requires a phy-handle property to tell it how
2717  * to talk to the phy.  If the phy-handle property is missing, then this
2718  * function is called to add the appropriate nodes and link it to the
2719  * ethernet node.
2720  */
2721 static void __init fixup_device_tree_efika_add_phy(void)
2722 {
2723 	u32 node;
2724 	char prop[64];
2725 	int rv;
2726 
2727 	/* Check if /builtin/ethernet exists - bail if it doesn't */
2728 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2729 	if (!PHANDLE_VALID(node))
2730 		return;
2731 
2732 	/* Check if the phy-handle property exists - bail if it does */
2733 	rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2734 	if (!rv)
2735 		return;
2736 
2737 	/*
2738 	 * At this point the ethernet device doesn't have a phy described.
2739 	 * Now we need to add the missing phy node and linkage
2740 	 */
2741 
2742 	/* Check for an MDIO bus node - if missing then create one */
2743 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
2744 	if (!PHANDLE_VALID(node)) {
2745 		prom_printf("Adding Ethernet MDIO node\n");
2746 		call_prom("interpret", 1, 1,
2747 			" s\" /builtin\" find-device"
2748 			" new-device"
2749 				" 1 encode-int s\" #address-cells\" property"
2750 				" 0 encode-int s\" #size-cells\" property"
2751 				" s\" mdio\" device-name"
2752 				" s\" fsl,mpc5200b-mdio\" encode-string"
2753 				" s\" compatible\" property"
2754 				" 0xf0003000 0x400 reg"
2755 				" 0x2 encode-int"
2756 				" 0x5 encode-int encode+"
2757 				" 0x3 encode-int encode+"
2758 				" s\" interrupts\" property"
2759 			" finish-device");
2760 	}
2761 
2762 	/* Check for a PHY device node - if missing then create one and
2763 	 * give its phandle to the ethernet node */
2764 	node = call_prom("finddevice", 1, 1,
2765 			 ADDR("/builtin/mdio/ethernet-phy"));
2766 	if (!PHANDLE_VALID(node)) {
2767 		prom_printf("Adding Ethernet PHY node\n");
2768 		call_prom("interpret", 1, 1,
2769 			" s\" /builtin/mdio\" find-device"
2770 			" new-device"
2771 				" s\" ethernet-phy\" device-name"
2772 				" 0x10 encode-int s\" reg\" property"
2773 				" my-self"
2774 				" ihandle>phandle"
2775 			" finish-device"
2776 			" s\" /builtin/ethernet\" find-device"
2777 				" encode-int"
2778 				" s\" phy-handle\" property"
2779 			" device-end");
2780 	}
2781 }
2782 
2783 static void __init fixup_device_tree_efika(void)
2784 {
2785 	int sound_irq[3] = { 2, 2, 0 };
2786 	int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
2787 				3,4,0, 3,5,0, 3,6,0, 3,7,0,
2788 				3,8,0, 3,9,0, 3,10,0, 3,11,0,
2789 				3,12,0, 3,13,0, 3,14,0, 3,15,0 };
2790 	u32 node;
2791 	char prop[64];
2792 	int rv, len;
2793 
2794 	/* Check if we're really running on an EFIKA */
2795 	node = call_prom("finddevice", 1, 1, ADDR("/"));
2796 	if (!PHANDLE_VALID(node))
2797 		return;
2798 
2799 	rv = prom_getprop(node, "model", prop, sizeof(prop));
2800 	if (rv == PROM_ERROR)
2801 		return;
2802 	if (strcmp(prop, "EFIKA5K2"))
2803 		return;
2804 
2805 	prom_printf("Applying EFIKA device tree fixups\n");
2806 
2807 	/* Claiming to be 'chrp' is death */
2808 	node = call_prom("finddevice", 1, 1, ADDR("/"));
2809 	rv = prom_getprop(node, "device_type", prop, sizeof(prop));
2810 	if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
2811 		prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
2812 
2813 	/* CODEGEN,description is exposed in /proc/cpuinfo so
2814 	   fix that too */
2815 	rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
2816 	if (rv != PROM_ERROR && (strstr(prop, "CHRP")))
2817 		prom_setprop(node, "/", "CODEGEN,description",
2818 			     "Efika 5200B PowerPC System",
2819 			     sizeof("Efika 5200B PowerPC System"));
2820 
2821 	/* Fixup bestcomm interrupts property */
2822 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
2823 	if (PHANDLE_VALID(node)) {
2824 		len = prom_getproplen(node, "interrupts");
2825 		if (len == 12) {
2826 			prom_printf("Fixing bestcomm interrupts property\n");
2827 			prom_setprop(node, "/builtin/bestcom", "interrupts",
2828 				     bcomm_irq, sizeof(bcomm_irq));
2829 		}
2830 	}
2831 
2832 	/* Fixup sound interrupts property */
2833 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
2834 	if (PHANDLE_VALID(node)) {
2835 		rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
2836 		if (rv == PROM_ERROR) {
2837 			prom_printf("Adding sound interrupts property\n");
2838 			prom_setprop(node, "/builtin/sound", "interrupts",
2839 				     sound_irq, sizeof(sound_irq));
2840 		}
2841 	}
2842 
2843 	/* Make sure ethernet phy-handle property exists */
2844 	fixup_device_tree_efika_add_phy();
2845 }
2846 #else
2847 #define fixup_device_tree_efika()
2848 #endif
2849 
2850 #ifdef CONFIG_PPC_PASEMI_NEMO
2851 /*
2852  * The CFE supplied on Nemo is broken in several ways; the biggest
2853  * problem is that it reassigns ISA interrupts to unused mpic ints.
2854  * Add an interrupt-controller property for the io-bridge to use
2855  * and correct the ints so we can attach them to an irq_domain
2856  */
2857 static void __init fixup_device_tree_pasemi(void)
2858 {
2859 	u32 interrupts[2], parent, rval, val = 0;
2860 	char *name, *pci_name;
2861 	phandle iob, node;
2862 
2863 	/* Find the root pci node */
2864 	name = "/pxp@0,e0000000";
2865 	iob = call_prom("finddevice", 1, 1, ADDR(name));
2866 	if (!PHANDLE_VALID(iob))
2867 		return;
2868 
2869 	/* check if interrupt-controller node set yet */
2870 	if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
2871 		return;
2872 
2873 	prom_printf("adding interrupt-controller property for SB600...\n");
2874 
2875 	prom_setprop(iob, name, "interrupt-controller", &val, 0);
2876 
2877 	pci_name = "/pxp@0,e0000000/pci@11";
2878 	node = call_prom("finddevice", 1, 1, ADDR(pci_name));
2879 	parent = ADDR(iob);
2880 
2881 	for (; prom_next_node(&node); ) {
2882 		/* scan each node for one with an interrupt */
2883 		if (!PHANDLE_VALID(node))
2884 			continue;
2885 
2886 		rval = prom_getproplen(node, "interrupts");
2887 		if (rval == 0 || rval == PROM_ERROR)
2888 			continue;
2889 
2890 		prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
2891 		if ((interrupts[0] < 212) || (interrupts[0] > 222))
2892 			continue;
2893 
2894 		/* found a node, update both interrupts and interrupt-parent */
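		/* Remap the reassigned MPIC numbers back to what are presumably the SB600's legacy ISA IRQs */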
2895 		if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
2896 			interrupts[0] -= 203;
2897 		if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
2898 			interrupts[0] -= 213;
2899 		if (interrupts[0] == 221)
2900 			interrupts[0] = 14;
2901 		if (interrupts[0] == 222)
2902 			interrupts[0] = 8;
2903 
2904 		prom_setprop(node, pci_name, "interrupts", interrupts,
2905 					sizeof(interrupts));
2906 		prom_setprop(node, pci_name, "interrupt-parent", &parent,
2907 					sizeof(parent));
2908 	}
2909 
2910 	/*
2911 	 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
2912 	 * so that generic isa-bridge code can add the SB600 and its on-board
2913 	 * peripherals.
2914 	 */
2915 	name = "/pxp@0,e0000000/io-bridge@0";
2916 	iob = call_prom("finddevice", 1, 1, ADDR(name));
2917 	if (!PHANDLE_VALID(iob))
2918 		return;
2919 
2920 	/* device_type is already set, just change it. */
2921 
2922 	prom_printf("Changing device_type of SB600 node...\n");
2923 
2924 	prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
2925 }
2926 #else	/* !CONFIG_PPC_PASEMI_NEMO */
2927 static inline void fixup_device_tree_pasemi(void) { }
2928 #endif
2929 
2930 static void __init fixup_device_tree(void)
2931 {
2932 	fixup_device_tree_maple();
2933 	fixup_device_tree_maple_memory_controller();
2934 	fixup_device_tree_chrp();
2935 	fixup_device_tree_pmac();
2936 	fixup_device_tree_efika();
2937 	fixup_device_tree_pasemi();
2938 }
2939 
2940 static void __init prom_find_boot_cpu(void)
2941 {
2942 	__be32 rval;
2943 	ihandle prom_cpu;
2944 	phandle cpu_pkg;
2945 
2946 	rval = 0;
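	/* /chosen/cpu holds the ihandle of the cpu we were booted on */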
2947 	if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
2948 		return;
2949 	prom_cpu = be32_to_cpu(rval);
2950 
2951 	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
2952 
2953 	if (!PHANDLE_VALID(cpu_pkg))
2954 		return;
2955 
2956 	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
2957 	prom.cpu = be32_to_cpu(rval);
2958 
2959 	prom_debug("Booting CPU hw index = %d\n", prom.cpu);
2960 }
2961 
2962 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
2963 {
2964 #ifdef CONFIG_BLK_DEV_INITRD
2965 	if (r3 && r4 && r4 != 0xdeadbeef) {
2966 		__be64 val;
2967 
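		/* r3 may be a kernel virtual address (e.g. from some boot loaders); convert it to physical if so */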
2968 		prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
2969 		prom_initrd_end = prom_initrd_start + r4;
2970 
2971 		val = cpu_to_be64(prom_initrd_start);
2972 		prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
2973 			     &val, sizeof(val));
2974 		val = cpu_to_be64(prom_initrd_end);
2975 		prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
2976 			     &val, sizeof(val));
2977 
2978 		reserve_mem(prom_initrd_start,
2979 			    prom_initrd_end - prom_initrd_start);
2980 
2981 		prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
2982 		prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
2983 	}
2984 #endif /* CONFIG_BLK_DEV_INITRD */
2985 }
2986 
2987 #ifdef CONFIG_PPC64
2988 #ifdef CONFIG_RELOCATABLE
2989 static void reloc_toc(void)
2990 {
2991 }
2992 
2993 static void unreloc_toc(void)
2994 {
2995 }
2996 #else
2997 static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
2998 {
2999 	unsigned long i;
3000 	unsigned long *toc_entry;
3001 
3002 	/* Get the start of the TOC by using r2 directly. */
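	/* r2 is biased 0x8000 bytes into the TOC so signed 16-bit offsets span 64kB; back up to the true start */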
3003 	asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
3004 
3005 	for (i = 0; i < nr_entries; i++) {
3006 		*toc_entry = *toc_entry + offset;
3007 		toc_entry++;
3008 	}
3009 }
3010 
3011 static void reloc_toc(void)
3012 {
3013 	unsigned long offset = reloc_offset();
3014 	unsigned long nr_entries =
3015 		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3016 
3017 	__reloc_toc(offset, nr_entries);
3018 
3019 	mb();
3020 }
3021 
3022 static void unreloc_toc(void)
3023 {
3024 	unsigned long offset = reloc_offset();
3025 	unsigned long nr_entries =
3026 		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3027 
3028 	mb();
3029 
3030 	__reloc_toc(-offset, nr_entries);
3031 }
3032 #endif
3033 #endif
3034 
3035 /*
3036  * We enter here early on, when the Open Firmware prom is still
3037  * handling exceptions and the MMU hash table for us.
3038  */
3039 
3040 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3041 			       unsigned long pp,
3042 			       unsigned long r6, unsigned long r7,
3043 			       unsigned long kbase)
3044 {
3045 	unsigned long hdr;
3046 
3047 #ifdef CONFIG_PPC32
3048 	unsigned long offset = reloc_offset();
3049 	reloc_got2(offset);
3050 #else
3051 	reloc_toc();
3052 #endif
3053 
3054 	/*
3055 	 * First zero the BSS
3056 	 */
3057 	memset(&__bss_start, 0, __bss_stop - __bss_start);
3058 
3059 	/*
3060 	 * Init interface to Open Firmware, get some node references,
3061 	 * like /chosen
3062 	 */
3063 	prom_init_client_services(pp);
3064 
3065 	/*
3066 	 * See if this OF is old enough that we need to do explicit maps
3067 	 * and other workarounds
3068 	 */
3069 	prom_find_mmu();
3070 
3071 	/*
3072 	 * Init prom stdout device
3073 	 */
3074 	prom_init_stdout();
3075 
3076 	prom_printf("Preparing to boot %s", linux_banner);
3077 
3078 	/*
3079 	 * Get default machine type. At this point, we do not differentiate
3080 	 * between pSeries SMP and pSeries LPAR
3081 	 */
3082 	of_platform = prom_find_machine_type();
3083 	prom_printf("Detected machine type: %x\n", of_platform);
3084 
3085 #ifndef CONFIG_NONSTATIC_KERNEL
3086 	/* Bail if this is a kdump kernel. */
3087 	if (PHYSICAL_START > 0)
3088 		prom_panic("Error: You can't boot a kdump kernel from OF!\n");
3089 #endif
3090 
3091 	/*
3092 	 * Check for an initrd
3093 	 */
3094 	prom_check_initrd(r3, r4);
3095 
3096 	/*
3097 	 * Do early parsing of command line
3098 	 */
3099 	early_cmdline_parse();
3100 
3101 #ifdef CONFIG_PPC_PSERIES
3102 	/*
3103 	 * On pSeries, inform the firmware about our capabilities
3104 	 */
3105 	if (of_platform == PLATFORM_PSERIES ||
3106 	    of_platform == PLATFORM_PSERIES_LPAR)
3107 		prom_send_capabilities();
3108 #endif
3109 
3110 	/*
3111 	 * Copy the CPU hold code
3112 	 */
3113 	if (of_platform != PLATFORM_POWERMAC)
3114 		copy_and_flush(0, kbase, 0x100, 0);
3115 
3116 	/*
3117 	 * Initialize memory management within prom_init
3118 	 */
3119 	prom_init_mem();
3120 
3121 	/*
3122 	 * Determine which cpu is actually running right _now_
3123 	 */
3124 	prom_find_boot_cpu();
3125 
3126 	/*
3127 	 * Initialize display devices
3128 	 */
3129 	prom_check_displays();
3130 
3131 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
3132 	/*
3133 	 * Initialize the IOMMU (TCE tables) on pSeries. Do that before anything
3134 	 * else that uses the allocator, since we need to make sure we get the
3135 	 * top of memory available for us here...
3136 	 */
3137 	if (of_platform == PLATFORM_PSERIES)
3138 		prom_initialize_tce_table();
3139 #endif
3140 
3141 	/*
3142 	 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3143 	 * have a usable RTAS implementation.
3144 	 */
3145 	if (of_platform != PLATFORM_POWERMAC)
3146 		prom_instantiate_rtas();
3147 
3148 #ifdef CONFIG_PPC64
3149 	/* instantiate sml */
3150 	prom_instantiate_sml();
3151 #endif
3152 
3153 	/*
3154 	 * On non-powermacs, put all CPUs in spin-loops.
3155 	 *
3156 	 * PowerMacs use a different mechanism to spin CPUs
3157 	 *
3158 	 * (This must be done after instanciating RTAS)
3159 	 */
3160 	if (of_platform != PLATFORM_POWERMAC)
3161 		prom_hold_cpus();
3162 
3163 	/*
3164 	 * Fill in some infos for use by the kernel later on
3165 	 */
3166 	if (prom_memory_limit) {
3167 		__be64 val = cpu_to_be64(prom_memory_limit);
3168 		prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3169 			     &val, sizeof(val));
3170 	}
3171 #ifdef CONFIG_PPC64
3172 	if (prom_iommu_off)
3173 		prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3174 			     NULL, 0);
3175 
3176 	if (prom_iommu_force_on)
3177 		prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3178 			     NULL, 0);
3179 
3180 	if (prom_tce_alloc_start) {
3181 		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3182 			     &prom_tce_alloc_start,
3183 			     sizeof(prom_tce_alloc_start));
3184 		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3185 			     &prom_tce_alloc_end,
3186 			     sizeof(prom_tce_alloc_end));
3187 	}
3188 #endif
3189 
3190 	/*
3191 	 * Fixup any known bugs in the device-tree
3192 	 */
3193 	fixup_device_tree();
3194 
3195 	/*
3196 	 * Now finally create the flattened device-tree
3197 	 */
3198 	prom_printf("copying OF device tree...\n");
3199 	flatten_device_tree();
3200 
3201 	/*
3202 	 * Close stdin in case it is USB and still active on IBM machines...
3203 	 * Unfortunately quiesce crashes on some powermacs if we have
3204 	 * closed stdin already (in particular the powerbook 101).
3205 	 */
3206 	if (of_platform != PLATFORM_POWERMAC)
3207 		prom_close_stdin();
3208 
3209 	/*
3210 	 * Call OF "quiesce" method to shut down pending DMA's from
3211 	 * devices etc...
3212 	 */
3213 	prom_printf("Quiescing Open Firmware ...\n");
3214 	call_prom("quiesce", 0, 0);
3215 
3216 	/*
3217 	 * And finally, call the kernel passing it the flattened device
3218 	 * tree and NULL as r5, thus triggering the new entry point which
3219 	 * is common to us and kexec
3220 	 */
3221 	hdr = dt_header_start;
3222 
3223 	/* Don't print anything after quiesce under OPAL; it crashes OFW */
3224 	prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
3225 	prom_debug("->dt_header_start=0x%lx\n", hdr);
3226 
3227 #ifdef CONFIG_PPC32
3228 	reloc_got2(-offset);
3229 #else
3230 	unreloc_toc();
3231 #endif
3232 
3233 	__start(hdr, kbase, 0, 0, 0, 0, 0);
3234 
3235 	return 0;
3236 }
3237