xref: /openbmc/linux/arch/powerpc/kernel/prom_init.c (revision 6e2055a9)
1 /*
2  * Procedures for interfacing to Open Firmware.
3  *
4  * Paul Mackerras	August 1996.
5  * Copyright (C) 1996-2005 Paul Mackerras.
6  *
7  *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8  *    {engebret|bergner}@us.ibm.com
9  *
10  *      This program is free software; you can redistribute it and/or
11  *      modify it under the terms of the GNU General Public License
12  *      as published by the Free Software Foundation; either version
13  *      2 of the License, or (at your option) any later version.
14  */
15 
16 #undef DEBUG_PROM
17 
18 #include <stdarg.h>
19 #include <linux/kernel.h>
20 #include <linux/string.h>
21 #include <linux/init.h>
22 #include <linux/threads.h>
23 #include <linux/spinlock.h>
24 #include <linux/types.h>
25 #include <linux/pci.h>
26 #include <linux/proc_fs.h>
27 #include <linux/stringify.h>
28 #include <linux/delay.h>
29 #include <linux/initrd.h>
30 #include <linux/bitops.h>
31 #include <asm/prom.h>
32 #include <asm/rtas.h>
33 #include <asm/page.h>
34 #include <asm/processor.h>
35 #include <asm/irq.h>
36 #include <asm/io.h>
37 #include <asm/smp.h>
38 #include <asm/mmu.h>
39 #include <asm/pgtable.h>
40 #include <asm/pci.h>
41 #include <asm/iommu.h>
42 #include <asm/btext.h>
43 #include <asm/sections.h>
44 #include <asm/machdep.h>
45 #include <asm/opal.h>
46 
47 #include <linux/linux_logo.h>
48 
49 /*
50  * Eventually bump that one up
51  */
52 #define DEVTREE_CHUNK_SIZE	0x100000
53 
54 /*
55  * This is the size of the local memory reserve map that gets copied
56  * into the boot params passed to the kernel. That size is totally
57  * flexible as the kernel just reads the list until it encounters an
58  * entry with size 0, so it can be changed without breaking binary
59  * compatibility
60  */
61 #define MEM_RESERVE_MAP_SIZE	8
62 
63 /*
64  * prom_init() is called very early on, before the kernel text
65  * and data have been mapped to KERNELBASE.  At this point the code
66  * is running at whatever address it has been loaded at.
67  * On ppc32 we compile with -mrelocatable, which means that references
68  * to extern and static variables get relocated automatically.
69  * ppc64 objects are always relocatable, we just need to relocate the
70  * TOC.
71  *
72  * Because OF may have mapped I/O devices into the area starting at
73  * KERNELBASE, particularly on CHRP machines, we can't safely call
74  * OF once the kernel has been mapped to KERNELBASE.  Therefore all
75  * OF calls must be done within prom_init().
76  *
77  * ADDR is used in calls to call_prom.  The 4th and following
78  * arguments to call_prom should be 32-bit values.
79  * On ppc64, 64 bit values are truncated to 32 bits (and
80  * fortunately don't get interpreted as two arguments).
81  */
82 #define ADDR(x)		(u32)(unsigned long)(x)
83 
84 #ifdef CONFIG_PPC64
85 #define OF_WORKAROUNDS	0
86 #else
87 #define OF_WORKAROUNDS	of_workarounds
88 int of_workarounds;
89 #endif
90 
91 #define OF_WA_CLAIM	1	/* do phys/virt claim separately, then map */
92 #define OF_WA_LONGTRAIL	2	/* work around longtrail bugs */
93 
94 #define PROM_BUG() do {						\
95         prom_printf("kernel BUG at %s line 0x%x!\n",		\
96 		    __FILE__, __LINE__);			\
97         __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR);	\
98 } while (0)
99 
100 #ifdef DEBUG_PROM
101 #define prom_debug(x...)	prom_printf(x)
102 #else
103 #define prom_debug(x...)
104 #endif
105 
106 
107 typedef u32 prom_arg_t;
108 
109 struct prom_args {
110         __be32 service;
111         __be32 nargs;
112         __be32 nret;
113         __be32 args[10];
114 };
115 
116 struct prom_t {
117 	ihandle root;
118 	phandle chosen;
119 	int cpu;
120 	ihandle stdout;
121 	ihandle mmumap;
122 	ihandle memory;
123 };
124 
125 struct mem_map_entry {
126 	__be64	base;
127 	__be64	size;
128 };
129 
130 typedef __be32 cell_t;
131 
132 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
133 		    unsigned long r6, unsigned long r7, unsigned long r8,
134 		    unsigned long r9);
135 
136 #ifdef CONFIG_PPC64
137 extern int enter_prom(struct prom_args *args, unsigned long entry);
138 #else
139 static inline int enter_prom(struct prom_args *args, unsigned long entry)
140 {
141 	return ((int (*)(struct prom_args *))entry)(args);
142 }
143 #endif
144 
145 extern void copy_and_flush(unsigned long dest, unsigned long src,
146 			   unsigned long size, unsigned long offset);
147 
148 /* prom structure */
149 static struct prom_t __initdata prom;
150 
151 static unsigned long prom_entry __initdata;
152 
153 #define PROM_SCRATCH_SIZE 256
154 
155 static char __initdata of_stdout_device[256];
156 static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
157 
158 static unsigned long __initdata dt_header_start;
159 static unsigned long __initdata dt_struct_start, dt_struct_end;
160 static unsigned long __initdata dt_string_start, dt_string_end;
161 
162 static unsigned long __initdata prom_initrd_start, prom_initrd_end;
163 
164 #ifdef CONFIG_PPC64
165 static int __initdata prom_iommu_force_on;
166 static int __initdata prom_iommu_off;
167 static unsigned long __initdata prom_tce_alloc_start;
168 static unsigned long __initdata prom_tce_alloc_end;
169 #endif
170 
171 /* Platforms codes are now obsolete in the kernel. Now only used within this
172  * file and ultimately gone too. Feel free to change them if you need, they
173  * are not shared with anything outside of this file anymore
174  */
175 #define PLATFORM_PSERIES	0x0100
176 #define PLATFORM_PSERIES_LPAR	0x0101
177 #define PLATFORM_LPAR		0x0001
178 #define PLATFORM_POWERMAC	0x0400
179 #define PLATFORM_GENERIC	0x0500
180 #define PLATFORM_OPAL		0x0600
181 
182 static int __initdata of_platform;
183 
184 static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
185 
186 static unsigned long __initdata prom_memory_limit;
187 
188 static unsigned long __initdata alloc_top;
189 static unsigned long __initdata alloc_top_high;
190 static unsigned long __initdata alloc_bottom;
191 static unsigned long __initdata rmo_top;
192 static unsigned long __initdata ram_top;
193 
194 static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
195 static int __initdata mem_reserve_cnt;
196 
197 static cell_t __initdata regbuf[1024];
198 
199 static bool rtas_has_query_cpu_stopped;
200 
201 
202 /*
203  * Error results ... some OF calls will return "-1" on error, some
204  * will return 0, some will return either. To simplify, here are
205  * macros to use with any ihandle or phandle return value to check if
206  * it is valid
207  */
208 
209 #define PROM_ERROR		(-1u)
210 #define PHANDLE_VALID(p)	((p) != 0 && (p) != PROM_ERROR)
211 #define IHANDLE_VALID(i)	((i) != 0 && (i) != PROM_ERROR)
212 
213 
214 /* This is the one and *ONLY* place where we actually call open
215  * firmware.
216  */
217 
218 static int __init call_prom(const char *service, int nargs, int nret, ...)
219 {
220 	int i;
221 	struct prom_args args;
222 	va_list list;
223 
224 	args.service = cpu_to_be32(ADDR(service));
225 	args.nargs = cpu_to_be32(nargs);
226 	args.nret = cpu_to_be32(nret);
227 
228 	va_start(list, nret);
229 	for (i = 0; i < nargs; i++)
230 		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
231 	va_end(list);
232 
233 	for (i = 0; i < nret; i++)
234 		args.args[nargs+i] = 0;
235 
236 	if (enter_prom(&args, prom_entry) < 0)
237 		return PROM_ERROR;
238 
239 	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
240 }
241 
242 static int __init call_prom_ret(const char *service, int nargs, int nret,
243 				prom_arg_t *rets, ...)
244 {
245 	int i;
246 	struct prom_args args;
247 	va_list list;
248 
249 	args.service = cpu_to_be32(ADDR(service));
250 	args.nargs = cpu_to_be32(nargs);
251 	args.nret = cpu_to_be32(nret);
252 
253 	va_start(list, rets);
254 	for (i = 0; i < nargs; i++)
255 		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
256 	va_end(list);
257 
258 	for (i = 0; i < nret; i++)
259 		args.args[nargs+i] = 0;
260 
261 	if (enter_prom(&args, prom_entry) < 0)
262 		return PROM_ERROR;
263 
264 	if (rets != NULL)
265 		for (i = 1; i < nret; ++i)
266 			rets[i-1] = be32_to_cpu(args.args[nargs+i]);
267 
268 	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
269 }
270 
271 
272 static void __init prom_print(const char *msg)
273 {
274 	const char *p, *q;
275 
276 	if (prom.stdout == 0)
277 		return;
278 
279 	for (p = msg; *p != 0; p = q) {
280 		for (q = p; *q != 0 && *q != '\n'; ++q)
281 			;
282 		if (q > p)
283 			call_prom("write", 3, 1, prom.stdout, p, q - p);
284 		if (*q == 0)
285 			break;
286 		++q;
287 		call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
288 	}
289 }
290 
291 
292 static void __init prom_print_hex(unsigned long val)
293 {
294 	int i, nibbles = sizeof(val)*2;
295 	char buf[sizeof(val)*2+1];
296 
297 	for (i = nibbles-1;  i >= 0;  i--) {
298 		buf[i] = (val & 0xf) + '0';
299 		if (buf[i] > '9')
300 			buf[i] += ('a'-'0'-10);
301 		val >>= 4;
302 	}
303 	buf[nibbles] = '\0';
304 	call_prom("write", 3, 1, prom.stdout, buf, nibbles);
305 }
306 
307 /* max number of decimal digits in an unsigned long */
308 #define UL_DIGITS 21
309 static void __init prom_print_dec(unsigned long val)
310 {
311 	int i, size;
312 	char buf[UL_DIGITS+1];
313 
314 	for (i = UL_DIGITS-1; i >= 0;  i--) {
315 		buf[i] = (val % 10) + '0';
316 		val = val/10;
317 		if (val == 0)
318 			break;
319 	}
320 	/* shift stuff down */
321 	size = UL_DIGITS - i;
322 	call_prom("write", 3, 1, prom.stdout, buf+i, size);
323 }
324 
325 static void __init prom_printf(const char *format, ...)
326 {
327 	const char *p, *q, *s;
328 	va_list args;
329 	unsigned long v;
330 	long vs;
331 
332 	va_start(args, format);
333 	for (p = format; *p != 0; p = q) {
334 		for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
335 			;
336 		if (q > p)
337 			call_prom("write", 3, 1, prom.stdout, p, q - p);
338 		if (*q == 0)
339 			break;
340 		if (*q == '\n') {
341 			++q;
342 			call_prom("write", 3, 1, prom.stdout,
343 				  ADDR("\r\n"), 2);
344 			continue;
345 		}
346 		++q;
347 		if (*q == 0)
348 			break;
349 		switch (*q) {
350 		case 's':
351 			++q;
352 			s = va_arg(args, const char *);
353 			prom_print(s);
354 			break;
355 		case 'x':
356 			++q;
357 			v = va_arg(args, unsigned long);
358 			prom_print_hex(v);
359 			break;
360 		case 'd':
361 			++q;
362 			vs = va_arg(args, int);
363 			if (vs < 0) {
364 				prom_print("-");
365 				vs = -vs;
366 			}
367 			prom_print_dec(vs);
368 			break;
369 		case 'l':
370 			++q;
371 			if (*q == 0)
372 				break;
373 			else if (*q == 'x') {
374 				++q;
375 				v = va_arg(args, unsigned long);
376 				prom_print_hex(v);
377 			} else if (*q == 'u') { /* '%lu' */
378 				++q;
379 				v = va_arg(args, unsigned long);
380 				prom_print_dec(v);
381 			} else if (*q == 'd') { /* %ld */
382 				++q;
383 				vs = va_arg(args, long);
384 				if (vs < 0) {
385 					prom_print("-");
386 					vs = -vs;
387 				}
388 				prom_print_dec(vs);
389 			}
390 			break;
391 		}
392 	}
393 }
394 
395 
396 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
397 				unsigned long align)
398 {
399 
400 	if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
401 		/*
402 		 * Old OF requires we claim physical and virtual separately
403 		 * and then map explicitly (assuming virtual mode)
404 		 */
405 		int ret;
406 		prom_arg_t result;
407 
408 		ret = call_prom_ret("call-method", 5, 2, &result,
409 				    ADDR("claim"), prom.memory,
410 				    align, size, virt);
411 		if (ret != 0 || result == -1)
412 			return -1;
413 		ret = call_prom_ret("call-method", 5, 2, &result,
414 				    ADDR("claim"), prom.mmumap,
415 				    align, size, virt);
416 		if (ret != 0) {
417 			call_prom("call-method", 4, 1, ADDR("release"),
418 				  prom.memory, size, virt);
419 			return -1;
420 		}
421 		/* the 0x12 is M (coherence) + PP == read/write */
422 		call_prom("call-method", 6, 1,
423 			  ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
424 		return virt;
425 	}
426 	return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
427 			 (prom_arg_t)align);
428 }
429 
430 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
431 {
432 	prom_print(reason);
433 	/* Do not call exit because it clears the screen on pmac
434 	 * it also causes some sort of double-fault on early pmacs */
435 	if (of_platform == PLATFORM_POWERMAC)
436 		asm("trap\n");
437 
438 	/* ToDo: should put up an SRC here on pSeries */
439 	call_prom("exit", 0, 0);
440 
441 	for (;;)			/* should never get here */
442 		;
443 }
444 
445 
446 static int __init prom_next_node(phandle *nodep)
447 {
448 	phandle node;
449 
450 	if ((node = *nodep) != 0
451 	    && (*nodep = call_prom("child", 1, 1, node)) != 0)
452 		return 1;
453 	if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
454 		return 1;
455 	for (;;) {
456 		if ((node = call_prom("parent", 1, 1, node)) == 0)
457 			return 0;
458 		if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
459 			return 1;
460 	}
461 }
462 
463 static int inline prom_getprop(phandle node, const char *pname,
464 			       void *value, size_t valuelen)
465 {
466 	return call_prom("getprop", 4, 1, node, ADDR(pname),
467 			 (u32)(unsigned long) value, (u32) valuelen);
468 }
469 
470 static int inline prom_getproplen(phandle node, const char *pname)
471 {
472 	return call_prom("getproplen", 2, 1, node, ADDR(pname));
473 }
474 
475 static void add_string(char **str, const char *q)
476 {
477 	char *p = *str;
478 
479 	while (*q)
480 		*p++ = *q++;
481 	*p++ = ' ';
482 	*str = p;
483 }
484 
485 static char *tohex(unsigned int x)
486 {
487 	static char digits[] = "0123456789abcdef";
488 	static char result[9];
489 	int i;
490 
491 	result[8] = 0;
492 	i = 8;
493 	do {
494 		--i;
495 		result[i] = digits[x & 0xf];
496 		x >>= 4;
497 	} while (x != 0 && i > 0);
498 	return &result[i];
499 }
500 
501 static int __init prom_setprop(phandle node, const char *nodename,
502 			       const char *pname, void *value, size_t valuelen)
503 {
504 	char cmd[256], *p;
505 
506 	if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
507 		return call_prom("setprop", 4, 1, node, ADDR(pname),
508 				 (u32)(unsigned long) value, (u32) valuelen);
509 
510 	/* gah... setprop doesn't work on longtrail, have to use interpret */
511 	p = cmd;
512 	add_string(&p, "dev");
513 	add_string(&p, nodename);
514 	add_string(&p, tohex((u32)(unsigned long) value));
515 	add_string(&p, tohex(valuelen));
516 	add_string(&p, tohex(ADDR(pname)));
517 	add_string(&p, tohex(strlen(pname)));
518 	add_string(&p, "property");
519 	*p = 0;
520 	return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
521 }
522 
523 /* We can't use the standard versions because of relocation headaches. */
524 #define isxdigit(c)	(('0' <= (c) && (c) <= '9') \
525 			 || ('a' <= (c) && (c) <= 'f') \
526 			 || ('A' <= (c) && (c) <= 'F'))
527 
528 #define isdigit(c)	('0' <= (c) && (c) <= '9')
529 #define islower(c)	('a' <= (c) && (c) <= 'z')
530 #define toupper(c)	(islower(c) ? ((c) - 'a' + 'A') : (c))
531 
532 static unsigned long prom_strtoul(const char *cp, const char **endp)
533 {
534 	unsigned long result = 0, base = 10, value;
535 
536 	if (*cp == '0') {
537 		base = 8;
538 		cp++;
539 		if (toupper(*cp) == 'X') {
540 			cp++;
541 			base = 16;
542 		}
543 	}
544 
545 	while (isxdigit(*cp) &&
546 	       (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
547 		result = result * base + value;
548 		cp++;
549 	}
550 
551 	if (endp)
552 		*endp = cp;
553 
554 	return result;
555 }
556 
557 static unsigned long prom_memparse(const char *ptr, const char **retptr)
558 {
559 	unsigned long ret = prom_strtoul(ptr, retptr);
560 	int shift = 0;
561 
562 	/*
563 	 * We can't use a switch here because GCC *may* generate a
564 	 * jump table which won't work, because we're not running at
565 	 * the address we're linked at.
566 	 */
567 	if ('G' == **retptr || 'g' == **retptr)
568 		shift = 30;
569 
570 	if ('M' == **retptr || 'm' == **retptr)
571 		shift = 20;
572 
573 	if ('K' == **retptr || 'k' == **retptr)
574 		shift = 10;
575 
576 	if (shift) {
577 		ret <<= shift;
578 		(*retptr)++;
579 	}
580 
581 	return ret;
582 }
583 
584 /*
585  * Early parsing of the command line passed to the kernel, used for
586  * "mem=x" and the options that affect the iommu
587  */
588 static void __init early_cmdline_parse(void)
589 {
590 	const char *opt;
591 
592 	char *p;
593 	int l = 0;
594 
595 	prom_cmd_line[0] = 0;
596 	p = prom_cmd_line;
597 	if ((long)prom.chosen > 0)
598 		l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
599 #ifdef CONFIG_CMDLINE
600 	if (l <= 0 || p[0] == '\0') /* dbl check */
601 		strlcpy(prom_cmd_line,
602 			CONFIG_CMDLINE, sizeof(prom_cmd_line));
603 #endif /* CONFIG_CMDLINE */
604 	prom_printf("command line: %s\n", prom_cmd_line);
605 
606 #ifdef CONFIG_PPC64
607 	opt = strstr(prom_cmd_line, "iommu=");
608 	if (opt) {
609 		prom_printf("iommu opt is: %s\n", opt);
610 		opt += 6;
611 		while (*opt && *opt == ' ')
612 			opt++;
613 		if (!strncmp(opt, "off", 3))
614 			prom_iommu_off = 1;
615 		else if (!strncmp(opt, "force", 5))
616 			prom_iommu_force_on = 1;
617 	}
618 #endif
619 	opt = strstr(prom_cmd_line, "mem=");
620 	if (opt) {
621 		opt += 4;
622 		prom_memory_limit = prom_memparse(opt, (const char **)&opt);
623 #ifdef CONFIG_PPC64
624 		/* Align to 16 MB == size of ppc64 large page */
625 		prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
626 #endif
627 	}
628 }
629 
630 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
631 /*
632  * The architecture vector has an array of PVR mask/value pairs,
633  * followed by # option vectors - 1, followed by the option vectors.
634  *
635  * See prom.h for the definition of the bits specified in the
636  * architecture vector.
637  *
638  * Because the description vector contains a mix of byte and word
639  * values, we declare it as an unsigned char array, and use this
640  * macro to put word values in.
641  */
642 #define W(x)	((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
643 		((x) >> 8) & 0xff, (x) & 0xff
644 
645 unsigned char ibm_architecture_vec[] = {
646 	W(0xfffe0000), W(0x003a0000),	/* POWER5/POWER5+ */
647 	W(0xffff0000), W(0x003e0000),	/* POWER6 */
648 	W(0xffff0000), W(0x003f0000),	/* POWER7 */
649 	W(0xffff0000), W(0x004b0000),	/* POWER8E */
650 	W(0xffff0000), W(0x004d0000),	/* POWER8 */
651 	W(0xffffffff), W(0x0f000004),	/* all 2.07-compliant */
652 	W(0xffffffff), W(0x0f000003),	/* all 2.06-compliant */
653 	W(0xffffffff), W(0x0f000002),	/* all 2.05-compliant */
654 	W(0xfffffffe), W(0x0f000001),	/* all 2.04-compliant and earlier */
655 	6 - 1,				/* 6 option vectors */
656 
657 	/* option vector 1: processor architectures supported */
658 	3 - 2,				/* length */
659 	0,				/* don't ignore, don't halt */
660 	OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
661 	OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
662 
663 	/* option vector 2: Open Firmware options supported */
664 	34 - 2,				/* length */
665 	OV2_REAL_MODE,
666 	0, 0,
667 	W(0xffffffff),			/* real_base */
668 	W(0xffffffff),			/* real_size */
669 	W(0xffffffff),			/* virt_base */
670 	W(0xffffffff),			/* virt_size */
671 	W(0xffffffff),			/* load_base */
672 	W(256),				/* 256MB min RMA */
673 	W(0xffffffff),			/* full client load */
674 	0,				/* min RMA percentage of total RAM */
675 	48,				/* max log_2(hash table size) */
676 
677 	/* option vector 3: processor options supported */
678 	3 - 2,				/* length */
679 	0,				/* don't ignore, don't halt */
680 	OV3_FP | OV3_VMX | OV3_DFP,
681 
682 	/* option vector 4: IBM PAPR implementation */
683 	3 - 2,				/* length */
684 	0,				/* don't halt */
685 	OV4_MIN_ENT_CAP,		/* minimum VP entitled capacity */
686 
687 	/* option vector 5: PAPR/OF options */
688 	19 - 2,				/* length */
689 	0,				/* don't ignore, don't halt */
690 	OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
691 	OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
692 #ifdef CONFIG_PCI_MSI
693 	/* PCIe/MSI support.  Without MSI full PCIe is not supported */
694 	OV5_FEAT(OV5_MSI),
695 #else
696 	0,
697 #endif
698 	0,
699 #ifdef CONFIG_PPC_SMLPAR
700 	OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
701 #else
702 	0,
703 #endif
704 	OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
705 	0,
706 	0,
707 	0,
708 	/* WARNING: The offset of the "number of cores" field below
709 	 * must match by the macro below. Update the definition if
710 	 * the structure layout changes.
711 	 */
712 #define IBM_ARCH_VEC_NRCORES_OFFSET	125
713 	W(NR_CPUS),			/* number of cores supported */
714 	0,
715 	0,
716 	0,
717 	0,
718 	OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
719 	OV5_FEAT(OV5_PFO_HW_842),
720 	OV5_FEAT(OV5_SUB_PROCESSORS),
721 	/* option vector 6: IBM PAPR hints */
722 	4 - 2,				/* length */
723 	0,
724 	0,
725 	OV6_LINUX,
726 
727 };
728 
729 /* Old method - ELF header with PT_NOTE sections only works on BE */
730 #ifdef __BIG_ENDIAN__
731 static struct fake_elf {
732 	Elf32_Ehdr	elfhdr;
733 	Elf32_Phdr	phdr[2];
734 	struct chrpnote {
735 		u32	namesz;
736 		u32	descsz;
737 		u32	type;
738 		char	name[8];	/* "PowerPC" */
739 		struct chrpdesc {
740 			u32	real_mode;
741 			u32	real_base;
742 			u32	real_size;
743 			u32	virt_base;
744 			u32	virt_size;
745 			u32	load_base;
746 		} chrpdesc;
747 	} chrpnote;
748 	struct rpanote {
749 		u32	namesz;
750 		u32	descsz;
751 		u32	type;
752 		char	name[24];	/* "IBM,RPA-Client-Config" */
753 		struct rpadesc {
754 			u32	lpar_affinity;
755 			u32	min_rmo_size;
756 			u32	min_rmo_percent;
757 			u32	max_pft_size;
758 			u32	splpar;
759 			u32	min_load;
760 			u32	new_mem_def;
761 			u32	ignore_me;
762 		} rpadesc;
763 	} rpanote;
764 } fake_elf = {
765 	.elfhdr = {
766 		.e_ident = { 0x7f, 'E', 'L', 'F',
767 			     ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
768 		.e_type = ET_EXEC,	/* yeah right */
769 		.e_machine = EM_PPC,
770 		.e_version = EV_CURRENT,
771 		.e_phoff = offsetof(struct fake_elf, phdr),
772 		.e_phentsize = sizeof(Elf32_Phdr),
773 		.e_phnum = 2
774 	},
775 	.phdr = {
776 		[0] = {
777 			.p_type = PT_NOTE,
778 			.p_offset = offsetof(struct fake_elf, chrpnote),
779 			.p_filesz = sizeof(struct chrpnote)
780 		}, [1] = {
781 			.p_type = PT_NOTE,
782 			.p_offset = offsetof(struct fake_elf, rpanote),
783 			.p_filesz = sizeof(struct rpanote)
784 		}
785 	},
786 	.chrpnote = {
787 		.namesz = sizeof("PowerPC"),
788 		.descsz = sizeof(struct chrpdesc),
789 		.type = 0x1275,
790 		.name = "PowerPC",
791 		.chrpdesc = {
792 			.real_mode = ~0U,	/* ~0 means "don't care" */
793 			.real_base = ~0U,
794 			.real_size = ~0U,
795 			.virt_base = ~0U,
796 			.virt_size = ~0U,
797 			.load_base = ~0U
798 		},
799 	},
800 	.rpanote = {
801 		.namesz = sizeof("IBM,RPA-Client-Config"),
802 		.descsz = sizeof(struct rpadesc),
803 		.type = 0x12759999,
804 		.name = "IBM,RPA-Client-Config",
805 		.rpadesc = {
806 			.lpar_affinity = 0,
807 			.min_rmo_size = 64,	/* in megabytes */
808 			.min_rmo_percent = 0,
809 			.max_pft_size = 48,	/* 2^48 bytes max PFT size */
810 			.splpar = 1,
811 			.min_load = ~0U,
812 			.new_mem_def = 0
813 		}
814 	}
815 };
816 #endif /* __BIG_ENDIAN__ */
817 
818 static int __init prom_count_smt_threads(void)
819 {
820 	phandle node;
821 	char type[64];
822 	unsigned int plen;
823 
824 	/* Pick up th first CPU node we can find */
825 	for (node = 0; prom_next_node(&node); ) {
826 		type[0] = 0;
827 		prom_getprop(node, "device_type", type, sizeof(type));
828 
829 		if (strcmp(type, "cpu"))
830 			continue;
831 		/*
832 		 * There is an entry for each smt thread, each entry being
833 		 * 4 bytes long.  All cpus should have the same number of
834 		 * smt threads, so return after finding the first.
835 		 */
836 		plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
837 		if (plen == PROM_ERROR)
838 			break;
839 		plen >>= 2;
840 		prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
841 
842 		/* Sanity check */
843 		if (plen < 1 || plen > 64) {
844 			prom_printf("Threads per core %lu out of bounds, assuming 1\n",
845 				    (unsigned long)plen);
846 			return 1;
847 		}
848 		return plen;
849 	}
850 	prom_debug("No threads found, assuming 1 per core\n");
851 
852 	return 1;
853 
854 }
855 
856 
857 static void __init prom_send_capabilities(void)
858 {
859 	ihandle root;
860 	prom_arg_t ret;
861 	u32 cores;
862 	unsigned char *ptcores;
863 
864 	root = call_prom("open", 1, 1, ADDR("/"));
865 	if (root != 0) {
866 		/* We need to tell the FW about the number of cores we support.
867 		 *
868 		 * To do that, we count the number of threads on the first core
869 		 * (we assume this is the same for all cores) and use it to
870 		 * divide NR_CPUS.
871 		 */
872 
873 		/* The core value may start at an odd address. If such a word
874 		 * access is made at a cache line boundary, this leads to an
875 		 * exception which may not be handled at this time.
876 		 * Forcing a per byte access to avoid exception.
877 		 */
878 		ptcores = &ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
879 		cores = 0;
880 		cores |= ptcores[0] << 24;
881 		cores |= ptcores[1] << 16;
882 		cores |= ptcores[2] << 8;
883 		cores |= ptcores[3];
884 		if (cores != NR_CPUS) {
885 			prom_printf("WARNING ! "
886 				    "ibm_architecture_vec structure inconsistent: %lu!\n",
887 				    cores);
888 		} else {
889 			cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
890 			prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
891 				    cores, NR_CPUS);
892 			ptcores[0] = (cores >> 24) & 0xff;
893 			ptcores[1] = (cores >> 16) & 0xff;
894 			ptcores[2] = (cores >> 8) & 0xff;
895 			ptcores[3] = cores & 0xff;
896 		}
897 
898 		/* try calling the ibm,client-architecture-support method */
899 		prom_printf("Calling ibm,client-architecture-support...");
900 		if (call_prom_ret("call-method", 3, 2, &ret,
901 				  ADDR("ibm,client-architecture-support"),
902 				  root,
903 				  ADDR(ibm_architecture_vec)) == 0) {
904 			/* the call exists... */
905 			if (ret)
906 				prom_printf("\nWARNING: ibm,client-architecture"
907 					    "-support call FAILED!\n");
908 			call_prom("close", 1, 0, root);
909 			prom_printf(" done\n");
910 			return;
911 		}
912 		call_prom("close", 1, 0, root);
913 		prom_printf(" not implemented\n");
914 	}
915 
916 #ifdef __BIG_ENDIAN__
917 	{
918 		ihandle elfloader;
919 
920 		/* no ibm,client-architecture-support call, try the old way */
921 		elfloader = call_prom("open", 1, 1,
922 				      ADDR("/packages/elf-loader"));
923 		if (elfloader == 0) {
924 			prom_printf("couldn't open /packages/elf-loader\n");
925 			return;
926 		}
927 		call_prom("call-method", 3, 1, ADDR("process-elf-header"),
928 			  elfloader, ADDR(&fake_elf));
929 		call_prom("close", 1, 0, elfloader);
930 	}
931 #endif /* __BIG_ENDIAN__ */
932 }
933 #endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
934 
935 /*
936  * Memory allocation strategy... our layout is normally:
937  *
938  *  at 14Mb or more we have vmlinux, then a gap and initrd.  In some
939  *  rare cases, initrd might end up being before the kernel though.
940  *  We assume this won't override the final kernel at 0, we have no
941  *  provision to handle that in this version, but it should hopefully
942  *  never happen.
943  *
944  *  alloc_top is set to the top of RMO, eventually shrink down if the
945  *  TCEs overlap
946  *
947  *  alloc_bottom is set to the top of kernel/initrd
948  *
949  *  from there, allocations are done this way : rtas is allocated
950  *  topmost, and the device-tree is allocated from the bottom. We try
951  *  to grow the device-tree allocation as we progress. If we can't,
952  *  then we fail, we don't currently have a facility to restart
953  *  elsewhere, but that shouldn't be necessary.
954  *
955  *  Note that calls to reserve_mem have to be done explicitly, memory
956  *  allocated with either alloc_up or alloc_down isn't automatically
957  *  reserved.
958  */
959 
960 
961 /*
962  * Allocates memory in the RMO upward from the kernel/initrd
963  *
964  * When align is 0, this is a special case, it means to allocate in place
965  * at the current location of alloc_bottom or fail (that is basically
966  * extending the previous allocation). Used for the device-tree flattening
967  */
968 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
969 {
970 	unsigned long base = alloc_bottom;
971 	unsigned long addr = 0;
972 
973 	if (align)
974 		base = _ALIGN_UP(base, align);
975 	prom_debug("alloc_up(%x, %x)\n", size, align);
976 	if (ram_top == 0)
977 		prom_panic("alloc_up() called with mem not initialized\n");
978 
979 	if (align)
980 		base = _ALIGN_UP(alloc_bottom, align);
981 	else
982 		base = alloc_bottom;
983 
984 	for(; (base + size) <= alloc_top;
985 	    base = _ALIGN_UP(base + 0x100000, align)) {
986 		prom_debug("    trying: 0x%x\n\r", base);
987 		addr = (unsigned long)prom_claim(base, size, 0);
988 		if (addr != PROM_ERROR && addr != 0)
989 			break;
990 		addr = 0;
991 		if (align == 0)
992 			break;
993 	}
994 	if (addr == 0)
995 		return 0;
996 	alloc_bottom = addr + size;
997 
998 	prom_debug(" -> %x\n", addr);
999 	prom_debug("  alloc_bottom : %x\n", alloc_bottom);
1000 	prom_debug("  alloc_top    : %x\n", alloc_top);
1001 	prom_debug("  alloc_top_hi : %x\n", alloc_top_high);
1002 	prom_debug("  rmo_top      : %x\n", rmo_top);
1003 	prom_debug("  ram_top      : %x\n", ram_top);
1004 
1005 	return addr;
1006 }
1007 
1008 /*
1009  * Allocates memory downward, either from top of RMO, or if highmem
1010  * is set, from the top of RAM.  Note that this one doesn't handle
1011  * failures.  It does claim memory if highmem is not set.
1012  */
1013 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1014 				       int highmem)
1015 {
1016 	unsigned long base, addr = 0;
1017 
1018 	prom_debug("alloc_down(%x, %x, %s)\n", size, align,
1019 		   highmem ? "(high)" : "(low)");
1020 	if (ram_top == 0)
1021 		prom_panic("alloc_down() called with mem not initialized\n");
1022 
1023 	if (highmem) {
1024 		/* Carve out storage for the TCE table. */
1025 		addr = _ALIGN_DOWN(alloc_top_high - size, align);
1026 		if (addr <= alloc_bottom)
1027 			return 0;
1028 		/* Will we bump into the RMO ? If yes, check out that we
1029 		 * didn't overlap existing allocations there, if we did,
1030 		 * we are dead, we must be the first in town !
1031 		 */
1032 		if (addr < rmo_top) {
1033 			/* Good, we are first */
1034 			if (alloc_top == rmo_top)
1035 				alloc_top = rmo_top = addr;
1036 			else
1037 				return 0;
1038 		}
1039 		alloc_top_high = addr;
1040 		goto bail;
1041 	}
1042 
1043 	base = _ALIGN_DOWN(alloc_top - size, align);
1044 	for (; base > alloc_bottom;
1045 	     base = _ALIGN_DOWN(base - 0x100000, align))  {
1046 		prom_debug("    trying: 0x%x\n\r", base);
1047 		addr = (unsigned long)prom_claim(base, size, 0);
1048 		if (addr != PROM_ERROR && addr != 0)
1049 			break;
1050 		addr = 0;
1051 	}
1052 	if (addr == 0)
1053 		return 0;
1054 	alloc_top = addr;
1055 
1056  bail:
1057 	prom_debug(" -> %x\n", addr);
1058 	prom_debug("  alloc_bottom : %x\n", alloc_bottom);
1059 	prom_debug("  alloc_top    : %x\n", alloc_top);
1060 	prom_debug("  alloc_top_hi : %x\n", alloc_top_high);
1061 	prom_debug("  rmo_top      : %x\n", rmo_top);
1062 	prom_debug("  ram_top      : %x\n", ram_top);
1063 
1064 	return addr;
1065 }
1066 
1067 /*
1068  * Parse a "reg" cell
1069  */
1070 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1071 {
1072 	cell_t *p = *cellp;
1073 	unsigned long r = 0;
1074 
1075 	/* Ignore more than 2 cells */
1076 	while (s > sizeof(unsigned long) / 4) {
1077 		p++;
1078 		s--;
1079 	}
1080 	r = be32_to_cpu(*p++);
1081 #ifdef CONFIG_PPC64
1082 	if (s > 1) {
1083 		r <<= 32;
1084 		r |= be32_to_cpu(*(p++));
1085 	}
1086 #endif
1087 	*cellp = p;
1088 	return r;
1089 }
1090 
1091 /*
1092  * Very dumb function for adding to the memory reserve list, but
1093  * we don't need anything smarter at this point
1094  *
1095  * XXX Eventually check for collisions.  They should NEVER happen.
1096  * If problems seem to show up, it would be a good start to track
1097  * them down.
1098  */
1099 static void __init reserve_mem(u64 base, u64 size)
1100 {
1101 	u64 top = base + size;
1102 	unsigned long cnt = mem_reserve_cnt;
1103 
1104 	if (size == 0)
1105 		return;
1106 
1107 	/* We need to always keep one empty entry so that we
1108 	 * have our terminator with "size" set to 0 since we are
1109 	 * dumb and just copy this entire array to the boot params
1110 	 */
1111 	base = _ALIGN_DOWN(base, PAGE_SIZE);
1112 	top = _ALIGN_UP(top, PAGE_SIZE);
1113 	size = top - base;
1114 
1115 	if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1116 		prom_panic("Memory reserve map exhausted !\n");
1117 	mem_reserve_map[cnt].base = cpu_to_be64(base);
1118 	mem_reserve_map[cnt].size = cpu_to_be64(size);
1119 	mem_reserve_cnt = cnt + 1;
1120 }
1121 
1122 /*
1123  * Initialize memory allocation mechanism, parse "memory" nodes and
1124  * obtain that way the top of memory and RMO to setup out local allocator
1125  */
1126 static void __init prom_init_mem(void)
1127 {
1128 	phandle node;
1129 	char *path, type[64];
1130 	unsigned int plen;
1131 	cell_t *p, *endp;
1132 	__be32 val;
1133 	u32 rac, rsc;
1134 
1135 	/*
1136 	 * We iterate the memory nodes to find
1137 	 * 1) top of RMO (first node)
1138 	 * 2) top of memory
1139 	 */
1140 	val = cpu_to_be32(2);
1141 	prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1142 	rac = be32_to_cpu(val);
1143 	val = cpu_to_be32(1);
1144 	prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc));
1145 	rsc = be32_to_cpu(val);
1146 	prom_debug("root_addr_cells: %x\n", rac);
1147 	prom_debug("root_size_cells: %x\n", rsc);
1148 
1149 	prom_debug("scanning memory:\n");
1150 	path = prom_scratch;
1151 
1152 	for (node = 0; prom_next_node(&node); ) {
1153 		type[0] = 0;
1154 		prom_getprop(node, "device_type", type, sizeof(type));
1155 
1156 		if (type[0] == 0) {
1157 			/*
1158 			 * CHRP Longtrail machines have no device_type
1159 			 * on the memory node, so check the name instead...
1160 			 */
1161 			prom_getprop(node, "name", type, sizeof(type));
1162 		}
1163 		if (strcmp(type, "memory"))
1164 			continue;
1165 
1166 		plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1167 		if (plen > sizeof(regbuf)) {
1168 			prom_printf("memory node too large for buffer !\n");
1169 			plen = sizeof(regbuf);
1170 		}
1171 		p = regbuf;
1172 		endp = p + (plen / sizeof(cell_t));
1173 
1174 #ifdef DEBUG_PROM
1175 		memset(path, 0, PROM_SCRATCH_SIZE);
1176 		call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1177 		prom_debug("  node %s :\n", path);
1178 #endif /* DEBUG_PROM */
1179 
1180 		while ((endp - p) >= (rac + rsc)) {
1181 			unsigned long base, size;
1182 
1183 			base = prom_next_cell(rac, &p);
1184 			size = prom_next_cell(rsc, &p);
1185 
1186 			if (size == 0)
1187 				continue;
1188 			prom_debug("    %x %x\n", base, size);
1189 			if (base == 0 && (of_platform & PLATFORM_LPAR))
1190 				rmo_top = size;
1191 			if ((base + size) > ram_top)
1192 				ram_top = base + size;
1193 		}
1194 	}
1195 
1196 	alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1197 
1198 	/*
1199 	 * If prom_memory_limit is set we reduce the upper limits *except* for
1200 	 * alloc_top_high. This must be the real top of RAM so we can put
1201 	 * TCE's up there.
1202 	 */
1203 
1204 	alloc_top_high = ram_top;
1205 
1206 	if (prom_memory_limit) {
1207 		if (prom_memory_limit <= alloc_bottom) {
1208 			prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
1209 				prom_memory_limit);
1210 			prom_memory_limit = 0;
1211 		} else if (prom_memory_limit >= ram_top) {
1212 			prom_printf("Ignoring mem=%x >= ram_top.\n",
1213 				prom_memory_limit);
1214 			prom_memory_limit = 0;
1215 		} else {
1216 			ram_top = prom_memory_limit;
1217 			rmo_top = min(rmo_top, prom_memory_limit);
1218 		}
1219 	}
1220 
1221 	/*
1222 	 * Setup our top alloc point, that is top of RMO or top of
1223 	 * segment 0 when running non-LPAR.
1224 	 * Some RS64 machines have buggy firmware where claims up at
1225 	 * 1GB fail.  Cap at 768MB as a workaround.
1226 	 * Since 768MB is plenty of room, and we need to cap to something
1227 	 * reasonable on 32-bit, cap at 768MB on all machines.
1228 	 */
1229 	if (!rmo_top)
1230 		rmo_top = ram_top;
1231 	rmo_top = min(0x30000000ul, rmo_top);
1232 	alloc_top = rmo_top;
1233 	alloc_top_high = ram_top;
1234 
1235 	/*
1236 	 * Check if we have an initrd after the kernel but still inside
1237 	 * the RMO.  If we do move our bottom point to after it.
1238 	 */
1239 	if (prom_initrd_start &&
1240 	    prom_initrd_start < rmo_top &&
1241 	    prom_initrd_end > alloc_bottom)
1242 		alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1243 
1244 	prom_printf("memory layout at init:\n");
1245 	prom_printf("  memory_limit : %x (16 MB aligned)\n", prom_memory_limit);
1246 	prom_printf("  alloc_bottom : %x\n", alloc_bottom);
1247 	prom_printf("  alloc_top    : %x\n", alloc_top);
1248 	prom_printf("  alloc_top_hi : %x\n", alloc_top_high);
1249 	prom_printf("  rmo_top      : %x\n", rmo_top);
1250 	prom_printf("  ram_top      : %x\n", ram_top);
1251 }
1252 
1253 static void __init prom_close_stdin(void)
1254 {
1255 	__be32 val;
1256 	ihandle stdin;
1257 
1258 	if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1259 		stdin = be32_to_cpu(val);
1260 		call_prom("close", 1, 0, stdin);
1261 	}
1262 }
1263 
1264 #ifdef CONFIG_PPC_POWERNV
1265 
1266 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1267 static u64 __initdata prom_opal_base;
1268 static u64 __initdata prom_opal_entry;
1269 #endif
1270 
1271 #ifdef __BIG_ENDIAN__
1272 /* XXX Don't change this structure without updating opal-takeover.S */
1273 static struct opal_secondary_data {
1274 	s64				ack;	/*  0 */
1275 	u64				go;	/*  8 */
1276 	struct opal_takeover_args	args;	/* 16 */
1277 } opal_secondary_data;
1278 
1279 static u64 __initdata prom_opal_align;
1280 static u64 __initdata prom_opal_size;
1281 static int __initdata prom_rtas_start_cpu;
1282 static u64 __initdata prom_rtas_data;
1283 static u64 __initdata prom_rtas_entry;
1284 
1285 extern char opal_secondary_entry;
1286 
1287 static void __init prom_query_opal(void)
1288 {
1289 	long rc;
1290 
1291 	/* We must not query for OPAL presence on a machine that
1292 	 * supports TNK takeover (970 blades), as this uses the same
1293 	 * h-call with different arguments and will crash
1294 	 */
1295 	if (PHANDLE_VALID(call_prom("finddevice", 1, 1,
1296 				    ADDR("/tnk-memory-map")))) {
1297 		prom_printf("TNK takeover detected, skipping OPAL check\n");
1298 		return;
1299 	}
1300 
1301 	prom_printf("Querying for OPAL presence... ");
1302 
1303 	rc = opal_query_takeover(&prom_opal_size,
1304 				 &prom_opal_align);
1305 	prom_debug("(rc = %ld) ", rc);
1306 	if (rc != 0) {
1307 		prom_printf("not there.\n");
1308 		return;
1309 	}
1310 	of_platform = PLATFORM_OPAL;
1311 	prom_printf(" there !\n");
1312 	prom_debug("  opal_size  = 0x%lx\n", prom_opal_size);
1313 	prom_debug("  opal_align = 0x%lx\n", prom_opal_align);
1314 	if (prom_opal_align < 0x10000)
1315 		prom_opal_align = 0x10000;
1316 }
1317 
1318 static int __init prom_rtas_call(int token, int nargs, int nret,
1319 				 int *outputs, ...)
1320 {
1321 	struct rtas_args rtas_args;
1322 	va_list list;
1323 	int i;
1324 
1325 	rtas_args.token = token;
1326 	rtas_args.nargs = nargs;
1327 	rtas_args.nret  = nret;
1328 	rtas_args.rets  = (rtas_arg_t *)&(rtas_args.args[nargs]);
1329 	va_start(list, outputs);
1330 	for (i = 0; i < nargs; ++i)
1331 		rtas_args.args[i] = va_arg(list, rtas_arg_t);
1332 	va_end(list);
1333 
1334 	for (i = 0; i < nret; ++i)
1335 		rtas_args.rets[i] = 0;
1336 
1337 	opal_enter_rtas(&rtas_args, prom_rtas_data,
1338 			prom_rtas_entry);
1339 
1340 	if (nret > 1 && outputs != NULL)
1341 		for (i = 0; i < nret-1; ++i)
1342 			outputs[i] = rtas_args.rets[i+1];
1343 	return (nret > 0)? rtas_args.rets[0]: 0;
1344 }
1345 
1346 static void __init prom_opal_hold_cpus(void)
1347 {
1348 	int i, cnt, cpu, rc;
1349 	long j;
1350 	phandle node;
1351 	char type[64];
1352 	u32 servers[8];
1353 	void *entry = (unsigned long *)&opal_secondary_entry;
1354 	struct opal_secondary_data *data = &opal_secondary_data;
1355 
1356 	prom_debug("prom_opal_hold_cpus: start...\n");
1357 	prom_debug("    - entry       = 0x%x\n", entry);
1358 	prom_debug("    - data        = 0x%x\n", data);
1359 
1360 	data->ack = -1;
1361 	data->go = 0;
1362 
1363 	/* look for cpus */
1364 	for (node = 0; prom_next_node(&node); ) {
1365 		type[0] = 0;
1366 		prom_getprop(node, "device_type", type, sizeof(type));
1367 		if (strcmp(type, "cpu") != 0)
1368 			continue;
1369 
1370 		/* Skip non-configured cpus. */
1371 		if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1372 			if (strcmp(type, "okay") != 0)
1373 				continue;
1374 
1375 		cnt = prom_getprop(node, "ibm,ppc-interrupt-server#s", servers,
1376 			     sizeof(servers));
1377 		if (cnt == PROM_ERROR)
1378 			break;
1379 		cnt >>= 2;
1380 		for (i = 0; i < cnt; i++) {
1381 			cpu = servers[i];
1382 			prom_debug("CPU %d ... ", cpu);
1383 			if (cpu == prom.cpu) {
1384 				prom_debug("booted !\n");
1385 				continue;
1386 			}
1387 			prom_debug("starting ... ");
1388 
1389 			/* Init the acknowledge var which will be reset by
1390 			 * the secondary cpu when it awakens from its OF
1391 			 * spinloop.
1392 			 */
1393 			data->ack = -1;
1394 			rc = prom_rtas_call(prom_rtas_start_cpu, 3, 1,
1395 					    NULL, cpu, entry, data);
1396 			prom_debug("rtas rc=%d ...", rc);
1397 
1398 			for (j = 0; j < 100000000 && data->ack == -1; j++) {
1399 				HMT_low();
1400 				mb();
1401 			}
1402 			HMT_medium();
1403 			if (data->ack != -1)
1404 				prom_debug("done, PIR=0x%x\n", data->ack);
1405 			else
1406 				prom_debug("timeout !\n");
1407 		}
1408 	}
1409 	prom_debug("prom_opal_hold_cpus: end...\n");
1410 }
1411 
1412 static void __init prom_opal_takeover(void)
1413 {
1414 	struct opal_secondary_data *data = &opal_secondary_data;
1415 	struct opal_takeover_args *args = &data->args;
1416 	u64 align = prom_opal_align;
1417 	u64 top_addr, opal_addr;
1418 
1419 	args->k_image	= (u64)_stext;
1420 	args->k_size	= _end - _stext;
1421 	args->k_entry	= 0;
1422 	args->k_entry2	= 0x60;
1423 
1424 	top_addr = _ALIGN_UP(args->k_size, align);
1425 
1426 	if (prom_initrd_start != 0) {
1427 		args->rd_image = prom_initrd_start;
1428 		args->rd_size = prom_initrd_end - args->rd_image;
1429 		args->rd_loc = top_addr;
1430 		top_addr = _ALIGN_UP(args->rd_loc + args->rd_size, align);
1431 	}
1432 
1433 	/* Pickup an address for the HAL. We want to go really high
1434 	 * up to avoid problem with future kexecs. On the other hand
1435 	 * we don't want to be all over the TCEs on P5IOC2 machines
1436 	 * which are going to be up there too. We assume the machine
1437 	 * has plenty of memory, and we ask for the HAL for now to
1438 	 * be just below the 1G point, or above the initrd
1439 	 */
1440 	opal_addr = _ALIGN_DOWN(0x40000000 - prom_opal_size, align);
1441 	if (opal_addr < top_addr)
1442 		opal_addr = top_addr;
1443 	args->hal_addr = opal_addr;
1444 
1445 	/* Copy the command line to the kernel image */
1446 	strlcpy(boot_command_line, prom_cmd_line,
1447 		COMMAND_LINE_SIZE);
1448 
1449 	prom_debug("  k_image    = 0x%lx\n", args->k_image);
1450 	prom_debug("  k_size     = 0x%lx\n", args->k_size);
1451 	prom_debug("  k_entry    = 0x%lx\n", args->k_entry);
1452 	prom_debug("  k_entry2   = 0x%lx\n", args->k_entry2);
1453 	prom_debug("  hal_addr   = 0x%lx\n", args->hal_addr);
1454 	prom_debug("  rd_image   = 0x%lx\n", args->rd_image);
1455 	prom_debug("  rd_size    = 0x%lx\n", args->rd_size);
1456 	prom_debug("  rd_loc     = 0x%lx\n", args->rd_loc);
1457 	prom_printf("Performing OPAL takeover,this can take a few minutes..\n");
1458 	prom_close_stdin();
1459 	mb();
1460 	data->go = 1;
1461 	for (;;)
1462 		opal_do_takeover(args);
1463 }
1464 #endif /* __BIG_ENDIAN__ */
1465 
1466 /*
1467  * Allocate room for and instantiate OPAL
1468  */
1469 static void __init prom_instantiate_opal(void)
1470 {
1471 	phandle opal_node;
1472 	ihandle opal_inst;
1473 	u64 base, entry;
1474 	u64 size = 0, align = 0x10000;
1475 	__be64 val64;
1476 	u32 rets[2];
1477 
1478 	prom_debug("prom_instantiate_opal: start...\n");
1479 
1480 	opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal"));
1481 	prom_debug("opal_node: %x\n", opal_node);
1482 	if (!PHANDLE_VALID(opal_node))
1483 		return;
1484 
1485 	val64 = 0;
1486 	prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64));
1487 	size = be64_to_cpu(val64);
1488 	if (size == 0)
1489 		return;
1490 	val64 = 0;
1491 	prom_getprop(opal_node, "opal-runtime-alignment", &val64,sizeof(val64));
1492 	align = be64_to_cpu(val64);
1493 
1494 	base = alloc_down(size, align, 0);
1495 	if (base == 0) {
1496 		prom_printf("OPAL allocation failed !\n");
1497 		return;
1498 	}
1499 
1500 	opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal"));
1501 	if (!IHANDLE_VALID(opal_inst)) {
1502 		prom_printf("opening opal package failed (%x)\n", opal_inst);
1503 		return;
1504 	}
1505 
1506 	prom_printf("instantiating opal at 0x%x...", base);
1507 
1508 	if (call_prom_ret("call-method", 4, 3, rets,
1509 			  ADDR("load-opal-runtime"),
1510 			  opal_inst,
1511 			  base >> 32, base & 0xffffffff) != 0
1512 	    || (rets[0] == 0 && rets[1] == 0)) {
1513 		prom_printf(" failed\n");
1514 		return;
1515 	}
1516 	entry = (((u64)rets[0]) << 32) | rets[1];
1517 
1518 	prom_printf(" done\n");
1519 
1520 	reserve_mem(base, size);
1521 
1522 	prom_debug("opal base     = 0x%x\n", base);
1523 	prom_debug("opal align    = 0x%x\n", align);
1524 	prom_debug("opal entry    = 0x%x\n", entry);
1525 	prom_debug("opal size     = 0x%x\n", (long)size);
1526 
1527 	prom_setprop(opal_node, "/ibm,opal", "opal-base-address",
1528 		     &base, sizeof(base));
1529 	prom_setprop(opal_node, "/ibm,opal", "opal-entry-address",
1530 		     &entry, sizeof(entry));
1531 
1532 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1533 	prom_opal_base = base;
1534 	prom_opal_entry = entry;
1535 #endif
1536 	prom_debug("prom_instantiate_opal: end...\n");
1537 }
1538 
1539 #endif /* CONFIG_PPC_POWERNV */
1540 
1541 /*
1542  * Allocate room for and instantiate RTAS
1543  */
1544 static void __init prom_instantiate_rtas(void)
1545 {
1546 	phandle rtas_node;
1547 	ihandle rtas_inst;
1548 	u32 base, entry = 0;
1549 	__be32 val;
1550 	u32 size = 0;
1551 
1552 	prom_debug("prom_instantiate_rtas: start...\n");
1553 
1554 	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1555 	prom_debug("rtas_node: %x\n", rtas_node);
1556 	if (!PHANDLE_VALID(rtas_node))
1557 		return;
1558 
1559 	val = 0;
1560 	prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
1561 	size = be32_to_cpu(val);
1562 	if (size == 0)
1563 		return;
1564 
1565 	base = alloc_down(size, PAGE_SIZE, 0);
1566 	if (base == 0)
1567 		prom_panic("Could not allocate memory for RTAS\n");
1568 
1569 	rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1570 	if (!IHANDLE_VALID(rtas_inst)) {
1571 		prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1572 		return;
1573 	}
1574 
1575 	prom_printf("instantiating rtas at 0x%x...", base);
1576 
1577 	if (call_prom_ret("call-method", 3, 2, &entry,
1578 			  ADDR("instantiate-rtas"),
1579 			  rtas_inst, base) != 0
1580 	    || entry == 0) {
1581 		prom_printf(" failed\n");
1582 		return;
1583 	}
1584 	prom_printf(" done\n");
1585 
1586 	reserve_mem(base, size);
1587 
1588 	val = cpu_to_be32(base);
1589 	prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1590 		     &val, sizeof(val));
1591 	val = cpu_to_be32(entry);
1592 	prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1593 		     &val, sizeof(val));
1594 
1595 	/* Check if it supports "query-cpu-stopped-state" */
1596 	if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1597 			 &val, sizeof(val)) != PROM_ERROR)
1598 		rtas_has_query_cpu_stopped = true;
1599 
1600 #if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
1601 	/* PowerVN takeover hack */
1602 	prom_rtas_data = base;
1603 	prom_rtas_entry = entry;
1604 	prom_getprop(rtas_node, "start-cpu", &prom_rtas_start_cpu, 4);
1605 #endif
1606 	prom_debug("rtas base     = 0x%x\n", base);
1607 	prom_debug("rtas entry    = 0x%x\n", entry);
1608 	prom_debug("rtas size     = 0x%x\n", (long)size);
1609 
1610 	prom_debug("prom_instantiate_rtas: end...\n");
1611 }
1612 
1613 #ifdef CONFIG_PPC64
1614 /*
1615  * Allocate room for and instantiate Stored Measurement Log (SML)
1616  */
1617 static void __init prom_instantiate_sml(void)
1618 {
1619 	phandle ibmvtpm_node;
1620 	ihandle ibmvtpm_inst;
1621 	u32 entry = 0, size = 0;
1622 	u64 base;
1623 
1624 	prom_debug("prom_instantiate_sml: start...\n");
1625 
1626 	ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/ibm,vtpm"));
1627 	prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1628 	if (!PHANDLE_VALID(ibmvtpm_node))
1629 		return;
1630 
1631 	ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/ibm,vtpm"));
1632 	if (!IHANDLE_VALID(ibmvtpm_inst)) {
1633 		prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1634 		return;
1635 	}
1636 
1637 	if (call_prom_ret("call-method", 2, 2, &size,
1638 			  ADDR("sml-get-handover-size"),
1639 			  ibmvtpm_inst) != 0 || size == 0) {
1640 		prom_printf("SML get handover size failed\n");
1641 		return;
1642 	}
1643 
1644 	base = alloc_down(size, PAGE_SIZE, 0);
1645 	if (base == 0)
1646 		prom_panic("Could not allocate memory for sml\n");
1647 
1648 	prom_printf("instantiating sml at 0x%x...", base);
1649 
1650 	if (call_prom_ret("call-method", 4, 2, &entry,
1651 			  ADDR("sml-handover"),
1652 			  ibmvtpm_inst, size, base) != 0 || entry == 0) {
1653 		prom_printf("SML handover failed\n");
1654 		return;
1655 	}
1656 	prom_printf(" done\n");
1657 
1658 	reserve_mem(base, size);
1659 
1660 	prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-base",
1661 		     &base, sizeof(base));
1662 	prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-size",
1663 		     &size, sizeof(size));
1664 
1665 	prom_debug("sml base     = 0x%x\n", base);
1666 	prom_debug("sml size     = 0x%x\n", (long)size);
1667 
1668 	prom_debug("prom_instantiate_sml: end...\n");
1669 }
1670 
1671 /*
1672  * Allocate room for and initialize TCE tables
1673  */
1674 #ifdef __BIG_ENDIAN__
1675 static void __init prom_initialize_tce_table(void)
1676 {
1677 	phandle node;
1678 	ihandle phb_node;
1679 	char compatible[64], type[64], model[64];
1680 	char *path = prom_scratch;
1681 	u64 base, align;
1682 	u32 minalign, minsize;
1683 	u64 tce_entry, *tce_entryp;
1684 	u64 local_alloc_top, local_alloc_bottom;
1685 	u64 i;
1686 
1687 	if (prom_iommu_off)
1688 		return;
1689 
1690 	prom_debug("starting prom_initialize_tce_table\n");
1691 
1692 	/* Cache current top of allocs so we reserve a single block */
1693 	local_alloc_top = alloc_top_high;
1694 	local_alloc_bottom = local_alloc_top;
1695 
1696 	/* Search all nodes looking for PHBs. */
1697 	for (node = 0; prom_next_node(&node); ) {
1698 		compatible[0] = 0;
1699 		type[0] = 0;
1700 		model[0] = 0;
1701 		prom_getprop(node, "compatible",
1702 			     compatible, sizeof(compatible));
1703 		prom_getprop(node, "device_type", type, sizeof(type));
1704 		prom_getprop(node, "model", model, sizeof(model));
1705 
1706 		if ((type[0] == 0) || (strstr(type, "pci") == NULL))
1707 			continue;
1708 
1709 		/* Keep the old logic intact to avoid regression. */
1710 		if (compatible[0] != 0) {
1711 			if ((strstr(compatible, "python") == NULL) &&
1712 			    (strstr(compatible, "Speedwagon") == NULL) &&
1713 			    (strstr(compatible, "Winnipeg") == NULL))
1714 				continue;
1715 		} else if (model[0] != 0) {
1716 			if ((strstr(model, "ython") == NULL) &&
1717 			    (strstr(model, "peedwagon") == NULL) &&
1718 			    (strstr(model, "innipeg") == NULL))
1719 				continue;
1720 		}
1721 
1722 		if (prom_getprop(node, "tce-table-minalign", &minalign,
1723 				 sizeof(minalign)) == PROM_ERROR)
1724 			minalign = 0;
1725 		if (prom_getprop(node, "tce-table-minsize", &minsize,
1726 				 sizeof(minsize)) == PROM_ERROR)
1727 			minsize = 4UL << 20;
1728 
1729 		/*
1730 		 * Even though we read what OF wants, we just set the table
1731 		 * size to 4 MB.  This is enough to map 2GB of PCI DMA space.
1732 		 * By doing this, we avoid the pitfalls of trying to DMA to
1733 		 * MMIO space and the DMA alias hole.
1734 		 *
1735 		 * On POWER4, firmware sets the TCE region by assuming
1736 		 * each TCE table is 8MB. Using this memory for anything
1737 		 * else will impact performance, so we always allocate 8MB.
1738 		 * Anton
1739 		 */
1740 		if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p))
1741 			minsize = 8UL << 20;
1742 		else
1743 			minsize = 4UL << 20;
1744 
1745 		/* Align to the greater of the align or size */
1746 		align = max(minalign, minsize);
1747 		base = alloc_down(minsize, align, 1);
1748 		if (base == 0)
1749 			prom_panic("ERROR, cannot find space for TCE table.\n");
1750 		if (base < local_alloc_bottom)
1751 			local_alloc_bottom = base;
1752 
1753 		/* It seems OF doesn't null-terminate the path :-( */
1754 		memset(path, 0, PROM_SCRATCH_SIZE);
1755 		/* Call OF to setup the TCE hardware */
1756 		if (call_prom("package-to-path", 3, 1, node,
1757 			      path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1758 			prom_printf("package-to-path failed\n");
1759 		}
1760 
1761 		/* Save away the TCE table attributes for later use. */
1762 		prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
1763 		prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
1764 
1765 		prom_debug("TCE table: %s\n", path);
1766 		prom_debug("\tnode = 0x%x\n", node);
1767 		prom_debug("\tbase = 0x%x\n", base);
1768 		prom_debug("\tsize = 0x%x\n", minsize);
1769 
1770 		/* Initialize the table to have a one-to-one mapping
1771 		 * over the allocated size.
1772 		 */
1773 		tce_entryp = (u64 *)base;
1774 		for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1775 			tce_entry = (i << PAGE_SHIFT);
1776 			tce_entry |= 0x3;
1777 			*tce_entryp = tce_entry;
1778 		}
1779 
1780 		prom_printf("opening PHB %s", path);
1781 		phb_node = call_prom("open", 1, 1, path);
1782 		if (phb_node == 0)
1783 			prom_printf("... failed\n");
1784 		else
1785 			prom_printf("... done\n");
1786 
1787 		call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1788 			  phb_node, -1, minsize,
1789 			  (u32) base, (u32) (base >> 32));
1790 		call_prom("close", 1, 0, phb_node);
1791 	}
1792 
1793 	reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1794 
1795 	/* These are only really needed if there is a memory limit in
1796 	 * effect, but we don't know so export them always. */
1797 	prom_tce_alloc_start = local_alloc_bottom;
1798 	prom_tce_alloc_end = local_alloc_top;
1799 
1800 	/* Flag the first invalid entry */
1801 	prom_debug("ending prom_initialize_tce_table\n");
1802 }
1803 #endif /* __BIG_ENDIAN__ */
1804 #endif /* CONFIG_PPC64 */
1805 
1806 /*
1807  * With CHRP SMP we need to use the OF to start the other processors.
1808  * We can't wait until smp_boot_cpus (the OF is trashed by then)
1809  * so we have to put the processors into a holding pattern controlled
1810  * by the kernel (not OF) before we destroy the OF.
1811  *
1812  * This uses a chunk of low memory, puts some holding pattern
1813  * code there and sends the other processors off to there until
1814  * smp_boot_cpus tells them to do something.  The holding pattern
1815  * checks that address until its cpu # is there, when it is that
1816  * cpu jumps to __secondary_start().  smp_boot_cpus() takes care
1817  * of setting those values.
1818  *
1819  * We also use physical address 0x4 here to tell when a cpu
1820  * is in its holding pattern code.
1821  *
1822  * -- Cort
1823  */
1824 /*
1825  * We want to reference the copy of __secondary_hold_* in the
1826  * 0 - 0x100 address range
1827  */
1828 #define LOW_ADDR(x)	(((unsigned long) &(x)) & 0xff)
1829 
1830 static void __init prom_hold_cpus(void)
1831 {
1832 	unsigned long i;
1833 	phandle node;
1834 	char type[64];
1835 	unsigned long *spinloop
1836 		= (void *) LOW_ADDR(__secondary_hold_spinloop);
1837 	unsigned long *acknowledge
1838 		= (void *) LOW_ADDR(__secondary_hold_acknowledge);
1839 	unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1840 
1841 	/*
1842 	 * On pseries, if RTAS supports "query-cpu-stopped-state",
1843 	 * we skip this stage, the CPUs will be started by the
1844 	 * kernel using RTAS.
1845 	 */
1846 	if ((of_platform == PLATFORM_PSERIES ||
1847 	     of_platform == PLATFORM_PSERIES_LPAR) &&
1848 	    rtas_has_query_cpu_stopped) {
1849 		prom_printf("prom_hold_cpus: skipped\n");
1850 		return;
1851 	}
1852 
1853 	prom_debug("prom_hold_cpus: start...\n");
1854 	prom_debug("    1) spinloop       = 0x%x\n", (unsigned long)spinloop);
1855 	prom_debug("    1) *spinloop      = 0x%x\n", *spinloop);
1856 	prom_debug("    1) acknowledge    = 0x%x\n",
1857 		   (unsigned long)acknowledge);
1858 	prom_debug("    1) *acknowledge   = 0x%x\n", *acknowledge);
1859 	prom_debug("    1) secondary_hold = 0x%x\n", secondary_hold);
1860 
1861 	/* Set the common spinloop variable, so all of the secondary cpus
1862 	 * will block when they are awakened from their OF spinloop.
1863 	 * This must occur for both SMP and non SMP kernels, since OF will
1864 	 * be trashed when we move the kernel.
1865 	 */
1866 	*spinloop = 0;
1867 
1868 	/* look for cpus */
1869 	for (node = 0; prom_next_node(&node); ) {
1870 		unsigned int cpu_no;
1871 		__be32 reg;
1872 
1873 		type[0] = 0;
1874 		prom_getprop(node, "device_type", type, sizeof(type));
1875 		if (strcmp(type, "cpu") != 0)
1876 			continue;
1877 
1878 		/* Skip non-configured cpus. */
1879 		if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1880 			if (strcmp(type, "okay") != 0)
1881 				continue;
1882 
1883 		reg = cpu_to_be32(-1); /* make sparse happy */
1884 		prom_getprop(node, "reg", &reg, sizeof(reg));
1885 		cpu_no = be32_to_cpu(reg);
1886 
1887 		prom_debug("cpu hw idx   = %lu\n", cpu_no);
1888 
1889 		/* Init the acknowledge var which will be reset by
1890 		 * the secondary cpu when it awakens from its OF
1891 		 * spinloop.
1892 		 */
1893 		*acknowledge = (unsigned long)-1;
1894 
1895 		if (cpu_no != prom.cpu) {
1896 			/* Primary Thread of non-boot cpu or any thread */
1897 			prom_printf("starting cpu hw idx %lu... ", cpu_no);
1898 			call_prom("start-cpu", 3, 0, node,
1899 				  secondary_hold, cpu_no);
1900 
1901 			for (i = 0; (i < 100000000) &&
1902 			     (*acknowledge == ((unsigned long)-1)); i++ )
1903 				mb();
1904 
1905 			if (*acknowledge == cpu_no)
1906 				prom_printf("done\n");
1907 			else
1908 				prom_printf("failed: %x\n", *acknowledge);
1909 		}
1910 #ifdef CONFIG_SMP
1911 		else
1912 			prom_printf("boot cpu hw idx %lu\n", cpu_no);
1913 #endif /* CONFIG_SMP */
1914 	}
1915 
1916 	prom_debug("prom_hold_cpus: end...\n");
1917 }
1918 
1919 
1920 static void __init prom_init_client_services(unsigned long pp)
1921 {
1922 	/* Get a handle to the prom entry point before anything else */
1923 	prom_entry = pp;
1924 
1925 	/* get a handle for the stdout device */
1926 	prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
1927 	if (!PHANDLE_VALID(prom.chosen))
1928 		prom_panic("cannot find chosen"); /* msg won't be printed :( */
1929 
1930 	/* get device tree root */
1931 	prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
1932 	if (!PHANDLE_VALID(prom.root))
1933 		prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1934 
1935 	prom.mmumap = 0;
1936 }
1937 
1938 #ifdef CONFIG_PPC32
1939 /*
1940  * For really old powermacs, we need to map things we claim.
1941  * For that, we need the ihandle of the mmu.
1942  * Also, on the longtrail, we need to work around other bugs.
1943  */
1944 static void __init prom_find_mmu(void)
1945 {
1946 	phandle oprom;
1947 	char version[64];
1948 
1949 	oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
1950 	if (!PHANDLE_VALID(oprom))
1951 		return;
1952 	if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
1953 		return;
1954 	version[sizeof(version) - 1] = 0;
1955 	/* XXX might need to add other versions here */
1956 	if (strcmp(version, "Open Firmware, 1.0.5") == 0)
1957 		of_workarounds = OF_WA_CLAIM;
1958 	else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
1959 		of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
1960 		call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
1961 	} else
1962 		return;
1963 	prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
1964 	prom_getprop(prom.chosen, "mmu", &prom.mmumap,
1965 		     sizeof(prom.mmumap));
1966 	prom.mmumap = be32_to_cpu(prom.mmumap);
1967 	if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
1968 		of_workarounds &= ~OF_WA_CLAIM;		/* hmmm */
1969 }
1970 #else
1971 #define prom_find_mmu()
1972 #endif
1973 
1974 static void __init prom_init_stdout(void)
1975 {
1976 	char *path = of_stdout_device;
1977 	char type[16];
1978 	phandle stdout_node;
1979 	__be32 val;
1980 
1981 	if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
1982 		prom_panic("cannot find stdout");
1983 
1984 	prom.stdout = be32_to_cpu(val);
1985 
1986 	/* Get the full OF pathname of the stdout device */
1987 	memset(path, 0, 256);
1988 	call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
1989 	prom_printf("OF stdout device is: %s\n", of_stdout_device);
1990 	prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
1991 		     path, strlen(path) + 1);
1992 
1993 	/* instance-to-package fails on PA-Semi */
1994 	stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
1995 	if (stdout_node != PROM_ERROR) {
1996 		val = cpu_to_be32(stdout_node);
1997 		prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
1998 			     &val, sizeof(val));
1999 
2000 		/* If it's a display, note it */
2001 		memset(type, 0, sizeof(type));
2002 		prom_getprop(stdout_node, "device_type", type, sizeof(type));
2003 		if (strcmp(type, "display") == 0)
2004 			prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2005 	}
2006 }
2007 
2008 static int __init prom_find_machine_type(void)
2009 {
2010 	char compat[256];
2011 	int len, i = 0;
2012 #ifdef CONFIG_PPC64
2013 	phandle rtas;
2014 	int x;
2015 #endif
2016 
2017 	/* Look for a PowerMac or a Cell */
2018 	len = prom_getprop(prom.root, "compatible",
2019 			   compat, sizeof(compat)-1);
2020 	if (len > 0) {
2021 		compat[len] = 0;
2022 		while (i < len) {
2023 			char *p = &compat[i];
2024 			int sl = strlen(p);
2025 			if (sl == 0)
2026 				break;
2027 			if (strstr(p, "Power Macintosh") ||
2028 			    strstr(p, "MacRISC"))
2029 				return PLATFORM_POWERMAC;
2030 #ifdef CONFIG_PPC64
2031 			/* We must make sure we don't detect the IBM Cell
2032 			 * blades as pSeries due to some firmware issues,
2033 			 * so we do it here.
2034 			 */
2035 			if (strstr(p, "IBM,CBEA") ||
2036 			    strstr(p, "IBM,CPBW-1.0"))
2037 				return PLATFORM_GENERIC;
2038 #endif /* CONFIG_PPC64 */
2039 			i += sl + 1;
2040 		}
2041 	}
2042 #ifdef CONFIG_PPC64
2043 	/* Try to detect OPAL */
2044 	if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal"))))
2045 		return PLATFORM_OPAL;
2046 
2047 	/* Try to figure out if it's an IBM pSeries or any other
2048 	 * PAPR compliant platform. We assume it is if :
2049 	 *  - /device_type is "chrp" (please, do NOT use that for future
2050 	 *    non-IBM designs !
2051 	 *  - it has /rtas
2052 	 */
2053 	len = prom_getprop(prom.root, "device_type",
2054 			   compat, sizeof(compat)-1);
2055 	if (len <= 0)
2056 		return PLATFORM_GENERIC;
2057 	if (strcmp(compat, "chrp"))
2058 		return PLATFORM_GENERIC;
2059 
2060 	/* Default to pSeries. We need to know if we are running LPAR */
2061 	rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2062 	if (!PHANDLE_VALID(rtas))
2063 		return PLATFORM_GENERIC;
2064 	x = prom_getproplen(rtas, "ibm,hypertas-functions");
2065 	if (x != PROM_ERROR) {
2066 		prom_debug("Hypertas detected, assuming LPAR !\n");
2067 		return PLATFORM_PSERIES_LPAR;
2068 	}
2069 	return PLATFORM_PSERIES;
2070 #else
2071 	return PLATFORM_GENERIC;
2072 #endif
2073 }
2074 
2075 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2076 {
2077 	return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2078 }
2079 
2080 /*
2081  * If we have a display that we don't know how to drive,
2082  * we will want to try to execute OF's open method for it
2083  * later.  However, OF will probably fall over if we do that
2084  * we've taken over the MMU.
2085  * So we check whether we will need to open the display,
2086  * and if so, open it now.
2087  */
2088 static void __init prom_check_displays(void)
2089 {
2090 	char type[16], *path;
2091 	phandle node;
2092 	ihandle ih;
2093 	int i;
2094 
2095 	static unsigned char default_colors[] = {
2096 		0x00, 0x00, 0x00,
2097 		0x00, 0x00, 0xaa,
2098 		0x00, 0xaa, 0x00,
2099 		0x00, 0xaa, 0xaa,
2100 		0xaa, 0x00, 0x00,
2101 		0xaa, 0x00, 0xaa,
2102 		0xaa, 0xaa, 0x00,
2103 		0xaa, 0xaa, 0xaa,
2104 		0x55, 0x55, 0x55,
2105 		0x55, 0x55, 0xff,
2106 		0x55, 0xff, 0x55,
2107 		0x55, 0xff, 0xff,
2108 		0xff, 0x55, 0x55,
2109 		0xff, 0x55, 0xff,
2110 		0xff, 0xff, 0x55,
2111 		0xff, 0xff, 0xff
2112 	};
2113 	const unsigned char *clut;
2114 
2115 	prom_debug("Looking for displays\n");
2116 	for (node = 0; prom_next_node(&node); ) {
2117 		memset(type, 0, sizeof(type));
2118 		prom_getprop(node, "device_type", type, sizeof(type));
2119 		if (strcmp(type, "display") != 0)
2120 			continue;
2121 
2122 		/* It seems OF doesn't null-terminate the path :-( */
2123 		path = prom_scratch;
2124 		memset(path, 0, PROM_SCRATCH_SIZE);
2125 
2126 		/*
2127 		 * leave some room at the end of the path for appending extra
2128 		 * arguments
2129 		 */
2130 		if (call_prom("package-to-path", 3, 1, node, path,
2131 			      PROM_SCRATCH_SIZE-10) == PROM_ERROR)
2132 			continue;
2133 		prom_printf("found display   : %s, opening... ", path);
2134 
2135 		ih = call_prom("open", 1, 1, path);
2136 		if (ih == 0) {
2137 			prom_printf("failed\n");
2138 			continue;
2139 		}
2140 
2141 		/* Success */
2142 		prom_printf("done\n");
2143 		prom_setprop(node, path, "linux,opened", NULL, 0);
2144 
2145 		/* Setup a usable color table when the appropriate
2146 		 * method is available. Should update this to set-colors */
2147 		clut = default_colors;
2148 		for (i = 0; i < 16; i++, clut += 3)
2149 			if (prom_set_color(ih, i, clut[0], clut[1],
2150 					   clut[2]) != 0)
2151 				break;
2152 
2153 #ifdef CONFIG_LOGO_LINUX_CLUT224
2154 		clut = PTRRELOC(logo_linux_clut224.clut);
2155 		for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2156 			if (prom_set_color(ih, i + 32, clut[0], clut[1],
2157 					   clut[2]) != 0)
2158 				break;
2159 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
2160 
2161 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2162 		if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2163 		    PROM_ERROR) {
2164 			u32 width, height, pitch, addr;
2165 
2166 			prom_printf("Setting btext !\n");
2167 			prom_getprop(node, "width", &width, 4);
2168 			prom_getprop(node, "height", &height, 4);
2169 			prom_getprop(node, "linebytes", &pitch, 4);
2170 			prom_getprop(node, "address", &addr, 4);
2171 			prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2172 				    width, height, pitch, addr);
2173 			btext_setup_display(width, height, 8, pitch, addr);
2174 		}
2175 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2176 	}
2177 }
2178 
2179 
2180 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
2181 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2182 			      unsigned long needed, unsigned long align)
2183 {
2184 	void *ret;
2185 
2186 	*mem_start = _ALIGN(*mem_start, align);
2187 	while ((*mem_start + needed) > *mem_end) {
2188 		unsigned long room, chunk;
2189 
2190 		prom_debug("Chunk exhausted, claiming more at %x...\n",
2191 			   alloc_bottom);
2192 		room = alloc_top - alloc_bottom;
2193 		if (room > DEVTREE_CHUNK_SIZE)
2194 			room = DEVTREE_CHUNK_SIZE;
2195 		if (room < PAGE_SIZE)
2196 			prom_panic("No memory for flatten_device_tree "
2197 				   "(no room)\n");
2198 		chunk = alloc_up(room, 0);
2199 		if (chunk == 0)
2200 			prom_panic("No memory for flatten_device_tree "
2201 				   "(claim failed)\n");
2202 		*mem_end = chunk + room;
2203 	}
2204 
2205 	ret = (void *)*mem_start;
2206 	*mem_start += needed;
2207 
2208 	return ret;
2209 }
2210 
2211 #define dt_push_token(token, mem_start, mem_end) do { 			\
2212 		void *room = make_room(mem_start, mem_end, 4, 4);	\
2213 		*(__be32 *)room = cpu_to_be32(token);			\
2214 	} while(0)
2215 
2216 static unsigned long __init dt_find_string(char *str)
2217 {
2218 	char *s, *os;
2219 
2220 	s = os = (char *)dt_string_start;
2221 	s += 4;
2222 	while (s <  (char *)dt_string_end) {
2223 		if (strcmp(s, str) == 0)
2224 			return s - os;
2225 		s += strlen(s) + 1;
2226 	}
2227 	return 0;
2228 }
2229 
2230 /*
2231  * The Open Firmware 1275 specification states properties must be 31 bytes or
2232  * less, however not all firmwares obey this. Make it 64 bytes to be safe.
2233  */
2234 #define MAX_PROPERTY_NAME 64
2235 
2236 static void __init scan_dt_build_strings(phandle node,
2237 					 unsigned long *mem_start,
2238 					 unsigned long *mem_end)
2239 {
2240 	char *prev_name, *namep, *sstart;
2241 	unsigned long soff;
2242 	phandle child;
2243 
2244 	sstart =  (char *)dt_string_start;
2245 
2246 	/* get and store all property names */
2247 	prev_name = "";
2248 	for (;;) {
2249 		/* 64 is max len of name including nul. */
2250 		namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2251 		if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2252 			/* No more nodes: unwind alloc */
2253 			*mem_start = (unsigned long)namep;
2254 			break;
2255 		}
2256 
2257  		/* skip "name" */
2258  		if (strcmp(namep, "name") == 0) {
2259  			*mem_start = (unsigned long)namep;
2260  			prev_name = "name";
2261  			continue;
2262  		}
2263 		/* get/create string entry */
2264 		soff = dt_find_string(namep);
2265 		if (soff != 0) {
2266 			*mem_start = (unsigned long)namep;
2267 			namep = sstart + soff;
2268 		} else {
2269 			/* Trim off some if we can */
2270 			*mem_start = (unsigned long)namep + strlen(namep) + 1;
2271 			dt_string_end = *mem_start;
2272 		}
2273 		prev_name = namep;
2274 	}
2275 
2276 	/* do all our children */
2277 	child = call_prom("child", 1, 1, node);
2278 	while (child != 0) {
2279 		scan_dt_build_strings(child, mem_start, mem_end);
2280 		child = call_prom("peer", 1, 1, child);
2281 	}
2282 }
2283 
2284 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2285 					unsigned long *mem_end)
2286 {
2287 	phandle child;
2288 	char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2289 	unsigned long soff;
2290 	unsigned char *valp;
2291 	static char pname[MAX_PROPERTY_NAME];
2292 	int l, room, has_phandle = 0;
2293 
2294 	dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2295 
2296 	/* get the node's full name */
2297 	namep = (char *)*mem_start;
2298 	room = *mem_end - *mem_start;
2299 	if (room > 255)
2300 		room = 255;
2301 	l = call_prom("package-to-path", 3, 1, node, namep, room);
2302 	if (l >= 0) {
2303 		/* Didn't fit?  Get more room. */
2304 		if (l >= room) {
2305 			if (l >= *mem_end - *mem_start)
2306 				namep = make_room(mem_start, mem_end, l+1, 1);
2307 			call_prom("package-to-path", 3, 1, node, namep, l);
2308 		}
2309 		namep[l] = '\0';
2310 
2311 		/* Fixup an Apple bug where they have bogus \0 chars in the
2312 		 * middle of the path in some properties, and extract
2313 		 * the unit name (everything after the last '/').
2314 		 */
2315 		for (lp = p = namep, ep = namep + l; p < ep; p++) {
2316 			if (*p == '/')
2317 				lp = namep;
2318 			else if (*p != 0)
2319 				*lp++ = *p;
2320 		}
2321 		*lp = 0;
2322 		*mem_start = _ALIGN((unsigned long)lp + 1, 4);
2323 	}
2324 
2325 	/* get it again for debugging */
2326 	path = prom_scratch;
2327 	memset(path, 0, PROM_SCRATCH_SIZE);
2328 	call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
2329 
2330 	/* get and store all properties */
2331 	prev_name = "";
2332 	sstart = (char *)dt_string_start;
2333 	for (;;) {
2334 		if (call_prom("nextprop", 3, 1, node, prev_name,
2335 			      pname) != 1)
2336 			break;
2337 
2338  		/* skip "name" */
2339  		if (strcmp(pname, "name") == 0) {
2340  			prev_name = "name";
2341  			continue;
2342  		}
2343 
2344 		/* find string offset */
2345 		soff = dt_find_string(pname);
2346 		if (soff == 0) {
2347 			prom_printf("WARNING: Can't find string index for"
2348 				    " <%s>, node %s\n", pname, path);
2349 			break;
2350 		}
2351 		prev_name = sstart + soff;
2352 
2353 		/* get length */
2354 		l = call_prom("getproplen", 2, 1, node, pname);
2355 
2356 		/* sanity checks */
2357 		if (l == PROM_ERROR)
2358 			continue;
2359 
2360 		/* push property head */
2361 		dt_push_token(OF_DT_PROP, mem_start, mem_end);
2362 		dt_push_token(l, mem_start, mem_end);
2363 		dt_push_token(soff, mem_start, mem_end);
2364 
2365 		/* push property content */
2366 		valp = make_room(mem_start, mem_end, l, 4);
2367 		call_prom("getprop", 4, 1, node, pname, valp, l);
2368 		*mem_start = _ALIGN(*mem_start, 4);
2369 
2370 		if (!strcmp(pname, "phandle"))
2371 			has_phandle = 1;
2372 	}
2373 
2374 	/* Add a "linux,phandle" property if no "phandle" property already
2375 	 * existed (can happen with OPAL)
2376 	 */
2377 	if (!has_phandle) {
2378 		soff = dt_find_string("linux,phandle");
2379 		if (soff == 0)
2380 			prom_printf("WARNING: Can't find string index for"
2381 				    " <linux-phandle> node %s\n", path);
2382 		else {
2383 			dt_push_token(OF_DT_PROP, mem_start, mem_end);
2384 			dt_push_token(4, mem_start, mem_end);
2385 			dt_push_token(soff, mem_start, mem_end);
2386 			valp = make_room(mem_start, mem_end, 4, 4);
2387 			*(__be32 *)valp = cpu_to_be32(node);
2388 		}
2389 	}
2390 
2391 	/* do all our children */
2392 	child = call_prom("child", 1, 1, node);
2393 	while (child != 0) {
2394 		scan_dt_build_struct(child, mem_start, mem_end);
2395 		child = call_prom("peer", 1, 1, child);
2396 	}
2397 
2398 	dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2399 }
2400 
2401 static void __init flatten_device_tree(void)
2402 {
2403 	phandle root;
2404 	unsigned long mem_start, mem_end, room;
2405 	struct boot_param_header *hdr;
2406 	char *namep;
2407 	u64 *rsvmap;
2408 
2409 	/*
2410 	 * Check how much room we have between alloc top & bottom (+/- a
2411 	 * few pages), crop to 1MB, as this is our "chunk" size
2412 	 */
2413 	room = alloc_top - alloc_bottom - 0x4000;
2414 	if (room > DEVTREE_CHUNK_SIZE)
2415 		room = DEVTREE_CHUNK_SIZE;
2416 	prom_debug("starting device tree allocs at %x\n", alloc_bottom);
2417 
2418 	/* Now try to claim that */
2419 	mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2420 	if (mem_start == 0)
2421 		prom_panic("Can't allocate initial device-tree chunk\n");
2422 	mem_end = mem_start + room;
2423 
2424 	/* Get root of tree */
2425 	root = call_prom("peer", 1, 1, (phandle)0);
2426 	if (root == (phandle)0)
2427 		prom_panic ("couldn't get device tree root\n");
2428 
2429 	/* Build header and make room for mem rsv map */
2430 	mem_start = _ALIGN(mem_start, 4);
2431 	hdr = make_room(&mem_start, &mem_end,
2432 			sizeof(struct boot_param_header), 4);
2433 	dt_header_start = (unsigned long)hdr;
2434 	rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2435 
2436 	/* Start of strings */
2437 	mem_start = PAGE_ALIGN(mem_start);
2438 	dt_string_start = mem_start;
2439 	mem_start += 4; /* hole */
2440 
2441 	/* Add "linux,phandle" in there, we'll need it */
2442 	namep = make_room(&mem_start, &mem_end, 16, 1);
2443 	strcpy(namep, "linux,phandle");
2444 	mem_start = (unsigned long)namep + strlen(namep) + 1;
2445 
2446 	/* Build string array */
2447 	prom_printf("Building dt strings...\n");
2448 	scan_dt_build_strings(root, &mem_start, &mem_end);
2449 	dt_string_end = mem_start;
2450 
2451 	/* Build structure */
2452 	mem_start = PAGE_ALIGN(mem_start);
2453 	dt_struct_start = mem_start;
2454 	prom_printf("Building dt structure...\n");
2455 	scan_dt_build_struct(root, &mem_start, &mem_end);
2456 	dt_push_token(OF_DT_END, &mem_start, &mem_end);
2457 	dt_struct_end = PAGE_ALIGN(mem_start);
2458 
2459 	/* Finish header */
2460 	hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2461 	hdr->magic = cpu_to_be32(OF_DT_HEADER);
2462 	hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2463 	hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2464 	hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2465 	hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2466 	hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2467 	hdr->version = cpu_to_be32(OF_DT_VERSION);
2468 	/* Version 16 is not backward compatible */
2469 	hdr->last_comp_version = cpu_to_be32(0x10);
2470 
2471 	/* Copy the reserve map in */
2472 	memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2473 
2474 #ifdef DEBUG_PROM
2475 	{
2476 		int i;
2477 		prom_printf("reserved memory map:\n");
2478 		for (i = 0; i < mem_reserve_cnt; i++)
2479 			prom_printf("  %x - %x\n",
2480 				    be64_to_cpu(mem_reserve_map[i].base),
2481 				    be64_to_cpu(mem_reserve_map[i].size));
2482 	}
2483 #endif
2484 	/* Bump mem_reserve_cnt to cause further reservations to fail
2485 	 * since it's too late.
2486 	 */
2487 	mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2488 
2489 	prom_printf("Device tree strings 0x%x -> 0x%x\n",
2490 		    dt_string_start, dt_string_end);
2491 	prom_printf("Device tree struct  0x%x -> 0x%x\n",
2492 		    dt_struct_start, dt_struct_end);
2493 }
2494 
2495 #ifdef CONFIG_PPC_MAPLE
2496 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2497  * The values are bad, and it doesn't even have the right number of cells. */
2498 static void __init fixup_device_tree_maple(void)
2499 {
2500 	phandle isa;
2501 	u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2502 	u32 isa_ranges[6];
2503 	char *name;
2504 
2505 	name = "/ht@0/isa@4";
2506 	isa = call_prom("finddevice", 1, 1, ADDR(name));
2507 	if (!PHANDLE_VALID(isa)) {
2508 		name = "/ht@0/isa@6";
2509 		isa = call_prom("finddevice", 1, 1, ADDR(name));
2510 		rloc = 0x01003000; /* IO space; PCI device = 6 */
2511 	}
2512 	if (!PHANDLE_VALID(isa))
2513 		return;
2514 
2515 	if (prom_getproplen(isa, "ranges") != 12)
2516 		return;
2517 	if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2518 		== PROM_ERROR)
2519 		return;
2520 
2521 	if (isa_ranges[0] != 0x1 ||
2522 		isa_ranges[1] != 0xf4000000 ||
2523 		isa_ranges[2] != 0x00010000)
2524 		return;
2525 
2526 	prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2527 
2528 	isa_ranges[0] = 0x1;
2529 	isa_ranges[1] = 0x0;
2530 	isa_ranges[2] = rloc;
2531 	isa_ranges[3] = 0x0;
2532 	isa_ranges[4] = 0x0;
2533 	isa_ranges[5] = 0x00010000;
2534 	prom_setprop(isa, name, "ranges",
2535 			isa_ranges, sizeof(isa_ranges));
2536 }
2537 
2538 #define CPC925_MC_START		0xf8000000
2539 #define CPC925_MC_LENGTH	0x1000000
2540 /* The values for memory-controller don't have right number of cells */
2541 static void __init fixup_device_tree_maple_memory_controller(void)
2542 {
2543 	phandle mc;
2544 	u32 mc_reg[4];
2545 	char *name = "/hostbridge@f8000000";
2546 	u32 ac, sc;
2547 
2548 	mc = call_prom("finddevice", 1, 1, ADDR(name));
2549 	if (!PHANDLE_VALID(mc))
2550 		return;
2551 
2552 	if (prom_getproplen(mc, "reg") != 8)
2553 		return;
2554 
2555 	prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2556 	prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2557 	if ((ac != 2) || (sc != 2))
2558 		return;
2559 
2560 	if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2561 		return;
2562 
2563 	if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2564 		return;
2565 
2566 	prom_printf("Fixing up bogus hostbridge on Maple...\n");
2567 
2568 	mc_reg[0] = 0x0;
2569 	mc_reg[1] = CPC925_MC_START;
2570 	mc_reg[2] = 0x0;
2571 	mc_reg[3] = CPC925_MC_LENGTH;
2572 	prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2573 }
2574 #else
2575 #define fixup_device_tree_maple()
2576 #define fixup_device_tree_maple_memory_controller()
2577 #endif
2578 
2579 #ifdef CONFIG_PPC_CHRP
2580 /*
2581  * Pegasos and BriQ lacks the "ranges" property in the isa node
2582  * Pegasos needs decimal IRQ 14/15, not hexadecimal
2583  * Pegasos has the IDE configured in legacy mode, but advertised as native
2584  */
2585 static void __init fixup_device_tree_chrp(void)
2586 {
2587 	phandle ph;
2588 	u32 prop[6];
2589 	u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2590 	char *name;
2591 	int rc;
2592 
2593 	name = "/pci@80000000/isa@c";
2594 	ph = call_prom("finddevice", 1, 1, ADDR(name));
2595 	if (!PHANDLE_VALID(ph)) {
2596 		name = "/pci@ff500000/isa@6";
2597 		ph = call_prom("finddevice", 1, 1, ADDR(name));
2598 		rloc = 0x01003000; /* IO space; PCI device = 6 */
2599 	}
2600 	if (PHANDLE_VALID(ph)) {
2601 		rc = prom_getproplen(ph, "ranges");
2602 		if (rc == 0 || rc == PROM_ERROR) {
2603 			prom_printf("Fixing up missing ISA range on Pegasos...\n");
2604 
2605 			prop[0] = 0x1;
2606 			prop[1] = 0x0;
2607 			prop[2] = rloc;
2608 			prop[3] = 0x0;
2609 			prop[4] = 0x0;
2610 			prop[5] = 0x00010000;
2611 			prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2612 		}
2613 	}
2614 
2615 	name = "/pci@80000000/ide@C,1";
2616 	ph = call_prom("finddevice", 1, 1, ADDR(name));
2617 	if (PHANDLE_VALID(ph)) {
2618 		prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2619 		prop[0] = 14;
2620 		prop[1] = 0x0;
2621 		prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2622 		prom_printf("Fixing up IDE class-code on Pegasos...\n");
2623 		rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2624 		if (rc == sizeof(u32)) {
2625 			prop[0] &= ~0x5;
2626 			prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2627 		}
2628 	}
2629 }
2630 #else
2631 #define fixup_device_tree_chrp()
2632 #endif
2633 
2634 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2635 static void __init fixup_device_tree_pmac(void)
2636 {
2637 	phandle u3, i2c, mpic;
2638 	u32 u3_rev;
2639 	u32 interrupts[2];
2640 	u32 parent;
2641 
2642 	/* Some G5s have a missing interrupt definition, fix it up here */
2643 	u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2644 	if (!PHANDLE_VALID(u3))
2645 		return;
2646 	i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2647 	if (!PHANDLE_VALID(i2c))
2648 		return;
2649 	mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2650 	if (!PHANDLE_VALID(mpic))
2651 		return;
2652 
2653 	/* check if proper rev of u3 */
2654 	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2655 	    == PROM_ERROR)
2656 		return;
2657 	if (u3_rev < 0x35 || u3_rev > 0x39)
2658 		return;
2659 	/* does it need fixup ? */
2660 	if (prom_getproplen(i2c, "interrupts") > 0)
2661 		return;
2662 
2663 	prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2664 
2665 	/* interrupt on this revision of u3 is number 0 and level */
2666 	interrupts[0] = 0;
2667 	interrupts[1] = 1;
2668 	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2669 		     &interrupts, sizeof(interrupts));
2670 	parent = (u32)mpic;
2671 	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2672 		     &parent, sizeof(parent));
2673 }
2674 #else
2675 #define fixup_device_tree_pmac()
2676 #endif
2677 
2678 #ifdef CONFIG_PPC_EFIKA
2679 /*
2680  * The MPC5200 FEC driver requires an phy-handle property to tell it how
2681  * to talk to the phy.  If the phy-handle property is missing, then this
2682  * function is called to add the appropriate nodes and link it to the
2683  * ethernet node.
2684  */
2685 static void __init fixup_device_tree_efika_add_phy(void)
2686 {
2687 	u32 node;
2688 	char prop[64];
2689 	int rv;
2690 
2691 	/* Check if /builtin/ethernet exists - bail if it doesn't */
2692 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2693 	if (!PHANDLE_VALID(node))
2694 		return;
2695 
2696 	/* Check if the phy-handle property exists - bail if it does */
2697 	rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2698 	if (!rv)
2699 		return;
2700 
2701 	/*
2702 	 * At this point the ethernet device doesn't have a phy described.
2703 	 * Now we need to add the missing phy node and linkage
2704 	 */
2705 
2706 	/* Check for an MDIO bus node - if missing then create one */
2707 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
2708 	if (!PHANDLE_VALID(node)) {
2709 		prom_printf("Adding Ethernet MDIO node\n");
2710 		call_prom("interpret", 1, 1,
2711 			" s\" /builtin\" find-device"
2712 			" new-device"
2713 				" 1 encode-int s\" #address-cells\" property"
2714 				" 0 encode-int s\" #size-cells\" property"
2715 				" s\" mdio\" device-name"
2716 				" s\" fsl,mpc5200b-mdio\" encode-string"
2717 				" s\" compatible\" property"
2718 				" 0xf0003000 0x400 reg"
2719 				" 0x2 encode-int"
2720 				" 0x5 encode-int encode+"
2721 				" 0x3 encode-int encode+"
2722 				" s\" interrupts\" property"
2723 			" finish-device");
2724 	};
2725 
2726 	/* Check for a PHY device node - if missing then create one and
2727 	 * give it's phandle to the ethernet node */
2728 	node = call_prom("finddevice", 1, 1,
2729 			 ADDR("/builtin/mdio/ethernet-phy"));
2730 	if (!PHANDLE_VALID(node)) {
2731 		prom_printf("Adding Ethernet PHY node\n");
2732 		call_prom("interpret", 1, 1,
2733 			" s\" /builtin/mdio\" find-device"
2734 			" new-device"
2735 				" s\" ethernet-phy\" device-name"
2736 				" 0x10 encode-int s\" reg\" property"
2737 				" my-self"
2738 				" ihandle>phandle"
2739 			" finish-device"
2740 			" s\" /builtin/ethernet\" find-device"
2741 				" encode-int"
2742 				" s\" phy-handle\" property"
2743 			" device-end");
2744 	}
2745 }
2746 
2747 static void __init fixup_device_tree_efika(void)
2748 {
2749 	int sound_irq[3] = { 2, 2, 0 };
2750 	int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
2751 				3,4,0, 3,5,0, 3,6,0, 3,7,0,
2752 				3,8,0, 3,9,0, 3,10,0, 3,11,0,
2753 				3,12,0, 3,13,0, 3,14,0, 3,15,0 };
2754 	u32 node;
2755 	char prop[64];
2756 	int rv, len;
2757 
2758 	/* Check if we're really running on a EFIKA */
2759 	node = call_prom("finddevice", 1, 1, ADDR("/"));
2760 	if (!PHANDLE_VALID(node))
2761 		return;
2762 
2763 	rv = prom_getprop(node, "model", prop, sizeof(prop));
2764 	if (rv == PROM_ERROR)
2765 		return;
2766 	if (strcmp(prop, "EFIKA5K2"))
2767 		return;
2768 
2769 	prom_printf("Applying EFIKA device tree fixups\n");
2770 
2771 	/* Claiming to be 'chrp' is death */
2772 	node = call_prom("finddevice", 1, 1, ADDR("/"));
2773 	rv = prom_getprop(node, "device_type", prop, sizeof(prop));
2774 	if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
2775 		prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
2776 
2777 	/* CODEGEN,description is exposed in /proc/cpuinfo so
2778 	   fix that too */
2779 	rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
2780 	if (rv != PROM_ERROR && (strstr(prop, "CHRP")))
2781 		prom_setprop(node, "/", "CODEGEN,description",
2782 			     "Efika 5200B PowerPC System",
2783 			     sizeof("Efika 5200B PowerPC System"));
2784 
2785 	/* Fixup bestcomm interrupts property */
2786 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
2787 	if (PHANDLE_VALID(node)) {
2788 		len = prom_getproplen(node, "interrupts");
2789 		if (len == 12) {
2790 			prom_printf("Fixing bestcomm interrupts property\n");
2791 			prom_setprop(node, "/builtin/bestcom", "interrupts",
2792 				     bcomm_irq, sizeof(bcomm_irq));
2793 		}
2794 	}
2795 
2796 	/* Fixup sound interrupts property */
2797 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
2798 	if (PHANDLE_VALID(node)) {
2799 		rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
2800 		if (rv == PROM_ERROR) {
2801 			prom_printf("Adding sound interrupts property\n");
2802 			prom_setprop(node, "/builtin/sound", "interrupts",
2803 				     sound_irq, sizeof(sound_irq));
2804 		}
2805 	}
2806 
2807 	/* Make sure ethernet phy-handle property exists */
2808 	fixup_device_tree_efika_add_phy();
2809 }
2810 #else
2811 #define fixup_device_tree_efika()
2812 #endif
2813 
2814 static void __init fixup_device_tree(void)
2815 {
2816 	fixup_device_tree_maple();
2817 	fixup_device_tree_maple_memory_controller();
2818 	fixup_device_tree_chrp();
2819 	fixup_device_tree_pmac();
2820 	fixup_device_tree_efika();
2821 }
2822 
2823 static void __init prom_find_boot_cpu(void)
2824 {
2825 	__be32 rval;
2826 	ihandle prom_cpu;
2827 	phandle cpu_pkg;
2828 
2829 	rval = 0;
2830 	if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
2831 		return;
2832 	prom_cpu = be32_to_cpu(rval);
2833 
2834 	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
2835 
2836 	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
2837 	prom.cpu = be32_to_cpu(rval);
2838 
2839 	prom_debug("Booting CPU hw index = %lu\n", prom.cpu);
2840 }
2841 
2842 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
2843 {
2844 #ifdef CONFIG_BLK_DEV_INITRD
2845 	if (r3 && r4 && r4 != 0xdeadbeef) {
2846 		__be64 val;
2847 
2848 		prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
2849 		prom_initrd_end = prom_initrd_start + r4;
2850 
2851 		val = cpu_to_be64(prom_initrd_start);
2852 		prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
2853 			     &val, sizeof(val));
2854 		val = cpu_to_be64(prom_initrd_end);
2855 		prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
2856 			     &val, sizeof(val));
2857 
2858 		reserve_mem(prom_initrd_start,
2859 			    prom_initrd_end - prom_initrd_start);
2860 
2861 		prom_debug("initrd_start=0x%x\n", prom_initrd_start);
2862 		prom_debug("initrd_end=0x%x\n", prom_initrd_end);
2863 	}
2864 #endif /* CONFIG_BLK_DEV_INITRD */
2865 }
2866 
2867 #ifdef CONFIG_PPC64
2868 #ifdef CONFIG_RELOCATABLE
2869 static void reloc_toc(void)
2870 {
2871 }
2872 
2873 static void unreloc_toc(void)
2874 {
2875 }
2876 #else
2877 static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
2878 {
2879 	unsigned long i;
2880 	unsigned long *toc_entry;
2881 
2882 	/* Get the start of the TOC by using r2 directly. */
2883 	asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
2884 
2885 	for (i = 0; i < nr_entries; i++) {
2886 		*toc_entry = *toc_entry + offset;
2887 		toc_entry++;
2888 	}
2889 }
2890 
2891 static void reloc_toc(void)
2892 {
2893 	unsigned long offset = reloc_offset();
2894 	unsigned long nr_entries =
2895 		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
2896 
2897 	__reloc_toc(offset, nr_entries);
2898 
2899 	mb();
2900 }
2901 
2902 static void unreloc_toc(void)
2903 {
2904 	unsigned long offset = reloc_offset();
2905 	unsigned long nr_entries =
2906 		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
2907 
2908 	mb();
2909 
2910 	__reloc_toc(-offset, nr_entries);
2911 }
2912 #endif
2913 #endif
2914 
2915 /*
2916  * We enter here early on, when the Open Firmware prom is still
2917  * handling exceptions and the MMU hash table for us.
2918  */
2919 
2920 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2921 			       unsigned long pp,
2922 			       unsigned long r6, unsigned long r7,
2923 			       unsigned long kbase)
2924 {
2925 	unsigned long hdr;
2926 
2927 #ifdef CONFIG_PPC32
2928 	unsigned long offset = reloc_offset();
2929 	reloc_got2(offset);
2930 #else
2931 	reloc_toc();
2932 #endif
2933 
2934 	/*
2935 	 * First zero the BSS
2936 	 */
2937 	memset(&__bss_start, 0, __bss_stop - __bss_start);
2938 
2939 	/*
2940 	 * Init interface to Open Firmware, get some node references,
2941 	 * like /chosen
2942 	 */
2943 	prom_init_client_services(pp);
2944 
2945 	/*
2946 	 * See if this OF is old enough that we need to do explicit maps
2947 	 * and other workarounds
2948 	 */
2949 	prom_find_mmu();
2950 
2951 	/*
2952 	 * Init prom stdout device
2953 	 */
2954 	prom_init_stdout();
2955 
2956 	prom_printf("Preparing to boot %s", linux_banner);
2957 
2958 	/*
2959 	 * Get default machine type. At this point, we do not differentiate
2960 	 * between pSeries SMP and pSeries LPAR
2961 	 */
2962 	of_platform = prom_find_machine_type();
2963 	prom_printf("Detected machine type: %x\n", of_platform);
2964 
2965 #ifndef CONFIG_NONSTATIC_KERNEL
2966 	/* Bail if this is a kdump kernel. */
2967 	if (PHYSICAL_START > 0)
2968 		prom_panic("Error: You can't boot a kdump kernel from OF!\n");
2969 #endif
2970 
2971 	/*
2972 	 * Check for an initrd
2973 	 */
2974 	prom_check_initrd(r3, r4);
2975 
2976 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
2977 	/*
2978 	 * On pSeries, inform the firmware about our capabilities
2979 	 */
2980 	if (of_platform == PLATFORM_PSERIES ||
2981 	    of_platform == PLATFORM_PSERIES_LPAR)
2982 		prom_send_capabilities();
2983 #endif
2984 
2985 	/*
2986 	 * Copy the CPU hold code
2987 	 */
2988 	if (of_platform != PLATFORM_POWERMAC)
2989 		copy_and_flush(0, kbase, 0x100, 0);
2990 
2991 	/*
2992 	 * Do early parsing of command line
2993 	 */
2994 	early_cmdline_parse();
2995 
2996 	/*
2997 	 * Initialize memory management within prom_init
2998 	 */
2999 	prom_init_mem();
3000 
3001 	/*
3002 	 * Determine which cpu is actually running right _now_
3003 	 */
3004 	prom_find_boot_cpu();
3005 
3006 	/*
3007 	 * Initialize display devices
3008 	 */
3009 	prom_check_displays();
3010 
3011 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
3012 	/*
3013 	 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
3014 	 * that uses the allocator, we need to make sure we get the top of memory
3015 	 * available for us here...
3016 	 */
3017 	if (of_platform == PLATFORM_PSERIES)
3018 		prom_initialize_tce_table();
3019 #endif
3020 
3021 	/*
3022 	 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3023 	 * have a usable RTAS implementation.
3024 	 */
3025 	if (of_platform != PLATFORM_POWERMAC &&
3026 	    of_platform != PLATFORM_OPAL)
3027 		prom_instantiate_rtas();
3028 
3029 #ifdef CONFIG_PPC_POWERNV
3030 #ifdef __BIG_ENDIAN__
3031 	/* Detect HAL and try instanciating it & doing takeover */
3032 	if (of_platform == PLATFORM_PSERIES_LPAR) {
3033 		prom_query_opal();
3034 		if (of_platform == PLATFORM_OPAL) {
3035 			prom_opal_hold_cpus();
3036 			prom_opal_takeover();
3037 		}
3038 	} else
3039 #endif /* __BIG_ENDIAN__ */
3040 	if (of_platform == PLATFORM_OPAL)
3041 		prom_instantiate_opal();
3042 #endif /* CONFIG_PPC_POWERNV */
3043 
3044 #ifdef CONFIG_PPC64
3045 	/* instantiate sml */
3046 	prom_instantiate_sml();
3047 #endif
3048 
3049 	/*
3050 	 * On non-powermacs, put all CPUs in spin-loops.
3051 	 *
3052 	 * PowerMacs use a different mechanism to spin CPUs
3053 	 *
3054 	 * (This must be done after instanciating RTAS)
3055 	 */
3056 	if (of_platform != PLATFORM_POWERMAC &&
3057 	    of_platform != PLATFORM_OPAL)
3058 		prom_hold_cpus();
3059 
3060 	/*
3061 	 * Fill in some infos for use by the kernel later on
3062 	 */
3063 	if (prom_memory_limit) {
3064 		__be64 val = cpu_to_be64(prom_memory_limit);
3065 		prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3066 			     &val, sizeof(val));
3067 	}
3068 #ifdef CONFIG_PPC64
3069 	if (prom_iommu_off)
3070 		prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3071 			     NULL, 0);
3072 
3073 	if (prom_iommu_force_on)
3074 		prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3075 			     NULL, 0);
3076 
3077 	if (prom_tce_alloc_start) {
3078 		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3079 			     &prom_tce_alloc_start,
3080 			     sizeof(prom_tce_alloc_start));
3081 		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3082 			     &prom_tce_alloc_end,
3083 			     sizeof(prom_tce_alloc_end));
3084 	}
3085 #endif
3086 
3087 	/*
3088 	 * Fixup any known bugs in the device-tree
3089 	 */
3090 	fixup_device_tree();
3091 
3092 	/*
3093 	 * Now finally create the flattened device-tree
3094 	 */
3095 	prom_printf("copying OF device tree...\n");
3096 	flatten_device_tree();
3097 
3098 	/*
3099 	 * in case stdin is USB and still active on IBM machines...
3100 	 * Unfortunately quiesce crashes on some powermacs if we have
3101 	 * closed stdin already (in particular the powerbook 101). It
3102 	 * appears that the OPAL version of OFW doesn't like it either.
3103 	 */
3104 	if (of_platform != PLATFORM_POWERMAC &&
3105 	    of_platform != PLATFORM_OPAL)
3106 		prom_close_stdin();
3107 
3108 	/*
3109 	 * Call OF "quiesce" method to shut down pending DMA's from
3110 	 * devices etc...
3111 	 */
3112 	prom_printf("Calling quiesce...\n");
3113 	call_prom("quiesce", 0, 0);
3114 
3115 	/*
3116 	 * And finally, call the kernel passing it the flattened device
3117 	 * tree and NULL as r5, thus triggering the new entry point which
3118 	 * is common to us and kexec
3119 	 */
3120 	hdr = dt_header_start;
3121 
3122 	/* Don't print anything after quiesce under OPAL, it crashes OFW */
3123 	if (of_platform != PLATFORM_OPAL) {
3124 		prom_printf("returning from prom_init\n");
3125 		prom_debug("->dt_header_start=0x%x\n", hdr);
3126 	}
3127 
3128 #ifdef CONFIG_PPC32
3129 	reloc_got2(-offset);
3130 #else
3131 	unreloc_toc();
3132 #endif
3133 
3134 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
3135 	/* OPAL early debug gets the OPAL base & entry in r8 and r9 */
3136 	__start(hdr, kbase, 0, 0, 0,
3137 		prom_opal_base, prom_opal_entry);
3138 #else
3139 	__start(hdr, kbase, 0, 0, 0, 0, 0);
3140 #endif
3141 
3142 	return 0;
3143 }
3144