xref: /openbmc/linux/arch/powerpc/kernel/prom_init.c (revision f7777dcc)
1 /*
2  * Procedures for interfacing to Open Firmware.
3  *
4  * Paul Mackerras	August 1996.
5  * Copyright (C) 1996-2005 Paul Mackerras.
6  *
7  *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8  *    {engebret|bergner}@us.ibm.com
9  *
10  *      This program is free software; you can redistribute it and/or
11  *      modify it under the terms of the GNU General Public License
12  *      as published by the Free Software Foundation; either version
13  *      2 of the License, or (at your option) any later version.
14  */
15 
16 #undef DEBUG_PROM
17 
18 #include <stdarg.h>
19 #include <linux/kernel.h>
20 #include <linux/string.h>
21 #include <linux/init.h>
22 #include <linux/threads.h>
23 #include <linux/spinlock.h>
24 #include <linux/types.h>
25 #include <linux/pci.h>
26 #include <linux/proc_fs.h>
27 #include <linux/stringify.h>
28 #include <linux/delay.h>
29 #include <linux/initrd.h>
30 #include <linux/bitops.h>
31 #include <asm/prom.h>
32 #include <asm/rtas.h>
33 #include <asm/page.h>
34 #include <asm/processor.h>
35 #include <asm/irq.h>
36 #include <asm/io.h>
37 #include <asm/smp.h>
38 #include <asm/mmu.h>
39 #include <asm/pgtable.h>
40 #include <asm/pci.h>
41 #include <asm/iommu.h>
42 #include <asm/btext.h>
43 #include <asm/sections.h>
44 #include <asm/machdep.h>
45 #include <asm/opal.h>
46 
47 #include <linux/linux_logo.h>
48 
49 /*
50  * Eventually bump that one up
51  */
52 #define DEVTREE_CHUNK_SIZE	0x100000
53 
54 /*
55  * This is the size of the local memory reserve map that gets copied
56  * into the boot params passed to the kernel. That size is totally
57  * flexible as the kernel just reads the list until it encounters an
58  * entry with size 0, so it can be changed without breaking binary
59  * compatibility
60  */
61 #define MEM_RESERVE_MAP_SIZE	8
62 
63 /*
64  * prom_init() is called very early on, before the kernel text
65  * and data have been mapped to KERNELBASE.  At this point the code
66  * is running at whatever address it has been loaded at.
67  * On ppc32 we compile with -mrelocatable, which means that references
68  * to extern and static variables get relocated automatically.
69  * ppc64 objects are always relocatable, we just need to relocate the
70  * TOC.
71  *
72  * Because OF may have mapped I/O devices into the area starting at
73  * KERNELBASE, particularly on CHRP machines, we can't safely call
74  * OF once the kernel has been mapped to KERNELBASE.  Therefore all
75  * OF calls must be done within prom_init().
76  *
77  * ADDR is used in calls to call_prom.  The 4th and following
78  * arguments to call_prom should be 32-bit values.
79  * On ppc64, 64 bit values are truncated to 32 bits (and
80  * fortunately don't get interpreted as two arguments).
81  */
82 #define ADDR(x)		(u32)(unsigned long)(x)
83 
84 #ifdef CONFIG_PPC64
85 #define OF_WORKAROUNDS	0
86 #else
87 #define OF_WORKAROUNDS	of_workarounds
88 int of_workarounds;
89 #endif
90 
91 #define OF_WA_CLAIM	1	/* do phys/virt claim separately, then map */
92 #define OF_WA_LONGTRAIL	2	/* work around longtrail bugs */
93 
94 #define PROM_BUG() do {						\
95         prom_printf("kernel BUG at %s line 0x%x!\n",		\
96 		    __FILE__, __LINE__);			\
97         __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR);	\
98 } while (0)
99 
100 #ifdef DEBUG_PROM
101 #define prom_debug(x...)	prom_printf(x)
102 #else
103 #define prom_debug(x...)
104 #endif
105 
106 
107 typedef u32 prom_arg_t;
108 
109 struct prom_args {
110         __be32 service;
111         __be32 nargs;
112         __be32 nret;
113         __be32 args[10];
114 };
115 
116 struct prom_t {
117 	ihandle root;
118 	phandle chosen;
119 	int cpu;
120 	ihandle stdout;
121 	ihandle mmumap;
122 	ihandle memory;
123 };
124 
125 struct mem_map_entry {
126 	__be64	base;
127 	__be64	size;
128 };
129 
130 typedef __be32 cell_t;
131 
132 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
133 		    unsigned long r6, unsigned long r7, unsigned long r8,
134 		    unsigned long r9);
135 
136 #ifdef CONFIG_PPC64
137 extern int enter_prom(struct prom_args *args, unsigned long entry);
138 #else
139 static inline int enter_prom(struct prom_args *args, unsigned long entry)
140 {
141 	return ((int (*)(struct prom_args *))entry)(args);
142 }
143 #endif
144 
145 extern void copy_and_flush(unsigned long dest, unsigned long src,
146 			   unsigned long size, unsigned long offset);
147 
148 /* prom structure */
149 static struct prom_t __initdata prom;
150 
151 static unsigned long prom_entry __initdata;
152 
153 #define PROM_SCRATCH_SIZE 256
154 
155 static char __initdata of_stdout_device[256];
156 static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
157 
158 static unsigned long __initdata dt_header_start;
159 static unsigned long __initdata dt_struct_start, dt_struct_end;
160 static unsigned long __initdata dt_string_start, dt_string_end;
161 
162 static unsigned long __initdata prom_initrd_start, prom_initrd_end;
163 
164 #ifdef CONFIG_PPC64
165 static int __initdata prom_iommu_force_on;
166 static int __initdata prom_iommu_off;
167 static unsigned long __initdata prom_tce_alloc_start;
168 static unsigned long __initdata prom_tce_alloc_end;
169 #endif
170 
171 /* Platform codes are now obsolete in the kernel. They are only used within
172  * this file and will ultimately go away too. Feel free to change them if you
173  * need to; they are no longer shared with anything outside of this file.
174  */
175 #define PLATFORM_PSERIES	0x0100
176 #define PLATFORM_PSERIES_LPAR	0x0101
177 #define PLATFORM_LPAR		0x0001
178 #define PLATFORM_POWERMAC	0x0400
179 #define PLATFORM_GENERIC	0x0500
180 #define PLATFORM_OPAL		0x0600
181 
182 static int __initdata of_platform;
183 
184 static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
185 
186 static unsigned long __initdata prom_memory_limit;
187 
188 static unsigned long __initdata alloc_top;
189 static unsigned long __initdata alloc_top_high;
190 static unsigned long __initdata alloc_bottom;
191 static unsigned long __initdata rmo_top;
192 static unsigned long __initdata ram_top;
193 
194 static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
195 static int __initdata mem_reserve_cnt;
196 
197 static cell_t __initdata regbuf[1024];
198 
199 static bool rtas_has_query_cpu_stopped;
200 
201 
202 /*
203  * Error results ... some OF calls will return "-1" on error, some
204  * will return 0, some will return either. To simplify, here are
205  * macros to use with any ihandle or phandle return value to check if
206  * it is valid
207  */
208 
209 #define PROM_ERROR		(-1u)
210 #define PHANDLE_VALID(p)	((p) != 0 && (p) != PROM_ERROR)
211 #define IHANDLE_VALID(i)	((i) != 0 && (i) != PROM_ERROR)
212 
213 
214 /* This is the one and *ONLY* place where we actually call open
215  * firmware.
216  */
217 
218 static int __init call_prom(const char *service, int nargs, int nret, ...)
219 {
220 	int i;
221 	struct prom_args args;
222 	va_list list;
223 
224 	args.service = cpu_to_be32(ADDR(service));
225 	args.nargs = cpu_to_be32(nargs);
226 	args.nret = cpu_to_be32(nret);
227 
228 	va_start(list, nret);
229 	for (i = 0; i < nargs; i++)
230 		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
231 	va_end(list);
232 
233 	for (i = 0; i < nret; i++)
234 		args.args[nargs+i] = 0;
235 
236 	if (enter_prom(&args, prom_entry) < 0)
237 		return PROM_ERROR;
238 
239 	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
240 }
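
/*
 * Illustrative use (this mirrors prom_getprop() further down): reading a
 * property is a single client-interface call,
 *
 *	len = call_prom("getprop", 4, 1, node, ADDR("reg"),
 *			(u32)(unsigned long) buf, (u32) sizeof(buf));
 *
 * where the 4 input arguments follow nret and the single return value
 * (the property length, or PROM_ERROR) comes back from args[nargs].
 */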
241 
242 static int __init call_prom_ret(const char *service, int nargs, int nret,
243 				prom_arg_t *rets, ...)
244 {
245 	int i;
246 	struct prom_args args;
247 	va_list list;
248 
249 	args.service = cpu_to_be32(ADDR(service));
250 	args.nargs = cpu_to_be32(nargs);
251 	args.nret = cpu_to_be32(nret);
252 
253 	va_start(list, rets);
254 	for (i = 0; i < nargs; i++)
255 		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
256 	va_end(list);
257 
258 	for (i = 0; i < nret; i++)
259 		args.args[nargs+i] = 0;
260 
261 	if (enter_prom(&args, prom_entry) < 0)
262 		return PROM_ERROR;
263 
264 	if (rets != NULL)
265 		for (i = 1; i < nret; ++i)
266 			rets[i-1] = be32_to_cpu(args.args[nargs+i]);
267 
268 	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
269 }
270 
271 
272 static void __init prom_print(const char *msg)
273 {
274 	const char *p, *q;
275 
276 	if (prom.stdout == 0)
277 		return;
278 
279 	for (p = msg; *p != 0; p = q) {
280 		for (q = p; *q != 0 && *q != '\n'; ++q)
281 			;
282 		if (q > p)
283 			call_prom("write", 3, 1, prom.stdout, p, q - p);
284 		if (*q == 0)
285 			break;
286 		++q;
287 		call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
288 	}
289 }
290 
291 
292 static void __init prom_print_hex(unsigned long val)
293 {
294 	int i, nibbles = sizeof(val)*2;
295 	char buf[sizeof(val)*2+1];
296 
297 	for (i = nibbles-1;  i >= 0;  i--) {
298 		buf[i] = (val & 0xf) + '0';
299 		if (buf[i] > '9')
300 			buf[i] += ('a'-'0'-10);
301 		val >>= 4;
302 	}
303 	buf[nibbles] = '\0';
304 	call_prom("write", 3, 1, prom.stdout, buf, nibbles);
305 }
306 
307 /* max number of decimal digits in an unsigned long */
308 #define UL_DIGITS 21
309 static void __init prom_print_dec(unsigned long val)
310 {
311 	int i, size;
312 	char buf[UL_DIGITS+1];
313 
314 	for (i = UL_DIGITS-1; i >= 0;  i--) {
315 		buf[i] = (val % 10) + '0';
316 		val = val/10;
317 		if (val == 0)
318 			break;
319 	}
320 	/* print from the first significant digit */
321 	size = UL_DIGITS - i;
322 	call_prom("write", 3, 1, prom.stdout, buf+i, size);
323 }
324 
325 static void __init prom_printf(const char *format, ...)
326 {
327 	const char *p, *q, *s;
328 	va_list args;
329 	unsigned long v;
330 	long vs;
331 
332 	va_start(args, format);
333 	for (p = format; *p != 0; p = q) {
334 		for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
335 			;
336 		if (q > p)
337 			call_prom("write", 3, 1, prom.stdout, p, q - p);
338 		if (*q == 0)
339 			break;
340 		if (*q == '\n') {
341 			++q;
342 			call_prom("write", 3, 1, prom.stdout,
343 				  ADDR("\r\n"), 2);
344 			continue;
345 		}
346 		++q;
347 		if (*q == 0)
348 			break;
349 		switch (*q) {
350 		case 's':
351 			++q;
352 			s = va_arg(args, const char *);
353 			prom_print(s);
354 			break;
355 		case 'x':
356 			++q;
357 			v = va_arg(args, unsigned long);
358 			prom_print_hex(v);
359 			break;
360 		case 'd':
361 			++q;
362 			vs = va_arg(args, int);
363 			if (vs < 0) {
364 				prom_print("-");
365 				vs = -vs;
366 			}
367 			prom_print_dec(vs);
368 			break;
369 		case 'l':
370 			++q;
371 			if (*q == 0)
372 				break;
373 			else if (*q == 'x') {
374 				++q;
375 				v = va_arg(args, unsigned long);
376 				prom_print_hex(v);
377 			} else if (*q == 'u') { /* '%lu' */
378 				++q;
379 				v = va_arg(args, unsigned long);
380 				prom_print_dec(v);
381 			} else if (*q == 'd') { /* %ld */
382 				++q;
383 				vs = va_arg(args, long);
384 				if (vs < 0) {
385 					prom_print("-");
386 					vs = -vs;
387 				}
388 				prom_print_dec(vs);
389 			}
390 			break;
391 		}
392 	}
393 }
394 
395 
396 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
397 				unsigned long align)
398 {
399 
400 	if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
401 		/*
402 		 * Old OF requires we claim physical and virtual separately
403 		 * and then map explicitly (assuming virtual mode)
404 		 */
405 		int ret;
406 		prom_arg_t result;
407 
408 		ret = call_prom_ret("call-method", 5, 2, &result,
409 				    ADDR("claim"), prom.memory,
410 				    align, size, virt);
411 		if (ret != 0 || result == -1)
412 			return -1;
413 		ret = call_prom_ret("call-method", 5, 2, &result,
414 				    ADDR("claim"), prom.mmumap,
415 				    align, size, virt);
416 		if (ret != 0) {
417 			call_prom("call-method", 4, 1, ADDR("release"),
418 				  prom.memory, size, virt);
419 			return -1;
420 		}
421 		/* the 0x12 is M (coherence) + PP == read/write */
422 		call_prom("call-method", 6, 1,
423 			  ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
424 		return virt;
425 	}
426 	return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
427 			 (prom_arg_t)align);
428 }
429 
430 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
431 {
432 	prom_print(reason);
433 	/* Do not call exit because it clears the screen on pmac;
434 	 * it also causes some sort of double-fault on early pmacs */
435 	if (of_platform == PLATFORM_POWERMAC)
436 		asm("trap\n");
437 
438 	/* ToDo: should put up an SRC here on pSeries */
439 	call_prom("exit", 0, 0);
440 
441 	for (;;)			/* should never get here */
442 		;
443 }
444 
445 
446 static int __init prom_next_node(phandle *nodep)
447 {
448 	phandle node;
449 
450 	if ((node = *nodep) != 0
451 	    && (*nodep = call_prom("child", 1, 1, node)) != 0)
452 		return 1;
453 	if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
454 		return 1;
455 	for (;;) {
456 		if ((node = call_prom("parent", 1, 1, node)) == 0)
457 			return 0;
458 		if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
459 			return 1;
460 	}
461 }
462 
463 static inline int prom_getprop(phandle node, const char *pname,
464 			       void *value, size_t valuelen)
465 {
466 	return call_prom("getprop", 4, 1, node, ADDR(pname),
467 			 (u32)(unsigned long) value, (u32) valuelen);
468 }
469 
470 static inline int prom_getproplen(phandle node, const char *pname)
471 {
472 	return call_prom("getproplen", 2, 1, node, ADDR(pname));
473 }
474 
475 static void add_string(char **str, const char *q)
476 {
477 	char *p = *str;
478 
479 	while (*q)
480 		*p++ = *q++;
481 	*p++ = ' ';
482 	*str = p;
483 }
484 
485 static char *tohex(unsigned int x)
486 {
487 	static char digits[] = "0123456789abcdef";
488 	static char result[9];
489 	int i;
490 
491 	result[8] = 0;
492 	i = 8;
493 	do {
494 		--i;
495 		result[i] = digits[x & 0xf];
496 		x >>= 4;
497 	} while (x != 0 && i > 0);
498 	return &result[i];
499 }
500 
501 static int __init prom_setprop(phandle node, const char *nodename,
502 			       const char *pname, void *value, size_t valuelen)
503 {
504 	char cmd[256], *p;
505 
506 	if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
507 		return call_prom("setprop", 4, 1, node, ADDR(pname),
508 				 (u32)(unsigned long) value, (u32) valuelen);
509 
510 	/* gah... setprop doesn't work on longtrail, have to use interpret */
511 	p = cmd;
512 	add_string(&p, "dev");
513 	add_string(&p, nodename);
514 	add_string(&p, tohex((u32)(unsigned long) value));
515 	add_string(&p, tohex(valuelen));
516 	add_string(&p, tohex(ADDR(pname)));
517 	add_string(&p, tohex(strlen(pname)));
518 	add_string(&p, "property");
519 	*p = 0;
520 	return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
521 }
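
/*
 * For the Longtrail workaround above, the interpreted command ends up
 * looking like this (the addresses here are purely hypothetical):
 *
 *	dev /rtas 1c00000 4 1bf0000 f property
 *
 * i.e. select the node with "dev", push the value address/length and the
 * property-name address/length, and let the Forth word "property" attach it.
 */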
522 
523 /* We can't use the standard versions because of relocation headaches. */
524 #define isxdigit(c)	(('0' <= (c) && (c) <= '9') \
525 			 || ('a' <= (c) && (c) <= 'f') \
526 			 || ('A' <= (c) && (c) <= 'F'))
527 
528 #define isdigit(c)	('0' <= (c) && (c) <= '9')
529 #define islower(c)	('a' <= (c) && (c) <= 'z')
530 #define toupper(c)	(islower(c) ? ((c) - 'a' + 'A') : (c))
531 
532 static unsigned long prom_strtoul(const char *cp, const char **endp)
533 {
534 	unsigned long result = 0, base = 10, value;
535 
536 	if (*cp == '0') {
537 		base = 8;
538 		cp++;
539 		if (toupper(*cp) == 'X') {
540 			cp++;
541 			base = 16;
542 		}
543 	}
544 
545 	while (isxdigit(*cp) &&
546 	       (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
547 		result = result * base + value;
548 		cp++;
549 	}
550 
551 	if (endp)
552 		*endp = cp;
553 
554 	return result;
555 }
556 
557 static unsigned long prom_memparse(const char *ptr, const char **retptr)
558 {
559 	unsigned long ret = prom_strtoul(ptr, retptr);
560 	int shift = 0;
561 
562 	/*
563 	 * We can't use a switch here because GCC *may* generate a
564 	 * jump table which won't work, because we're not running at
565 	 * the address we're linked at.
566 	 */
567 	if ('G' == **retptr || 'g' == **retptr)
568 		shift = 30;
569 
570 	if ('M' == **retptr || 'm' == **retptr)
571 		shift = 20;
572 
573 	if ('K' == **retptr || 'k' == **retptr)
574 		shift = 10;
575 
576 	if (shift) {
577 		ret <<= shift;
578 		(*retptr)++;
579 	}
580 
581 	return ret;
582 }
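
/*
 * Example: prom_memparse("512M", &p) returns 512 << 20 and leaves p just
 * past the 'M'.  prom_strtoul() above understands the usual prefixes, so
 * "0x2000" parses as 8192 and a leading 0 selects octal.
 */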
583 
584 /*
585  * Early parsing of the command line passed to the kernel, used for
586  * "mem=x" and the options that affect the iommu
587  */
588 static void __init early_cmdline_parse(void)
589 {
590 	const char *opt;
591 
592 	char *p;
593 	int l = 0;
594 
595 	prom_cmd_line[0] = 0;
596 	p = prom_cmd_line;
597 	if ((long)prom.chosen > 0)
598 		l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
599 #ifdef CONFIG_CMDLINE
600 	if (l <= 0 || p[0] == '\0') /* dbl check */
601 		strlcpy(prom_cmd_line,
602 			CONFIG_CMDLINE, sizeof(prom_cmd_line));
603 #endif /* CONFIG_CMDLINE */
604 	prom_printf("command line: %s\n", prom_cmd_line);
605 
606 #ifdef CONFIG_PPC64
607 	opt = strstr(prom_cmd_line, "iommu=");
608 	if (opt) {
609 		prom_printf("iommu opt is: %s\n", opt);
610 		opt += 6;
611 		while (*opt && *opt == ' ')
612 			opt++;
613 		if (!strncmp(opt, "off", 3))
614 			prom_iommu_off = 1;
615 		else if (!strncmp(opt, "force", 5))
616 			prom_iommu_force_on = 1;
617 	}
618 #endif
619 	opt = strstr(prom_cmd_line, "mem=");
620 	if (opt) {
621 		opt += 4;
622 		prom_memory_limit = prom_memparse(opt, (const char **)&opt);
623 #ifdef CONFIG_PPC64
624 		/* Align to 16 MB == size of ppc64 large page */
625 		prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
626 #endif
627 	}
628 }
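
/*
 * Example: booting with "mem=512M iommu=off" on ppc64 leaves
 * prom_memory_limit = 512MB (already a multiple of the 16MB alignment)
 * and prom_iommu_off = 1.
 */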
629 
630 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
631 /*
632  * The architecture vector has an array of PVR mask/value pairs,
633  * followed by # option vectors - 1, followed by the option vectors.
634  *
635  * See prom.h for the definition of the bits specified in the
636  * architecture vector.
637  *
638  * Because the description vector contains a mix of byte and word
639  * values, we declare it as an unsigned char array, and use this
640  * macro to put word values in.
641  */
642 #define W(x)	((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
643 		((x) >> 8) & 0xff, (x) & 0xff
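
/*
 * For example, W(0xfffe0000) expands to the four bytes 0xff, 0xfe, 0x00,
 * 0x00, i.e. the word laid out in big-endian byte order.
 */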
644 
645 unsigned char ibm_architecture_vec[] = {
646 	W(0xfffe0000), W(0x003a0000),	/* POWER5/POWER5+ */
647 	W(0xffff0000), W(0x003e0000),	/* POWER6 */
648 	W(0xffff0000), W(0x003f0000),	/* POWER7 */
649 	W(0xffff0000), W(0x004b0000),	/* POWER8E */
650 	W(0xffff0000), W(0x004d0000),	/* POWER8 */
651 	W(0xffffffff), W(0x0f000004),	/* all 2.07-compliant */
652 	W(0xffffffff), W(0x0f000003),	/* all 2.06-compliant */
653 	W(0xffffffff), W(0x0f000002),	/* all 2.05-compliant */
654 	W(0xfffffffe), W(0x0f000001),	/* all 2.04-compliant and earlier */
655 	6 - 1,				/* 6 option vectors */
656 
657 	/* option vector 1: processor architectures supported */
658 	3 - 2,				/* length */
659 	0,				/* don't ignore, don't halt */
660 	OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
661 	OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
662 
663 	/* option vector 2: Open Firmware options supported */
664 	34 - 2,				/* length */
665 	OV2_REAL_MODE,
666 	0, 0,
667 	W(0xffffffff),			/* real_base */
668 	W(0xffffffff),			/* real_size */
669 	W(0xffffffff),			/* virt_base */
670 	W(0xffffffff),			/* virt_size */
671 	W(0xffffffff),			/* load_base */
672 	W(256),				/* 256MB min RMA */
673 	W(0xffffffff),			/* full client load */
674 	0,				/* min RMA percentage of total RAM */
675 	48,				/* max log_2(hash table size) */
676 
677 	/* option vector 3: processor options supported */
678 	3 - 2,				/* length */
679 	0,				/* don't ignore, don't halt */
680 	OV3_FP | OV3_VMX | OV3_DFP,
681 
682 	/* option vector 4: IBM PAPR implementation */
683 	3 - 2,				/* length */
684 	0,				/* don't halt */
685 	OV4_MIN_ENT_CAP,		/* minimum VP entitled capacity */
686 
687 	/* option vector 5: PAPR/OF options */
688 	19 - 2,				/* length */
689 	0,				/* don't ignore, don't halt */
690 	OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
691 	OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
692 #ifdef CONFIG_PCI_MSI
693 	/* PCIe/MSI support.  Without MSI full PCIe is not supported */
694 	OV5_FEAT(OV5_MSI),
695 #else
696 	0,
697 #endif
698 	0,
699 #ifdef CONFIG_PPC_SMLPAR
700 	OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
701 #else
702 	0,
703 #endif
704 	OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
705 	0,
706 	0,
707 	0,
708 	/* WARNING: The offset of the "number of cores" field below
709 	 * must match the IBM_ARCH_VEC_NRCORES_OFFSET macro below.
710 	 * Update the definition if the structure layout changes.
711 	 */
712 #define IBM_ARCH_VEC_NRCORES_OFFSET	125
713 	W(NR_CPUS),			/* number of cores supported */
714 	0,
715 	0,
716 	0,
717 	0,
718 	OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
719 	OV5_FEAT(OV5_PFO_HW_842),
720 	OV5_FEAT(OV5_SUB_PROCESSORS),
721 	/* option vector 6: IBM PAPR hints */
722 	4 - 2,				/* length */
723 	0,
724 	0,
725 	OV6_LINUX,
726 
727 };
728 
729 /* Old method - ELF header with PT_NOTE sections only works on BE */
730 #ifdef __BIG_ENDIAN__
731 static struct fake_elf {
732 	Elf32_Ehdr	elfhdr;
733 	Elf32_Phdr	phdr[2];
734 	struct chrpnote {
735 		u32	namesz;
736 		u32	descsz;
737 		u32	type;
738 		char	name[8];	/* "PowerPC" */
739 		struct chrpdesc {
740 			u32	real_mode;
741 			u32	real_base;
742 			u32	real_size;
743 			u32	virt_base;
744 			u32	virt_size;
745 			u32	load_base;
746 		} chrpdesc;
747 	} chrpnote;
748 	struct rpanote {
749 		u32	namesz;
750 		u32	descsz;
751 		u32	type;
752 		char	name[24];	/* "IBM,RPA-Client-Config" */
753 		struct rpadesc {
754 			u32	lpar_affinity;
755 			u32	min_rmo_size;
756 			u32	min_rmo_percent;
757 			u32	max_pft_size;
758 			u32	splpar;
759 			u32	min_load;
760 			u32	new_mem_def;
761 			u32	ignore_me;
762 		} rpadesc;
763 	} rpanote;
764 } fake_elf = {
765 	.elfhdr = {
766 		.e_ident = { 0x7f, 'E', 'L', 'F',
767 			     ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
768 		.e_type = ET_EXEC,	/* yeah right */
769 		.e_machine = EM_PPC,
770 		.e_version = EV_CURRENT,
771 		.e_phoff = offsetof(struct fake_elf, phdr),
772 		.e_phentsize = sizeof(Elf32_Phdr),
773 		.e_phnum = 2
774 	},
775 	.phdr = {
776 		[0] = {
777 			.p_type = PT_NOTE,
778 			.p_offset = offsetof(struct fake_elf, chrpnote),
779 			.p_filesz = sizeof(struct chrpnote)
780 		}, [1] = {
781 			.p_type = PT_NOTE,
782 			.p_offset = offsetof(struct fake_elf, rpanote),
783 			.p_filesz = sizeof(struct rpanote)
784 		}
785 	},
786 	.chrpnote = {
787 		.namesz = sizeof("PowerPC"),
788 		.descsz = sizeof(struct chrpdesc),
789 		.type = 0x1275,
790 		.name = "PowerPC",
791 		.chrpdesc = {
792 			.real_mode = ~0U,	/* ~0 means "don't care" */
793 			.real_base = ~0U,
794 			.real_size = ~0U,
795 			.virt_base = ~0U,
796 			.virt_size = ~0U,
797 			.load_base = ~0U
798 		},
799 	},
800 	.rpanote = {
801 		.namesz = sizeof("IBM,RPA-Client-Config"),
802 		.descsz = sizeof(struct rpadesc),
803 		.type = 0x12759999,
804 		.name = "IBM,RPA-Client-Config",
805 		.rpadesc = {
806 			.lpar_affinity = 0,
807 			.min_rmo_size = 64,	/* in megabytes */
808 			.min_rmo_percent = 0,
809 			.max_pft_size = 48,	/* 2^48 bytes max PFT size */
810 			.splpar = 1,
811 			.min_load = ~0U,
812 			.new_mem_def = 0
813 		}
814 	}
815 };
816 #endif /* __BIG_ENDIAN__ */
817 
818 static int __init prom_count_smt_threads(void)
819 {
820 	phandle node;
821 	char type[64];
822 	unsigned int plen;
823 
824 	/* Pick up the first CPU node we can find */
825 	for (node = 0; prom_next_node(&node); ) {
826 		type[0] = 0;
827 		prom_getprop(node, "device_type", type, sizeof(type));
828 
829 		if (strcmp(type, "cpu"))
830 			continue;
831 		/*
832 		 * There is an entry for each smt thread, each entry being
833 		 * 4 bytes long.  All cpus should have the same number of
834 		 * smt threads, so return after finding the first.
835 		 */
836 		plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
837 		if (plen == PROM_ERROR)
838 			break;
839 		plen >>= 2;
840 		prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
841 
842 		/* Sanity check */
843 		if (plen < 1 || plen > 64) {
844 			prom_printf("Threads per core %lu out of bounds, assuming 1\n",
845 				    (unsigned long)plen);
846 			return 1;
847 		}
848 		return plen;
849 	}
850 	prom_debug("No threads found, assuming 1 per core\n");
851 
852 	return 1;
853 
854 }
855 
856 
857 static void __init prom_send_capabilities(void)
858 {
859 	ihandle root;
860 	prom_arg_t ret;
861 	__be32 *cores;
862 
863 	root = call_prom("open", 1, 1, ADDR("/"));
864 	if (root != 0) {
865 		/* We need to tell the FW about the number of cores we support.
866 		 *
867 		 * To do that, we count the number of threads on the first core
868 		 * (we assume this is the same for all cores) and use it to
869 		 * divide NR_CPUS.
870 		 */
871 		cores = (__be32 *)&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
872 		if (be32_to_cpup(cores) != NR_CPUS) {
873 			prom_printf("WARNING ! "
874 				    "ibm_architecture_vec structure inconsistent: %lu!\n",
875 				    be32_to_cpup(cores));
876 		} else {
877 			*cores = cpu_to_be32(DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()));
878 			prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
879 				    be32_to_cpup(cores), NR_CPUS);
880 		}
881 
882 		/* try calling the ibm,client-architecture-support method */
883 		prom_printf("Calling ibm,client-architecture-support...");
884 		if (call_prom_ret("call-method", 3, 2, &ret,
885 				  ADDR("ibm,client-architecture-support"),
886 				  root,
887 				  ADDR(ibm_architecture_vec)) == 0) {
888 			/* the call exists... */
889 			if (ret)
890 				prom_printf("\nWARNING: ibm,client-architecture"
891 					    "-support call FAILED!\n");
892 			call_prom("close", 1, 0, root);
893 			prom_printf(" done\n");
894 			return;
895 		}
896 		call_prom("close", 1, 0, root);
897 		prom_printf(" not implemented\n");
898 	}
899 
900 #ifdef __BIG_ENDIAN__
901 	{
902 		ihandle elfloader;
903 
904 		/* no ibm,client-architecture-support call, try the old way */
905 		elfloader = call_prom("open", 1, 1,
906 				      ADDR("/packages/elf-loader"));
907 		if (elfloader == 0) {
908 			prom_printf("couldn't open /packages/elf-loader\n");
909 			return;
910 		}
911 		call_prom("call-method", 3, 1, ADDR("process-elf-header"),
912 			  elfloader, ADDR(&fake_elf));
913 		call_prom("close", 1, 0, elfloader);
914 	}
915 #endif /* __BIG_ENDIAN__ */
916 }
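
/*
 * Example of the core-count fixup above: with NR_CPUS = 2048 and
 * prom_count_smt_threads() returning 8, the vector advertises
 * DIV_ROUND_UP(2048, 8) = 256 cores to firmware.
 */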
917 #endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
918 
919 /*
920  * Memory allocation strategy... our layout is normally:
921  *
922  *  at 14Mb or more we have vmlinux, then a gap and initrd.  In some
923  *  rare cases, initrd might end up being before the kernel though.
924  *  We assume this won't overwrite the final kernel at 0; we have no
925  *  provision to handle that in this version, but it should hopefully
926  *  never happen.
927  *
928  *  alloc_top is set to the top of the RMO, and is eventually shrunk
929  *  down if the TCEs overlap it
930  *
931  *  alloc_bottom is set to the top of kernel/initrd
932  *
933  *  from there, allocations are done this way: RTAS is allocated
934  *  topmost, and the device-tree is allocated from the bottom. We try
935  *  to grow the device-tree allocation as we progress. If we can't,
936  *  then we fail; we don't currently have a facility to restart
937  *  elsewhere, but that shouldn't be necessary.
938  *
939  *  Note that calls to reserve_mem have to be done explicitly, memory
940  *  allocated with either alloc_up or alloc_down isn't automatically
941  *  reserved.
942  */
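
/*
 * A rough picture of the layout described above (illustrative, not to scale):
 *
 *	0 .. [ vmlinux ] .. [ initrd ] alloc_bottom -->   <-- alloc_top (= rmo_top) .. ram_top
 *	                               device-tree grows up    RTAS etc. allocated downward
 */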
943 
944 
945 /*
946  * Allocates memory in the RMO upward from the kernel/initrd
947  *
948  * When align is 0, this is a special case: it means allocate in place
949  * at the current location of alloc_bottom, or fail (that is, basically
950  * extend the previous allocation). Used for the device-tree flattening.
951  */
952 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
953 {
954 	unsigned long base = alloc_bottom;
955 	unsigned long addr = 0;
956 
957 	if (align)
958 		base = _ALIGN_UP(base, align);
959 	prom_debug("alloc_up(%x, %x)\n", size, align);
960 	if (ram_top == 0)
961 		prom_panic("alloc_up() called with mem not initialized\n");
962 
963 	if (align)
964 		base = _ALIGN_UP(alloc_bottom, align);
965 	else
966 		base = alloc_bottom;
967 
968 	for (; (base + size) <= alloc_top;
969 	    base = _ALIGN_UP(base + 0x100000, align)) {
970 		prom_debug("    trying: 0x%x\n\r", base);
971 		addr = (unsigned long)prom_claim(base, size, 0);
972 		if (addr != PROM_ERROR && addr != 0)
973 			break;
974 		addr = 0;
975 		if (align == 0)
976 			break;
977 	}
978 	if (addr == 0)
979 		return 0;
980 	alloc_bottom = addr + size;
981 
982 	prom_debug(" -> %x\n", addr);
983 	prom_debug("  alloc_bottom : %x\n", alloc_bottom);
984 	prom_debug("  alloc_top    : %x\n", alloc_top);
985 	prom_debug("  alloc_top_hi : %x\n", alloc_top_high);
986 	prom_debug("  rmo_top      : %x\n", rmo_top);
987 	prom_debug("  ram_top      : %x\n", ram_top);
988 
989 	return addr;
990 }
991 
992 /*
993  * Allocates memory downward, either from top of RMO, or if highmem
994  * is set, from the top of RAM.  Note that this one doesn't handle
995  * failures.  It does claim memory if highmem is not set.
996  */
997 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
998 				       int highmem)
999 {
1000 	unsigned long base, addr = 0;
1001 
1002 	prom_debug("alloc_down(%x, %x, %s)\n", size, align,
1003 		   highmem ? "(high)" : "(low)");
1004 	if (ram_top == 0)
1005 		prom_panic("alloc_down() called with mem not initialized\n");
1006 
1007 	if (highmem) {
1008 		/* Carve out storage for the TCE table. */
1009 		addr = _ALIGN_DOWN(alloc_top_high - size, align);
1010 		if (addr <= alloc_bottom)
1011 			return 0;
1012 		/* Will we bump into the RMO? If so, check that we didn't
1013 		 * overlap any existing allocations there; if we did, we are
1014 		 * dead, since we must be the first in town!
1015 		 */
1016 		if (addr < rmo_top) {
1017 			/* Good, we are first */
1018 			if (alloc_top == rmo_top)
1019 				alloc_top = rmo_top = addr;
1020 			else
1021 				return 0;
1022 		}
1023 		alloc_top_high = addr;
1024 		goto bail;
1025 	}
1026 
1027 	base = _ALIGN_DOWN(alloc_top - size, align);
1028 	for (; base > alloc_bottom;
1029 	     base = _ALIGN_DOWN(base - 0x100000, align))  {
1030 		prom_debug("    trying: 0x%x\n\r", base);
1031 		addr = (unsigned long)prom_claim(base, size, 0);
1032 		if (addr != PROM_ERROR && addr != 0)
1033 			break;
1034 		addr = 0;
1035 	}
1036 	if (addr == 0)
1037 		return 0;
1038 	alloc_top = addr;
1039 
1040  bail:
1041 	prom_debug(" -> %x\n", addr);
1042 	prom_debug("  alloc_bottom : %x\n", alloc_bottom);
1043 	prom_debug("  alloc_top    : %x\n", alloc_top);
1044 	prom_debug("  alloc_top_hi : %x\n", alloc_top_high);
1045 	prom_debug("  rmo_top      : %x\n", rmo_top);
1046 	prom_debug("  ram_top      : %x\n", ram_top);
1047 
1048 	return addr;
1049 }
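
/*
 * Typical uses later in this file: the flattened device-tree grows with
 * alloc_up() (align == 0 extends the previous allocation in place), RTAS,
 * OPAL and the SML carve their images out with alloc_down() from the top
 * of the RMO, and the TCE tables use alloc_down(..., highmem = 1) to sit
 * at the very top of RAM.
 */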
1050 
1051 /*
1052  * Parse a "reg" cell
1053  */
1054 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1055 {
1056 	cell_t *p = *cellp;
1057 	unsigned long r = 0;
1058 
1059 	/* Ignore cells beyond what fits in an unsigned long (2 on ppc64, 1 on ppc32) */
1060 	while (s > sizeof(unsigned long) / 4) {
1061 		p++;
1062 		s--;
1063 	}
1064 	r = be32_to_cpu(*p++);
1065 #ifdef CONFIG_PPC64
1066 	if (s > 1) {
1067 		r <<= 32;
1068 		r |= be32_to_cpu(*(p++));
1069 	}
1070 #endif
1071 	*cellp = p;
1072 	return r;
1073 }
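
/*
 * Example: with #address-cells == 2 on ppc64, the two cells
 * { 0x00000001, 0x00000000 } parse to 0x100000000; on ppc32 the high
 * cell is skipped and only the low 32 bits are kept.
 */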
1074 
1075 /*
1076  * Very dumb function for adding to the memory reserve list, but
1077  * we don't need anything smarter at this point
1078  *
1079  * XXX Eventually check for collisions.  They should NEVER happen.
1080  * If problems seem to show up, it would be a good start to track
1081  * them down.
1082  */
1083 static void __init reserve_mem(u64 base, u64 size)
1084 {
1085 	u64 top = base + size;
1086 	unsigned long cnt = mem_reserve_cnt;
1087 
1088 	if (size == 0)
1089 		return;
1090 
1091 	/* We need to always keep one empty entry so that we
1092 	 * have our terminator with "size" set to 0 since we are
1093 	 * dumb and just copy this entire array to the boot params
1094 	 */
1095 	base = _ALIGN_DOWN(base, PAGE_SIZE);
1096 	top = _ALIGN_UP(top, PAGE_SIZE);
1097 	size = top - base;
1098 
1099 	if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1100 		prom_panic("Memory reserve map exhausted !\n");
1101 	mem_reserve_map[cnt].base = cpu_to_be64(base);
1102 	mem_reserve_map[cnt].size = cpu_to_be64(size);
1103 	mem_reserve_cnt = cnt + 1;
1104 }
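
/*
 * Example (assuming a 4K PAGE_SIZE): reserve_mem(0x3e8001, 0x1000)
 * records base 0x3e8000 and size 0x2000, i.e. the range rounded out
 * to whole pages.
 */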
1105 
1106 /*
1107  * Initialize the memory allocation mechanism: parse the "memory" nodes to
1108  * obtain the top of memory and the RMO, and use them to set up our local allocator
1109  */
1110 static void __init prom_init_mem(void)
1111 {
1112 	phandle node;
1113 	char *path, type[64];
1114 	unsigned int plen;
1115 	cell_t *p, *endp;
1116 	__be32 val;
1117 	u32 rac, rsc;
1118 
1119 	/*
1120 	 * We iterate the memory nodes to find
1121 	 * 1) top of RMO (first node)
1122 	 * 2) top of memory
1123 	 */
1124 	val = cpu_to_be32(2);
1125 	prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1126 	rac = be32_to_cpu(val);
1127 	val = cpu_to_be32(1);
1128 	prom_getprop(prom.root, "#size-cells", &val, sizeof(val));
1129 	rsc = be32_to_cpu(val);
1130 	prom_debug("root_addr_cells: %x\n", rac);
1131 	prom_debug("root_size_cells: %x\n", rsc);
1132 
1133 	prom_debug("scanning memory:\n");
1134 	path = prom_scratch;
1135 
1136 	for (node = 0; prom_next_node(&node); ) {
1137 		type[0] = 0;
1138 		prom_getprop(node, "device_type", type, sizeof(type));
1139 
1140 		if (type[0] == 0) {
1141 			/*
1142 			 * CHRP Longtrail machines have no device_type
1143 			 * on the memory node, so check the name instead...
1144 			 */
1145 			prom_getprop(node, "name", type, sizeof(type));
1146 		}
1147 		if (strcmp(type, "memory"))
1148 			continue;
1149 
1150 		plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1151 		if (plen > sizeof(regbuf)) {
1152 			prom_printf("memory node too large for buffer !\n");
1153 			plen = sizeof(regbuf);
1154 		}
1155 		p = regbuf;
1156 		endp = p + (plen / sizeof(cell_t));
1157 
1158 #ifdef DEBUG_PROM
1159 		memset(path, 0, PROM_SCRATCH_SIZE);
1160 		call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1161 		prom_debug("  node %s :\n", path);
1162 #endif /* DEBUG_PROM */
1163 
1164 		while ((endp - p) >= (rac + rsc)) {
1165 			unsigned long base, size;
1166 
1167 			base = prom_next_cell(rac, &p);
1168 			size = prom_next_cell(rsc, &p);
1169 
1170 			if (size == 0)
1171 				continue;
1172 			prom_debug("    %x %x\n", base, size);
1173 			if (base == 0 && (of_platform & PLATFORM_LPAR))
1174 				rmo_top = size;
1175 			if ((base + size) > ram_top)
1176 				ram_top = base + size;
1177 		}
1178 	}
1179 
1180 	alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1181 
1182 	/*
1183 	 * If prom_memory_limit is set we reduce the upper limits *except* for
1184 	 * alloc_top_high. This must be the real top of RAM so we can put
1185 	 * TCE's up there.
1186 	 */
1187 
1188 	alloc_top_high = ram_top;
1189 
1190 	if (prom_memory_limit) {
1191 		if (prom_memory_limit <= alloc_bottom) {
1192 			prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
1193 				prom_memory_limit);
1194 			prom_memory_limit = 0;
1195 		} else if (prom_memory_limit >= ram_top) {
1196 			prom_printf("Ignoring mem=%x >= ram_top.\n",
1197 				prom_memory_limit);
1198 			prom_memory_limit = 0;
1199 		} else {
1200 			ram_top = prom_memory_limit;
1201 			rmo_top = min(rmo_top, prom_memory_limit);
1202 		}
1203 	}
1204 
1205 	/*
1206 	 * Setup our top alloc point, that is top of RMO or top of
1207 	 * segment 0 when running non-LPAR.
1208 	 * Some RS64 machines have buggy firmware where claims up at
1209 	 * 1GB fail.  Cap at 768MB as a workaround.
1210 	 * Since 768MB is plenty of room, and we need to cap to something
1211 	 * reasonable on 32-bit, cap at 768MB on all machines.
1212 	 */
1213 	if (!rmo_top)
1214 		rmo_top = ram_top;
1215 	rmo_top = min(0x30000000ul, rmo_top);
1216 	alloc_top = rmo_top;
1217 	alloc_top_high = ram_top;
1218 
1219 	/*
1220 	 * Check if we have an initrd after the kernel but still inside
1221 	 * the RMO.  If we do, move our bottom point to after it.
1222 	 */
1223 	if (prom_initrd_start &&
1224 	    prom_initrd_start < rmo_top &&
1225 	    prom_initrd_end > alloc_bottom)
1226 		alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1227 
1228 	prom_printf("memory layout at init:\n");
1229 	prom_printf("  memory_limit : %x (16 MB aligned)\n", prom_memory_limit);
1230 	prom_printf("  alloc_bottom : %x\n", alloc_bottom);
1231 	prom_printf("  alloc_top    : %x\n", alloc_top);
1232 	prom_printf("  alloc_top_hi : %x\n", alloc_top_high);
1233 	prom_printf("  rmo_top      : %x\n", rmo_top);
1234 	prom_printf("  ram_top      : %x\n", ram_top);
1235 }
1236 
1237 static void __init prom_close_stdin(void)
1238 {
1239 	__be32 val;
1240 	ihandle stdin;
1241 
1242 	if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1243 		stdin = be32_to_cpu(val);
1244 		call_prom("close", 1, 0, stdin);
1245 	}
1246 }
1247 
1248 #ifdef CONFIG_PPC_POWERNV
1249 
1250 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1251 static u64 __initdata prom_opal_base;
1252 static u64 __initdata prom_opal_entry;
1253 #endif
1254 
1255 #ifdef __BIG_ENDIAN__
1256 /* XXX Don't change this structure without updating opal-takeover.S */
1257 static struct opal_secondary_data {
1258 	s64				ack;	/*  0 */
1259 	u64				go;	/*  8 */
1260 	struct opal_takeover_args	args;	/* 16 */
1261 } opal_secondary_data;
1262 
1263 static u64 __initdata prom_opal_align;
1264 static u64 __initdata prom_opal_size;
1265 static int __initdata prom_rtas_start_cpu;
1266 static u64 __initdata prom_rtas_data;
1267 static u64 __initdata prom_rtas_entry;
1268 
1269 extern char opal_secondary_entry;
1270 
1271 static void __init prom_query_opal(void)
1272 {
1273 	long rc;
1274 
1275 	/* We must not query for OPAL presence on a machine that
1276 	 * supports TNK takeover (970 blades), as this uses the same
1277 	 * h-call with different arguments and will crash
1278 	 */
1279 	if (PHANDLE_VALID(call_prom("finddevice", 1, 1,
1280 				    ADDR("/tnk-memory-map")))) {
1281 		prom_printf("TNK takeover detected, skipping OPAL check\n");
1282 		return;
1283 	}
1284 
1285 	prom_printf("Querying for OPAL presence... ");
1286 
1287 	rc = opal_query_takeover(&prom_opal_size,
1288 				 &prom_opal_align);
1289 	prom_debug("(rc = %ld) ", rc);
1290 	if (rc != 0) {
1291 		prom_printf("not there.\n");
1292 		return;
1293 	}
1294 	of_platform = PLATFORM_OPAL;
1295 	prom_printf(" there !\n");
1296 	prom_debug("  opal_size  = 0x%lx\n", prom_opal_size);
1297 	prom_debug("  opal_align = 0x%lx\n", prom_opal_align);
1298 	if (prom_opal_align < 0x10000)
1299 		prom_opal_align = 0x10000;
1300 }
1301 
1302 static int __init prom_rtas_call(int token, int nargs, int nret,
1303 				 int *outputs, ...)
1304 {
1305 	struct rtas_args rtas_args;
1306 	va_list list;
1307 	int i;
1308 
1309 	rtas_args.token = token;
1310 	rtas_args.nargs = nargs;
1311 	rtas_args.nret  = nret;
1312 	rtas_args.rets  = (rtas_arg_t *)&(rtas_args.args[nargs]);
1313 	va_start(list, outputs);
1314 	for (i = 0; i < nargs; ++i)
1315 		rtas_args.args[i] = va_arg(list, rtas_arg_t);
1316 	va_end(list);
1317 
1318 	for (i = 0; i < nret; ++i)
1319 		rtas_args.rets[i] = 0;
1320 
1321 	opal_enter_rtas(&rtas_args, prom_rtas_data,
1322 			prom_rtas_entry);
1323 
1324 	if (nret > 1 && outputs != NULL)
1325 		for (i = 0; i < nret-1; ++i)
1326 			outputs[i] = rtas_args.rets[i+1];
1327 	return (nret > 0)? rtas_args.rets[0]: 0;
1328 }
1329 
1330 static void __init prom_opal_hold_cpus(void)
1331 {
1332 	int i, cnt, cpu, rc;
1333 	long j;
1334 	phandle node;
1335 	char type[64];
1336 	u32 servers[8];
1337 	void *entry = (unsigned long *)&opal_secondary_entry;
1338 	struct opal_secondary_data *data = &opal_secondary_data;
1339 
1340 	prom_debug("prom_opal_hold_cpus: start...\n");
1341 	prom_debug("    - entry       = 0x%x\n", entry);
1342 	prom_debug("    - data        = 0x%x\n", data);
1343 
1344 	data->ack = -1;
1345 	data->go = 0;
1346 
1347 	/* look for cpus */
1348 	for (node = 0; prom_next_node(&node); ) {
1349 		type[0] = 0;
1350 		prom_getprop(node, "device_type", type, sizeof(type));
1351 		if (strcmp(type, "cpu") != 0)
1352 			continue;
1353 
1354 		/* Skip non-configured cpus. */
1355 		if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1356 			if (strcmp(type, "okay") != 0)
1357 				continue;
1358 
1359 		cnt = prom_getprop(node, "ibm,ppc-interrupt-server#s", servers,
1360 			     sizeof(servers));
1361 		if (cnt == PROM_ERROR)
1362 			break;
1363 		cnt >>= 2;
1364 		for (i = 0; i < cnt; i++) {
1365 			cpu = servers[i];
1366 			prom_debug("CPU %d ... ", cpu);
1367 			if (cpu == prom.cpu) {
1368 				prom_debug("booted !\n");
1369 				continue;
1370 			}
1371 			prom_debug("starting ... ");
1372 
1373 			/* Init the acknowledge var which will be reset by
1374 			 * the secondary cpu when it awakens from its OF
1375 			 * spinloop.
1376 			 */
1377 			data->ack = -1;
1378 			rc = prom_rtas_call(prom_rtas_start_cpu, 3, 1,
1379 					    NULL, cpu, entry, data);
1380 			prom_debug("rtas rc=%d ...", rc);
1381 
1382 			for (j = 0; j < 100000000 && data->ack == -1; j++) {
1383 				HMT_low();
1384 				mb();
1385 			}
1386 			HMT_medium();
1387 			if (data->ack != -1)
1388 				prom_debug("done, PIR=0x%x\n", data->ack);
1389 			else
1390 				prom_debug("timeout !\n");
1391 		}
1392 	}
1393 	prom_debug("prom_opal_hold_cpus: end...\n");
1394 }
1395 
1396 static void __init prom_opal_takeover(void)
1397 {
1398 	struct opal_secondary_data *data = &opal_secondary_data;
1399 	struct opal_takeover_args *args = &data->args;
1400 	u64 align = prom_opal_align;
1401 	u64 top_addr, opal_addr;
1402 
1403 	args->k_image	= (u64)_stext;
1404 	args->k_size	= _end - _stext;
1405 	args->k_entry	= 0;
1406 	args->k_entry2	= 0x60;
1407 
1408 	top_addr = _ALIGN_UP(args->k_size, align);
1409 
1410 	if (prom_initrd_start != 0) {
1411 		args->rd_image = prom_initrd_start;
1412 		args->rd_size = prom_initrd_end - args->rd_image;
1413 		args->rd_loc = top_addr;
1414 		top_addr = _ALIGN_UP(args->rd_loc + args->rd_size, align);
1415 	}
1416 
1417 	/* Pick up an address for the HAL. We want to go really high
1418 	 * up to avoid problems with future kexecs. On the other hand
1419 	 * we don't want to be all over the TCEs on P5IOC2 machines,
1420 	 * which are going to be up there too. We assume the machine
1421 	 * has plenty of memory, and for now we ask for the HAL to
1422 	 * be just below the 1G point, or above the initrd
1423 	 */
1424 	opal_addr = _ALIGN_DOWN(0x40000000 - prom_opal_size, align);
1425 	if (opal_addr < top_addr)
1426 		opal_addr = top_addr;
1427 	args->hal_addr = opal_addr;
1428 
1429 	/* Copy the command line to the kernel image */
1430 	strlcpy(boot_command_line, prom_cmd_line,
1431 		COMMAND_LINE_SIZE);
1432 
1433 	prom_debug("  k_image    = 0x%lx\n", args->k_image);
1434 	prom_debug("  k_size     = 0x%lx\n", args->k_size);
1435 	prom_debug("  k_entry    = 0x%lx\n", args->k_entry);
1436 	prom_debug("  k_entry2   = 0x%lx\n", args->k_entry2);
1437 	prom_debug("  hal_addr   = 0x%lx\n", args->hal_addr);
1438 	prom_debug("  rd_image   = 0x%lx\n", args->rd_image);
1439 	prom_debug("  rd_size    = 0x%lx\n", args->rd_size);
1440 	prom_debug("  rd_loc     = 0x%lx\n", args->rd_loc);
1441 	prom_printf("Performing OPAL takeover, this can take a few minutes...\n");
1442 	prom_close_stdin();
1443 	mb();
1444 	data->go = 1;
1445 	for (;;)
1446 		opal_do_takeover(args);
1447 }
1448 #endif /* __BIG_ENDIAN__ */
1449 
1450 /*
1451  * Allocate room for and instantiate OPAL
1452  */
1453 static void __init prom_instantiate_opal(void)
1454 {
1455 	phandle opal_node;
1456 	ihandle opal_inst;
1457 	u64 base, entry;
1458 	u64 size = 0, align = 0x10000;
1459 	__be64 val64;
1460 	u32 rets[2];
1461 
1462 	prom_debug("prom_instantiate_opal: start...\n");
1463 
1464 	opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal"));
1465 	prom_debug("opal_node: %x\n", opal_node);
1466 	if (!PHANDLE_VALID(opal_node))
1467 		return;
1468 
1469 	val64 = 0;
1470 	prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64));
1471 	size = be64_to_cpu(val64);
1472 	if (size == 0)
1473 		return;
1474 	val64 = 0;
1475 	prom_getprop(opal_node, "opal-runtime-alignment", &val64,sizeof(val64));
1476 	align = be64_to_cpu(val64);
1477 
1478 	base = alloc_down(size, align, 0);
1479 	if (base == 0) {
1480 		prom_printf("OPAL allocation failed !\n");
1481 		return;
1482 	}
1483 
1484 	opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal"));
1485 	if (!IHANDLE_VALID(opal_inst)) {
1486 		prom_printf("opening opal package failed (%x)\n", opal_inst);
1487 		return;
1488 	}
1489 
1490 	prom_printf("instantiating opal at 0x%x...", base);
1491 
1492 	if (call_prom_ret("call-method", 4, 3, rets,
1493 			  ADDR("load-opal-runtime"),
1494 			  opal_inst,
1495 			  base >> 32, base & 0xffffffff) != 0
1496 	    || (rets[0] == 0 && rets[1] == 0)) {
1497 		prom_printf(" failed\n");
1498 		return;
1499 	}
1500 	entry = (((u64)rets[0]) << 32) | rets[1];
1501 
1502 	prom_printf(" done\n");
1503 
1504 	reserve_mem(base, size);
1505 
1506 	prom_debug("opal base     = 0x%x\n", base);
1507 	prom_debug("opal align    = 0x%x\n", align);
1508 	prom_debug("opal entry    = 0x%x\n", entry);
1509 	prom_debug("opal size     = 0x%x\n", (long)size);
1510 
1511 	prom_setprop(opal_node, "/ibm,opal", "opal-base-address",
1512 		     &base, sizeof(base));
1513 	prom_setprop(opal_node, "/ibm,opal", "opal-entry-address",
1514 		     &entry, sizeof(entry));
1515 
1516 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1517 	prom_opal_base = base;
1518 	prom_opal_entry = entry;
1519 #endif
1520 	prom_debug("prom_instantiate_opal: end...\n");
1521 }
1522 
1523 #endif /* CONFIG_PPC_POWERNV */
1524 
1525 /*
1526  * Allocate room for and instantiate RTAS
1527  */
1528 static void __init prom_instantiate_rtas(void)
1529 {
1530 	phandle rtas_node;
1531 	ihandle rtas_inst;
1532 	u32 base, entry = 0;
1533 	__be32 val;
1534 	u32 size = 0;
1535 
1536 	prom_debug("prom_instantiate_rtas: start...\n");
1537 
1538 	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1539 	prom_debug("rtas_node: %x\n", rtas_node);
1540 	if (!PHANDLE_VALID(rtas_node))
1541 		return;
1542 
1543 	val = 0;
1544 	prom_getprop(rtas_node, "rtas-size", &val, sizeof(val));
1545 	size = be32_to_cpu(val);
1546 	if (size == 0)
1547 		return;
1548 
1549 	base = alloc_down(size, PAGE_SIZE, 0);
1550 	if (base == 0)
1551 		prom_panic("Could not allocate memory for RTAS\n");
1552 
1553 	rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1554 	if (!IHANDLE_VALID(rtas_inst)) {
1555 		prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1556 		return;
1557 	}
1558 
1559 	prom_printf("instantiating rtas at 0x%x...", base);
1560 
1561 	if (call_prom_ret("call-method", 3, 2, &entry,
1562 			  ADDR("instantiate-rtas"),
1563 			  rtas_inst, base) != 0
1564 	    || entry == 0) {
1565 		prom_printf(" failed\n");
1566 		return;
1567 	}
1568 	prom_printf(" done\n");
1569 
1570 	reserve_mem(base, size);
1571 
1572 	val = cpu_to_be32(base);
1573 	prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1574 		     &val, sizeof(val));
1575 	val = cpu_to_be32(entry);
1576 	prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1577 		     &val, sizeof(val));
1578 
1579 	/* Check if it supports "query-cpu-stopped-state" */
1580 	if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1581 			 &val, sizeof(val)) != PROM_ERROR)
1582 		rtas_has_query_cpu_stopped = true;
1583 
1584 #if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
1585 	/* PowerVM takeover hack */
1586 	prom_rtas_data = base;
1587 	prom_rtas_entry = entry;
1588 	prom_getprop(rtas_node, "start-cpu", &prom_rtas_start_cpu, 4);
1589 #endif
1590 	prom_debug("rtas base     = 0x%x\n", base);
1591 	prom_debug("rtas entry    = 0x%x\n", entry);
1592 	prom_debug("rtas size     = 0x%x\n", (long)size);
1593 
1594 	prom_debug("prom_instantiate_rtas: end...\n");
1595 }
1596 
1597 #ifdef CONFIG_PPC64
1598 /*
1599  * Allocate room for and instantiate Stored Measurement Log (SML)
1600  */
1601 static void __init prom_instantiate_sml(void)
1602 {
1603 	phandle ibmvtpm_node;
1604 	ihandle ibmvtpm_inst;
1605 	u32 entry = 0, size = 0;
1606 	u64 base;
1607 
1608 	prom_debug("prom_instantiate_sml: start...\n");
1609 
1610 	ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/ibm,vtpm"));
1611 	prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1612 	if (!PHANDLE_VALID(ibmvtpm_node))
1613 		return;
1614 
1615 	ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/ibm,vtpm"));
1616 	if (!IHANDLE_VALID(ibmvtpm_inst)) {
1617 		prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1618 		return;
1619 	}
1620 
1621 	if (call_prom_ret("call-method", 2, 2, &size,
1622 			  ADDR("sml-get-handover-size"),
1623 			  ibmvtpm_inst) != 0 || size == 0) {
1624 		prom_printf("SML get handover size failed\n");
1625 		return;
1626 	}
1627 
1628 	base = alloc_down(size, PAGE_SIZE, 0);
1629 	if (base == 0)
1630 		prom_panic("Could not allocate memory for sml\n");
1631 
1632 	prom_printf("instantiating sml at 0x%x...", base);
1633 
1634 	if (call_prom_ret("call-method", 4, 2, &entry,
1635 			  ADDR("sml-handover"),
1636 			  ibmvtpm_inst, size, base) != 0 || entry == 0) {
1637 		prom_printf("SML handover failed\n");
1638 		return;
1639 	}
1640 	prom_printf(" done\n");
1641 
1642 	reserve_mem(base, size);
1643 
1644 	prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-base",
1645 		     &base, sizeof(base));
1646 	prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-size",
1647 		     &size, sizeof(size));
1648 
1649 	prom_debug("sml base     = 0x%x\n", base);
1650 	prom_debug("sml size     = 0x%x\n", (long)size);
1651 
1652 	prom_debug("prom_instantiate_sml: end...\n");
1653 }
1654 
1655 /*
1656  * Allocate room for and initialize TCE tables
1657  */
1658 #ifdef __BIG_ENDIAN__
1659 static void __init prom_initialize_tce_table(void)
1660 {
1661 	phandle node;
1662 	ihandle phb_node;
1663 	char compatible[64], type[64], model[64];
1664 	char *path = prom_scratch;
1665 	u64 base, align;
1666 	u32 minalign, minsize;
1667 	u64 tce_entry, *tce_entryp;
1668 	u64 local_alloc_top, local_alloc_bottom;
1669 	u64 i;
1670 
1671 	if (prom_iommu_off)
1672 		return;
1673 
1674 	prom_debug("starting prom_initialize_tce_table\n");
1675 
1676 	/* Cache current top of allocs so we reserve a single block */
1677 	local_alloc_top = alloc_top_high;
1678 	local_alloc_bottom = local_alloc_top;
1679 
1680 	/* Search all nodes looking for PHBs. */
1681 	for (node = 0; prom_next_node(&node); ) {
1682 		compatible[0] = 0;
1683 		type[0] = 0;
1684 		model[0] = 0;
1685 		prom_getprop(node, "compatible",
1686 			     compatible, sizeof(compatible));
1687 		prom_getprop(node, "device_type", type, sizeof(type));
1688 		prom_getprop(node, "model", model, sizeof(model));
1689 
1690 		if ((type[0] == 0) || (strstr(type, "pci") == NULL))
1691 			continue;
1692 
1693 		/* Keep the old logic intact to avoid regression. */
1694 		if (compatible[0] != 0) {
1695 			if ((strstr(compatible, "python") == NULL) &&
1696 			    (strstr(compatible, "Speedwagon") == NULL) &&
1697 			    (strstr(compatible, "Winnipeg") == NULL))
1698 				continue;
1699 		} else if (model[0] != 0) {
1700 			if ((strstr(model, "ython") == NULL) &&
1701 			    (strstr(model, "peedwagon") == NULL) &&
1702 			    (strstr(model, "innipeg") == NULL))
1703 				continue;
1704 		}
1705 
1706 		if (prom_getprop(node, "tce-table-minalign", &minalign,
1707 				 sizeof(minalign)) == PROM_ERROR)
1708 			minalign = 0;
1709 		if (prom_getprop(node, "tce-table-minsize", &minsize,
1710 				 sizeof(minsize)) == PROM_ERROR)
1711 			minsize = 4UL << 20;
1712 
1713 		/*
1714 		 * Even though we read what OF wants, we just set the table
1715 		 * size to 4 MB.  This is enough to map 2GB of PCI DMA space.
1716 		 * By doing this, we avoid the pitfalls of trying to DMA to
1717 		 * MMIO space and the DMA alias hole.
1718 		 *
1719 		 * On POWER4, firmware sets the TCE region by assuming
1720 		 * each TCE table is 8MB. Using this memory for anything
1721 		 * else will impact performance, so we always allocate 8MB.
1722 		 * Anton
1723 		 */
1724 		if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p))
1725 			minsize = 8UL << 20;
1726 		else
1727 			minsize = 4UL << 20;
1728 
1729 		/* Align to the greater of the align or size */
1730 		align = max(minalign, minsize);
1731 		base = alloc_down(minsize, align, 1);
1732 		if (base == 0)
1733 			prom_panic("ERROR, cannot find space for TCE table.\n");
1734 		if (base < local_alloc_bottom)
1735 			local_alloc_bottom = base;
1736 
1737 		/* It seems OF doesn't null-terminate the path :-( */
1738 		memset(path, 0, PROM_SCRATCH_SIZE);
1739 		/* Call OF to setup the TCE hardware */
1740 		if (call_prom("package-to-path", 3, 1, node,
1741 			      path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1742 			prom_printf("package-to-path failed\n");
1743 		}
1744 
1745 		/* Save away the TCE table attributes for later use. */
1746 		prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
1747 		prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
1748 
1749 		prom_debug("TCE table: %s\n", path);
1750 		prom_debug("\tnode = 0x%x\n", node);
1751 		prom_debug("\tbase = 0x%x\n", base);
1752 		prom_debug("\tsize = 0x%x\n", minsize);
1753 
1754 		/* Initialize the table to have a one-to-one mapping
1755 		 * over the allocated size.
1756 		 */
1757 		tce_entryp = (u64 *)base;
1758 		for (i = 0; i < (minsize >> 3); tce_entryp++, i++) {
1759 			tce_entry = (i << PAGE_SHIFT);
1760 			tce_entry |= 0x3;
1761 			*tce_entryp = tce_entry;
1762 		}
1763 
1764 		prom_printf("opening PHB %s", path);
1765 		phb_node = call_prom("open", 1, 1, path);
1766 		if (phb_node == 0)
1767 			prom_printf("... failed\n");
1768 		else
1769 			prom_printf("... done\n");
1770 
1771 		call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1772 			  phb_node, -1, minsize,
1773 			  (u32) base, (u32) (base >> 32));
1774 		call_prom("close", 1, 0, phb_node);
1775 	}
1776 
1777 	reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1778 
1779 	/* These are only really needed if there is a memory limit in
1780 	 * effect, but we don't know so export them always. */
1781 	prom_tce_alloc_start = local_alloc_bottom;
1782 	prom_tce_alloc_end = local_alloc_top;
1783 
1784 	/* Flag the first invalid entry */
1785 	prom_debug("ending prom_initialize_tce_table\n");
1786 }
1787 #endif /* __BIG_ENDIAN__ */
1788 #endif /* CONFIG_PPC64 */
1789 
1790 /*
1791  * With CHRP SMP we need to use the OF to start the other processors.
1792  * We can't wait until smp_boot_cpus (the OF is trashed by then)
1793  * so we have to put the processors into a holding pattern controlled
1794  * by the kernel (not OF) before we destroy the OF.
1795  *
1796  * This uses a chunk of low memory, puts some holding pattern
1797  * code there and sends the other processors off to there until
1798  * smp_boot_cpus tells them to do something.  The holding pattern
1799  * checks that address until its cpu # is there; when it is, that
1800  * cpu jumps to __secondary_start().  smp_boot_cpus() takes care
1801  * of setting those values.
1802  *
1803  * We also use physical address 0x4 here to tell when a cpu
1804  * is in its holding pattern code.
1805  *
1806  * -- Cort
1807  */
1808 /*
1809  * We want to reference the copy of __secondary_hold_* in the
1810  * 0 - 0x100 address range
1811  */
1812 #define LOW_ADDR(x)	(((unsigned long) &(x)) & 0xff)
1813 
1814 static void __init prom_hold_cpus(void)
1815 {
1816 	unsigned long i;
1817 	phandle node;
1818 	char type[64];
1819 	unsigned long *spinloop
1820 		= (void *) LOW_ADDR(__secondary_hold_spinloop);
1821 	unsigned long *acknowledge
1822 		= (void *) LOW_ADDR(__secondary_hold_acknowledge);
1823 	unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1824 
1825 	/*
1826 	 * On pseries, if RTAS supports "query-cpu-stopped-state",
1827 	 * we skip this stage, the CPUs will be started by the
1828 	 * kernel using RTAS.
1829 	 */
1830 	if ((of_platform == PLATFORM_PSERIES ||
1831 	     of_platform == PLATFORM_PSERIES_LPAR) &&
1832 	    rtas_has_query_cpu_stopped) {
1833 		prom_printf("prom_hold_cpus: skipped\n");
1834 		return;
1835 	}
1836 
1837 	prom_debug("prom_hold_cpus: start...\n");
1838 	prom_debug("    1) spinloop       = 0x%x\n", (unsigned long)spinloop);
1839 	prom_debug("    1) *spinloop      = 0x%x\n", *spinloop);
1840 	prom_debug("    1) acknowledge    = 0x%x\n",
1841 		   (unsigned long)acknowledge);
1842 	prom_debug("    1) *acknowledge   = 0x%x\n", *acknowledge);
1843 	prom_debug("    1) secondary_hold = 0x%x\n", secondary_hold);
1844 
1845 	/* Set the common spinloop variable, so all of the secondary cpus
1846 	 * will block when they are awakened from their OF spinloop.
1847 	 * This must occur for both SMP and non SMP kernels, since OF will
1848 	 * be trashed when we move the kernel.
1849 	 */
1850 	*spinloop = 0;
1851 
1852 	/* look for cpus */
1853 	for (node = 0; prom_next_node(&node); ) {
1854 		unsigned int cpu_no;
1855 		__be32 reg;
1856 
1857 		type[0] = 0;
1858 		prom_getprop(node, "device_type", type, sizeof(type));
1859 		if (strcmp(type, "cpu") != 0)
1860 			continue;
1861 
1862 		/* Skip non-configured cpus. */
1863 		if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1864 			if (strcmp(type, "okay") != 0)
1865 				continue;
1866 
1867 		reg = cpu_to_be32(-1); /* make sparse happy */
1868 		prom_getprop(node, "reg", &reg, sizeof(reg));
1869 		cpu_no = be32_to_cpu(reg);
1870 
1871 		prom_debug("cpu hw idx   = %lu\n", cpu_no);
1872 
1873 		/* Init the acknowledge var which will be reset by
1874 		 * the secondary cpu when it awakens from its OF
1875 		 * spinloop.
1876 		 */
1877 		*acknowledge = (unsigned long)-1;
1878 
1879 		if (cpu_no != prom.cpu) {
1880 			/* Primary Thread of non-boot cpu or any thread */
1881 			prom_printf("starting cpu hw idx %lu... ", cpu_no);
1882 			call_prom("start-cpu", 3, 0, node,
1883 				  secondary_hold, cpu_no);
1884 
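			/* Wait (bounded) for the secondary to write its
			 * hw cpu id into the acknowledge word. */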
1885 			for (i = 0; (i < 100000000) &&
1886 			     (*acknowledge == ((unsigned long)-1)); i++ )
1887 				mb();
1888 
1889 			if (*acknowledge == cpu_no)
1890 				prom_printf("done\n");
1891 			else
1892 				prom_printf("failed: %x\n", *acknowledge);
1893 		}
1894 #ifdef CONFIG_SMP
1895 		else
1896 			prom_printf("boot cpu hw idx %lu\n", cpu_no);
1897 #endif /* CONFIG_SMP */
1898 	}
1899 
1900 	prom_debug("prom_hold_cpus: end...\n");
1901 }
1902 
1903 
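/*
 * Stash the client interface entry point and look up the /chosen and /
 * nodes that the rest of prom_init relies on.
 */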
1904 static void __init prom_init_client_services(unsigned long pp)
1905 {
1906 	/* Get a handle to the prom entry point before anything else */
1907 	prom_entry = pp;
1908 
1909 	/* get a handle for the stdout device */
1910 	prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
1911 	if (!PHANDLE_VALID(prom.chosen))
1912 		prom_panic("cannot find chosen"); /* msg won't be printed :( */
1913 
1914 	/* get device tree root */
1915 	prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
1916 	if (!PHANDLE_VALID(prom.root))
1917 		prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1918 
1919 	prom.mmumap = 0;
1920 }
1921 
1922 #ifdef CONFIG_PPC32
1923 /*
1924  * For really old powermacs, we need to map things we claim.
1925  * For that, we need the ihandle of the mmu.
1926  * Also, on the longtrail, we need to work around other bugs.
1927  */
1928 static void __init prom_find_mmu(void)
1929 {
1930 	phandle oprom;
1931 	char version[64];
1932 
1933 	oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
1934 	if (!PHANDLE_VALID(oprom))
1935 		return;
1936 	if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
1937 		return;
1938 	version[sizeof(version) - 1] = 0;
1939 	/* XXX might need to add other versions here */
1940 	if (strcmp(version, "Open Firmware, 1.0.5") == 0)
1941 		of_workarounds = OF_WA_CLAIM;
1942 	else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
1943 		of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
1944 		call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
1945 	} else
1946 		return;
1947 	prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
1948 	prom_getprop(prom.chosen, "mmu", &prom.mmumap,
1949 		     sizeof(prom.mmumap));
1950 	prom.mmumap = be32_to_cpu(prom.mmumap);
1951 	if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
1952 		of_workarounds &= ~OF_WA_CLAIM;		/* hmmm */
1953 }
1954 #else
1955 #define prom_find_mmu()
1956 #endif
1957 
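/*
 * Resolve the firmware stdout device: record its instance handle, full
 * path and package in /chosen (linux,stdout-path, linux,stdout-package),
 * and tag it with linux,boot-display if it is a display.
 */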
1958 static void __init prom_init_stdout(void)
1959 {
1960 	char *path = of_stdout_device;
1961 	char type[16];
1962 	phandle stdout_node;
1963 	__be32 val;
1964 
1965 	if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
1966 		prom_panic("cannot find stdout");
1967 
1968 	prom.stdout = be32_to_cpu(val);
1969 
1970 	/* Get the full OF pathname of the stdout device */
1971 	memset(path, 0, 256);
1972 	call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
1973 	stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
1974 	val = cpu_to_be32(stdout_node);
1975 	prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
1976 		     &val, sizeof(val));
1977 	prom_printf("OF stdout device is: %s\n", of_stdout_device);
1978 	prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
1979 		     path, strlen(path) + 1);
1980 
1981 	/* If it's a display, note it */
1982 	memset(type, 0, sizeof(type));
1983 	prom_getprop(stdout_node, "device_type", type, sizeof(type));
1984 	if (strcmp(type, "display") == 0)
1985 		prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
1986 }
1987 
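/*
 * Work out what we are booting on from the device tree: PowerMacs and
 * Cell blades are recognised from the root "compatible" property, OPAL
 * from /ibm,opal, and a "chrp" root with /rtas is taken to be pSeries
 * (LPAR if ibm,hypertas-functions is present).
 */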
1988 static int __init prom_find_machine_type(void)
1989 {
1990 	char compat[256];
1991 	int len, i = 0;
1992 #ifdef CONFIG_PPC64
1993 	phandle rtas;
1994 	int x;
1995 #endif
1996 
1997 	/* Look for a PowerMac or a Cell */
1998 	len = prom_getprop(prom.root, "compatible",
1999 			   compat, sizeof(compat)-1);
2000 	if (len > 0) {
2001 		compat[len] = 0;
2002 		while (i < len) {
2003 			char *p = &compat[i];
2004 			int sl = strlen(p);
2005 			if (sl == 0)
2006 				break;
2007 			if (strstr(p, "Power Macintosh") ||
2008 			    strstr(p, "MacRISC"))
2009 				return PLATFORM_POWERMAC;
2010 #ifdef CONFIG_PPC64
2011 			/* We must make sure we don't detect the IBM Cell
2012 			 * blades as pSeries due to some firmware issues,
2013 			 * so we do it here.
2014 			 */
2015 			if (strstr(p, "IBM,CBEA") ||
2016 			    strstr(p, "IBM,CPBW-1.0"))
2017 				return PLATFORM_GENERIC;
2018 #endif /* CONFIG_PPC64 */
2019 			i += sl + 1;
2020 		}
2021 	}
2022 #ifdef CONFIG_PPC64
2023 	/* Try to detect OPAL */
2024 	if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal"))))
2025 		return PLATFORM_OPAL;
2026 
2027 	/* Try to figure out if it's an IBM pSeries or any other
2028 	 * PAPR compliant platform. We assume it is if :
2029 	 *  - /device_type is "chrp" (please, do NOT use that for future
2030 	 *    non-IBM designs !)
2031 	 *  - it has /rtas
2032 	 */
2033 	len = prom_getprop(prom.root, "device_type",
2034 			   compat, sizeof(compat)-1);
2035 	if (len <= 0)
2036 		return PLATFORM_GENERIC;
2037 	if (strcmp(compat, "chrp"))
2038 		return PLATFORM_GENERIC;
2039 
2040 	/* Default to pSeries. We need to know if we are running LPAR */
2041 	rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2042 	if (!PHANDLE_VALID(rtas))
2043 		return PLATFORM_GENERIC;
2044 	x = prom_getproplen(rtas, "ibm,hypertas-functions");
2045 	if (x != PROM_ERROR) {
2046 		prom_debug("Hypertas detected, assuming LPAR !\n");
2047 		return PLATFORM_PSERIES_LPAR;
2048 	}
2049 	return PLATFORM_PSERIES;
2050 #else
2051 	return PLATFORM_GENERIC;
2052 #endif
2053 }
2054 
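/*
 * Set palette entry i of display instance ih using the OF "color!"
 * method; callers treat a non-zero return as failure.
 */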
2055 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2056 {
2057 	return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2058 }
2059 
2060 /*
2061  * If we have a display that we don't know how to drive,
2062  * we will want to try to execute OF's open method for it
2063  * later.  However, OF will probably fall over if we do that
2064  * after we've taken over the MMU.
2065  * So we check whether we will need to open the display,
2066  * and if so, open it now.
2067  */
2068 static void __init prom_check_displays(void)
2069 {
2070 	char type[16], *path;
2071 	phandle node;
2072 	ihandle ih;
2073 	int i;
2074 
2075 	static unsigned char default_colors[] = {
2076 		0x00, 0x00, 0x00,
2077 		0x00, 0x00, 0xaa,
2078 		0x00, 0xaa, 0x00,
2079 		0x00, 0xaa, 0xaa,
2080 		0xaa, 0x00, 0x00,
2081 		0xaa, 0x00, 0xaa,
2082 		0xaa, 0xaa, 0x00,
2083 		0xaa, 0xaa, 0xaa,
2084 		0x55, 0x55, 0x55,
2085 		0x55, 0x55, 0xff,
2086 		0x55, 0xff, 0x55,
2087 		0x55, 0xff, 0xff,
2088 		0xff, 0x55, 0x55,
2089 		0xff, 0x55, 0xff,
2090 		0xff, 0xff, 0x55,
2091 		0xff, 0xff, 0xff
2092 	};
2093 	const unsigned char *clut;
2094 
2095 	prom_debug("Looking for displays\n");
2096 	for (node = 0; prom_next_node(&node); ) {
2097 		memset(type, 0, sizeof(type));
2098 		prom_getprop(node, "device_type", type, sizeof(type));
2099 		if (strcmp(type, "display") != 0)
2100 			continue;
2101 
2102 		/* It seems OF doesn't null-terminate the path :-( */
2103 		path = prom_scratch;
2104 		memset(path, 0, PROM_SCRATCH_SIZE);
2105 
2106 		/*
2107 		 * leave some room at the end of the path for appending extra
2108 		 * arguments
2109 		 */
2110 		if (call_prom("package-to-path", 3, 1, node, path,
2111 			      PROM_SCRATCH_SIZE-10) == PROM_ERROR)
2112 			continue;
2113 		prom_printf("found display   : %s, opening... ", path);
2114 
2115 		ih = call_prom("open", 1, 1, path);
2116 		if (ih == 0) {
2117 			prom_printf("failed\n");
2118 			continue;
2119 		}
2120 
2121 		/* Success */
2122 		prom_printf("done\n");
2123 		prom_setprop(node, path, "linux,opened", NULL, 0);
2124 
2125 		/* Setup a usable color table when the appropriate
2126 		 * method is available. Should update this to set-colors */
2127 		clut = default_colors;
2128 		for (i = 0; i < 16; i++, clut += 3)
2129 			if (prom_set_color(ih, i, clut[0], clut[1],
2130 					   clut[2]) != 0)
2131 				break;
2132 
2133 #ifdef CONFIG_LOGO_LINUX_CLUT224
2134 		clut = PTRRELOC(logo_linux_clut224.clut);
2135 		for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2136 			if (prom_set_color(ih, i + 32, clut[0], clut[1],
2137 					   clut[2]) != 0)
2138 				break;
2139 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
2140 
2141 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2142 		if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2143 		    PROM_ERROR) {
2144 			u32 width, height, pitch, addr;
2145 
2146 			prom_printf("Setting btext !\n");
2147 			prom_getprop(node, "width", &width, 4);
2148 			prom_getprop(node, "height", &height, 4);
2149 			prom_getprop(node, "linebytes", &pitch, 4);
2150 			prom_getprop(node, "address", &addr, 4);
2151 			prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2152 				    width, height, pitch, addr);
2153 			btext_setup_display(width, height, 8, pitch, addr);
2154 		}
2155 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2156 	}
2157 }
2158 
2159 
2160 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
2161 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2162 			      unsigned long needed, unsigned long align)
2163 {
2164 	void *ret;
2165 
2166 	*mem_start = _ALIGN(*mem_start, align);
2167 	while ((*mem_start + needed) > *mem_end) {
2168 		unsigned long room, chunk;
2169 
2170 		prom_debug("Chunk exhausted, claiming more at %x...\n",
2171 			   alloc_bottom);
2172 		room = alloc_top - alloc_bottom;
2173 		if (room > DEVTREE_CHUNK_SIZE)
2174 			room = DEVTREE_CHUNK_SIZE;
2175 		if (room < PAGE_SIZE)
2176 			prom_panic("No memory for flatten_device_tree "
2177 				   "(no room)\n");
2178 		chunk = alloc_up(room, 0);
2179 		if (chunk == 0)
2180 			prom_panic("No memory for flatten_device_tree "
2181 				   "(claim failed)\n");
2182 		*mem_end = chunk + room;
2183 	}
2184 
2185 	ret = (void *)*mem_start;
2186 	*mem_start += needed;
2187 
2188 	return ret;
2189 }
2190 
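/* Emit one 32-bit big-endian cell (token or value) into the FDT output. */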
2191 #define dt_push_token(token, mem_start, mem_end) do { 			\
2192 		void *room = make_room(mem_start, mem_end, 4, 4);	\
2193 		*(__be32 *)room = cpu_to_be32(token);			\
2194 	} while(0)
2195 
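/*
 * Look up str in the strings block built so far.  Returns its offset from
 * dt_string_start, or 0 if absent; offset 0 can never be a valid match
 * because of the 4-byte hole left at the start of the block.
 */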
2196 static unsigned long __init dt_find_string(char *str)
2197 {
2198 	char *s, *os;
2199 
2200 	s = os = (char *)dt_string_start;
2201 	s += 4;
2202 	while (s <  (char *)dt_string_end) {
2203 		if (strcmp(s, str) == 0)
2204 			return s - os;
2205 		s += strlen(s) + 1;
2206 	}
2207 	return 0;
2208 }
2209 
2210 /*
2211  * The Open Firmware 1275 specification states properties must be 31 bytes or
2212  * less; however, not all firmware obeys this. Make it 64 bytes to be safe.
2213  */
2214 #define MAX_PROPERTY_NAME 64
2215 
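/*
 * First pass over the device tree: add each property name to the strings
 * block exactly once.  "name" is skipped; the node name is emitted with
 * OF_DT_BEGIN_NODE in the structure block instead.
 */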
2216 static void __init scan_dt_build_strings(phandle node,
2217 					 unsigned long *mem_start,
2218 					 unsigned long *mem_end)
2219 {
2220 	char *prev_name, *namep, *sstart;
2221 	unsigned long soff;
2222 	phandle child;
2223 
2224 	sstart =  (char *)dt_string_start;
2225 
2226 	/* get and store all property names */
2227 	prev_name = "";
2228 	for (;;) {
2229 		/* 64 is max len of name including nul. */
2230 		namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2231 		if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2232 			/* No more properties: unwind alloc */
2233 			*mem_start = (unsigned long)namep;
2234 			break;
2235 		}
2236 
2237  		/* skip "name" */
2238  		if (strcmp(namep, "name") == 0) {
2239  			*mem_start = (unsigned long)namep;
2240  			prev_name = "name";
2241  			continue;
2242  		}
2243 		/* get/create string entry */
2244 		soff = dt_find_string(namep);
2245 		if (soff != 0) {
2246 			*mem_start = (unsigned long)namep;
2247 			namep = sstart + soff;
2248 		} else {
2249 			/* Trim off some if we can */
2250 			*mem_start = (unsigned long)namep + strlen(namep) + 1;
2251 			dt_string_end = *mem_start;
2252 		}
2253 		prev_name = namep;
2254 	}
2255 
2256 	/* do all our children */
2257 	child = call_prom("child", 1, 1, node);
2258 	while (child != 0) {
2259 		scan_dt_build_strings(child, mem_start, mem_end);
2260 		child = call_prom("peer", 1, 1, child);
2261 	}
2262 }
2263 
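/*
 * Second pass: emit the structure block for this node and its children:
 * OF_DT_BEGIN_NODE plus the unit name, one OF_DT_PROP record (length,
 * string offset, value) per property, a linux,phandle fallback when the
 * firmware provides no "phandle", and a closing OF_DT_END_NODE.
 */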
2264 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2265 					unsigned long *mem_end)
2266 {
2267 	phandle child;
2268 	char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2269 	unsigned long soff;
2270 	unsigned char *valp;
2271 	static char pname[MAX_PROPERTY_NAME];
2272 	int l, room, has_phandle = 0;
2273 
2274 	dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2275 
2276 	/* get the node's full name */
2277 	namep = (char *)*mem_start;
2278 	room = *mem_end - *mem_start;
2279 	if (room > 255)
2280 		room = 255;
2281 	l = call_prom("package-to-path", 3, 1, node, namep, room);
2282 	if (l >= 0) {
2283 		/* Didn't fit?  Get more room. */
2284 		if (l >= room) {
2285 			if (l >= *mem_end - *mem_start)
2286 				namep = make_room(mem_start, mem_end, l+1, 1);
2287 			call_prom("package-to-path", 3, 1, node, namep, l);
2288 		}
2289 		namep[l] = '\0';
2290 
2291 		/* Fixup an Apple bug where they have bogus \0 chars in the
2292 		 * middle of the path in some properties, and extract
2293 		 * the unit name (everything after the last '/').
2294 		 */
2295 		for (lp = p = namep, ep = namep + l; p < ep; p++) {
2296 			if (*p == '/')
2297 				lp = namep;
2298 			else if (*p != 0)
2299 				*lp++ = *p;
2300 		}
2301 		*lp = 0;
2302 		*mem_start = _ALIGN((unsigned long)lp + 1, 4);
2303 	}
2304 
2305 	/* get it again for debugging */
2306 	path = prom_scratch;
2307 	memset(path, 0, PROM_SCRATCH_SIZE);
2308 	call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
2309 
2310 	/* get and store all properties */
2311 	prev_name = "";
2312 	sstart = (char *)dt_string_start;
2313 	for (;;) {
2314 		if (call_prom("nextprop", 3, 1, node, prev_name,
2315 			      pname) != 1)
2316 			break;
2317 
2318  		/* skip "name" */
2319  		if (strcmp(pname, "name") == 0) {
2320  			prev_name = "name";
2321  			continue;
2322  		}
2323 
2324 		/* find string offset */
2325 		soff = dt_find_string(pname);
2326 		if (soff == 0) {
2327 			prom_printf("WARNING: Can't find string index for"
2328 				    " <%s>, node %s\n", pname, path);
2329 			break;
2330 		}
2331 		prev_name = sstart + soff;
2332 
2333 		/* get length */
2334 		l = call_prom("getproplen", 2, 1, node, pname);
2335 
2336 		/* sanity checks */
2337 		if (l == PROM_ERROR)
2338 			continue;
2339 
2340 		/* push property head */
2341 		dt_push_token(OF_DT_PROP, mem_start, mem_end);
2342 		dt_push_token(l, mem_start, mem_end);
2343 		dt_push_token(soff, mem_start, mem_end);
2344 
2345 		/* push property content */
2346 		valp = make_room(mem_start, mem_end, l, 4);
2347 		call_prom("getprop", 4, 1, node, pname, valp, l);
2348 		*mem_start = _ALIGN(*mem_start, 4);
2349 
2350 		if (!strcmp(pname, "phandle"))
2351 			has_phandle = 1;
2352 	}
2353 
2354 	/* Add a "linux,phandle" property if no "phandle" property already
2355 	 * existed (can happen with OPAL)
2356 	 */
2357 	if (!has_phandle) {
2358 		soff = dt_find_string("linux,phandle");
2359 		if (soff == 0)
2360 			prom_printf("WARNING: Can't find string index for"
2361 				    " <linux,phandle>, node %s\n", path);
2362 		else {
2363 			dt_push_token(OF_DT_PROP, mem_start, mem_end);
2364 			dt_push_token(4, mem_start, mem_end);
2365 			dt_push_token(soff, mem_start, mem_end);
2366 			valp = make_room(mem_start, mem_end, 4, 4);
2367 			*(__be32 *)valp = cpu_to_be32(node);
2368 		}
2369 	}
2370 
2371 	/* do all our children */
2372 	child = call_prom("child", 1, 1, node);
2373 	while (child != 0) {
2374 		scan_dt_build_struct(child, mem_start, mem_end);
2375 		child = call_prom("peer", 1, 1, child);
2376 	}
2377 
2378 	dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2379 }
2380 
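/*
 * Build the flattened device tree that is handed to the kernel: claim a
 * memory chunk, lay down the boot_param_header and reserve map, run the
 * two scan passes above to fill the strings and structure blocks, then
 * fix up the header offsets and copy in the reserve map.
 */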
2381 static void __init flatten_device_tree(void)
2382 {
2383 	phandle root;
2384 	unsigned long mem_start, mem_end, room;
2385 	struct boot_param_header *hdr;
2386 	char *namep;
2387 	u64 *rsvmap;
2388 
2389 	/*
2390 	 * Check how much room we have between alloc top & bottom (+/- a
2391 	 * few pages), crop to 1MB, as this is our "chunk" size
2392 	 */
2393 	room = alloc_top - alloc_bottom - 0x4000;
2394 	if (room > DEVTREE_CHUNK_SIZE)
2395 		room = DEVTREE_CHUNK_SIZE;
2396 	prom_debug("starting device tree allocs at %x\n", alloc_bottom);
2397 
2398 	/* Now try to claim that */
2399 	mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2400 	if (mem_start == 0)
2401 		prom_panic("Can't allocate initial device-tree chunk\n");
2402 	mem_end = mem_start + room;
2403 
2404 	/* Get root of tree */
2405 	root = call_prom("peer", 1, 1, (phandle)0);
2406 	if (root == (phandle)0)
2407 		prom_panic ("couldn't get device tree root\n");
2408 
2409 	/* Build header and make room for mem rsv map */
2410 	mem_start = _ALIGN(mem_start, 4);
2411 	hdr = make_room(&mem_start, &mem_end,
2412 			sizeof(struct boot_param_header), 4);
2413 	dt_header_start = (unsigned long)hdr;
2414 	rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2415 
2416 	/* Start of strings */
2417 	mem_start = PAGE_ALIGN(mem_start);
2418 	dt_string_start = mem_start;
2419 	mem_start += 4; /* hole */
2420 
2421 	/* Add "linux,phandle" in there, we'll need it */
2422 	namep = make_room(&mem_start, &mem_end, 16, 1);
2423 	strcpy(namep, "linux,phandle");
2424 	mem_start = (unsigned long)namep + strlen(namep) + 1;
2425 
2426 	/* Build string array */
2427 	prom_printf("Building dt strings...\n");
2428 	scan_dt_build_strings(root, &mem_start, &mem_end);
2429 	dt_string_end = mem_start;
2430 
2431 	/* Build structure */
2432 	mem_start = PAGE_ALIGN(mem_start);
2433 	dt_struct_start = mem_start;
2434 	prom_printf("Building dt structure...\n");
2435 	scan_dt_build_struct(root, &mem_start, &mem_end);
2436 	dt_push_token(OF_DT_END, &mem_start, &mem_end);
2437 	dt_struct_end = PAGE_ALIGN(mem_start);
2438 
2439 	/* Finish header */
2440 	hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2441 	hdr->magic = cpu_to_be32(OF_DT_HEADER);
2442 	hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2443 	hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2444 	hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2445 	hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2446 	hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2447 	hdr->version = cpu_to_be32(OF_DT_VERSION);
2448 	/* Version 16 is not backward compatible */
2449 	hdr->last_comp_version = cpu_to_be32(0x10);
2450 
2451 	/* Copy the reserve map in */
2452 	memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2453 
2454 #ifdef DEBUG_PROM
2455 	{
2456 		int i;
2457 		prom_printf("reserved memory map:\n");
2458 		for (i = 0; i < mem_reserve_cnt; i++)
2459 			prom_printf("  %x - %x\n",
2460 				    be64_to_cpu(mem_reserve_map[i].base),
2461 				    be64_to_cpu(mem_reserve_map[i].size));
2462 	}
2463 #endif
2464 	/* Bump mem_reserve_cnt to cause further reservations to fail
2465 	 * since it's too late.
2466 	 */
2467 	mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2468 
2469 	prom_printf("Device tree strings 0x%x -> 0x%x\n",
2470 		    dt_string_start, dt_string_end);
2471 	prom_printf("Device tree struct  0x%x -> 0x%x\n",
2472 		    dt_struct_start, dt_struct_end);
2473 }
2474 
2475 #ifdef CONFIG_PPC_MAPLE
2476 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2477  * The values are bad, and it doesn't even have the right number of cells. */
2478 static void __init fixup_device_tree_maple(void)
2479 {
2480 	phandle isa;
2481 	u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2482 	u32 isa_ranges[6];
2483 	char *name;
2484 
2485 	name = "/ht@0/isa@4";
2486 	isa = call_prom("finddevice", 1, 1, ADDR(name));
2487 	if (!PHANDLE_VALID(isa)) {
2488 		name = "/ht@0/isa@6";
2489 		isa = call_prom("finddevice", 1, 1, ADDR(name));
2490 		rloc = 0x01003000; /* IO space; PCI device = 6 */
2491 	}
2492 	if (!PHANDLE_VALID(isa))
2493 		return;
2494 
2495 	if (prom_getproplen(isa, "ranges") != 12)
2496 		return;
2497 	if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2498 		== PROM_ERROR)
2499 		return;
2500 
2501 	if (isa_ranges[0] != 0x1 ||
2502 		isa_ranges[1] != 0xf4000000 ||
2503 		isa_ranges[2] != 0x00010000)
2504 		return;
2505 
2506 	prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2507 
2508 	isa_ranges[0] = 0x1;
2509 	isa_ranges[1] = 0x0;
2510 	isa_ranges[2] = rloc;
2511 	isa_ranges[3] = 0x0;
2512 	isa_ranges[4] = 0x0;
2513 	isa_ranges[5] = 0x00010000;
2514 	prom_setprop(isa, name, "ranges",
2515 			isa_ranges, sizeof(isa_ranges));
2516 }
2517 
2518 #define CPC925_MC_START		0xf8000000
2519 #define CPC925_MC_LENGTH	0x1000000
2520 /* The values for memory-controller don't have the right number of cells */
2521 static void __init fixup_device_tree_maple_memory_controller(void)
2522 {
2523 	phandle mc;
2524 	u32 mc_reg[4];
2525 	char *name = "/hostbridge@f8000000";
2526 	u32 ac, sc;
2527 
2528 	mc = call_prom("finddevice", 1, 1, ADDR(name));
2529 	if (!PHANDLE_VALID(mc))
2530 		return;
2531 
2532 	if (prom_getproplen(mc, "reg") != 8)
2533 		return;
2534 
2535 	prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2536 	prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2537 	if ((ac != 2) || (sc != 2))
2538 		return;
2539 
2540 	if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2541 		return;
2542 
2543 	if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2544 		return;
2545 
2546 	prom_printf("Fixing up bogus hostbridge on Maple...\n");
2547 
2548 	mc_reg[0] = 0x0;
2549 	mc_reg[1] = CPC925_MC_START;
2550 	mc_reg[2] = 0x0;
2551 	mc_reg[3] = CPC925_MC_LENGTH;
2552 	prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2553 }
2554 #else
2555 #define fixup_device_tree_maple()
2556 #define fixup_device_tree_maple_memory_controller()
2557 #endif
2558 
2559 #ifdef CONFIG_PPC_CHRP
2560 /*
2561  * Pegasos and BriQ lack the "ranges" property in the isa node
2562  * Pegasos needs decimal IRQ 14/15, not hexadecimal
2563  * Pegasos has the IDE configured in legacy mode, but advertised as native
2564  */
2565 static void __init fixup_device_tree_chrp(void)
2566 {
2567 	phandle ph;
2568 	u32 prop[6];
2569 	u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2570 	char *name;
2571 	int rc;
2572 
2573 	name = "/pci@80000000/isa@c";
2574 	ph = call_prom("finddevice", 1, 1, ADDR(name));
2575 	if (!PHANDLE_VALID(ph)) {
2576 		name = "/pci@ff500000/isa@6";
2577 		ph = call_prom("finddevice", 1, 1, ADDR(name));
2578 		rloc = 0x01003000; /* IO space; PCI device = 6 */
2579 	}
2580 	if (PHANDLE_VALID(ph)) {
2581 		rc = prom_getproplen(ph, "ranges");
2582 		if (rc == 0 || rc == PROM_ERROR) {
2583 			prom_printf("Fixing up missing ISA range on Pegasos...\n");
2584 
2585 			prop[0] = 0x1;
2586 			prop[1] = 0x0;
2587 			prop[2] = rloc;
2588 			prop[3] = 0x0;
2589 			prop[4] = 0x0;
2590 			prop[5] = 0x00010000;
2591 			prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2592 		}
2593 	}
2594 
2595 	name = "/pci@80000000/ide@C,1";
2596 	ph = call_prom("finddevice", 1, 1, ADDR(name));
2597 	if (PHANDLE_VALID(ph)) {
2598 		prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2599 		prop[0] = 14;
2600 		prop[1] = 0x0;
2601 		prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2602 		prom_printf("Fixing up IDE class-code on Pegasos...\n");
2603 		rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2604 		if (rc == sizeof(u32)) {
2605 			prop[0] &= ~0x5;
2606 			prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2607 		}
2608 	}
2609 }
2610 #else
2611 #define fixup_device_tree_chrp()
2612 #endif
2613 
2614 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2615 static void __init fixup_device_tree_pmac(void)
2616 {
2617 	phandle u3, i2c, mpic;
2618 	u32 u3_rev;
2619 	u32 interrupts[2];
2620 	u32 parent;
2621 
2622 	/* Some G5s have a missing interrupt definition, fix it up here */
2623 	u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2624 	if (!PHANDLE_VALID(u3))
2625 		return;
2626 	i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2627 	if (!PHANDLE_VALID(i2c))
2628 		return;
2629 	mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2630 	if (!PHANDLE_VALID(mpic))
2631 		return;
2632 
2633 	/* check if proper rev of u3 */
2634 	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2635 	    == PROM_ERROR)
2636 		return;
2637 	if (u3_rev < 0x35 || u3_rev > 0x39)
2638 		return;
2639 	/* does it need fixup ? */
2640 	if (prom_getproplen(i2c, "interrupts") > 0)
2641 		return;
2642 
2643 	prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2644 
2645 	/* interrupt on this revision of u3 is number 0 and level */
2646 	interrupts[0] = 0;
2647 	interrupts[1] = 1;
2648 	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2649 		     &interrupts, sizeof(interrupts));
2650 	parent = (u32)mpic;
2651 	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2652 		     &parent, sizeof(parent));
2653 }
2654 #else
2655 #define fixup_device_tree_pmac()
2656 #endif
2657 
2658 #ifdef CONFIG_PPC_EFIKA
2659 /*
2660  * The MPC5200 FEC driver requires a phy-handle property to tell it how
2661  * to talk to the phy.  If the phy-handle property is missing, then this
2662  * function is called to add the appropriate nodes and link it to the
2663  * ethernet node.
2664  */
2665 static void __init fixup_device_tree_efika_add_phy(void)
2666 {
2667 	u32 node;
2668 	char prop[64];
2669 	int rv;
2670 
2671 	/* Check if /builtin/ethernet exists - bail if it doesn't */
2672 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2673 	if (!PHANDLE_VALID(node))
2674 		return;
2675 
2676 	/* Check if the phy-handle property exists - bail if it does */
2677 	rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2678 	if (!rv)
2679 		return;
2680 
2681 	/*
2682 	 * At this point the ethernet device doesn't have a phy described.
2683 	 * Now we need to add the missing phy node and linkage
2684 	 */
2685 
2686 	/* Check for an MDIO bus node - if missing then create one */
2687 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
2688 	if (!PHANDLE_VALID(node)) {
2689 		prom_printf("Adding Ethernet MDIO node\n");
2690 		call_prom("interpret", 1, 1,
2691 			" s\" /builtin\" find-device"
2692 			" new-device"
2693 				" 1 encode-int s\" #address-cells\" property"
2694 				" 0 encode-int s\" #size-cells\" property"
2695 				" s\" mdio\" device-name"
2696 				" s\" fsl,mpc5200b-mdio\" encode-string"
2697 				" s\" compatible\" property"
2698 				" 0xf0003000 0x400 reg"
2699 				" 0x2 encode-int"
2700 				" 0x5 encode-int encode+"
2701 				" 0x3 encode-int encode+"
2702 				" s\" interrupts\" property"
2703 			" finish-device");
2704 	}
2705 
2706 	/* Check for a PHY device node - if missing then create one and
2707 	 * give its phandle to the ethernet node */
2708 	node = call_prom("finddevice", 1, 1,
2709 			 ADDR("/builtin/mdio/ethernet-phy"));
2710 	if (!PHANDLE_VALID(node)) {
2711 		prom_printf("Adding Ethernet PHY node\n");
2712 		call_prom("interpret", 1, 1,
2713 			" s\" /builtin/mdio\" find-device"
2714 			" new-device"
2715 				" s\" ethernet-phy\" device-name"
2716 				" 0x10 encode-int s\" reg\" property"
2717 				" my-self"
2718 				" ihandle>phandle"
2719 			" finish-device"
2720 			" s\" /builtin/ethernet\" find-device"
2721 				" encode-int"
2722 				" s\" phy-handle\" property"
2723 			" device-end");
2724 	}
2725 }
2726 
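/*
 * Device tree fixups for the Efika (model "EFIKA5K2"): stop it claiming
 * to be "chrp", clean up CODEGEN,description, repair the bestcomm and
 * sound interrupt properties, and make sure the ethernet node has a
 * phy-handle.
 */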
2727 static void __init fixup_device_tree_efika(void)
2728 {
2729 	int sound_irq[3] = { 2, 2, 0 };
2730 	int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
2731 				3,4,0, 3,5,0, 3,6,0, 3,7,0,
2732 				3,8,0, 3,9,0, 3,10,0, 3,11,0,
2733 				3,12,0, 3,13,0, 3,14,0, 3,15,0 };
2734 	u32 node;
2735 	char prop[64];
2736 	int rv, len;
2737 
2738 	/* Check if we're really running on an EFIKA */
2739 	node = call_prom("finddevice", 1, 1, ADDR("/"));
2740 	if (!PHANDLE_VALID(node))
2741 		return;
2742 
2743 	rv = prom_getprop(node, "model", prop, sizeof(prop));
2744 	if (rv == PROM_ERROR)
2745 		return;
2746 	if (strcmp(prop, "EFIKA5K2"))
2747 		return;
2748 
2749 	prom_printf("Applying EFIKA device tree fixups\n");
2750 
2751 	/* Claiming to be 'chrp' is death */
2752 	node = call_prom("finddevice", 1, 1, ADDR("/"));
2753 	rv = prom_getprop(node, "device_type", prop, sizeof(prop));
2754 	if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
2755 		prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
2756 
2757 	/* CODEGEN,description is exposed in /proc/cpuinfo so
2758 	   fix that too */
2759 	rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
2760 	if (rv != PROM_ERROR && (strstr(prop, "CHRP")))
2761 		prom_setprop(node, "/", "CODEGEN,description",
2762 			     "Efika 5200B PowerPC System",
2763 			     sizeof("Efika 5200B PowerPC System"));
2764 
2765 	/* Fixup bestcomm interrupts property */
2766 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
2767 	if (PHANDLE_VALID(node)) {
2768 		len = prom_getproplen(node, "interrupts");
2769 		if (len == 12) {
2770 			prom_printf("Fixing bestcomm interrupts property\n");
2771 			prom_setprop(node, "/builtin/bestcom", "interrupts",
2772 				     bcomm_irq, sizeof(bcomm_irq));
2773 		}
2774 	}
2775 
2776 	/* Fixup sound interrupts property */
2777 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
2778 	if (PHANDLE_VALID(node)) {
2779 		rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
2780 		if (rv == PROM_ERROR) {
2781 			prom_printf("Adding sound interrupts property\n");
2782 			prom_setprop(node, "/builtin/sound", "interrupts",
2783 				     sound_irq, sizeof(sound_irq));
2784 		}
2785 	}
2786 
2787 	/* Make sure ethernet phy-handle property exists */
2788 	fixup_device_tree_efika_add_phy();
2789 }
2790 #else
2791 #define fixup_device_tree_efika()
2792 #endif
2793 
2794 static void __init fixup_device_tree(void)
2795 {
2796 	fixup_device_tree_maple();
2797 	fixup_device_tree_maple_memory_controller();
2798 	fixup_device_tree_chrp();
2799 	fixup_device_tree_pmac();
2800 	fixup_device_tree_efika();
2801 }
2802 
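/*
 * The /chosen "cpu" property is an instance handle for the CPU we are
 * running on; convert it to a package and read its "reg" property to get
 * the hardware CPU id, which is recorded in prom.cpu.
 */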
2803 static void __init prom_find_boot_cpu(void)
2804 {
2805 	__be32 rval;
2806 	ihandle prom_cpu;
2807 	phandle cpu_pkg;
2808 
2809 	rval = 0;
2810 	if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
2811 		return;
2812 	prom_cpu = be32_to_cpu(rval);
2813 
2814 	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
2815 
2816 	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
2817 	prom.cpu = be32_to_cpu(rval);
2818 
2819 	prom_debug("Booting CPU hw index = %lu\n", prom.cpu);
2820 }
2821 
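/*
 * If the boot loader passed an initrd (start in r3, size in r4), remember
 * and reserve it, and advertise it to the kernel via linux,initrd-start
 * and linux,initrd-end in /chosen.
 */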
2822 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
2823 {
2824 #ifdef CONFIG_BLK_DEV_INITRD
2825 	if (r3 && r4 && r4 != 0xdeadbeef) {
2826 		__be64 val;
2827 
2828 		prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
2829 		prom_initrd_end = prom_initrd_start + r4;
2830 
2831 		val = cpu_to_be64(prom_initrd_start);
2832 		prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
2833 			     &val, sizeof(val));
2834 		val = cpu_to_be64(prom_initrd_end);
2835 		prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
2836 			     &val, sizeof(val));
2837 
2838 		reserve_mem(prom_initrd_start,
2839 			    prom_initrd_end - prom_initrd_start);
2840 
2841 		prom_debug("initrd_start=0x%x\n", prom_initrd_start);
2842 		prom_debug("initrd_end=0x%x\n", prom_initrd_end);
2843 	}
2844 #endif /* CONFIG_BLK_DEV_INITRD */
2845 }
2846 
2847 #ifdef CONFIG_PPC64
2848 #ifdef CONFIG_RELOCATABLE
2849 static void reloc_toc(void)
2850 {
2851 }
2852 
2853 static void unreloc_toc(void)
2854 {
2855 }
2856 #else
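/*
 * Without CONFIG_RELOCATABLE the TOC entries are link-time constants, so
 * the load offset has to be added to (and later subtracted from) the
 * entries covering prom_init by hand.
 */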
2857 static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
2858 {
2859 	unsigned long i;
2860 	unsigned long *toc_entry;
2861 
2862 	/* Get the start of the TOC by using r2 directly. */
2863 	asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
2864 
2865 	for (i = 0; i < nr_entries; i++) {
2866 		*toc_entry = *toc_entry + offset;
2867 		toc_entry++;
2868 	}
2869 }
2870 
2871 static void reloc_toc(void)
2872 {
2873 	unsigned long offset = reloc_offset();
2874 	unsigned long nr_entries =
2875 		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
2876 
2877 	__reloc_toc(offset, nr_entries);
2878 
2879 	mb();
2880 }
2881 
2882 static void unreloc_toc(void)
2883 {
2884 	unsigned long offset = reloc_offset();
2885 	unsigned long nr_entries =
2886 		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
2887 
2888 	mb();
2889 
2890 	__reloc_toc(-offset, nr_entries);
2891 }
2892 #endif
2893 #endif
2894 
2895 /*
2896  * We enter here early on, when the Open Firmware prom is still
2897  * handling exceptions and managing the MMU hash table for us.
2898  */
2899 
2900 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2901 			       unsigned long pp,
2902 			       unsigned long r6, unsigned long r7,
2903 			       unsigned long kbase)
2904 {
2905 	unsigned long hdr;
2906 
2907 #ifdef CONFIG_PPC32
2908 	unsigned long offset = reloc_offset();
2909 	reloc_got2(offset);
2910 #else
2911 	reloc_toc();
2912 #endif
2913 
2914 	/*
2915 	 * First zero the BSS
2916 	 */
2917 	memset(&__bss_start, 0, __bss_stop - __bss_start);
2918 
2919 	/*
2920 	 * Init interface to Open Firmware, get some node references,
2921 	 * like /chosen
2922 	 */
2923 	prom_init_client_services(pp);
2924 
2925 	/*
2926 	 * See if this OF is old enough that we need to do explicit maps
2927 	 * and other workarounds
2928 	 */
2929 	prom_find_mmu();
2930 
2931 	/*
2932 	 * Init prom stdout device
2933 	 */
2934 	prom_init_stdout();
2935 
2936 	prom_printf("Preparing to boot %s", linux_banner);
2937 
2938 	/*
2939 	 * Get default machine type. At this point, we do not differentiate
2940 	 * between pSeries SMP and pSeries LPAR
2941 	 */
2942 	of_platform = prom_find_machine_type();
2943 	prom_printf("Detected machine type: %x\n", of_platform);
2944 
2945 #ifndef CONFIG_NONSTATIC_KERNEL
2946 	/* Bail if this is a kdump kernel. */
2947 	if (PHYSICAL_START > 0)
2948 		prom_panic("Error: You can't boot a kdump kernel from OF!\n");
2949 #endif
2950 
2951 	/*
2952 	 * Check for an initrd
2953 	 */
2954 	prom_check_initrd(r3, r4);
2955 
2956 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
2957 	/*
2958 	 * On pSeries, inform the firmware about our capabilities
2959 	 */
2960 	if (of_platform == PLATFORM_PSERIES ||
2961 	    of_platform == PLATFORM_PSERIES_LPAR)
2962 		prom_send_capabilities();
2963 #endif
2964 
2965 	/*
2966 	 * Copy the CPU hold code
2967 	 */
2968 	if (of_platform != PLATFORM_POWERMAC)
2969 		copy_and_flush(0, kbase, 0x100, 0);
2970 
2971 	/*
2972 	 * Do early parsing of command line
2973 	 */
2974 	early_cmdline_parse();
2975 
2976 	/*
2977 	 * Initialize memory management within prom_init
2978 	 */
2979 	prom_init_mem();
2980 
2981 	/*
2982 	 * Determine which cpu is actually running right _now_
2983 	 */
2984 	prom_find_boot_cpu();
2985 
2986 	/*
2987 	 * Initialize display devices
2988 	 */
2989 	prom_check_displays();
2990 
2991 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
2992 	/*
2993 	 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
2994 	 * that uses the allocator; we need to make sure we get the top of memory
2995 	 * available for us here...
2996 	 */
2997 	if (of_platform == PLATFORM_PSERIES)
2998 		prom_initialize_tce_table();
2999 #endif
3000 
3001 	/*
3002 	 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3003 	 * have a usable RTAS implementation.
3004 	 */
3005 	if (of_platform != PLATFORM_POWERMAC &&
3006 	    of_platform != PLATFORM_OPAL)
3007 		prom_instantiate_rtas();
3008 
3009 #ifdef CONFIG_PPC_POWERNV
3010 #ifdef __BIG_ENDIAN__
3011 	/* Detect HAL and try instantiating it & doing takeover */
3012 	if (of_platform == PLATFORM_PSERIES_LPAR) {
3013 		prom_query_opal();
3014 		if (of_platform == PLATFORM_OPAL) {
3015 			prom_opal_hold_cpus();
3016 			prom_opal_takeover();
3017 		}
3018 	} else
3019 #endif /* __BIG_ENDIAN__ */
3020 	if (of_platform == PLATFORM_OPAL)
3021 		prom_instantiate_opal();
3022 #endif /* CONFIG_PPC_POWERNV */
3023 
3024 #ifdef CONFIG_PPC64
3025 	/* instantiate sml */
3026 	prom_instantiate_sml();
3027 #endif
3028 
3029 	/*
3030 	 * On non-powermacs, put all CPUs in spin-loops.
3031 	 *
3032 	 * PowerMacs use a different mechanism to spin CPUs
3033 	 *
3034 	 * (This must be done after instantiating RTAS)
3035 	 */
3036 	if (of_platform != PLATFORM_POWERMAC &&
3037 	    of_platform != PLATFORM_OPAL)
3038 		prom_hold_cpus();
3039 
3040 	/*
3041 	 * Fill in some information for use by the kernel later on
3042 	 */
3043 	if (prom_memory_limit) {
3044 		__be64 val = cpu_to_be64(prom_memory_limit);
3045 		prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3046 			     &val, sizeof(val));
3047 	}
3048 #ifdef CONFIG_PPC64
3049 	if (prom_iommu_off)
3050 		prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3051 			     NULL, 0);
3052 
3053 	if (prom_iommu_force_on)
3054 		prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3055 			     NULL, 0);
3056 
3057 	if (prom_tce_alloc_start) {
3058 		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3059 			     &prom_tce_alloc_start,
3060 			     sizeof(prom_tce_alloc_start));
3061 		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3062 			     &prom_tce_alloc_end,
3063 			     sizeof(prom_tce_alloc_end));
3064 	}
3065 #endif
3066 
3067 	/*
3068 	 * Fixup any known bugs in the device-tree
3069 	 */
3070 	fixup_device_tree();
3071 
3072 	/*
3073 	 * Now finally create the flattened device-tree
3074 	 */
3075 	prom_printf("copying OF device tree...\n");
3076 	flatten_device_tree();
3077 
3078 	/*
3079 	 * Close stdin in case it is USB and still active on IBM machines.
3080 	 * Unfortunately, quiesce crashes on some powermacs if we have
3081 	 * closed stdin already (in particular the powerbook 101). It
3082 	 * appears that the OPAL version of OFW doesn't like it either.
3083 	 */
3084 	if (of_platform != PLATFORM_POWERMAC &&
3085 	    of_platform != PLATFORM_OPAL)
3086 		prom_close_stdin();
3087 
3088 	/*
3089 	 * Call OF "quiesce" method to shut down pending DMA's from
3090 	 * devices etc...
3091 	 */
3092 	prom_printf("Calling quiesce...\n");
3093 	call_prom("quiesce", 0, 0);
3094 
3095 	/*
3096 	 * And finally, call the kernel passing it the flattened device
3097 	 * tree and NULL as r5, thus triggering the new entry point which
3098 	 * is common to us and kexec
3099 	 */
3100 	hdr = dt_header_start;
3101 
3102 	/* Don't print anything after quiesce under OPAL; it crashes OFW */
3103 	if (of_platform != PLATFORM_OPAL) {
3104 		prom_printf("returning from prom_init\n");
3105 		prom_debug("->dt_header_start=0x%x\n", hdr);
3106 	}
3107 
3108 #ifdef CONFIG_PPC32
3109 	reloc_got2(-offset);
3110 #else
3111 	unreloc_toc();
3112 #endif
3113 
3114 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
3115 	/* OPAL early debug gets the OPAL base & entry in r8 and r9 */
3116 	__start(hdr, kbase, 0, 0, 0,
3117 		prom_opal_base, prom_opal_entry);
3118 #else
3119 	__start(hdr, kbase, 0, 0, 0, 0, 0);
3120 #endif
3121 
3122 	return 0;
3123 }
3124