1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Procedures for interfacing to Open Firmware.
4 *
5 * Paul Mackerras August 1996.
6 * Copyright (C) 1996-2005 Paul Mackerras.
7 *
8 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
9 * {engebret|bergner}@us.ibm.com
10 */
11
12 #undef DEBUG_PROM
13
14 /* we cannot use FORTIFY as it brings in new symbols */
15 #define __NO_FORTIFY
16
17 #include <linux/stdarg.h>
18 #include <linux/kernel.h>
19 #include <linux/string.h>
20 #include <linux/init.h>
21 #include <linux/threads.h>
22 #include <linux/spinlock.h>
23 #include <linux/types.h>
24 #include <linux/pci.h>
25 #include <linux/proc_fs.h>
26 #include <linux/delay.h>
27 #include <linux/initrd.h>
28 #include <linux/bitops.h>
29 #include <linux/pgtable.h>
30 #include <linux/printk.h>
31 #include <linux/of.h>
32 #include <linux/of_fdt.h>
33 #include <asm/prom.h>
34 #include <asm/rtas.h>
35 #include <asm/page.h>
36 #include <asm/processor.h>
37 #include <asm/interrupt.h>
38 #include <asm/irq.h>
39 #include <asm/io.h>
40 #include <asm/smp.h>
41 #include <asm/mmu.h>
42 #include <asm/iommu.h>
43 #include <asm/btext.h>
44 #include <asm/sections.h>
45 #include <asm/setup.h>
46 #include <asm/asm-prototypes.h>
47 #include <asm/ultravisor-api.h>
48
49 #include <linux/linux_logo.h>
50
51 /* All of prom_init bss lives here */
52 #define __prombss __section(".bss.prominit")
53
54 /*
55 * Eventually bump that one up
56 */
57 #define DEVTREE_CHUNK_SIZE 0x100000
58
59 /*
60 * This is the size of the local memory reserve map that gets copied
61 * into the boot params passed to the kernel. That size is totally
62 * flexible as the kernel just reads the list until it encounters an
63 * entry with size 0, so it can be changed without breaking binary
64 * compatibility
65 */
66 #define MEM_RESERVE_MAP_SIZE 8
67
68 /*
69 * prom_init() is called very early on, before the kernel text
70 * and data have been mapped to KERNELBASE. At this point the code
71 * is running at whatever address it has been loaded at.
72 * On ppc32 we compile with -mrelocatable, which means that references
73 * to extern and static variables get relocated automatically.
74 * ppc64 objects are always relocatable, we just need to relocate the
75 * TOC.
76 *
77 * Because OF may have mapped I/O devices into the area starting at
78 * KERNELBASE, particularly on CHRP machines, we can't safely call
79 * OF once the kernel has been mapped to KERNELBASE. Therefore all
80 * OF calls must be done within prom_init().
81 *
82 * ADDR is used in calls to call_prom. The 4th and following
83 * arguments to call_prom should be 32-bit values.
84 * On ppc64, 64 bit values are truncated to 32 bits (and
85 * fortunately don't get interpreted as two arguments).
86 */
87 #define ADDR(x) (u32)(unsigned long)(x)
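/*
 * For illustration, a typical call site narrows a pointer argument with
 * ADDR() before handing it to call_prom(), e.g.
 *
 *	call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
 *
 * which is the pattern prom_print() uses further down.
 */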
88
89 #ifdef CONFIG_PPC64
90 #define OF_WORKAROUNDS 0
91 #else
92 #define OF_WORKAROUNDS of_workarounds
93 static int of_workarounds __prombss;
94 #endif
95
96 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
97 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
98
99 #ifdef DEBUG_PROM
100 #define prom_debug(x...) prom_printf(x)
101 #else
102 #define prom_debug(x...) do { } while (0)
103 #endif
104
105
106 typedef u32 prom_arg_t;
107
108 struct prom_args {
109 __be32 service;
110 __be32 nargs;
111 __be32 nret;
112 __be32 args[10];
113 };
114
115 struct prom_t {
116 ihandle root;
117 phandle chosen;
118 int cpu;
119 ihandle stdout;
120 ihandle mmumap;
121 ihandle memory;
122 };
123
124 struct mem_map_entry {
125 __be64 base;
126 __be64 size;
127 };
128
129 typedef __be32 cell_t;
130
131 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
132 unsigned long r6, unsigned long r7, unsigned long r8,
133 unsigned long r9);
134
135 #ifdef CONFIG_PPC64
136 extern int enter_prom(struct prom_args *args, unsigned long entry);
137 #else
138 static inline int enter_prom(struct prom_args *args, unsigned long entry)
139 {
140 return ((int (*)(struct prom_args *))entry)(args);
141 }
142 #endif
143
144 extern void copy_and_flush(unsigned long dest, unsigned long src,
145 unsigned long size, unsigned long offset);
146
147 /* prom structure */
148 static struct prom_t __prombss prom;
149
150 static unsigned long __prombss prom_entry;
151
152 static char __prombss of_stdout_device[256];
153 static char __prombss prom_scratch[256];
154
155 static unsigned long __prombss dt_header_start;
156 static unsigned long __prombss dt_struct_start, dt_struct_end;
157 static unsigned long __prombss dt_string_start, dt_string_end;
158
159 static unsigned long __prombss prom_initrd_start, prom_initrd_end;
160
161 #ifdef CONFIG_PPC64
162 static int __prombss prom_iommu_force_on;
163 static int __prombss prom_iommu_off;
164 static unsigned long __prombss prom_tce_alloc_start;
165 static unsigned long __prombss prom_tce_alloc_end;
166 #endif
167
168 #ifdef CONFIG_PPC_PSERIES
169 static bool __prombss prom_radix_disable;
170 static bool __prombss prom_radix_gtse_disable;
171 static bool __prombss prom_xive_disable;
172 #endif
173
174 #ifdef CONFIG_PPC_SVM
175 static bool __prombss prom_svm_enable;
176 #endif
177
178 struct platform_support {
179 bool hash_mmu;
180 bool radix_mmu;
181 bool radix_gtse;
182 bool xive;
183 };
184
185 /* Platform codes are now obsolete in the kernel. They are only used within
186 * this file and will ultimately go away too. Feel free to change them if you
187 * need to; they are not shared with anything outside of this file anymore.
188 */
189 #define PLATFORM_PSERIES 0x0100
190 #define PLATFORM_PSERIES_LPAR 0x0101
191 #define PLATFORM_LPAR 0x0001
192 #define PLATFORM_POWERMAC 0x0400
193 #define PLATFORM_GENERIC 0x0500
194
195 static int __prombss of_platform;
196
197 static char __prombss prom_cmd_line[COMMAND_LINE_SIZE];
198
199 static unsigned long __prombss prom_memory_limit;
200
201 static unsigned long __prombss alloc_top;
202 static unsigned long __prombss alloc_top_high;
203 static unsigned long __prombss alloc_bottom;
204 static unsigned long __prombss rmo_top;
205 static unsigned long __prombss ram_top;
206
207 static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE];
208 static int __prombss mem_reserve_cnt;
209
210 static cell_t __prombss regbuf[1024];
211
212 static bool __prombss rtas_has_query_cpu_stopped;
213
214
215 /*
216 * Error results ... some OF calls will return "-1" on error, some
217 * will return 0, some will return either. To simplify, here are
218 * macros to use with any ihandle or phandle return value to check if
219 * it is valid
220 */
221
222 #define PROM_ERROR (-1u)
223 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
224 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
225
226 /* Copied from lib/string.c and lib/kstrtox.c */
227
228 static int __init prom_strcmp(const char *cs, const char *ct)
229 {
230 unsigned char c1, c2;
231
232 while (1) {
233 c1 = *cs++;
234 c2 = *ct++;
235 if (c1 != c2)
236 return c1 < c2 ? -1 : 1;
237 if (!c1)
238 break;
239 }
240 return 0;
241 }
242
243 static ssize_t __init prom_strscpy_pad(char *dest, const char *src, size_t n)
244 {
245 ssize_t rc;
246 size_t i;
247
248 if (n == 0 || n > INT_MAX)
249 return -E2BIG;
250
251 // Copy up to n bytes
252 for (i = 0; i < n && src[i] != '\0'; i++)
253 dest[i] = src[i];
254
255 rc = i;
256
257 // If we copied all n then we have run out of space for the nul
258 if (rc == n) {
259 // Rewind by one character to ensure nul termination
260 i--;
261 rc = -E2BIG;
262 }
263
264 for (; i < n; i++)
265 dest[i] = '\0';
266
267 return rc;
268 }
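/*
 * Example behaviour (illustrative only): prom_strscpy_pad(dst, "hello", 8)
 * copies the five characters, NUL-pads dst[5..7] and returns 5, whereas
 * prom_strscpy_pad(dst, "hello", 4) truncates to "hel", NUL-terminates
 * and returns -E2BIG.
 */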
269
270 static int __init prom_strncmp(const char *cs, const char *ct, size_t count)
271 {
272 unsigned char c1, c2;
273
274 while (count) {
275 c1 = *cs++;
276 c2 = *ct++;
277 if (c1 != c2)
278 return c1 < c2 ? -1 : 1;
279 if (!c1)
280 break;
281 count--;
282 }
283 return 0;
284 }
285
286 static size_t __init prom_strlen(const char *s)
287 {
288 const char *sc;
289
290 for (sc = s; *sc != '\0'; ++sc)
291 /* nothing */;
292 return sc - s;
293 }
294
295 static int __init prom_memcmp(const void *cs, const void *ct, size_t count)
296 {
297 const unsigned char *su1, *su2;
298 int res = 0;
299
300 for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
301 if ((res = *su1 - *su2) != 0)
302 break;
303 return res;
304 }
305
306 static char __init *prom_strstr(const char *s1, const char *s2)
307 {
308 size_t l1, l2;
309
310 l2 = prom_strlen(s2);
311 if (!l2)
312 return (char *)s1;
313 l1 = prom_strlen(s1);
314 while (l1 >= l2) {
315 l1--;
316 if (!prom_memcmp(s1, s2, l2))
317 return (char *)s1;
318 s1++;
319 }
320 return NULL;
321 }
322
323 static size_t __init prom_strlcat(char *dest, const char *src, size_t count)
324 {
325 size_t dsize = prom_strlen(dest);
326 size_t len = prom_strlen(src);
327 size_t res = dsize + len;
328
329 /* This would be a bug */
330 if (dsize >= count)
331 return count;
332
333 dest += dsize;
334 count -= dsize;
335 if (len >= count)
336 len = count-1;
337 memcpy(dest, src, len);
338 dest[len] = 0;
339 return res;
340
341 }
342
343 #ifdef CONFIG_PPC_PSERIES
344 static int __init prom_strtobool(const char *s, bool *res)
345 {
346 if (!s)
347 return -EINVAL;
348
349 switch (s[0]) {
350 case 'y':
351 case 'Y':
352 case '1':
353 *res = true;
354 return 0;
355 case 'n':
356 case 'N':
357 case '0':
358 *res = false;
359 return 0;
360 case 'o':
361 case 'O':
362 switch (s[1]) {
363 case 'n':
364 case 'N':
365 *res = true;
366 return 0;
367 case 'f':
368 case 'F':
369 *res = false;
370 return 0;
371 default:
372 break;
373 }
374 break;
375 default:
376 break;
377 }
378
379 return -EINVAL;
380 }
381 #endif
382
383 /* This is the one and *ONLY* place where we actually call open
384 * firmware.
385 */
386
387 static int __init call_prom(const char *service, int nargs, int nret, ...)
388 {
389 int i;
390 struct prom_args args;
391 va_list list;
392
393 args.service = cpu_to_be32(ADDR(service));
394 args.nargs = cpu_to_be32(nargs);
395 args.nret = cpu_to_be32(nret);
396
397 va_start(list, nret);
398 for (i = 0; i < nargs; i++)
399 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
400 va_end(list);
401
402 for (i = 0; i < nret; i++)
403 args.args[nargs+i] = 0;
404
405 if (enter_prom(&args, prom_entry) < 0)
406 return PROM_ERROR;
407
408 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
409 }
410
411 static int __init call_prom_ret(const char *service, int nargs, int nret,
412 prom_arg_t *rets, ...)
413 {
414 int i;
415 struct prom_args args;
416 va_list list;
417
418 args.service = cpu_to_be32(ADDR(service));
419 args.nargs = cpu_to_be32(nargs);
420 args.nret = cpu_to_be32(nret);
421
422 va_start(list, rets);
423 for (i = 0; i < nargs; i++)
424 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
425 va_end(list);
426
427 for (i = 0; i < nret; i++)
428 args.args[nargs+i] = 0;
429
430 if (enter_prom(&args, prom_entry) < 0)
431 return PROM_ERROR;
432
433 if (rets != NULL)
434 for (i = 1; i < nret; ++i)
435 rets[i-1] = be32_to_cpu(args.args[nargs+i]);
436
437 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
438 }
439
440
441 static void __init prom_print(const char *msg)
442 {
443 const char *p, *q;
444
445 if (prom.stdout == 0)
446 return;
447
448 for (p = msg; *p != 0; p = q) {
449 for (q = p; *q != 0 && *q != '\n'; ++q)
450 ;
451 if (q > p)
452 call_prom("write", 3, 1, prom.stdout, p, q - p);
453 if (*q == 0)
454 break;
455 ++q;
456 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
457 }
458 }
459
460
461 /*
462 * Both prom_print_hex & prom_print_dec take an unsigned long as input so that
463 * we do not need __udivdi3 or __umoddi3 on 32-bit.
464 */
465 static void __init prom_print_hex(unsigned long val)
466 {
467 int i, nibbles = sizeof(val)*2;
468 char buf[sizeof(val)*2+1];
469
470 for (i = nibbles-1; i >= 0; i--) {
471 buf[i] = (val & 0xf) + '0';
472 if (buf[i] > '9')
473 buf[i] += ('a'-'0'-10);
474 val >>= 4;
475 }
476 buf[nibbles] = '\0';
477 call_prom("write", 3, 1, prom.stdout, buf, nibbles);
478 }
479
480 /* max number of decimal digits in an unsigned long */
481 #define UL_DIGITS 21
482 static void __init prom_print_dec(unsigned long val)
483 {
484 int i, size;
485 char buf[UL_DIGITS+1];
486
487 for (i = UL_DIGITS-1; i >= 0; i--) {
488 buf[i] = (val % 10) + '0';
489 val = val/10;
490 if (val == 0)
491 break;
492 }
493 /* shift stuff down */
494 size = UL_DIGITS - i;
495 call_prom("write", 3, 1, prom.stdout, buf+i, size);
496 }
497
498 __printf(1, 2)
499 static void __init prom_printf(const char *format, ...)
500 {
501 const char *p, *q, *s;
502 va_list args;
503 unsigned long v;
504 long vs;
505 int n = 0;
506
507 va_start(args, format);
508 for (p = format; *p != 0; p = q) {
509 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
510 ;
511 if (q > p)
512 call_prom("write", 3, 1, prom.stdout, p, q - p);
513 if (*q == 0)
514 break;
515 if (*q == '\n') {
516 ++q;
517 call_prom("write", 3, 1, prom.stdout,
518 ADDR("\r\n"), 2);
519 continue;
520 }
521 ++q;
522 if (*q == 0)
523 break;
524 while (*q == 'l') {
525 ++q;
526 ++n;
527 }
528 switch (*q) {
529 case 's':
530 ++q;
531 s = va_arg(args, const char *);
532 prom_print(s);
533 break;
534 case 'x':
535 ++q;
536 switch (n) {
537 case 0:
538 v = va_arg(args, unsigned int);
539 break;
540 case 1:
541 v = va_arg(args, unsigned long);
542 break;
543 case 2:
544 default:
545 v = va_arg(args, unsigned long long);
546 break;
547 }
548 prom_print_hex(v);
549 break;
550 case 'u':
551 ++q;
552 switch (n) {
553 case 0:
554 v = va_arg(args, unsigned int);
555 break;
556 case 1:
557 v = va_arg(args, unsigned long);
558 break;
559 case 2:
560 default:
561 v = va_arg(args, unsigned long long);
562 break;
563 }
564 prom_print_dec(v);
565 break;
566 case 'd':
567 ++q;
568 switch (n) {
569 case 0:
570 vs = va_arg(args, int);
571 break;
572 case 1:
573 vs = va_arg(args, long);
574 break;
575 case 2:
576 default:
577 vs = va_arg(args, long long);
578 break;
579 }
580 if (vs < 0) {
581 prom_print("-");
582 vs = -vs;
583 }
584 prom_print_dec(vs);
585 break;
586 }
587 }
588 va_end(args);
589 }
590
591
592 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
593 unsigned long align)
594 {
595
596 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
597 /*
598 * Old OF requires we claim physical and virtual separately
599 * and then map explicitly (assuming virtual mode)
600 */
601 int ret;
602 prom_arg_t result;
603
604 ret = call_prom_ret("call-method", 5, 2, &result,
605 ADDR("claim"), prom.memory,
606 align, size, virt);
607 if (ret != 0 || result == -1)
608 return -1;
609 ret = call_prom_ret("call-method", 5, 2, &result,
610 ADDR("claim"), prom.mmumap,
611 align, size, virt);
612 if (ret != 0) {
613 call_prom("call-method", 4, 1, ADDR("release"),
614 prom.memory, size, virt);
615 return -1;
616 }
617 /* the 0x12 is M (coherence) + PP == read/write */
618 call_prom("call-method", 6, 1,
619 ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
620 return virt;
621 }
622 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
623 (prom_arg_t)align);
624 }
625
626 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
627 {
628 prom_print(reason);
629 /* Do not call exit because it clears the screen on pmac
630 * it also causes some sort of double-fault on early pmacs */
631 if (of_platform == PLATFORM_POWERMAC)
632 asm("trap\n");
633
634 /* ToDo: should put up an SRC here on pSeries */
635 call_prom("exit", 0, 0);
636
637 for (;;) /* should never get here */
638 ;
639 }
640
641
642 static int __init prom_next_node(phandle *nodep)
643 {
644 phandle node;
645
646 if ((node = *nodep) != 0
647 && (*nodep = call_prom("child", 1, 1, node)) != 0)
648 return 1;
649 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
650 return 1;
651 for (;;) {
652 if ((node = call_prom("parent", 1, 1, node)) == 0)
653 return 0;
654 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
655 return 1;
656 }
657 }
658
659 static inline int __init prom_getprop(phandle node, const char *pname,
660 void *value, size_t valuelen)
661 {
662 return call_prom("getprop", 4, 1, node, ADDR(pname),
663 (u32)(unsigned long) value, (u32) valuelen);
664 }
665
666 static inline int __init prom_getproplen(phandle node, const char *pname)
667 {
668 return call_prom("getproplen", 2, 1, node, ADDR(pname));
669 }
670
671 static void __init add_string(char **str, const char *q)
672 {
673 char *p = *str;
674
675 while (*q)
676 *p++ = *q++;
677 *p++ = ' ';
678 *str = p;
679 }
680
681 static char *__init tohex(unsigned int x)
682 {
683 static const char digits[] __initconst = "0123456789abcdef";
684 static char result[9] __prombss;
685 int i;
686
687 result[8] = 0;
688 i = 8;
689 do {
690 --i;
691 result[i] = digits[x & 0xf];
692 x >>= 4;
693 } while (x != 0 && i > 0);
694 return &result[i];
695 }
696
697 static int __init prom_setprop(phandle node, const char *nodename,
698 const char *pname, void *value, size_t valuelen)
699 {
700 char cmd[256], *p;
701
702 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
703 return call_prom("setprop", 4, 1, node, ADDR(pname),
704 (u32)(unsigned long) value, (u32) valuelen);
705
706 /* gah... setprop doesn't work on longtrail, have to use interpret */
707 p = cmd;
708 add_string(&p, "dev");
709 add_string(&p, nodename);
710 add_string(&p, tohex((u32)(unsigned long) value));
711 add_string(&p, tohex(valuelen));
712 add_string(&p, tohex(ADDR(pname)));
713 add_string(&p, tohex(prom_strlen(pname)));
714 add_string(&p, "property");
715 *p = 0;
716 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
717 }
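/*
 * On the Longtrail workaround path the property is set by feeding OF a
 * Forth command of the form (the hex numbers here are made up for the
 * sketch):
 *
 *	dev /chosen 1c00000 4 1c00100 f property
 *
 * i.e. value address, value length, property-name address and name
 * length, all in hex, terminated by the "property" word.
 */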
718
719 /* We can't use the standard versions because of relocation headaches. */
720 #define prom_isxdigit(c) \
721 (('0' <= (c) && (c) <= '9') || ('a' <= (c) && (c) <= 'f') || ('A' <= (c) && (c) <= 'F'))
722
723 #define prom_isdigit(c) ('0' <= (c) && (c) <= '9')
724 #define prom_islower(c) ('a' <= (c) && (c) <= 'z')
725 #define prom_toupper(c) (prom_islower(c) ? ((c) - 'a' + 'A') : (c))
726
727 static unsigned long __init prom_strtoul(const char *cp, const char **endp)
728 {
729 unsigned long result = 0, base = 10, value;
730
731 if (*cp == '0') {
732 base = 8;
733 cp++;
734 if (prom_toupper(*cp) == 'X') {
735 cp++;
736 base = 16;
737 }
738 }
739
740 while (prom_isxdigit(*cp) &&
741 (value = prom_isdigit(*cp) ? *cp - '0' : prom_toupper(*cp) - 'A' + 10) < base) {
742 result = result * base + value;
743 cp++;
744 }
745
746 if (endp)
747 *endp = cp;
748
749 return result;
750 }
751
752 static unsigned long __init prom_memparse(const char *ptr, const char **retptr)
753 {
754 unsigned long ret = prom_strtoul(ptr, retptr);
755 int shift = 0;
756
757 /*
758 * We can't use a switch here because GCC *may* generate a
759 * jump table which won't work, because we're not running at
760 * the address we're linked at.
761 */
762 if ('G' == **retptr || 'g' == **retptr)
763 shift = 30;
764
765 if ('M' == **retptr || 'm' == **retptr)
766 shift = 20;
767
768 if ('K' == **retptr || 'k' == **retptr)
769 shift = 10;
770
771 if (shift) {
772 ret <<= shift;
773 (*retptr)++;
774 }
775
776 return ret;
777 }
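/*
 * For example, prom_memparse("768M", &p) returns 768 << 20 = 0x30000000
 * and leaves p pointing past the 'M'; a plain "4096" with no suffix is
 * taken as bytes.
 */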
778
779 /*
780 * Early parsing of the command line passed to the kernel, used for
781 * "mem=x" and the options that affect the iommu
782 */
783 static void __init early_cmdline_parse(void)
784 {
785 const char *opt;
786
787 char *p;
788 int l = 0;
789
790 prom_cmd_line[0] = 0;
791 p = prom_cmd_line;
792
793 if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && (long)prom.chosen > 0)
794 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
795
796 if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || l <= 0 || p[0] == '\0')
797 prom_strlcat(prom_cmd_line, " " CONFIG_CMDLINE,
798 sizeof(prom_cmd_line));
799
800 prom_printf("command line: %s\n", prom_cmd_line);
801
802 #ifdef CONFIG_PPC64
803 opt = prom_strstr(prom_cmd_line, "iommu=");
804 if (opt) {
805 prom_printf("iommu opt is: %s\n", opt);
806 opt += 6;
807 while (*opt && *opt == ' ')
808 opt++;
809 if (!prom_strncmp(opt, "off", 3))
810 prom_iommu_off = 1;
811 else if (!prom_strncmp(opt, "force", 5))
812 prom_iommu_force_on = 1;
813 }
814 #endif
815 opt = prom_strstr(prom_cmd_line, "mem=");
816 if (opt) {
817 opt += 4;
818 prom_memory_limit = prom_memparse(opt, (const char **)&opt);
819 #ifdef CONFIG_PPC64
820 /* Align to 16 MB == size of ppc64 large page */
821 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
822 #endif
823 }
824
825 #ifdef CONFIG_PPC_PSERIES
826 prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
827 opt = prom_strstr(prom_cmd_line, "disable_radix");
828 if (opt) {
829 opt += 13;
830 if (*opt && *opt == '=') {
831 bool val;
832
833 if (prom_strtobool(++opt, &val))
834 prom_radix_disable = false;
835 else
836 prom_radix_disable = val;
837 } else
838 prom_radix_disable = true;
839 }
840 if (prom_radix_disable)
841 prom_debug("Radix disabled from cmdline\n");
842
843 opt = prom_strstr(prom_cmd_line, "radix_hcall_invalidate=on");
844 if (opt) {
845 prom_radix_gtse_disable = true;
846 prom_debug("Radix GTSE disabled from cmdline\n");
847 }
848
849 opt = prom_strstr(prom_cmd_line, "xive=off");
850 if (opt) {
851 prom_xive_disable = true;
852 prom_debug("XIVE disabled from cmdline\n");
853 }
854 #endif /* CONFIG_PPC_PSERIES */
855
856 #ifdef CONFIG_PPC_SVM
857 opt = prom_strstr(prom_cmd_line, "svm=");
858 if (opt) {
859 bool val;
860
861 opt += sizeof("svm=") - 1;
862 if (!prom_strtobool(opt, &val))
863 prom_svm_enable = val;
864 }
865 #endif /* CONFIG_PPC_SVM */
866 }
867
868 #ifdef CONFIG_PPC_PSERIES
869 /*
870 * The architecture vector has an array of PVR mask/value pairs,
871 * followed by # option vectors - 1, followed by the option vectors.
872 *
873 * See prom.h for the definition of the bits specified in the
874 * architecture vector.
875 */
876
877 /* Firmware expects the value to be n - 1, where n is the # of vectors */
878 #define NUM_VECTORS(n) ((n) - 1)
879
880 /*
881 * Firmware expects 1 + n - 2, where n is the length of the option vector in
882 * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
883 */
884 #define VECTOR_LENGTH(n) (1 + (n) - 2)
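/*
 * Worked example: option vector 3 below is two bytes long, so
 * VECTOR_LENGTH(sizeof(struct option_vector3)) = 1 + 2 - 2 = 1, which is
 * the value placed in vec3_len.
 */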
885
886 struct option_vector1 {
887 u8 byte1;
888 u8 arch_versions;
889 u8 arch_versions3;
890 } __packed;
891
892 struct option_vector2 {
893 u8 byte1;
894 __be16 reserved;
895 __be32 real_base;
896 __be32 real_size;
897 __be32 virt_base;
898 __be32 virt_size;
899 __be32 load_base;
900 __be32 min_rma;
901 __be32 min_load;
902 u8 min_rma_percent;
903 u8 max_pft_size;
904 } __packed;
905
906 struct option_vector3 {
907 u8 byte1;
908 u8 byte2;
909 } __packed;
910
911 struct option_vector4 {
912 u8 byte1;
913 u8 min_vp_cap;
914 } __packed;
915
916 struct option_vector5 {
917 u8 byte1;
918 u8 byte2;
919 u8 byte3;
920 u8 cmo;
921 u8 associativity;
922 u8 bin_opts;
923 u8 micro_checkpoint;
924 u8 reserved0;
925 __be32 max_cpus;
926 __be16 papr_level;
927 __be16 reserved1;
928 u8 platform_facilities;
929 u8 reserved2;
930 __be16 reserved3;
931 u8 subprocessors;
932 u8 byte22;
933 u8 intarch;
934 u8 mmu;
935 u8 hash_ext;
936 u8 radix_ext;
937 } __packed;
938
939 struct option_vector6 {
940 u8 reserved;
941 u8 secondary_pteg;
942 u8 os_name;
943 } __packed;
944
945 struct option_vector7 {
946 u8 os_id[256];
947 } __packed;
948
949 struct ibm_arch_vec {
950 struct { u32 mask, val; } pvrs[14];
951
952 u8 num_vectors;
953
954 u8 vec1_len;
955 struct option_vector1 vec1;
956
957 u8 vec2_len;
958 struct option_vector2 vec2;
959
960 u8 vec3_len;
961 struct option_vector3 vec3;
962
963 u8 vec4_len;
964 struct option_vector4 vec4;
965
966 u8 vec5_len;
967 struct option_vector5 vec5;
968
969 u8 vec6_len;
970 struct option_vector6 vec6;
971
972 u8 vec7_len;
973 struct option_vector7 vec7;
974 } __packed;
975
976 static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
977 .pvrs = {
978 {
979 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
980 .val = cpu_to_be32(0x003a0000),
981 },
982 {
983 .mask = cpu_to_be32(0xffff0000), /* POWER6 */
984 .val = cpu_to_be32(0x003e0000),
985 },
986 {
987 .mask = cpu_to_be32(0xffff0000), /* POWER7 */
988 .val = cpu_to_be32(0x003f0000),
989 },
990 {
991 .mask = cpu_to_be32(0xffff0000), /* POWER8E */
992 .val = cpu_to_be32(0x004b0000),
993 },
994 {
995 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
996 .val = cpu_to_be32(0x004c0000),
997 },
998 {
999 .mask = cpu_to_be32(0xffff0000), /* POWER8 */
1000 .val = cpu_to_be32(0x004d0000),
1001 },
1002 {
1003 .mask = cpu_to_be32(0xffff0000), /* POWER9 */
1004 .val = cpu_to_be32(0x004e0000),
1005 },
1006 {
1007 .mask = cpu_to_be32(0xffff0000), /* POWER10 */
1008 .val = cpu_to_be32(0x00800000),
1009 },
1010 {
1011 .mask = cpu_to_be32(0xffffffff), /* all 3.1-compliant */
1012 .val = cpu_to_be32(0x0f000006),
1013 },
1014 {
1015 .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
1016 .val = cpu_to_be32(0x0f000005),
1017 },
1018 {
1019 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
1020 .val = cpu_to_be32(0x0f000004),
1021 },
1022 {
1023 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
1024 .val = cpu_to_be32(0x0f000003),
1025 },
1026 {
1027 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
1028 .val = cpu_to_be32(0x0f000002),
1029 },
1030 {
1031 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
1032 .val = cpu_to_be32(0x0f000001),
1033 },
1034 },
1035
1036 .num_vectors = NUM_VECTORS(6),
1037
1038 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
1039 .vec1 = {
1040 .byte1 = 0,
1041 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
1042 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
1043 .arch_versions3 = OV1_PPC_3_00 | OV1_PPC_3_1,
1044 },
1045
1046 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
1047 /* option vector 2: Open Firmware options supported */
1048 .vec2 = {
1049 .byte1 = OV2_REAL_MODE,
1050 .reserved = 0,
1051 .real_base = cpu_to_be32(0xffffffff),
1052 .real_size = cpu_to_be32(0xffffffff),
1053 .virt_base = cpu_to_be32(0xffffffff),
1054 .virt_size = cpu_to_be32(0xffffffff),
1055 .load_base = cpu_to_be32(0xffffffff),
1056 .min_rma = cpu_to_be32(512), /* 512MB min RMA */
1057 .min_load = cpu_to_be32(0xffffffff), /* full client load */
1058 .min_rma_percent = 0, /* min RMA percentage of total RAM */
1059 .max_pft_size = 48, /* max log_2(hash table size) */
1060 },
1061
1062 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
1063 /* option vector 3: processor options supported */
1064 .vec3 = {
1065 .byte1 = 0, /* don't ignore, don't halt */
1066 .byte2 = OV3_FP | OV3_VMX | OV3_DFP,
1067 },
1068
1069 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
1070 /* option vector 4: IBM PAPR implementation */
1071 .vec4 = {
1072 .byte1 = 0, /* don't halt */
1073 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
1074 },
1075
1076 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
1077 /* option vector 5: PAPR/OF options */
1078 .vec5 = {
1079 .byte1 = 0, /* don't ignore, don't halt */
1080 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
1081 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
1082 #ifdef CONFIG_PCI_MSI
1083 /* PCIe/MSI support. Without MSI full PCIe is not supported */
1084 OV5_FEAT(OV5_MSI),
1085 #else
1086 0,
1087 #endif
1088 .byte3 = 0,
1089 .cmo =
1090 #ifdef CONFIG_PPC_SMLPAR
1091 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
1092 #else
1093 0,
1094 #endif
1095 .associativity = OV5_FEAT(OV5_FORM1_AFFINITY) | OV5_FEAT(OV5_PRRN) |
1096 OV5_FEAT(OV5_FORM2_AFFINITY),
1097 .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
1098 .micro_checkpoint = 0,
1099 .reserved0 = 0,
1100 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */
1101 .papr_level = 0,
1102 .reserved1 = 0,
1103 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
1104 .reserved2 = 0,
1105 .reserved3 = 0,
1106 .subprocessors = 1,
1107 .byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO),
1108 .intarch = 0,
1109 .mmu = 0,
1110 .hash_ext = 0,
1111 .radix_ext = 0,
1112 },
1113
1114 /* option vector 6: IBM PAPR hints */
1115 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
1116 .vec6 = {
1117 .reserved = 0,
1118 .secondary_pteg = 0,
1119 .os_name = OV6_LINUX,
1120 },
1121
1122 /* option vector 7: OS Identification */
1123 .vec7_len = VECTOR_LENGTH(sizeof(struct option_vector7)),
1124 };
1125
1126 static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned;
1127
1128 /* Old method - ELF header with PT_NOTE sections only works on BE */
1129 #ifdef __BIG_ENDIAN__
1130 static const struct fake_elf {
1131 Elf32_Ehdr elfhdr;
1132 Elf32_Phdr phdr[2];
1133 struct chrpnote {
1134 u32 namesz;
1135 u32 descsz;
1136 u32 type;
1137 char name[8]; /* "PowerPC" */
1138 struct chrpdesc {
1139 u32 real_mode;
1140 u32 real_base;
1141 u32 real_size;
1142 u32 virt_base;
1143 u32 virt_size;
1144 u32 load_base;
1145 } chrpdesc;
1146 } chrpnote;
1147 struct rpanote {
1148 u32 namesz;
1149 u32 descsz;
1150 u32 type;
1151 char name[24]; /* "IBM,RPA-Client-Config" */
1152 struct rpadesc {
1153 u32 lpar_affinity;
1154 u32 min_rmo_size;
1155 u32 min_rmo_percent;
1156 u32 max_pft_size;
1157 u32 splpar;
1158 u32 min_load;
1159 u32 new_mem_def;
1160 u32 ignore_me;
1161 } rpadesc;
1162 } rpanote;
1163 } fake_elf __initconst = {
1164 .elfhdr = {
1165 .e_ident = { 0x7f, 'E', 'L', 'F',
1166 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
1167 .e_type = ET_EXEC, /* yeah right */
1168 .e_machine = EM_PPC,
1169 .e_version = EV_CURRENT,
1170 .e_phoff = offsetof(struct fake_elf, phdr),
1171 .e_phentsize = sizeof(Elf32_Phdr),
1172 .e_phnum = 2
1173 },
1174 .phdr = {
1175 [0] = {
1176 .p_type = PT_NOTE,
1177 .p_offset = offsetof(struct fake_elf, chrpnote),
1178 .p_filesz = sizeof(struct chrpnote)
1179 }, [1] = {
1180 .p_type = PT_NOTE,
1181 .p_offset = offsetof(struct fake_elf, rpanote),
1182 .p_filesz = sizeof(struct rpanote)
1183 }
1184 },
1185 .chrpnote = {
1186 .namesz = sizeof("PowerPC"),
1187 .descsz = sizeof(struct chrpdesc),
1188 .type = 0x1275,
1189 .name = "PowerPC",
1190 .chrpdesc = {
1191 .real_mode = ~0U, /* ~0 means "don't care" */
1192 .real_base = ~0U,
1193 .real_size = ~0U,
1194 .virt_base = ~0U,
1195 .virt_size = ~0U,
1196 .load_base = ~0U
1197 },
1198 },
1199 .rpanote = {
1200 .namesz = sizeof("IBM,RPA-Client-Config"),
1201 .descsz = sizeof(struct rpadesc),
1202 .type = 0x12759999,
1203 .name = "IBM,RPA-Client-Config",
1204 .rpadesc = {
1205 .lpar_affinity = 0,
1206 .min_rmo_size = 64, /* in megabytes */
1207 .min_rmo_percent = 0,
1208 .max_pft_size = 48, /* 2^48 bytes max PFT size */
1209 .splpar = 1,
1210 .min_load = ~0U,
1211 .new_mem_def = 0
1212 }
1213 }
1214 };
1215 #endif /* __BIG_ENDIAN__ */
1216
1217 static int __init prom_count_smt_threads(void)
1218 {
1219 phandle node;
1220 char type[64];
1221 unsigned int plen;
1222
1223 /* Pick up the first CPU node we can find */
1224 for (node = 0; prom_next_node(&node); ) {
1225 type[0] = 0;
1226 prom_getprop(node, "device_type", type, sizeof(type));
1227
1228 if (prom_strcmp(type, "cpu"))
1229 continue;
1230 /*
1231 * There is an entry for each smt thread, each entry being
1232 * 4 bytes long. All cpus should have the same number of
1233 * smt threads, so return after finding the first.
1234 */
1235 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
1236 if (plen == PROM_ERROR)
1237 break;
1238 plen >>= 2;
1239 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
1240
1241 /* Sanity check */
1242 if (plen < 1 || plen > 64) {
1243 prom_printf("Threads per core %lu out of bounds, assuming 1\n",
1244 (unsigned long)plen);
1245 return 1;
1246 }
1247 return plen;
1248 }
1249 prom_debug("No threads found, assuming 1 per core\n");
1250
1251 return 1;
1252
1253 }
1254
1255 static void __init prom_parse_mmu_model(u8 val,
1256 struct platform_support *support)
1257 {
1258 switch (val) {
1259 case OV5_FEAT(OV5_MMU_DYNAMIC):
1260 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
1261 prom_debug("MMU - either supported\n");
1262 support->radix_mmu = !prom_radix_disable;
1263 support->hash_mmu = true;
1264 break;
1265 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
1266 prom_debug("MMU - radix only\n");
1267 if (prom_radix_disable) {
1268 /*
1269 * If we __have__ to do radix, we're better off ignoring
1270 * the command line rather than not booting.
1271 */
1272 prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1273 }
1274 support->radix_mmu = true;
1275 break;
1276 case OV5_FEAT(OV5_MMU_HASH):
1277 prom_debug("MMU - hash only\n");
1278 support->hash_mmu = true;
1279 break;
1280 default:
1281 prom_debug("Unknown mmu support option: 0x%x\n", val);
1282 break;
1283 }
1284 }
1285
1286 static void __init prom_parse_xive_model(u8 val,
1287 struct platform_support *support)
1288 {
1289 switch (val) {
1290 case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
1291 prom_debug("XIVE - either mode supported\n");
1292 support->xive = !prom_xive_disable;
1293 break;
1294 case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
1295 prom_debug("XIVE - exploitation mode supported\n");
1296 if (prom_xive_disable) {
1297 /*
1298 * If we __have__ to do XIVE, we're better off ignoring
1299 * the command line rather than not booting.
1300 */
1301 prom_printf("WARNING: Ignoring cmdline option xive=off\n");
1302 }
1303 support->xive = true;
1304 break;
1305 case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
1306 prom_debug("XIVE - legacy mode supported\n");
1307 break;
1308 default:
1309 prom_debug("Unknown xive support option: 0x%x\n", val);
1310 break;
1311 }
1312 }
1313
1314 static void __init prom_parse_platform_support(u8 index, u8 val,
1315 struct platform_support *support)
1316 {
1317 switch (index) {
1318 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
1319 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
1320 break;
1321 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
1322 if (val & OV5_FEAT(OV5_RADIX_GTSE))
1323 support->radix_gtse = !prom_radix_gtse_disable;
1324 break;
1325 case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
1326 prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
1327 support);
1328 break;
1329 }
1330 }
1331
1332 static void __init prom_check_platform_support(void)
1333 {
1334 struct platform_support supported = {
1335 .hash_mmu = false,
1336 .radix_mmu = false,
1337 .radix_gtse = false,
1338 .xive = false
1339 };
1340 int prop_len = prom_getproplen(prom.chosen,
1341 "ibm,arch-vec-5-platform-support");
1342
1343 /*
1344 * First copy the architecture vec template
1345 *
1346 * use memcpy() instead of *vec = *vec_template so that GCC replaces it
1347 * by __memcpy() when KASAN is active
1348 */
1349 memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template,
1350 sizeof(ibm_architecture_vec));
1351
1352 prom_strscpy_pad(ibm_architecture_vec.vec7.os_id, linux_banner, 256);
1353
1354 if (prop_len > 1) {
1355 int i;
1356 u8 vec[8];
1357 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1358 prop_len);
1359 if (prop_len > sizeof(vec))
1360 prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
1361 prop_len);
1362 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
1363 for (i = 0; i < prop_len; i += 2) {
1364 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
1365 prom_parse_platform_support(vec[i], vec[i + 1], &supported);
1366 }
1367 }
1368
1369 if (supported.radix_mmu && IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
1370 /* Radix preferred - Check if GTSE is also supported */
1371 prom_debug("Asking for radix\n");
1372 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
1373 if (supported.radix_gtse)
1374 ibm_architecture_vec.vec5.radix_ext =
1375 OV5_FEAT(OV5_RADIX_GTSE);
1376 else
1377 prom_debug("Radix GTSE isn't supported\n");
1378 } else if (supported.hash_mmu) {
1379 /* Default to hash mmu (if we can) */
1380 prom_debug("Asking for hash\n");
1381 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
1382 } else {
1383 /* We're probably on a legacy hypervisor */
1384 prom_debug("Assuming legacy hash support\n");
1385 }
1386
1387 if (supported.xive) {
1388 prom_debug("Asking for XIVE\n");
1389 ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
1390 }
1391 }
1392
1393 static void __init prom_send_capabilities(void)
1394 {
1395 ihandle root;
1396 prom_arg_t ret;
1397 u32 cores;
1398
1399 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1400 prom_check_platform_support();
1401
1402 root = call_prom("open", 1, 1, ADDR("/"));
1403 if (root != 0) {
1404 /* We need to tell the FW about the number of cores we support.
1405 *
1406 * To do that, we count the number of threads on the first core
1407 * (we assume this is the same for all cores) and use it to
1408 * divide NR_CPUS.
1409 */
1410
1411 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
1412 prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
1413 cores, NR_CPUS);
1414
1415 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
1416
1417 /* try calling the ibm,client-architecture-support method */
1418 prom_printf("Calling ibm,client-architecture-support...");
1419 if (call_prom_ret("call-method", 3, 2, &ret,
1420 ADDR("ibm,client-architecture-support"),
1421 root,
1422 ADDR(&ibm_architecture_vec)) == 0) {
1423 /* the call exists... */
1424 if (ret)
1425 prom_printf("\nWARNING: ibm,client-architecture"
1426 "-support call FAILED!\n");
1427 call_prom("close", 1, 0, root);
1428 prom_printf(" done\n");
1429 return;
1430 }
1431 call_prom("close", 1, 0, root);
1432 prom_printf(" not implemented\n");
1433 }
1434
1435 #ifdef __BIG_ENDIAN__
1436 {
1437 ihandle elfloader;
1438
1439 /* no ibm,client-architecture-support call, try the old way */
1440 elfloader = call_prom("open", 1, 1,
1441 ADDR("/packages/elf-loader"));
1442 if (elfloader == 0) {
1443 prom_printf("couldn't open /packages/elf-loader\n");
1444 return;
1445 }
1446 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
1447 elfloader, ADDR(&fake_elf));
1448 call_prom("close", 1, 0, elfloader);
1449 }
1450 #endif /* __BIG_ENDIAN__ */
1451 }
1452 #endif /* CONFIG_PPC_PSERIES */
1453
1454 /*
1455 * Memory allocation strategy... our layout is normally:
1456 *
1457 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
1458 * rare cases, initrd might end up being before the kernel though.
1459 * We assume this won't overwrite the final kernel at 0; we have no
1460 * provision to handle that in this version, but it should hopefully
1461 * never happen.
1462 *
1463 * alloc_top is set to the top of RMO, eventually shrink down if the
1464 * TCEs overlap
1465 *
1466 * alloc_bottom is set to the top of kernel/initrd
1467 *
1468 * from there, allocations are done this way : rtas is allocated
1469 * topmost, and the device-tree is allocated from the bottom. We try
1470 * to grow the device-tree allocation as we progress. If we can't,
1471 * then we fail, we don't currently have a facility to restart
1472 * elsewhere, but that shouldn't be necessary.
1473 *
1474 * Note that calls to reserve_mem have to be done explicitly, memory
1475 * allocated with either alloc_up or alloc_down isn't automatically
1476 * reserved.
1477 */
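/*
 * A rough sketch of the usual layout described above (addresses are
 * indicative only):
 *
 *	ram_top      ------------- top of RAM, TCE tables carved from here
 *	alloc_top    ------------- top of RMO, RTAS allocated downward
 *	                  ...
 *	alloc_bottom ------------- top of kernel/initrd, device-tree grows up
 *	~14MB        ------------- vmlinux, with initrd normally after it
 *	0            ------------- start of RAM
 */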
1478
1479
1480 /*
1481 * Allocates memory in the RMO upward from the kernel/initrd
1482 *
1483 * When align is 0, this is a special case, it means to allocate in place
1484 * at the current location of alloc_bottom or fail (that is basically
1485 * extending the previous allocation). Used for the device-tree flattening
1486 */
1487 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
1488 {
1489 unsigned long base = alloc_bottom;
1490 unsigned long addr = 0;
1491
1492 if (align)
1493 base = ALIGN(base, align);
1494 prom_debug("%s(%lx, %lx)\n", __func__, size, align);
1495 if (ram_top == 0)
1496 prom_panic("alloc_up() called with mem not initialized\n");
1497
1498 if (align)
1499 base = ALIGN(alloc_bottom, align);
1500 else
1501 base = alloc_bottom;
1502
1503 for(; (base + size) <= alloc_top;
1504 base = ALIGN(base + 0x100000, align)) {
1505 prom_debug(" trying: 0x%lx\n\r", base);
1506 addr = (unsigned long)prom_claim(base, size, 0);
1507 if (addr != PROM_ERROR && addr != 0)
1508 break;
1509 addr = 0;
1510 if (align == 0)
1511 break;
1512 }
1513 if (addr == 0)
1514 return 0;
1515 alloc_bottom = addr + size;
1516
1517 prom_debug(" -> %lx\n", addr);
1518 prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
1519 prom_debug(" alloc_top : %lx\n", alloc_top);
1520 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
1521 prom_debug(" rmo_top : %lx\n", rmo_top);
1522 prom_debug(" ram_top : %lx\n", ram_top);
1523
1524 return addr;
1525 }
1526
1527 /*
1528 * Allocates memory downward, either from top of RMO, or if highmem
1529 * is set, from the top of RAM. Note that this one doesn't handle
1530 * failures. It does claim memory if highmem is not set.
1531 */
1532 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1533 int highmem)
1534 {
1535 unsigned long base, addr = 0;
1536
1537 prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
1538 highmem ? "(high)" : "(low)");
1539 if (ram_top == 0)
1540 prom_panic("alloc_down() called with mem not initialized\n");
1541
1542 if (highmem) {
1543 /* Carve out storage for the TCE table. */
1544 addr = ALIGN_DOWN(alloc_top_high - size, align);
1545 if (addr <= alloc_bottom)
1546 return 0;
1547 /* Will we bump into the RMO ? If yes, check out that we
1548 * didn't overlap existing allocations there, if we did,
1549 * we are dead, we must be the first in town !
1550 */
1551 if (addr < rmo_top) {
1552 /* Good, we are first */
1553 if (alloc_top == rmo_top)
1554 alloc_top = rmo_top = addr;
1555 else
1556 return 0;
1557 }
1558 alloc_top_high = addr;
1559 goto bail;
1560 }
1561
1562 base = ALIGN_DOWN(alloc_top - size, align);
1563 for (; base > alloc_bottom;
1564 base = ALIGN_DOWN(base - 0x100000, align)) {
1565 prom_debug(" trying: 0x%lx\n\r", base);
1566 addr = (unsigned long)prom_claim(base, size, 0);
1567 if (addr != PROM_ERROR && addr != 0)
1568 break;
1569 addr = 0;
1570 }
1571 if (addr == 0)
1572 return 0;
1573 alloc_top = addr;
1574
1575 bail:
1576 prom_debug(" -> %lx\n", addr);
1577 prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
1578 prom_debug(" alloc_top : %lx\n", alloc_top);
1579 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
1580 prom_debug(" rmo_top : %lx\n", rmo_top);
1581 prom_debug(" ram_top : %lx\n", ram_top);
1582
1583 return addr;
1584 }
1585
1586 /*
1587 * Parse a "reg" cell
1588 */
1589 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1590 {
1591 cell_t *p = *cellp;
1592 unsigned long r = 0;
1593
1594 /* Ignore more than 2 cells */
1595 while (s > sizeof(unsigned long) / 4) {
1596 p++;
1597 s--;
1598 }
1599 r = be32_to_cpu(*p++);
1600 #ifdef CONFIG_PPC64
1601 if (s > 1) {
1602 r <<= 32;
1603 r |= be32_to_cpu(*(p++));
1604 }
1605 #endif
1606 *cellp = p;
1607 return r;
1608 }
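/*
 * Illustrative example: with s = 2 and the cells { 0x1, 0x80000000 },
 * prom_next_cell() returns 0x180000000 on ppc64; on ppc32 the extra high
 * cell is skipped and the result is 0x80000000.
 */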
1609
1610 /*
1611 * Very dumb function for adding to the memory reserve list, but
1612 * we don't need anything smarter at this point
1613 *
1614 * XXX Eventually check for collisions. They should NEVER happen.
1615 * If problems seem to show up, it would be a good start to track
1616 * them down.
1617 */
1618 static void __init reserve_mem(u64 base, u64 size)
1619 {
1620 u64 top = base + size;
1621 unsigned long cnt = mem_reserve_cnt;
1622
1623 if (size == 0)
1624 return;
1625
1626 /* We need to always keep one empty entry so that we
1627 * have our terminator with "size" set to 0 since we are
1628 * dumb and just copy this entire array to the boot params
1629 */
1630 base = ALIGN_DOWN(base, PAGE_SIZE);
1631 top = ALIGN(top, PAGE_SIZE);
1632 size = top - base;
1633
1634 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1635 prom_panic("Memory reserve map exhausted !\n");
1636 mem_reserve_map[cnt].base = cpu_to_be64(base);
1637 mem_reserve_map[cnt].size = cpu_to_be64(size);
1638 mem_reserve_cnt = cnt + 1;
1639 }
1640
1641 /*
1642 * Initialize memory allocation mechanism, parse "memory" nodes and
1643 * obtain that way the top of memory and RMO to set up our local allocator
1644 */
1645 static void __init prom_init_mem(void)
1646 {
1647 phandle node;
1648 char type[64];
1649 unsigned int plen;
1650 cell_t *p, *endp;
1651 __be32 val;
1652 u32 rac, rsc;
1653
1654 /*
1655 * We iterate the memory nodes to find
1656 * 1) top of RMO (first node)
1657 * 2) top of memory
1658 */
1659 val = cpu_to_be32(2);
1660 prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1661 rac = be32_to_cpu(val);
1662 val = cpu_to_be32(1);
1663 prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc));
1664 rsc = be32_to_cpu(val);
1665 prom_debug("root_addr_cells: %x\n", rac);
1666 prom_debug("root_size_cells: %x\n", rsc);
1667
1668 prom_debug("scanning memory:\n");
1669
1670 for (node = 0; prom_next_node(&node); ) {
1671 type[0] = 0;
1672 prom_getprop(node, "device_type", type, sizeof(type));
1673
1674 if (type[0] == 0) {
1675 /*
1676 * CHRP Longtrail machines have no device_type
1677 * on the memory node, so check the name instead...
1678 */
1679 prom_getprop(node, "name", type, sizeof(type));
1680 }
1681 if (prom_strcmp(type, "memory"))
1682 continue;
1683
1684 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1685 if (plen > sizeof(regbuf)) {
1686 prom_printf("memory node too large for buffer !\n");
1687 plen = sizeof(regbuf);
1688 }
1689 p = regbuf;
1690 endp = p + (plen / sizeof(cell_t));
1691
1692 #ifdef DEBUG_PROM
1693 memset(prom_scratch, 0, sizeof(prom_scratch));
1694 call_prom("package-to-path", 3, 1, node, prom_scratch,
1695 sizeof(prom_scratch) - 1);
1696 prom_debug(" node %s :\n", prom_scratch);
1697 #endif /* DEBUG_PROM */
1698
1699 while ((endp - p) >= (rac + rsc)) {
1700 unsigned long base, size;
1701
1702 base = prom_next_cell(rac, &p);
1703 size = prom_next_cell(rsc, &p);
1704
1705 if (size == 0)
1706 continue;
1707 prom_debug(" %lx %lx\n", base, size);
1708 if (base == 0 && (of_platform & PLATFORM_LPAR))
1709 rmo_top = size;
1710 if ((base + size) > ram_top)
1711 ram_top = base + size;
1712 }
1713 }
1714
1715 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1716
1717 /*
1718 * If prom_memory_limit is set we reduce the upper limits *except* for
1719 * alloc_top_high. This must be the real top of RAM so we can put
1720 * TCE's up there.
1721 */
1722
1723 alloc_top_high = ram_top;
1724
1725 if (prom_memory_limit) {
1726 if (prom_memory_limit <= alloc_bottom) {
1727 prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
1728 prom_memory_limit);
1729 prom_memory_limit = 0;
1730 } else if (prom_memory_limit >= ram_top) {
1731 prom_printf("Ignoring mem=%lx >= ram_top.\n",
1732 prom_memory_limit);
1733 prom_memory_limit = 0;
1734 } else {
1735 ram_top = prom_memory_limit;
1736 rmo_top = min(rmo_top, prom_memory_limit);
1737 }
1738 }
1739
1740 /*
1741 * Setup our top alloc point, that is top of RMO or top of
1742 * segment 0 when running non-LPAR.
1743 * Some RS64 machines have buggy firmware where claims up at
1744 * 1GB fail. Cap at 768MB as a workaround.
1745 * Since 768MB is plenty of room, and we need to cap to something
1746 * reasonable on 32-bit, cap at 768MB on all machines.
1747 */
1748 if (!rmo_top)
1749 rmo_top = ram_top;
1750 rmo_top = min(0x30000000ul, rmo_top);
1751 alloc_top = rmo_top;
1752 alloc_top_high = ram_top;
1753
1754 /*
1755 * Check if we have an initrd after the kernel but still inside
1756 * the RMO. If we do move our bottom point to after it.
1757 */
1758 if (prom_initrd_start &&
1759 prom_initrd_start < rmo_top &&
1760 prom_initrd_end > alloc_bottom)
1761 alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1762
1763 prom_printf("memory layout at init:\n");
1764 prom_printf(" memory_limit : %lx (16 MB aligned)\n",
1765 prom_memory_limit);
1766 prom_printf(" alloc_bottom : %lx\n", alloc_bottom);
1767 prom_printf(" alloc_top : %lx\n", alloc_top);
1768 prom_printf(" alloc_top_hi : %lx\n", alloc_top_high);
1769 prom_printf(" rmo_top : %lx\n", rmo_top);
1770 prom_printf(" ram_top : %lx\n", ram_top);
1771 }
1772
1773 static void __init prom_close_stdin(void)
1774 {
1775 __be32 val;
1776 ihandle stdin;
1777
1778 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1779 stdin = be32_to_cpu(val);
1780 call_prom("close", 1, 0, stdin);
1781 }
1782 }
1783
1784 #ifdef CONFIG_PPC_SVM
1785 static int __init prom_rtas_hcall(uint64_t args)
1786 {
1787 register uint64_t arg1 asm("r3") = H_RTAS;
1788 register uint64_t arg2 asm("r4") = args;
1789
1790 asm volatile("sc 1\n" : "=r" (arg1) :
1791 "r" (arg1),
1792 "r" (arg2) :);
1793 srr_regs_clobbered();
1794
1795 return arg1;
1796 }
1797
1798 static struct rtas_args __prombss os_term_args;
1799
1800 static void __init prom_rtas_os_term(char *str)
1801 {
1802 phandle rtas_node;
1803 __be32 val;
1804 u32 token;
1805
1806 prom_debug("%s: start...\n", __func__);
1807 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1808 prom_debug("rtas_node: %x\n", rtas_node);
1809 if (!PHANDLE_VALID(rtas_node))
1810 return;
1811
1812 val = 0;
1813 prom_getprop(rtas_node, "ibm,os-term", &val, sizeof(val));
1814 token = be32_to_cpu(val);
1815 prom_debug("ibm,os-term: %x\n", token);
1816 if (token == 0)
1817 prom_panic("Could not get token for ibm,os-term\n");
1818 os_term_args.token = cpu_to_be32(token);
1819 os_term_args.nargs = cpu_to_be32(1);
1820 os_term_args.nret = cpu_to_be32(1);
1821 os_term_args.args[0] = cpu_to_be32(__pa(str));
1822 prom_rtas_hcall((uint64_t)&os_term_args);
1823 }
1824 #endif /* CONFIG_PPC_SVM */
1825
1826 /*
1827 * Allocate room for and instantiate RTAS
1828 */
1829 static void __init prom_instantiate_rtas(void)
1830 {
1831 phandle rtas_node;
1832 ihandle rtas_inst;
1833 u32 base, entry = 0;
1834 __be32 val;
1835 u32 size = 0;
1836
1837 prom_debug("prom_instantiate_rtas: start...\n");
1838
1839 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1840 prom_debug("rtas_node: %x\n", rtas_node);
1841 if (!PHANDLE_VALID(rtas_node))
1842 return;
1843
1844 val = 0;
1845 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
1846 size = be32_to_cpu(val);
1847 if (size == 0)
1848 return;
1849
1850 base = alloc_down(size, PAGE_SIZE, 0);
1851 if (base == 0)
1852 prom_panic("Could not allocate memory for RTAS\n");
1853
1854 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1855 if (!IHANDLE_VALID(rtas_inst)) {
1856 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1857 return;
1858 }
1859
1860 prom_printf("instantiating rtas at 0x%x...", base);
1861
1862 if (call_prom_ret("call-method", 3, 2, &entry,
1863 ADDR("instantiate-rtas"),
1864 rtas_inst, base) != 0
1865 || entry == 0) {
1866 prom_printf(" failed\n");
1867 return;
1868 }
1869 prom_printf(" done\n");
1870
1871 reserve_mem(base, size);
1872
1873 val = cpu_to_be32(base);
1874 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1875 &val, sizeof(val));
1876 val = cpu_to_be32(entry);
1877 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1878 &val, sizeof(val));
1879
1880 /* Check if it supports "query-cpu-stopped-state" */
1881 if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1882 &val, sizeof(val)) != PROM_ERROR)
1883 rtas_has_query_cpu_stopped = true;
1884
1885 prom_debug("rtas base = 0x%x\n", base);
1886 prom_debug("rtas entry = 0x%x\n", entry);
1887 prom_debug("rtas size = 0x%x\n", size);
1888
1889 prom_debug("prom_instantiate_rtas: end...\n");
1890 }
1891
1892 #ifdef CONFIG_PPC64
1893 /*
1894 * Allocate room for and instantiate Stored Measurement Log (SML)
1895 */
1896 static void __init prom_instantiate_sml(void)
1897 {
1898 phandle ibmvtpm_node;
1899 ihandle ibmvtpm_inst;
1900 u32 entry = 0, size = 0, succ = 0;
1901 u64 base;
1902 __be32 val;
1903
1904 prom_debug("prom_instantiate_sml: start...\n");
1905
1906 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1907 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1908 if (!PHANDLE_VALID(ibmvtpm_node))
1909 return;
1910
1911 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1912 if (!IHANDLE_VALID(ibmvtpm_inst)) {
1913 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1914 return;
1915 }
1916
1917 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
1918 &val, sizeof(val)) != PROM_ERROR) {
1919 if (call_prom_ret("call-method", 2, 2, &succ,
1920 ADDR("reformat-sml-to-efi-alignment"),
1921 ibmvtpm_inst) != 0 || succ == 0) {
1922 prom_printf("Reformat SML to EFI alignment failed\n");
1923 return;
1924 }
1925
1926 if (call_prom_ret("call-method", 2, 2, &size,
1927 ADDR("sml-get-allocated-size"),
1928 ibmvtpm_inst) != 0 || size == 0) {
1929 prom_printf("SML get allocated size failed\n");
1930 return;
1931 }
1932 } else {
1933 if (call_prom_ret("call-method", 2, 2, &size,
1934 ADDR("sml-get-handover-size"),
1935 ibmvtpm_inst) != 0 || size == 0) {
1936 prom_printf("SML get handover size failed\n");
1937 return;
1938 }
1939 }
1940
1941 base = alloc_down(size, PAGE_SIZE, 0);
1942 if (base == 0)
1943 prom_panic("Could not allocate memory for sml\n");
1944
1945 prom_printf("instantiating sml at 0x%llx...", base);
1946
1947 memset((void *)base, 0, size);
1948
1949 if (call_prom_ret("call-method", 4, 2, &entry,
1950 ADDR("sml-handover"),
1951 ibmvtpm_inst, size, base) != 0 || entry == 0) {
1952 prom_printf("SML handover failed\n");
1953 return;
1954 }
1955 prom_printf(" done\n");
1956
1957 reserve_mem(base, size);
1958
1959 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
1960 &base, sizeof(base));
1961 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
1962 &size, sizeof(size));
1963
1964 prom_debug("sml base = 0x%llx\n", base);
1965 prom_debug("sml size = 0x%x\n", size);
1966
1967 prom_debug("prom_instantiate_sml: end...\n");
1968 }
1969
1970 /*
1971 * Allocate room for and initialize TCE tables
1972 */
1973 #ifdef __BIG_ENDIAN__
1974 static void __init prom_initialize_tce_table(void)
1975 {
1976 phandle node;
1977 ihandle phb_node;
1978 char compatible[64], type[64], model[64];
1979 char *path = prom_scratch;
1980 u64 base, align;
1981 u32 minalign, minsize;
1982 u64 tce_entry, *tce_entryp;
1983 u64 local_alloc_top, local_alloc_bottom;
1984 u64 i;
1985
1986 if (prom_iommu_off)
1987 return;
1988
1989 prom_debug("starting prom_initialize_tce_table\n");
1990
1991 /* Cache current top of allocs so we reserve a single block */
1992 local_alloc_top = alloc_top_high;
1993 local_alloc_bottom = local_alloc_top;
1994
1995 /* Search all nodes looking for PHBs. */
1996 for (node = 0; prom_next_node(&node); ) {
1997 compatible[0] = 0;
1998 type[0] = 0;
1999 model[0] = 0;
2000 prom_getprop(node, "compatible",
2001 compatible, sizeof(compatible));
2002 prom_getprop(node, "device_type", type, sizeof(type));
2003 prom_getprop(node, "model", model, sizeof(model));
2004
2005 if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL))
2006 continue;
2007
2008 /* Keep the old logic intact to avoid regression. */
2009 if (compatible[0] != 0) {
2010 if ((prom_strstr(compatible, "python") == NULL) &&
2011 (prom_strstr(compatible, "Speedwagon") == NULL) &&
2012 (prom_strstr(compatible, "Winnipeg") == NULL))
2013 continue;
2014 } else if (model[0] != 0) {
2015 if ((prom_strstr(model, "ython") == NULL) &&
2016 (prom_strstr(model, "peedwagon") == NULL) &&
2017 (prom_strstr(model, "innipeg") == NULL))
2018 continue;
2019 }
2020
2021 if (prom_getprop(node, "tce-table-minalign", &minalign,
2022 sizeof(minalign)) == PROM_ERROR)
2023 minalign = 0;
2024 if (prom_getprop(node, "tce-table-minsize", &minsize,
2025 sizeof(minsize)) == PROM_ERROR)
2026 minsize = 4UL << 20;
2027
2028 /*
2029 * Even though we read what OF wants, we just set the table
2030 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
2031 * By doing this, we avoid the pitfalls of trying to DMA to
2032 * MMIO space and the DMA alias hole.
2033 */
2034 minsize = 4UL << 20;
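/*
 * For reference, the arithmetic behind the comment above: 4 MB of table at
 * 8 bytes per TCE is 512K entries; assuming 4 KB pages, that covers
 * 512K * 4 KB = 2 GB of DMA space.
 */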
2035
2036 /* Align to the greater of the align or size */
2037 align = max(minalign, minsize);
2038 base = alloc_down(minsize, align, 1);
2039 if (base == 0)
2040 prom_panic("ERROR, cannot find space for TCE table.\n");
2041 if (base < local_alloc_bottom)
2042 local_alloc_bottom = base;
2043
2044 /* It seems OF doesn't null-terminate the path :-( */
2045 memset(path, 0, sizeof(prom_scratch));
2046 /* Call OF to setup the TCE hardware */
2047 if (call_prom("package-to-path", 3, 1, node,
2048 path, sizeof(prom_scratch) - 1) == PROM_ERROR) {
2049 prom_printf("package-to-path failed\n");
2050 }
2051
2052 /* Save away the TCE table attributes for later use. */
2053 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
2054 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
2055
2056 prom_debug("TCE table: %s\n", path);
2057 prom_debug("\tnode = 0x%x\n", node);
2058 prom_debug("\tbase = 0x%llx\n", base);
2059 prom_debug("\tsize = 0x%x\n", minsize);
2060
2061 /* Initialize the table to have a one-to-one mapping
2062 * over the allocated size.
2063 */
2064 tce_entryp = (u64 *)base;
2065 for (i = 0; i < (minsize >> 3); tce_entryp++, i++) {
2066 tce_entry = (i << PAGE_SHIFT);
2067 tce_entry |= 0x3;
2068 *tce_entryp = tce_entry;
2069 }
2070
2071 prom_printf("opening PHB %s", path);
2072 phb_node = call_prom("open", 1, 1, path);
2073 if (phb_node == 0)
2074 prom_printf("... failed\n");
2075 else
2076 prom_printf("... done\n");
2077
2078 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
2079 phb_node, -1, minsize,
2080 (u32) base, (u32) (base >> 32));
2081 call_prom("close", 1, 0, phb_node);
2082 }
2083
2084 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
2085
2086 /* These are only really needed if there is a memory limit in
2087 * effect, but we don't know, so export them always. */
2088 prom_tce_alloc_start = local_alloc_bottom;
2089 prom_tce_alloc_end = local_alloc_top;
2090
2091 /* Flag the first invalid entry */
2092 prom_debug("ending prom_initialize_tce_table\n");
2093 }
2094 #endif /* __BIG_ENDIAN__ */
2095 #endif /* CONFIG_PPC64 */
2096
2097 /*
2098 * With CHRP SMP we need to use the OF to start the other processors.
2099 * We can't wait until smp_boot_cpus (the OF is trashed by then)
2100 * so we have to put the processors into a holding pattern controlled
2101 * by the kernel (not OF) before we destroy the OF.
2102 *
2103 * This uses a chunk of low memory, puts some holding pattern
2104 * code there and sends the other processors off to there until
2105 * smp_boot_cpus tells them to do something. The holding pattern
2106 * checks that address until its cpu # is there; when it is, that
2107 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
2108 * of setting those values.
2109 *
2110 * We also use physical address 0x4 here to tell when a cpu
2111 * is in its holding pattern code.
2112 *
2113 * -- Cort
2114 */
2115 /*
2116 * We want to reference the copy of __secondary_hold_* in the
2117 * 0 - 0x100 address range
2118 */
2119 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
2120
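/*
 * Illustrative sketch only (the real holding-pattern code is early
 * assembly, not C): each secondary that OF starts roughly does
 *
 *	*acknowledge = my_hw_cpu_no;        (polled by prom_hold_cpus() below)
 *	while (*spinloop != my_hw_cpu_no)
 *		;                           (released later by smp_boot_cpus())
 *	__secondary_start();
 */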
2121 static void __init prom_hold_cpus(void)
2122 {
2123 unsigned long i;
2124 phandle node;
2125 char type[64];
2126 unsigned long *spinloop
2127 = (void *) LOW_ADDR(__secondary_hold_spinloop);
2128 unsigned long *acknowledge
2129 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
2130 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
2131
2132 /*
2133 * On pseries, if RTAS supports "query-cpu-stopped-state",
2134 * we skip this stage, the CPUs will be started by the
2135 * kernel using RTAS.
2136 */
2137 if ((of_platform == PLATFORM_PSERIES ||
2138 of_platform == PLATFORM_PSERIES_LPAR) &&
2139 rtas_has_query_cpu_stopped) {
2140 prom_printf("prom_hold_cpus: skipped\n");
2141 return;
2142 }
2143
2144 prom_debug("prom_hold_cpus: start...\n");
2145 prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop);
2146 prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop);
2147 prom_debug(" 1) acknowledge = 0x%lx\n",
2148 (unsigned long)acknowledge);
2149 prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge);
2150 prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold);
2151
2152 /* Set the common spinloop variable, so all of the secondary cpus
2153 * will block when they are awakened from their OF spinloop.
2154 * This must occur for both SMP and non SMP kernels, since OF will
2155 * be trashed when we move the kernel.
2156 */
2157 *spinloop = 0;
2158
2159 /* look for cpus */
2160 for (node = 0; prom_next_node(&node); ) {
2161 unsigned int cpu_no;
2162 __be32 reg;
2163
2164 type[0] = 0;
2165 prom_getprop(node, "device_type", type, sizeof(type));
2166 if (prom_strcmp(type, "cpu") != 0)
2167 continue;
2168
2169 /* Skip non-configured cpus. */
2170 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
2171 if (prom_strcmp(type, "okay") != 0)
2172 continue;
2173
2174 reg = cpu_to_be32(-1); /* make sparse happy */
2175 prom_getprop(node, "reg", &reg, sizeof(reg));
2176 cpu_no = be32_to_cpu(reg);
2177
2178 prom_debug("cpu hw idx = %u\n", cpu_no);
2179
2180 /* Init the acknowledge var which will be reset by
2181 * the secondary cpu when it awakens from its OF
2182 * spinloop.
2183 */
2184 *acknowledge = (unsigned long)-1;
2185
2186 if (cpu_no != prom.cpu) {
2187 /* Primary Thread of non-boot cpu or any thread */
2188 prom_printf("starting cpu hw idx %u... ", cpu_no);
2189 call_prom("start-cpu", 3, 0, node,
2190 secondary_hold, cpu_no);
2191
2192 for (i = 0; (i < 100000000) &&
2193 (*acknowledge == ((unsigned long)-1)); i++ )
2194 mb();
2195
2196 if (*acknowledge == cpu_no)
2197 prom_printf("done\n");
2198 else
2199 prom_printf("failed: %lx\n", *acknowledge);
2200 }
2201 #ifdef CONFIG_SMP
2202 else
2203 prom_printf("boot cpu hw idx %u\n", cpu_no);
2204 #endif /* CONFIG_SMP */
2205 }
2206
2207 prom_debug("prom_hold_cpus: end...\n");
2208 }
2209
2210
2211 static void __init prom_init_client_services(unsigned long pp)
2212 {
2213 /* Get a handle to the prom entry point before anything else */
2214 prom_entry = pp;
2215
2216 /* get a handle for the stdout device */
2217 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
2218 if (!PHANDLE_VALID(prom.chosen))
2219 prom_panic("cannot find chosen"); /* msg won't be printed :( */
2220
2221 /* get device tree root */
2222 prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
2223 if (!PHANDLE_VALID(prom.root))
2224 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
2225
2226 prom.mmumap = 0;
2227 }
2228
2229 #ifdef CONFIG_PPC32
2230 /*
2231 * For really old powermacs, we need to map things we claim.
2232 * For that, we need the ihandle of the mmu.
2233 * Also, on the longtrail, we need to work around other bugs.
2234 */
2235 static void __init prom_find_mmu(void)
2236 {
2237 phandle oprom;
2238 char version[64];
2239
2240 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
2241 if (!PHANDLE_VALID(oprom))
2242 return;
2243 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
2244 return;
2245 version[sizeof(version) - 1] = 0;
2246 /* XXX might need to add other versions here */
2247 if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0)
2248 of_workarounds = OF_WA_CLAIM;
2249 else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) {
2250 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
2251 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
2252 } else
2253 return;
2254 prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
2255 prom_getprop(prom.chosen, "mmu", &prom.mmumap,
2256 sizeof(prom.mmumap));
2257 prom.mmumap = be32_to_cpu(prom.mmumap);
2258 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
2259 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
2260 }
2261 #else
2262 #define prom_find_mmu()
2263 #endif
2264
2265 static void __init prom_init_stdout(void)
2266 {
2267 char *path = of_stdout_device;
2268 char type[16];
2269 phandle stdout_node;
2270 __be32 val;
2271
2272 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
2273 prom_panic("cannot find stdout");
2274
2275 prom.stdout = be32_to_cpu(val);
2276
2277 /* Get the full OF pathname of the stdout device */
2278 memset(path, 0, 256);
2279 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
2280 prom_printf("OF stdout device is: %s\n", of_stdout_device);
2281 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
2282 path, prom_strlen(path) + 1);
2283
2284 /* instance-to-package fails on PA-Semi */
2285 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
2286 if (stdout_node != PROM_ERROR) {
2287 val = cpu_to_be32(stdout_node);
2288
2289 /* If it's a display, note it */
2290 memset(type, 0, sizeof(type));
2291 prom_getprop(stdout_node, "device_type", type, sizeof(type));
2292 if (prom_strcmp(type, "display") == 0)
2293 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2294 }
2295 }
2296
2297 static int __init prom_find_machine_type(void)
2298 {
2299 static char compat[256] __prombss;
2300 int len, i = 0;
2301 #ifdef CONFIG_PPC64
2302 phandle rtas;
2303 int x;
2304 #endif
2305
2306 /* Look for a PowerMac or a Cell */
2307 len = prom_getprop(prom.root, "compatible",
2308 compat, sizeof(compat)-1);
2309 if (len > 0) {
2310 compat[len] = 0;
2311 while (i < len) {
2312 char *p = &compat[i];
2313 int sl = prom_strlen(p);
2314 if (sl == 0)
2315 break;
2316 if (prom_strstr(p, "Power Macintosh") ||
2317 prom_strstr(p, "MacRISC"))
2318 return PLATFORM_POWERMAC;
2319 #ifdef CONFIG_PPC64
2320 /* We must make sure we don't detect the IBM Cell
2321 * blades as pSeries due to some firmware issues,
2322 * so we do it here.
2323 */
2324 if (prom_strstr(p, "IBM,CBEA") ||
2325 prom_strstr(p, "IBM,CPBW-1.0"))
2326 return PLATFORM_GENERIC;
2327 #endif /* CONFIG_PPC64 */
2328 i += sl + 1;
2329 }
2330 }
2331 #ifdef CONFIG_PPC64
2332 /* Try to figure out if it's an IBM pSeries or any other
2333 * PAPR compliant platform. We assume it is if :
2334 * - /device_type is "chrp" (please, do NOT use that for future
2335 * non-IBM designs !)
2336 * - it has /rtas
2337 */
2338 len = prom_getprop(prom.root, "device_type",
2339 compat, sizeof(compat)-1);
2340 if (len <= 0)
2341 return PLATFORM_GENERIC;
2342 if (prom_strcmp(compat, "chrp"))
2343 return PLATFORM_GENERIC;
2344
2345 /* Default to pSeries. We need to know if we are running LPAR */
2346 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2347 if (!PHANDLE_VALID(rtas))
2348 return PLATFORM_GENERIC;
2349 x = prom_getproplen(rtas, "ibm,hypertas-functions");
2350 if (x != PROM_ERROR) {
2351 prom_debug("Hypertas detected, assuming LPAR !\n");
2352 return PLATFORM_PSERIES_LPAR;
2353 }
2354 return PLATFORM_PSERIES;
2355 #else
2356 return PLATFORM_GENERIC;
2357 #endif
2358 }
2359
2360 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2361 {
2362 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2363 }
2364
2365 /*
2366 * If we have a display that we don't know how to drive,
2367 * we will want to try to execute OF's open method for it
2368 * later. However, OF will probably fall over if we do that
2369 * after we've taken over the MMU.
2370 * So we check whether we will need to open the display,
2371 * and if so, open it now.
2372 */
2373 static void __init prom_check_displays(void)
2374 {
2375 char type[16], *path;
2376 phandle node;
2377 ihandle ih;
2378 int i;
2379
2380 static const unsigned char default_colors[] __initconst = {
2381 0x00, 0x00, 0x00,
2382 0x00, 0x00, 0xaa,
2383 0x00, 0xaa, 0x00,
2384 0x00, 0xaa, 0xaa,
2385 0xaa, 0x00, 0x00,
2386 0xaa, 0x00, 0xaa,
2387 0xaa, 0xaa, 0x00,
2388 0xaa, 0xaa, 0xaa,
2389 0x55, 0x55, 0x55,
2390 0x55, 0x55, 0xff,
2391 0x55, 0xff, 0x55,
2392 0x55, 0xff, 0xff,
2393 0xff, 0x55, 0x55,
2394 0xff, 0x55, 0xff,
2395 0xff, 0xff, 0x55,
2396 0xff, 0xff, 0xff
2397 };
2398 const unsigned char *clut;
2399
2400 prom_debug("Looking for displays\n");
2401 for (node = 0; prom_next_node(&node); ) {
2402 memset(type, 0, sizeof(type));
2403 prom_getprop(node, "device_type", type, sizeof(type));
2404 if (prom_strcmp(type, "display") != 0)
2405 continue;
2406
2407 /* It seems OF doesn't null-terminate the path :-( */
2408 path = prom_scratch;
2409 memset(path, 0, sizeof(prom_scratch));
2410
2411 /*
2412 * leave some room at the end of the path for appending extra
2413 * arguments
2414 */
2415 if (call_prom("package-to-path", 3, 1, node, path,
2416 sizeof(prom_scratch) - 10) == PROM_ERROR)
2417 continue;
2418 prom_printf("found display : %s, opening... ", path);
2419
2420 ih = call_prom("open", 1, 1, path);
2421 if (ih == 0) {
2422 prom_printf("failed\n");
2423 continue;
2424 }
2425
2426 /* Success */
2427 prom_printf("done\n");
2428 prom_setprop(node, path, "linux,opened", NULL, 0);
2429
2430 /* Setup a usable color table when the appropriate
2431 * method is available. Should update this to set-colors */
2432 clut = default_colors;
2433 for (i = 0; i < 16; i++, clut += 3)
2434 if (prom_set_color(ih, i, clut[0], clut[1],
2435 clut[2]) != 0)
2436 break;
2437
2438 #ifdef CONFIG_LOGO_LINUX_CLUT224
2439 clut = PTRRELOC(logo_linux_clut224.clut);
2440 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2441 if (prom_set_color(ih, i + 32, clut[0], clut[1],
2442 clut[2]) != 0)
2443 break;
2444 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
2445
2446 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2447 if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2448 PROM_ERROR) {
2449 u32 width, height, pitch, addr;
2450
2451 prom_printf("Setting btext !\n");
2452
2453 if (prom_getprop(node, "width", &width, 4) == PROM_ERROR)
2454 return;
2455
2456 if (prom_getprop(node, "height", &height, 4) == PROM_ERROR)
2457 return;
2458
2459 if (prom_getprop(node, "linebytes", &pitch, 4) == PROM_ERROR)
2460 return;
2461
2462 if (prom_getprop(node, "address", &addr, 4) == PROM_ERROR)
2463 return;
2464
2465 prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2466 width, height, pitch, addr);
2467 btext_setup_display(width, height, 8, pitch, addr);
2468 btext_prepare_BAT();
2469 }
2470 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2471 }
2472 }
2473
2474
2475 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
2476 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2477 unsigned long needed, unsigned long align)
2478 {
2479 void *ret;
2480
2481 *mem_start = ALIGN(*mem_start, align);
2482 while ((*mem_start + needed) > *mem_end) {
2483 unsigned long room, chunk;
2484
2485 prom_debug("Chunk exhausted, claiming more at %lx...\n",
2486 alloc_bottom);
2487 room = alloc_top - alloc_bottom;
2488 if (room > DEVTREE_CHUNK_SIZE)
2489 room = DEVTREE_CHUNK_SIZE;
2490 if (room < PAGE_SIZE)
2491 prom_panic("No memory for flatten_device_tree "
2492 "(no room)\n");
2493 chunk = alloc_up(room, 0);
2494 if (chunk == 0)
2495 prom_panic("No memory for flatten_device_tree "
2496 "(claim failed)\n");
2497 *mem_end = chunk + room;
2498 }
2499
2500 ret = (void *)*mem_start;
2501 *mem_start += needed;
2502
2503 return ret;
2504 }
2505
2506 #define dt_push_token(token, mem_start, mem_end) do { \
2507 void *room = make_room(mem_start, mem_end, 4, 4); \
2508 *(__be32 *)room = cpu_to_be32(token); \
2509 } while(0)
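/*
 * Each property emitted by scan_dt_build_struct() below becomes the usual
 * flattened-tree record of three 32-bit big-endian tokens followed by the
 * 4-byte-aligned value:
 *
 *	OF_DT_PROP
 *	value length in bytes
 *	offset of the property name in the strings block
 *	value bytes, padded to a multiple of 4
 */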
2510
2511 static unsigned long __init dt_find_string(char *str)
2512 {
2513 char *s, *os;
2514
2515 s = os = (char *)dt_string_start;
2516 s += 4;
2517 while (s < (char *)dt_string_end) {
2518 if (prom_strcmp(s, str) == 0)
2519 return s - os;
2520 s += prom_strlen(s) + 1;
2521 }
2522 return 0;
2523 }
2524
2525 /*
2526 * The Open Firmware 1275 specification states properties must be 31 bytes or
2527 * less; however, not all firmwares obey this. Make it 64 bytes to be safe.
2528 */
2529 #define MAX_PROPERTY_NAME 64
2530
2531 static void __init scan_dt_build_strings(phandle node,
2532 unsigned long *mem_start,
2533 unsigned long *mem_end)
2534 {
2535 char *prev_name, *namep, *sstart;
2536 unsigned long soff;
2537 phandle child;
2538
2539 sstart = (char *)dt_string_start;
2540
2541 /* get and store all property names */
2542 prev_name = "";
2543 for (;;) {
2544 /* 64 is max len of name including nul. */
2545 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2546 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2547 /* No more properties: unwind alloc */
2548 *mem_start = (unsigned long)namep;
2549 break;
2550 }
2551
2552 /* skip "name" */
2553 if (prom_strcmp(namep, "name") == 0) {
2554 *mem_start = (unsigned long)namep;
2555 prev_name = "name";
2556 continue;
2557 }
2558 /* get/create string entry */
2559 soff = dt_find_string(namep);
2560 if (soff != 0) {
2561 *mem_start = (unsigned long)namep;
2562 namep = sstart + soff;
2563 } else {
2564 /* Trim off some if we can */
2565 *mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2566 dt_string_end = *mem_start;
2567 }
2568 prev_name = namep;
2569 }
2570
2571 /* do all our children */
2572 child = call_prom("child", 1, 1, node);
2573 while (child != 0) {
2574 scan_dt_build_strings(child, mem_start, mem_end);
2575 child = call_prom("peer", 1, 1, child);
2576 }
2577 }
2578
2579 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2580 unsigned long *mem_end)
2581 {
2582 phandle child;
2583 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2584 unsigned long soff;
2585 unsigned char *valp;
2586 static char pname[MAX_PROPERTY_NAME] __prombss;
2587 int l, room, has_phandle = 0;
2588
2589 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2590
2591 /* get the node's full name */
2592 namep = (char *)*mem_start;
2593 room = *mem_end - *mem_start;
2594 if (room > 255)
2595 room = 255;
2596 l = call_prom("package-to-path", 3, 1, node, namep, room);
2597 if (l >= 0) {
2598 /* Didn't fit? Get more room. */
2599 if (l >= room) {
2600 if (l >= *mem_end - *mem_start)
2601 namep = make_room(mem_start, mem_end, l+1, 1);
2602 call_prom("package-to-path", 3, 1, node, namep, l);
2603 }
2604 namep[l] = '\0';
2605
2606 /* Fixup an Apple bug where they have bogus \0 chars in the
2607 * middle of the path in some properties, and extract
2608 * the unit name (everything after the last '/').
2609 */
2610 for (lp = p = namep, ep = namep + l; p < ep; p++) {
2611 if (*p == '/')
2612 lp = namep;
2613 else if (*p != 0)
2614 *lp++ = *p;
2615 }
2616 *lp = 0;
2617 *mem_start = ALIGN((unsigned long)lp + 1, 4);
2618 }
2619
2620 /* get it again for debugging */
2621 path = prom_scratch;
2622 memset(path, 0, sizeof(prom_scratch));
2623 call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1);
2624
2625 /* get and store all properties */
2626 prev_name = "";
2627 sstart = (char *)dt_string_start;
2628 for (;;) {
2629 if (call_prom("nextprop", 3, 1, node, prev_name,
2630 pname) != 1)
2631 break;
2632
2633 /* skip "name" */
2634 if (prom_strcmp(pname, "name") == 0) {
2635 prev_name = "name";
2636 continue;
2637 }
2638
2639 /* find string offset */
2640 soff = dt_find_string(pname);
2641 if (soff == 0) {
2642 prom_printf("WARNING: Can't find string index for"
2643 " <%s>, node %s\n", pname, path);
2644 break;
2645 }
2646 prev_name = sstart + soff;
2647
2648 /* get length */
2649 l = call_prom("getproplen", 2, 1, node, pname);
2650
2651 /* sanity checks */
2652 if (l == PROM_ERROR)
2653 continue;
2654
2655 /* push property head */
2656 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2657 dt_push_token(l, mem_start, mem_end);
2658 dt_push_token(soff, mem_start, mem_end);
2659
2660 /* push property content */
2661 valp = make_room(mem_start, mem_end, l, 4);
2662 call_prom("getprop", 4, 1, node, pname, valp, l);
2663 *mem_start = ALIGN(*mem_start, 4);
2664
2665 if (!prom_strcmp(pname, "phandle"))
2666 has_phandle = 1;
2667 }
2668
2669 /* Add a "phandle" property if none already exists */
2670 if (!has_phandle) {
2671 soff = dt_find_string("phandle");
2672 if (soff == 0)
2673 prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path);
2674 else {
2675 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2676 dt_push_token(4, mem_start, mem_end);
2677 dt_push_token(soff, mem_start, mem_end);
2678 valp = make_room(mem_start, mem_end, 4, 4);
2679 *(__be32 *)valp = cpu_to_be32(node);
2680 }
2681 }
2682
2683 /* do all our children */
2684 child = call_prom("child", 1, 1, node);
2685 while (child != 0) {
2686 scan_dt_build_struct(child, mem_start, mem_end);
2687 child = call_prom("peer", 1, 1, child);
2688 }
2689
2690 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2691 }
2692
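/*
 * The blob built below is laid out, in memory order, as:
 *
 *	struct boot_param_header
 *	memory reserve map (a copy of mem_reserve_map)
 *	strings block      (property names, from scan_dt_build_strings())
 *	structure block    (OF_DT_* tokens, from scan_dt_build_struct(),
 *			    terminated by OF_DT_END)
 *
 * Consumers only ever use the offsets recorded in the header, so the
 * relative order of the strings and structure blocks does not matter.
 */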
2693 static void __init flatten_device_tree(void)
2694 {
2695 phandle root;
2696 unsigned long mem_start, mem_end, room;
2697 struct boot_param_header *hdr;
2698 char *namep;
2699 u64 *rsvmap;
2700
2701 /*
2702 * Check how much room we have between alloc top & bottom (+/- a
2703 * few pages), crop to 1MB, as this is our "chunk" size
2704 */
2705 room = alloc_top - alloc_bottom - 0x4000;
2706 if (room > DEVTREE_CHUNK_SIZE)
2707 room = DEVTREE_CHUNK_SIZE;
2708 prom_debug("starting device tree allocs at %lx\n", alloc_bottom);
2709
2710 /* Now try to claim that */
2711 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2712 if (mem_start == 0)
2713 prom_panic("Can't allocate initial device-tree chunk\n");
2714 mem_end = mem_start + room;
2715
2716 /* Get root of tree */
2717 root = call_prom("peer", 1, 1, (phandle)0);
2718 if (root == (phandle)0)
2719 prom_panic ("couldn't get device tree root\n");
2720
2721 /* Build header and make room for mem rsv map */
2722 mem_start = ALIGN(mem_start, 4);
2723 hdr = make_room(&mem_start, &mem_end,
2724 sizeof(struct boot_param_header), 4);
2725 dt_header_start = (unsigned long)hdr;
2726 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2727
2728 /* Start of strings */
2729 mem_start = PAGE_ALIGN(mem_start);
2730 dt_string_start = mem_start;
2731 mem_start += 4; /* hole */
2732
2733 /* Add "phandle" in there, we'll need it */
2734 namep = make_room(&mem_start, &mem_end, 16, 1);
2735 prom_strscpy_pad(namep, "phandle", sizeof("phandle"));
2736 mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2737
2738 /* Build string array */
2739 prom_printf("Building dt strings...\n");
2740 scan_dt_build_strings(root, &mem_start, &mem_end);
2741 dt_string_end = mem_start;
2742
2743 /* Build structure */
2744 mem_start = PAGE_ALIGN(mem_start);
2745 dt_struct_start = mem_start;
2746 prom_printf("Building dt structure...\n");
2747 scan_dt_build_struct(root, &mem_start, &mem_end);
2748 dt_push_token(OF_DT_END, &mem_start, &mem_end);
2749 dt_struct_end = PAGE_ALIGN(mem_start);
2750
2751 /* Finish header */
2752 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2753 hdr->magic = cpu_to_be32(OF_DT_HEADER);
2754 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2755 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2756 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2757 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2758 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2759 hdr->version = cpu_to_be32(OF_DT_VERSION);
2760 /* Version 16 is not backward compatible */
2761 hdr->last_comp_version = cpu_to_be32(0x10);
2762
2763 /* Copy the reserve map in */
2764 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2765
2766 #ifdef DEBUG_PROM
2767 {
2768 int i;
2769 prom_printf("reserved memory map:\n");
2770 for (i = 0; i < mem_reserve_cnt; i++)
2771 prom_printf(" %llx - %llx\n",
2772 be64_to_cpu(mem_reserve_map[i].base),
2773 be64_to_cpu(mem_reserve_map[i].size));
2774 }
2775 #endif
2776 /* Bump mem_reserve_cnt to cause further reservations to fail
2777 * since it's too late.
2778 */
2779 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2780
2781 prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
2782 dt_string_start, dt_string_end);
2783 prom_printf("Device tree struct 0x%lx -> 0x%lx\n",
2784 dt_struct_start, dt_struct_end);
2785 }
2786
2787 #ifdef CONFIG_PPC_MAPLE
2788 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2789 * The values are bad, and it doesn't even have the right number of cells. */
2790 static void __init fixup_device_tree_maple(void)
2791 {
2792 phandle isa;
2793 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2794 u32 isa_ranges[6];
2795 char *name;
2796
2797 name = "/ht@0/isa@4";
2798 isa = call_prom("finddevice", 1, 1, ADDR(name));
2799 if (!PHANDLE_VALID(isa)) {
2800 name = "/ht@0/isa@6";
2801 isa = call_prom("finddevice", 1, 1, ADDR(name));
2802 rloc = 0x01003000; /* IO space; PCI device = 6 */
2803 }
2804 if (!PHANDLE_VALID(isa))
2805 return;
2806
2807 if (prom_getproplen(isa, "ranges") != 12)
2808 return;
2809 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2810 == PROM_ERROR)
2811 return;
2812
2813 if (isa_ranges[0] != 0x1 ||
2814 isa_ranges[1] != 0xf4000000 ||
2815 isa_ranges[2] != 0x00010000)
2816 return;
2817
2818 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2819
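/*
 * The six cells below follow the usual ranges encoding for an ISA node
 * under PCI: a 2-cell ISA child address, a 3-cell PCI parent address
 * (rloc carrying the IO-space and device bits), and a 1-cell size of 64 KB.
 */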
2820 isa_ranges[0] = 0x1;
2821 isa_ranges[1] = 0x0;
2822 isa_ranges[2] = rloc;
2823 isa_ranges[3] = 0x0;
2824 isa_ranges[4] = 0x0;
2825 isa_ranges[5] = 0x00010000;
2826 prom_setprop(isa, name, "ranges",
2827 isa_ranges, sizeof(isa_ranges));
2828 }
2829
2830 #define CPC925_MC_START 0xf8000000
2831 #define CPC925_MC_LENGTH 0x1000000
2832 /* The values for memory-controller don't have right number of cells */
2833 static void __init fixup_device_tree_maple_memory_controller(void)
2834 {
2835 phandle mc;
2836 u32 mc_reg[4];
2837 char *name = "/hostbridge@f8000000";
2838 u32 ac, sc;
2839
2840 mc = call_prom("finddevice", 1, 1, ADDR(name));
2841 if (!PHANDLE_VALID(mc))
2842 return;
2843
2844 if (prom_getproplen(mc, "reg") != 8)
2845 return;
2846
2847 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2848 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2849 if ((ac != 2) || (sc != 2))
2850 return;
2851
2852 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2853 return;
2854
2855 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2856 return;
2857
2858 prom_printf("Fixing up bogus hostbridge on Maple...\n");
2859
2860 mc_reg[0] = 0x0;
2861 mc_reg[1] = CPC925_MC_START;
2862 mc_reg[2] = 0x0;
2863 mc_reg[3] = CPC925_MC_LENGTH;
2864 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2865 }
2866 #else
2867 #define fixup_device_tree_maple()
2868 #define fixup_device_tree_maple_memory_controller()
2869 #endif
2870
2871 #ifdef CONFIG_PPC_CHRP
2872 /*
2873 * Pegasos and BriQ lack the "ranges" property in the isa node
2874 * Pegasos needs decimal IRQ 14/15, not hexadecimal
2875 * Pegasos has the IDE configured in legacy mode, but advertised as native
2876 */
2877 static void __init fixup_device_tree_chrp(void)
2878 {
2879 phandle ph;
2880 u32 prop[6];
2881 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2882 char *name;
2883 int rc;
2884
2885 name = "/pci@80000000/isa@c";
2886 ph = call_prom("finddevice", 1, 1, ADDR(name));
2887 if (!PHANDLE_VALID(ph)) {
2888 name = "/pci@ff500000/isa@6";
2889 ph = call_prom("finddevice", 1, 1, ADDR(name));
2890 rloc = 0x01003000; /* IO space; PCI device = 6 */
2891 }
2892 if (PHANDLE_VALID(ph)) {
2893 rc = prom_getproplen(ph, "ranges");
2894 if (rc == 0 || rc == PROM_ERROR) {
2895 prom_printf("Fixing up missing ISA range on Pegasos...\n");
2896
2897 prop[0] = 0x1;
2898 prop[1] = 0x0;
2899 prop[2] = rloc;
2900 prop[3] = 0x0;
2901 prop[4] = 0x0;
2902 prop[5] = 0x00010000;
2903 prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2904 }
2905 }
2906
2907 name = "/pci@80000000/ide@C,1";
2908 ph = call_prom("finddevice", 1, 1, ADDR(name));
2909 if (PHANDLE_VALID(ph)) {
2910 prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2911 prop[0] = 14;
2912 prop[1] = 0x0;
2913 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2914 prom_printf("Fixing up IDE class-code on Pegasos...\n");
2915 rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2916 if (rc == sizeof(u32)) {
2917 prop[0] &= ~0x5;
2918 prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2919 }
2920 }
2921 }
2922 #else
2923 #define fixup_device_tree_chrp()
2924 #endif
2925
2926 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2927 static void __init fixup_device_tree_pmac(void)
2928 {
2929 phandle u3, i2c, mpic;
2930 u32 u3_rev;
2931 u32 interrupts[2];
2932 u32 parent;
2933
2934 /* Some G5s have a missing interrupt definition, fix it up here */
2935 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2936 if (!PHANDLE_VALID(u3))
2937 return;
2938 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2939 if (!PHANDLE_VALID(i2c))
2940 return;
2941 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2942 if (!PHANDLE_VALID(mpic))
2943 return;
2944
2945 /* check if proper rev of u3 */
2946 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2947 == PROM_ERROR)
2948 return;
2949 if (u3_rev < 0x35 || u3_rev > 0x39)
2950 return;
2951 /* does it need fixup ? */
2952 if (prom_getproplen(i2c, "interrupts") > 0)
2953 return;
2954
2955 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2956
2957 /* interrupt on this revision of u3 is number 0 and level */
2958 interrupts[0] = 0;
2959 interrupts[1] = 1;
2960 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2961 &interrupts, sizeof(interrupts));
2962 parent = (u32)mpic;
2963 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2964 &parent, sizeof(parent));
2965 }
2966 #else
2967 #define fixup_device_tree_pmac()
2968 #endif
2969
2970 #ifdef CONFIG_PPC_EFIKA
2971 /*
2972 * The MPC5200 FEC driver requires a phy-handle property to tell it how
2973 * to talk to the phy. If the phy-handle property is missing, then this
2974 * function is called to add the appropriate nodes and link it to the
2975 * ethernet node.
2976 */
2977 static void __init fixup_device_tree_efika_add_phy(void)
2978 {
2979 u32 node;
2980 char prop[64];
2981 int rv;
2982
2983 /* Check if /builtin/ethernet exists - bail if it doesn't */
2984 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2985 if (!PHANDLE_VALID(node))
2986 return;
2987
2988 /* Check if the phy-handle property exists - bail if it does */
2989 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2990 if (rv <= 0)
2991 return;
2992
2993 /*
2994 * At this point the ethernet device doesn't have a phy described.
2995 * Now we need to add the missing phy node and linkage
2996 */
2997
2998 /* Check for an MDIO bus node - if missing then create one */
2999 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
3000 if (!PHANDLE_VALID(node)) {
3001 prom_printf("Adding Ethernet MDIO node\n");
3002 call_prom("interpret", 1, 1,
3003 " s\" /builtin\" find-device"
3004 " new-device"
3005 " 1 encode-int s\" #address-cells\" property"
3006 " 0 encode-int s\" #size-cells\" property"
3007 " s\" mdio\" device-name"
3008 " s\" fsl,mpc5200b-mdio\" encode-string"
3009 " s\" compatible\" property"
3010 " 0xf0003000 0x400 reg"
3011 " 0x2 encode-int"
3012 " 0x5 encode-int encode+"
3013 " 0x3 encode-int encode+"
3014 " s\" interrupts\" property"
3015 " finish-device");
3016 }
3017
3018 /* Check for a PHY device node - if missing then create one and
3019 * give its phandle to the ethernet node */
3020 node = call_prom("finddevice", 1, 1,
3021 ADDR("/builtin/mdio/ethernet-phy"));
3022 if (!PHANDLE_VALID(node)) {
3023 prom_printf("Adding Ethernet PHY node\n");
3024 call_prom("interpret", 1, 1,
3025 " s\" /builtin/mdio\" find-device"
3026 " new-device"
3027 " s\" ethernet-phy\" device-name"
3028 " 0x10 encode-int s\" reg\" property"
3029 " my-self"
3030 " ihandle>phandle"
3031 " finish-device"
3032 " s\" /builtin/ethernet\" find-device"
3033 " encode-int"
3034 " s\" phy-handle\" property"
3035 " device-end");
3036 }
3037 }
3038
3039 static void __init fixup_device_tree_efika(void)
3040 {
3041 int sound_irq[3] = { 2, 2, 0 };
3042 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
3043 3,4,0, 3,5,0, 3,6,0, 3,7,0,
3044 3,8,0, 3,9,0, 3,10,0, 3,11,0,
3045 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
3046 u32 node;
3047 char prop[64];
3048 int rv, len;
3049
3050 /* Check if we're really running on an EFIKA */
3051 node = call_prom("finddevice", 1, 1, ADDR("/"));
3052 if (!PHANDLE_VALID(node))
3053 return;
3054
3055 rv = prom_getprop(node, "model", prop, sizeof(prop));
3056 if (rv == PROM_ERROR)
3057 return;
3058 if (prom_strcmp(prop, "EFIKA5K2"))
3059 return;
3060
3061 prom_printf("Applying EFIKA device tree fixups\n");
3062
3063 /* Claiming to be 'chrp' is death */
3064 node = call_prom("finddevice", 1, 1, ADDR("/"));
3065 rv = prom_getprop(node, "device_type", prop, sizeof(prop));
3066 if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0))
3067 prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
3068
3069 /* CODEGEN,description is exposed in /proc/cpuinfo so
3070 fix that too */
3071 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
3072 if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP")))
3073 prom_setprop(node, "/", "CODEGEN,description",
3074 "Efika 5200B PowerPC System",
3075 sizeof("Efika 5200B PowerPC System"));
3076
3077 /* Fixup bestcomm interrupts property */
3078 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
3079 if (PHANDLE_VALID(node)) {
3080 len = prom_getproplen(node, "interrupts");
3081 if (len == 12) {
3082 prom_printf("Fixing bestcomm interrupts property\n");
3083 prom_setprop(node, "/builtin/bestcom", "interrupts",
3084 bcomm_irq, sizeof(bcomm_irq));
3085 }
3086 }
3087
3088 /* Fixup sound interrupts property */
3089 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
3090 if (PHANDLE_VALID(node)) {
3091 rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
3092 if (rv == PROM_ERROR) {
3093 prom_printf("Adding sound interrupts property\n");
3094 prom_setprop(node, "/builtin/sound", "interrupts",
3095 sound_irq, sizeof(sound_irq));
3096 }
3097 }
3098
3099 /* Make sure ethernet phy-handle property exists */
3100 fixup_device_tree_efika_add_phy();
3101 }
3102 #else
3103 #define fixup_device_tree_efika()
3104 #endif
3105
3106 #ifdef CONFIG_PPC_PASEMI_NEMO
3107 /*
3108 * CFE supplied on Nemo is broken in several ways; the biggest
3109 * problem is that it reassigns ISA interrupts to unused mpic ints.
3110 * Add an interrupt-controller property for the io-bridge to use
3111 * and correct the ints so we can attach them to an irq_domain
3112 */
3113 static void __init fixup_device_tree_pasemi(void)
3114 {
3115 u32 interrupts[2], parent, rval, val = 0;
3116 char *name, *pci_name;
3117 phandle iob, node;
3118
3119 /* Find the root pci node */
3120 name = "/pxp@0,e0000000";
3121 iob = call_prom("finddevice", 1, 1, ADDR(name));
3122 if (!PHANDLE_VALID(iob))
3123 return;
3124
3125 /* check if interrupt-controller node set yet */
3126 if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
3127 return;
3128
3129 prom_printf("adding interrupt-controller property for SB600...\n");
3130
3131 prom_setprop(iob, name, "interrupt-controller", &val, 0);
3132
3133 pci_name = "/pxp@0,e0000000/pci@11";
3134 node = call_prom("finddevice", 1, 1, ADDR(pci_name));
3135 parent = ADDR(iob);
3136
3137 for( ; prom_next_node(&node); ) {
3138 /* scan each node for one with an interrupt */
3139 if (!PHANDLE_VALID(node))
3140 continue;
3141
3142 rval = prom_getproplen(node, "interrupts");
3143 if (rval == 0 || rval == PROM_ERROR)
3144 continue;
3145
3146 prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
3147 if ((interrupts[0] < 212) || (interrupts[0] > 222))
3148 continue;
3149
3150 /* found a node, update both interrupts and interrupt-parent */
3151 if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
3152 interrupts[0] -= 203;
3153 if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
3154 interrupts[0] -= 213;
3155 if (interrupts[0] == 221)
3156 interrupts[0] = 14;
3157 if (interrupts[0] == 222)
3158 interrupts[0] = 8;
3159
3160 prom_setprop(node, pci_name, "interrupts", interrupts,
3161 sizeof(interrupts));
3162 prom_setprop(node, pci_name, "interrupt-parent", &parent,
3163 sizeof(parent));
3164 }
3165
3166 /*
3167 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
3168 * so that generic isa-bridge code can add the SB600 and its on-board
3169 * peripherals.
3170 */
3171 name = "/pxp@0,e0000000/io-bridge@0";
3172 iob = call_prom("finddevice", 1, 1, ADDR(name));
3173 if (!PHANDLE_VALID(iob))
3174 return;
3175
3176 /* device_type is already set, just change it. */
3177
3178 prom_printf("Changing device_type of SB600 node...\n");
3179
3180 prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
3181 }
3182 #else /* !CONFIG_PPC_PASEMI_NEMO */
3183 static inline void fixup_device_tree_pasemi(void) { }
3184 #endif
3185
3186 static void __init fixup_device_tree(void)
3187 {
3188 fixup_device_tree_maple();
3189 fixup_device_tree_maple_memory_controller();
3190 fixup_device_tree_chrp();
3191 fixup_device_tree_pmac();
3192 fixup_device_tree_efika();
3193 fixup_device_tree_pasemi();
3194 }
3195
3196 static void __init prom_find_boot_cpu(void)
3197 {
3198 __be32 rval;
3199 ihandle prom_cpu;
3200 phandle cpu_pkg;
3201
3202 rval = 0;
3203 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
3204 return;
3205 prom_cpu = be32_to_cpu(rval);
3206
3207 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
3208
3209 if (!PHANDLE_VALID(cpu_pkg))
3210 return;
3211
3212 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
3213 prom.cpu = be32_to_cpu(rval);
3214
3215 prom_debug("Booting CPU hw index = %d\n", prom.cpu);
3216 }
3217
3218 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
3219 {
3220 #ifdef CONFIG_BLK_DEV_INITRD
3221 if (r3 && r4 && r4 != 0xdeadbeef) {
3222 __be64 val;
3223
3224 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
3225 prom_initrd_end = prom_initrd_start + r4;
3226
3227 val = cpu_to_be64(prom_initrd_start);
3228 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
3229 &val, sizeof(val));
3230 val = cpu_to_be64(prom_initrd_end);
3231 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
3232 &val, sizeof(val));
3233
3234 reserve_mem(prom_initrd_start,
3235 prom_initrd_end - prom_initrd_start);
3236
3237 prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
3238 prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
3239 }
3240 #endif /* CONFIG_BLK_DEV_INITRD */
3241 }
3242
3243 #ifdef CONFIG_PPC_SVM
3244 /*
3245 * Perform the Enter Secure Mode ultracall.
3246 */
3247 static int __init enter_secure_mode(unsigned long kbase, unsigned long fdt)
3248 {
3249 register unsigned long r3 asm("r3") = UV_ESM;
3250 register unsigned long r4 asm("r4") = kbase;
3251 register unsigned long r5 asm("r5") = fdt;
3252
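/* "sc 2" is the ultracall instruction: the UV_ESM opcode goes in r3, the
 * kernel base and fdt pointer in r4/r5, and the ultravisor's return status
 * comes back in r3. */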
3253 asm volatile("sc 2" : "+r"(r3) : "r"(r4), "r"(r5));
3254
3255 return r3;
3256 }
3257
3258 /*
3259 * Call the Ultravisor to transfer us to secure memory if we have an ESM blob.
3260 */
3261 static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3262 {
3263 int ret;
3264
3265 if (!prom_svm_enable)
3266 return;
3267
3268 /* Switch to secure mode. */
3269 prom_printf("Switching to secure mode.\n");
3270
3271 /*
3272 * The ultravisor will do an integrity check of the kernel image but we
3273 * relocated it so the check will fail. Restore the original image by
3274 * relocating it back to the kernel virtual base address.
3275 */
3276 relocate(KERNELBASE);
3277
3278 ret = enter_secure_mode(kbase, fdt);
3279
3280 /* Relocate the kernel again. */
3281 relocate(kbase);
3282
3283 if (ret != U_SUCCESS) {
3284 prom_printf("Returned %d from switching to secure mode.\n", ret);
3285 prom_rtas_os_term("Switch to secure mode failed.\n");
3286 }
3287 }
3288 #else
3289 static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3290 {
3291 }
3292 #endif /* CONFIG_PPC_SVM */
3293
3294 /*
3295 * We enter here early on, when the Open Firmware prom is still
3296 * handling exceptions and the MMU hash table for us.
3297 */
3298
3299 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3300 unsigned long pp,
3301 unsigned long r6, unsigned long r7,
3302 unsigned long kbase)
3303 {
3304 unsigned long hdr;
3305
3306 #ifdef CONFIG_PPC32
3307 unsigned long offset = reloc_offset();
3308 reloc_got2(offset);
3309 #endif
3310
3311 /*
3312 * First zero the BSS
3313 */
3314 memset(&__bss_start, 0, __bss_stop - __bss_start);
3315
3316 /*
3317 * Init interface to Open Firmware, get some node references,
3318 * like /chosen
3319 */
3320 prom_init_client_services(pp);
3321
3322 /*
3323 * See if this OF is old enough that we need to do explicit maps
3324 * and other workarounds
3325 */
3326 prom_find_mmu();
3327
3328 /*
3329 * Init prom stdout device
3330 */
3331 prom_init_stdout();
3332
3333 prom_printf("Preparing to boot %s", linux_banner);
3334
3335 /*
3336 * Get default machine type. At this point, we do not differentiate
3337 * between pSeries SMP and pSeries LPAR
3338 */
3339 of_platform = prom_find_machine_type();
3340 prom_printf("Detected machine type: %x\n", of_platform);
3341
3342 #ifndef CONFIG_NONSTATIC_KERNEL
3343 /* Bail if this is a kdump kernel. */
3344 if (PHYSICAL_START > 0)
3345 prom_panic("Error: You can't boot a kdump kernel from OF!\n");
3346 #endif
3347
3348 /*
3349 * Check for an initrd
3350 */
3351 prom_check_initrd(r3, r4);
3352
3353 /*
3354 * Do early parsing of command line
3355 */
3356 early_cmdline_parse();
3357
3358 #ifdef CONFIG_PPC_PSERIES
3359 /*
3360 * On pSeries, inform the firmware about our capabilities
3361 */
3362 if (of_platform == PLATFORM_PSERIES ||
3363 of_platform == PLATFORM_PSERIES_LPAR)
3364 prom_send_capabilities();
3365 #endif
3366
3367 /*
3368 * Copy the CPU hold code
3369 */
3370 if (of_platform != PLATFORM_POWERMAC)
3371 copy_and_flush(0, kbase, 0x100, 0);
3372
3373 /*
3374 * Initialize memory management within prom_init
3375 */
3376 prom_init_mem();
3377
3378 /*
3379 * Determine which cpu is actually running right _now_
3380 */
3381 prom_find_boot_cpu();
3382
3383 /*
3384 * Initialize display devices
3385 */
3386 prom_check_displays();
3387
3388 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
3389 /*
3390 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
3391 * that uses the allocator, as we need to make sure we get the top of memory
3392 * available for us here...
3393 */
3394 if (of_platform == PLATFORM_PSERIES)
3395 prom_initialize_tce_table();
3396 #endif
3397
3398 /*
3399 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3400 * have a usable RTAS implementation.
3401 */
3402 if (of_platform != PLATFORM_POWERMAC)
3403 prom_instantiate_rtas();
3404
3405 #ifdef CONFIG_PPC64
3406 /* instantiate sml */
3407 prom_instantiate_sml();
3408 #endif
3409
3410 /*
3411 * On non-powermacs, put all CPUs in spin-loops.
3412 *
3413 * PowerMacs use a different mechanism to spin CPUs
3414 *
3415 * (This must be done after instantiating RTAS)
3416 */
3417 if (of_platform != PLATFORM_POWERMAC)
3418 prom_hold_cpus();
3419
3420 /*
3421 * Fill in some infos for use by the kernel later on
3422 */
3423 if (prom_memory_limit) {
3424 __be64 val = cpu_to_be64(prom_memory_limit);
3425 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3426 &val, sizeof(val));
3427 }
3428 #ifdef CONFIG_PPC64
3429 if (prom_iommu_off)
3430 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3431 NULL, 0);
3432
3433 if (prom_iommu_force_on)
3434 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3435 NULL, 0);
3436
3437 if (prom_tce_alloc_start) {
3438 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3439 &prom_tce_alloc_start,
3440 sizeof(prom_tce_alloc_start));
3441 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3442 &prom_tce_alloc_end,
3443 sizeof(prom_tce_alloc_end));
3444 }
3445 #endif
3446
3447 /*
3448 * Fixup any known bugs in the device-tree
3449 */
3450 fixup_device_tree();
3451
3452 /*
3453 * Now finally create the flattened device-tree
3454 */
3455 prom_printf("copying OF device tree...\n");
3456 flatten_device_tree();
3457
3458 /*
3459 * in case stdin is USB and still active on IBM machines...
3460 * Unfortunately quiesce crashes on some powermacs if we have
3461 * closed stdin already (in particular the powerbook 101).
3462 */
3463 if (of_platform != PLATFORM_POWERMAC)
3464 prom_close_stdin();
3465
3466 /*
3467 * Call OF "quiesce" method to shut down pending DMA's from
3468 * devices etc...
3469 */
3470 prom_printf("Quiescing Open Firmware ...\n");
3471 call_prom("quiesce", 0, 0);
3472
3473 /*
3474 * And finally, call the kernel passing it the flattened device
3475 * tree and NULL as r5, thus triggering the new entry point which
3476 * is common to us and kexec
3477 */
3478 hdr = dt_header_start;
3479
3480 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
3481 prom_debug("->dt_header_start=0x%lx\n", hdr);
3482
3483 #ifdef CONFIG_PPC32
3484 reloc_got2(-offset);
3485 #endif
3486
3487 /* Move to secure memory if we're supposed to be secure guests. */
3488 setup_secure_guest(kbase, hdr);
3489
3490 __start(hdr, kbase, 0, 0, 0, 0, 0);
3491
3492 return 0;
3493 }
3494