// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for interfacing to Open Firmware.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *   {engebret|bergner}@us.ibm.com
 */

#undef DEBUG_PROM

/* we cannot use FORTIFY as it brings in new symbols */
#define __NO_FORTIFY

#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/mmu.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/machdep.h>
#include <asm/asm-prototypes.h>
#include <asm/ultravisor-api.h>

#include <linux/linux_logo.h>

/* All of prom_init bss lives here */
#define __prombss __section(".bss.prominit")

/*
 * Eventually bump that one up
 */
#define DEVTREE_CHUNK_SIZE	0x100000

/*
 * This is the size of the local memory reserve map that gets copied
 * into the boot params passed to the kernel. That size is totally
 * flexible as the kernel just reads the list until it encounters an
 * entry with size 0, so it can be changed without breaking binary
 * compatibility
 */
#define MEM_RESERVE_MAP_SIZE	8

/*
 * prom_init() is called very early on, before the kernel text
 * and data have been mapped to KERNELBASE.  At this point the code
 * is running at whatever address it has been loaded at.
 * On ppc32 we compile with -mrelocatable, which means that references
 * to extern and static variables get relocated automatically.
 * ppc64 objects are always relocatable, we just need to relocate the
 * TOC.
 *
 * Because OF may have mapped I/O devices into the area starting at
 * KERNELBASE, particularly on CHRP machines, we can't safely call
 * OF once the kernel has been mapped to KERNELBASE.  Therefore all
 * OF calls must be done within prom_init().
 *
 * ADDR is used in calls to call_prom.  The 4th and following
 * arguments to call_prom should be 32-bit values.
 * On ppc64, 64 bit values are truncated to 32 bits (and
 * fortunately don't get interpreted as two arguments).
 */
#define ADDR(x)		(u32)(unsigned long)(x)

#ifdef CONFIG_PPC64
#define OF_WORKAROUNDS	0
#else
#define OF_WORKAROUNDS	of_workarounds
static int of_workarounds __prombss;
#endif

#define OF_WA_CLAIM	1	/* do phys/virt claim separately, then map */
#define OF_WA_LONGTRAIL	2	/* work around longtrail bugs */

#define PROM_BUG() do {						\
	prom_printf("kernel BUG at %s line 0x%x!\n",		\
		    __FILE__, __LINE__);			\
	__builtin_trap();					\
} while (0)

#ifdef DEBUG_PROM
#define prom_debug(x...)	prom_printf(x)
#else
#define prom_debug(x...)	do { } while (0)
#endif


typedef u32 prom_arg_t;

struct prom_args {
	__be32 service;
	__be32 nargs;
	__be32 nret;
	__be32 args[10];
};

struct prom_t {
	ihandle root;
	phandle chosen;
	int cpu;
	ihandle stdout;
	ihandle mmumap;
	ihandle memory;
};

struct mem_map_entry {
	__be64	base;
	__be64	size;
};

typedef __be32 cell_t;

extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
		    unsigned long r6, unsigned long r7, unsigned long r8,
		    unsigned long r9);

#ifdef CONFIG_PPC64
extern int enter_prom(struct prom_args *args, unsigned long entry);
#else
static inline int enter_prom(struct prom_args *args, unsigned long entry)
{
	return ((int (*)(struct prom_args *))entry)(args);
}
#endif

extern void copy_and_flush(unsigned long dest, unsigned long src,
			   unsigned long size, unsigned long offset);

/* prom structure */
static struct prom_t __prombss prom;

static unsigned long __prombss prom_entry;

static char __prombss of_stdout_device[256];
static char __prombss prom_scratch[256];

static unsigned long __prombss dt_header_start;
static unsigned long __prombss dt_struct_start, dt_struct_end;
static unsigned long __prombss dt_string_start, dt_string_end;

static unsigned long __prombss prom_initrd_start, prom_initrd_end;

#ifdef CONFIG_PPC64
static int __prombss prom_iommu_force_on;
static int __prombss prom_iommu_off;
static unsigned long __prombss prom_tce_alloc_start;
static unsigned long __prombss prom_tce_alloc_end;
#endif

#ifdef CONFIG_PPC_PSERIES
static bool __prombss prom_radix_disable;
static bool __prombss prom_radix_gtse_disable;
static bool __prombss prom_xive_disable;
#endif

#ifdef CONFIG_PPC_SVM
static bool __prombss prom_svm_enable;
#endif

struct platform_support {
	bool hash_mmu;
	bool radix_mmu;
	bool radix_gtse;
	bool xive;
};

/* Platform codes are now obsolete in the kernel. Now only used within this
 * file and ultimately gone too. Feel free to change them if you need, they
 * are not shared with anything outside of this file anymore
 */
#define PLATFORM_PSERIES	0x0100
#define PLATFORM_PSERIES_LPAR	0x0101
#define PLATFORM_LPAR		0x0001
#define PLATFORM_POWERMAC	0x0400
#define PLATFORM_GENERIC	0x0500

static int __prombss of_platform;

static char __prombss prom_cmd_line[COMMAND_LINE_SIZE];

static unsigned long __prombss prom_memory_limit;

static unsigned long __prombss alloc_top;
static unsigned long __prombss alloc_top_high;
static unsigned long __prombss alloc_bottom;
static unsigned long __prombss rmo_top;
static unsigned long __prombss ram_top;

static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE];
static int __prombss mem_reserve_cnt;

static cell_t __prombss regbuf[1024];

static bool __prombss rtas_has_query_cpu_stopped;


/*
 * Error results ... some OF calls will return "-1" on error, some
 * will return 0, some will return either. To simplify, here are
 * macros to use with any ihandle or phandle return value to check if
 * it is valid
 */

#define PROM_ERROR		(-1u)
#define PHANDLE_VALID(p)	((p) != 0 && (p) != PROM_ERROR)
#define IHANDLE_VALID(i)	((i) != 0 && (i) != PROM_ERROR)
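
/*
 * Example of the intended use (this mirrors prom_instantiate_rtas()
 * further down): both 0 and -1 must be treated as "no such node".
 *
 *	phandle rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
 *	if (!PHANDLE_VALID(rtas_node))
 *		return;
 */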

/* Copied from lib/string.c and lib/kstrtox.c */

static int __init prom_strcmp(const char *cs, const char *ct)
{
	unsigned char c1, c2;

	while (1) {
		c1 = *cs++;
		c2 = *ct++;
		if (c1 != c2)
			return c1 < c2 ? -1 : 1;
		if (!c1)
			break;
	}
	return 0;
}

static char __init *prom_strcpy(char *dest, const char *src)
{
	char *tmp = dest;

	while ((*dest++ = *src++) != '\0')
		/* nothing */;
	return tmp;
}

static int __init prom_strncmp(const char *cs, const char *ct, size_t count)
{
	unsigned char c1, c2;

	while (count) {
		c1 = *cs++;
		c2 = *ct++;
		if (c1 != c2)
			return c1 < c2 ? -1 : 1;
		if (!c1)
			break;
		count--;
	}
	return 0;
}

static size_t __init prom_strlen(const char *s)
{
	const char *sc;

	for (sc = s; *sc != '\0'; ++sc)
		/* nothing */;
	return sc - s;
}

static int __init prom_memcmp(const void *cs, const void *ct, size_t count)
{
	const unsigned char *su1, *su2;
	int res = 0;

	for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
		if ((res = *su1 - *su2) != 0)
			break;
	return res;
}

static char __init *prom_strstr(const char *s1, const char *s2)
{
	size_t l1, l2;

	l2 = prom_strlen(s2);
	if (!l2)
		return (char *)s1;
	l1 = prom_strlen(s1);
	while (l1 >= l2) {
		l1--;
		if (!prom_memcmp(s1, s2, l2))
			return (char *)s1;
		s1++;
	}
	return NULL;
}

static size_t __init prom_strlcat(char *dest, const char *src, size_t count)
{
	size_t dsize = prom_strlen(dest);
	size_t len = prom_strlen(src);
	size_t res = dsize + len;

	/* This would be a bug */
	if (dsize >= count)
		return count;

	dest += dsize;
	count -= dsize;
	if (len >= count)
		len = count-1;
	memcpy(dest, src, len);
	dest[len] = 0;
	return res;

}

#ifdef CONFIG_PPC_PSERIES
static int __init prom_strtobool(const char *s, bool *res)
{
	if (!s)
		return -EINVAL;

	switch (s[0]) {
	case 'y':
	case 'Y':
	case '1':
		*res = true;
		return 0;
	case 'n':
	case 'N':
	case '0':
		*res = false;
		return 0;
	case 'o':
	case 'O':
		switch (s[1]) {
		case 'n':
		case 'N':
			*res = true;
			return 0;
		case 'f':
		case 'F':
			*res = false;
			return 0;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return -EINVAL;
}
#endif

/* This is the one and *ONLY* place where we actually call open
 * firmware.
 */

static int __init call_prom(const char *service, int nargs, int nret, ...)
{
	int i;
	struct prom_args args;
	va_list list;

	args.service = cpu_to_be32(ADDR(service));
	args.nargs = cpu_to_be32(nargs);
	args.nret = cpu_to_be32(nret);

	va_start(list, nret);
	for (i = 0; i < nargs; i++)
		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
	va_end(list);

	for (i = 0; i < nret; i++)
		args.args[nargs+i] = 0;

	if (enter_prom(&args, prom_entry) < 0)
		return PROM_ERROR;

	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
}

static int __init call_prom_ret(const char *service, int nargs, int nret,
				prom_arg_t *rets, ...)
{
	int i;
	struct prom_args args;
	va_list list;

	args.service = cpu_to_be32(ADDR(service));
	args.nargs = cpu_to_be32(nargs);
	args.nret = cpu_to_be32(nret);

	va_start(list, rets);
	for (i = 0; i < nargs; i++)
		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
	va_end(list);

	for (i = 0; i < nret; i++)
		args.args[nargs+i] = 0;

	if (enter_prom(&args, prom_entry) < 0)
		return PROM_ERROR;

	if (rets != NULL)
		for (i = 1; i < nret; ++i)
			rets[i-1] = be32_to_cpu(args.args[nargs+i]);

	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
}


static void __init prom_print(const char *msg)
{
	const char *p, *q;

	if (prom.stdout == 0)
		return;

	for (p = msg; *p != 0; p = q) {
		for (q = p; *q != 0 && *q != '\n'; ++q)
			;
		if (q > p)
			call_prom("write", 3, 1, prom.stdout, p, q - p);
		if (*q == 0)
			break;
		++q;
		call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
	}
}


/*
 * Both prom_print_hex & prom_print_dec take an unsigned long as input so that
 * we do not need __udivdi3 or __umoddi3 on 32bits.
 */
static void __init prom_print_hex(unsigned long val)
{
	int i, nibbles = sizeof(val)*2;
	char buf[sizeof(val)*2+1];

	for (i = nibbles-1;  i >= 0;  i--) {
		buf[i] = (val & 0xf) + '0';
		if (buf[i] > '9')
			buf[i] += ('a'-'0'-10);
		val >>= 4;
	}
	buf[nibbles] = '\0';
	call_prom("write", 3, 1, prom.stdout, buf, nibbles);
}

/* max number of decimal digits in an unsigned long */
#define UL_DIGITS 21
static void __init prom_print_dec(unsigned long val)
{
	int i, size;
	char buf[UL_DIGITS+1];

	for (i = UL_DIGITS-1; i >= 0;  i--) {
		buf[i] = (val % 10) + '0';
		val = val/10;
		if (val == 0)
			break;
	}
	/* shift stuff down */
	size = UL_DIGITS - i;
	call_prom("write", 3, 1, prom.stdout, buf+i, size);
}
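
/*
 * Note: prom_printf() below is deliberately minimal; it only handles
 * %s, %x, %u and %d, each optionally prefixed by 'l' or 'll', which is
 * all the callers in this file need.
 */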

__printf(1, 2)
static void __init prom_printf(const char *format, ...)
{
	const char *p, *q, *s;
	va_list args;
	unsigned long v;
	long vs;
	int n = 0;

	va_start(args, format);
	for (p = format; *p != 0; p = q) {
		for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
			;
		if (q > p)
			call_prom("write", 3, 1, prom.stdout, p, q - p);
		if (*q == 0)
			break;
		if (*q == '\n') {
			++q;
			call_prom("write", 3, 1, prom.stdout,
				  ADDR("\r\n"), 2);
			continue;
		}
		++q;
		if (*q == 0)
			break;
		while (*q == 'l') {
			++q;
			++n;
		}
		switch (*q) {
		case 's':
			++q;
			s = va_arg(args, const char *);
			prom_print(s);
			break;
		case 'x':
			++q;
			switch (n) {
			case 0:
				v = va_arg(args, unsigned int);
				break;
			case 1:
				v = va_arg(args, unsigned long);
				break;
			case 2:
			default:
				v = va_arg(args, unsigned long long);
				break;
			}
			prom_print_hex(v);
			break;
		case 'u':
			++q;
			switch (n) {
			case 0:
				v = va_arg(args, unsigned int);
				break;
			case 1:
				v = va_arg(args, unsigned long);
				break;
			case 2:
			default:
				v = va_arg(args, unsigned long long);
				break;
			}
			prom_print_dec(v);
			break;
		case 'd':
			++q;
			switch (n) {
			case 0:
				vs = va_arg(args, int);
				break;
			case 1:
				vs = va_arg(args, long);
				break;
			case 2:
			default:
				vs = va_arg(args, long long);
				break;
			}
			if (vs < 0) {
				prom_print("-");
				vs = -vs;
			}
			prom_print_dec(vs);
			break;
		}
	}
	va_end(args);
}


static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
				      unsigned long align)
{

	if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
		/*
		 * Old OF requires we claim physical and virtual separately
		 * and then map explicitly (assuming virtual mode)
		 */
		int ret;
		prom_arg_t result;

		ret = call_prom_ret("call-method", 5, 2, &result,
				    ADDR("claim"), prom.memory,
				    align, size, virt);
		if (ret != 0 || result == -1)
			return -1;
		ret = call_prom_ret("call-method", 5, 2, &result,
				    ADDR("claim"), prom.mmumap,
				    align, size, virt);
		if (ret != 0) {
			call_prom("call-method", 4, 1, ADDR("release"),
				  prom.memory, size, virt);
			return -1;
		}
		/* the 0x12 is M (coherence) + PP == read/write */
		call_prom("call-method", 6, 1,
			  ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
		return virt;
	}
	return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
			 (prom_arg_t)align);
}

static void __init __attribute__((noreturn)) prom_panic(const char *reason)
{
	prom_print(reason);
	/* Do not call exit because it clears the screen on pmac
	 * it also causes some sort of double-fault on early pmacs */
	if (of_platform == PLATFORM_POWERMAC)
		asm("trap\n");

	/* ToDo: should put up an SRC here on pSeries */
	call_prom("exit", 0, 0);

	for (;;)			/* should never get here */
		;
}


static int __init prom_next_node(phandle *nodep)
{
	phandle node;

	if ((node = *nodep) != 0
	    && (*nodep = call_prom("child", 1, 1, node)) != 0)
		return 1;
	if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
		return 1;
	for (;;) {
		if ((node = call_prom("parent", 1, 1, node)) == 0)
			return 0;
		if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
			return 1;
	}
}

static inline int __init prom_getprop(phandle node, const char *pname,
				      void *value, size_t valuelen)
{
	return call_prom("getprop", 4, 1, node, ADDR(pname),
			 (u32)(unsigned long) value, (u32) valuelen);
}
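
/*
 * Note on usage elsewhere in this file: prom_getprop() returns the
 * property length in bytes, or PROM_ERROR on failure, so callers check
 * either "<= 0" or "== PROM_ERROR" depending on what they need.
 */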

static inline int __init prom_getproplen(phandle node, const char *pname)
{
	return call_prom("getproplen", 2, 1, node, ADDR(pname));
}

static void add_string(char **str, const char *q)
{
	char *p = *str;

	while (*q)
		*p++ = *q++;
	*p++ = ' ';
	*str = p;
}

static char *tohex(unsigned int x)
{
	static const char digits[] __initconst = "0123456789abcdef";
	static char result[9] __prombss;
	int i;

	result[8] = 0;
	i = 8;
	do {
		--i;
		result[i] = digits[x & 0xf];
		x >>= 4;
	} while (x != 0 && i > 0);
	return &result[i];
}

static int __init prom_setprop(phandle node, const char *nodename,
			       const char *pname, void *value, size_t valuelen)
{
	char cmd[256], *p;

	if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
		return call_prom("setprop", 4, 1, node, ADDR(pname),
				 (u32)(unsigned long) value, (u32) valuelen);

	/* gah... setprop doesn't work on longtrail, have to use interpret */
	p = cmd;
	add_string(&p, "dev");
	add_string(&p, nodename);
	add_string(&p, tohex((u32)(unsigned long) value));
	add_string(&p, tohex(valuelen));
	add_string(&p, tohex(ADDR(pname)));
	add_string(&p, tohex(prom_strlen(pname)));
	add_string(&p, "property");
	*p = 0;
	return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
}

/* We can't use the standard versions because of relocation headaches. */
#define prom_isxdigit(c) \
	(('0' <= (c) && (c) <= '9') || ('a' <= (c) && (c) <= 'f') || ('A' <= (c) && (c) <= 'F'))

#define prom_isdigit(c)	('0' <= (c) && (c) <= '9')
#define prom_islower(c)	('a' <= (c) && (c) <= 'z')
#define prom_toupper(c)	(prom_islower(c) ? ((c) - 'a' + 'A') : (c))

static unsigned long prom_strtoul(const char *cp, const char **endp)
{
	unsigned long result = 0, base = 10, value;

	if (*cp == '0') {
		base = 8;
		cp++;
		if (prom_toupper(*cp) == 'X') {
			cp++;
			base = 16;
		}
	}

	while (prom_isxdigit(*cp) &&
	       (value = prom_isdigit(*cp) ? *cp - '0' : prom_toupper(*cp) - 'A' + 10) < base) {
		result = result * base + value;
		cp++;
	}

	if (endp)
		*endp = cp;

	return result;
}

static unsigned long prom_memparse(const char *ptr, const char **retptr)
{
	unsigned long ret = prom_strtoul(ptr, retptr);
	int shift = 0;

	/*
	 * We can't use a switch here because GCC *may* generate a
	 * jump table which won't work, because we're not running at
	 * the address we're linked at.
	 */
	if ('G' == **retptr || 'g' == **retptr)
		shift = 30;

	if ('M' == **retptr || 'm' == **retptr)
		shift = 20;

	if ('K' == **retptr || 'k' == **retptr)
		shift = 10;

	if (shift) {
		ret <<= shift;
		(*retptr)++;
	}

	return ret;
}

/*
 * Early parsing of the command line passed to the kernel, used for
 * "mem=x" and the options that affect the iommu
 */
static void __init early_cmdline_parse(void)
{
	const char *opt;

	char *p;
	int l = 0;

	prom_cmd_line[0] = 0;
	p = prom_cmd_line;

	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && (long)prom.chosen > 0)
		l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);

	if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || l <= 0 || p[0] == '\0')
		prom_strlcat(prom_cmd_line, " " CONFIG_CMDLINE,
			     sizeof(prom_cmd_line));

	prom_printf("command line: %s\n", prom_cmd_line);

#ifdef CONFIG_PPC64
	opt = prom_strstr(prom_cmd_line, "iommu=");
	if (opt) {
		prom_printf("iommu opt is: %s\n", opt);
		opt += 6;
		while (*opt && *opt == ' ')
			opt++;
		if (!prom_strncmp(opt, "off", 3))
			prom_iommu_off = 1;
		else if (!prom_strncmp(opt, "force", 5))
			prom_iommu_force_on = 1;
	}
#endif
	opt = prom_strstr(prom_cmd_line, "mem=");
	if (opt) {
		opt += 4;
		prom_memory_limit = prom_memparse(opt, (const char **)&opt);
#ifdef CONFIG_PPC64
		/* Align to 16 MB == size of ppc64 large page */
		prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
#endif
	}

#ifdef CONFIG_PPC_PSERIES
	prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
	opt = prom_strstr(prom_cmd_line, "disable_radix");
	if (opt) {
		opt += 13;
		if (*opt && *opt == '=') {
			bool val;

			if (prom_strtobool(++opt, &val))
				prom_radix_disable = false;
			else
				prom_radix_disable = val;
		} else
			prom_radix_disable = true;
	}
	if (prom_radix_disable)
		prom_debug("Radix disabled from cmdline\n");

	opt = prom_strstr(prom_cmd_line, "radix_hcall_invalidate=on");
	if (opt) {
		prom_radix_gtse_disable = true;
		prom_debug("Radix GTSE disabled from cmdline\n");
	}

	opt = prom_strstr(prom_cmd_line, "xive=off");
	if (opt) {
		prom_xive_disable = true;
		prom_debug("XIVE disabled from cmdline\n");
	}
#endif /* CONFIG_PPC_PSERIES */

#ifdef CONFIG_PPC_SVM
	opt = prom_strstr(prom_cmd_line, "svm=");
	if (opt) {
		bool val;

		opt += sizeof("svm=") - 1;
		if (!prom_strtobool(opt, &val))
			prom_svm_enable = val;
	}
#endif /* CONFIG_PPC_SVM */
}

#ifdef CONFIG_PPC_PSERIES
/*
 * The architecture vector has an array of PVR mask/value pairs,
 * followed by # option vectors - 1, followed by the option vectors.
 *
 * See prom.h for the definition of the bits specified in the
 * architecture vector.
 */

/* Firmware expects the value to be n - 1, where n is the # of vectors */
#define NUM_VECTORS(n)		((n) - 1)

/*
 * Firmware expects 1 + n - 2, where n is the length of the option vector in
 * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
 */
#define VECTOR_LENGTH(n)	(1 + (n) - 2)

struct option_vector1 {
	u8 byte1;
	u8 arch_versions;
	u8 arch_versions3;
} __packed;

struct option_vector2 {
	u8 byte1;
	__be16 reserved;
	__be32 real_base;
	__be32 real_size;
	__be32 virt_base;
	__be32 virt_size;
	__be32 load_base;
	__be32 min_rma;
	__be32 min_load;
	u8 min_rma_percent;
	u8 max_pft_size;
} __packed;

struct option_vector3 {
	u8 byte1;
	u8 byte2;
} __packed;

struct option_vector4 {
	u8 byte1;
	u8 min_vp_cap;
} __packed;

struct option_vector5 {
	u8 byte1;
	u8 byte2;
	u8 byte3;
	u8 cmo;
	u8 associativity;
	u8 bin_opts;
	u8 micro_checkpoint;
	u8 reserved0;
	__be32 max_cpus;
	__be16 papr_level;
	__be16 reserved1;
	u8 platform_facilities;
	u8 reserved2;
	__be16 reserved3;
	u8 subprocessors;
	u8 byte22;
	u8 intarch;
	u8 mmu;
	u8 hash_ext;
	u8 radix_ext;
} __packed;

struct option_vector6 {
	u8 reserved;
	u8 secondary_pteg;
	u8 os_name;
} __packed;

struct ibm_arch_vec {
	struct { u32 mask, val; } pvrs[14];

	u8 num_vectors;

	u8 vec1_len;
	struct option_vector1 vec1;

	u8 vec2_len;
	struct option_vector2 vec2;

	u8 vec3_len;
	struct option_vector3 vec3;

	u8 vec4_len;
	struct option_vector4 vec4;

	u8 vec5_len;
	struct option_vector5 vec5;

	u8 vec6_len;
	struct option_vector6 vec6;
} __packed;

static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
	.pvrs = {
		{
			.mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
			.val  = cpu_to_be32(0x003a0000),
		},
		{
			.mask = cpu_to_be32(0xffff0000), /* POWER6 */
			.val  = cpu_to_be32(0x003e0000),
		},
		{
			.mask = cpu_to_be32(0xffff0000), /* POWER7 */
			.val  = cpu_to_be32(0x003f0000),
		},
		{
			.mask = cpu_to_be32(0xffff0000), /* POWER8E */
			.val  = cpu_to_be32(0x004b0000),
		},
		{
			.mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
			.val  = cpu_to_be32(0x004c0000),
		},
		{
			.mask = cpu_to_be32(0xffff0000), /* POWER8 */
			.val  = cpu_to_be32(0x004d0000),
		},
		{
			.mask = cpu_to_be32(0xffff0000), /* POWER9 */
			.val  = cpu_to_be32(0x004e0000),
		},
		{
			.mask = cpu_to_be32(0xffff0000), /* POWER10 */
			.val  = cpu_to_be32(0x00800000),
		},
		{
			.mask = cpu_to_be32(0xffffffff), /* all 3.1-compliant */
			.val  = cpu_to_be32(0x0f000006),
		},
		{
			.mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
			.val  = cpu_to_be32(0x0f000005),
		},
		{
			.mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
			.val  = cpu_to_be32(0x0f000004),
		},
		{
			.mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
			.val  = cpu_to_be32(0x0f000003),
		},
		{
			.mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
			.val  = cpu_to_be32(0x0f000002),
		},
		{
			.mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
			.val  = cpu_to_be32(0x0f000001),
		},
	},

	.num_vectors = NUM_VECTORS(6),

	.vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
	.vec1 = {
		.byte1 = 0,
		.arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
				 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
		.arch_versions3 = OV1_PPC_3_00 | OV1_PPC_3_1,
	},

	.vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
	/* option vector 2: Open Firmware options supported */
	.vec2 = {
		.byte1 = OV2_REAL_MODE,
		.reserved = 0,
		.real_base = cpu_to_be32(0xffffffff),
		.real_size = cpu_to_be32(0xffffffff),
		.virt_base = cpu_to_be32(0xffffffff),
		.virt_size = cpu_to_be32(0xffffffff),
		.load_base = cpu_to_be32(0xffffffff),
		.min_rma = cpu_to_be32(512),		/* 512MB min RMA */
		.min_load = cpu_to_be32(0xffffffff),	/* full client load */
		.min_rma_percent = 0,	/* min RMA percentage of total RAM */
		.max_pft_size = 48,	/* max log_2(hash table size) */
	},

	.vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
	/* option vector 3: processor options supported */
	.vec3 = {
		.byte1 = 0,			/* don't ignore, don't halt */
		.byte2 = OV3_FP | OV3_VMX | OV3_DFP,
	},

	.vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
	/* option vector 4: IBM PAPR implementation */
	.vec4 = {
		.byte1 = 0,			/* don't halt */
		.min_vp_cap = OV4_MIN_ENT_CAP,	/* minimum VP entitled capacity */
	},

	.vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
	/* option vector 5: PAPR/OF options */
	.vec5 = {
		.byte1 = 0,				/* don't ignore, don't halt */
		.byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
		OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
#ifdef CONFIG_PCI_MSI
		/* PCIe/MSI support.  Without MSI full PCIe is not supported */
		OV5_FEAT(OV5_MSI),
#else
		0,
#endif
		.byte3 = 0,
		.cmo =
#ifdef CONFIG_PPC_SMLPAR
		OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
#else
		0,
#endif
		.associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
		.bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
		.micro_checkpoint = 0,
		.reserved0 = 0,
		.max_cpus = cpu_to_be32(NR_CPUS),	/* number of cores supported */
		.papr_level = 0,
		.reserved1 = 0,
		.platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
		.reserved2 = 0,
		.reserved3 = 0,
		.subprocessors = 1,
		.byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO),
		.intarch = 0,
		.mmu = 0,
		.hash_ext = 0,
		.radix_ext = 0,
	},

	/* option vector 6: IBM PAPR hints */
	.vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
	.vec6 = {
		.reserved = 0,
		.secondary_pteg = 0,
		.os_name = OV6_LINUX,
	},
};

static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned;

/* Old method - ELF header with PT_NOTE sections only works on BE */
#ifdef __BIG_ENDIAN__
static const struct fake_elf {
	Elf32_Ehdr	elfhdr;
	Elf32_Phdr	phdr[2];
	struct chrpnote {
		u32	namesz;
		u32	descsz;
		u32	type;
		char	name[8];	/* "PowerPC" */
		struct chrpdesc {
			u32	real_mode;
			u32	real_base;
			u32	real_size;
			u32	virt_base;
			u32	virt_size;
			u32	load_base;
		} chrpdesc;
	} chrpnote;
	struct rpanote {
		u32	namesz;
		u32	descsz;
		u32	type;
		char	name[24];	/* "IBM,RPA-Client-Config" */
		struct rpadesc {
			u32	lpar_affinity;
			u32	min_rmo_size;
			u32	min_rmo_percent;
			u32	max_pft_size;
			u32	splpar;
			u32	min_load;
			u32	new_mem_def;
			u32	ignore_me;
		} rpadesc;
	} rpanote;
} fake_elf __initconst = {
	.elfhdr = {
		.e_ident = { 0x7f, 'E', 'L', 'F',
			     ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
		.e_type = ET_EXEC,	/* yeah right */
		.e_machine = EM_PPC,
		.e_version = EV_CURRENT,
		.e_phoff = offsetof(struct fake_elf, phdr),
		.e_phentsize = sizeof(Elf32_Phdr),
		.e_phnum = 2
	},
	.phdr = {
		[0] = {
			.p_type = PT_NOTE,
			.p_offset = offsetof(struct fake_elf, chrpnote),
			.p_filesz = sizeof(struct chrpnote)
		}, [1] = {
			.p_type = PT_NOTE,
			.p_offset = offsetof(struct fake_elf, rpanote),
			.p_filesz = sizeof(struct rpanote)
		}
	},
	.chrpnote = {
		.namesz = sizeof("PowerPC"),
		.descsz = sizeof(struct chrpdesc),
		.type = 0x1275,
		.name = "PowerPC",
		.chrpdesc = {
			.real_mode = ~0U,	/* ~0 means "don't care" */
			.real_base = ~0U,
			.real_size = ~0U,
			.virt_base = ~0U,
			.virt_size = ~0U,
			.load_base = ~0U
		},
	},
	.rpanote = {
		.namesz = sizeof("IBM,RPA-Client-Config"),
		.descsz = sizeof(struct rpadesc),
		.type = 0x12759999,
		.name = "IBM,RPA-Client-Config",
		.rpadesc = {
			.lpar_affinity = 0,
			.min_rmo_size = 64,	/* in megabytes */
			.min_rmo_percent = 0,
			.max_pft_size = 48,	/* 2^48 bytes max PFT size */
			.splpar = 1,
			.min_load = ~0U,
			.new_mem_def = 0
		}
	}
};
#endif /* __BIG_ENDIAN__ */

static int __init prom_count_smt_threads(void)
{
	phandle node;
	char type[64];
	unsigned int plen;

	/* Pick up the first CPU node we can find */
	for (node = 0; prom_next_node(&node); ) {
		type[0] = 0;
		prom_getprop(node, "device_type", type, sizeof(type));

		if (prom_strcmp(type, "cpu"))
			continue;
		/*
		 * There is an entry for each smt thread, each entry being
		 * 4 bytes long.  All cpus should have the same number of
		 * smt threads, so return after finding the first.
		 */
		plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
		if (plen == PROM_ERROR)
			break;
		plen >>= 2;
		prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);

		/* Sanity check */
		if (plen < 1 || plen > 64) {
			prom_printf("Threads per core %lu out of bounds, assuming 1\n",
				    (unsigned long)plen);
			return 1;
		}
		return plen;
	}
	prom_debug("No threads found, assuming 1 per core\n");

	return 1;

}

static void __init prom_parse_mmu_model(u8 val,
					struct platform_support *support)
{
	switch (val) {
	case OV5_FEAT(OV5_MMU_DYNAMIC):
	case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
		prom_debug("MMU - either supported\n");
		support->radix_mmu = !prom_radix_disable;
		support->hash_mmu = true;
		break;
	case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
		prom_debug("MMU - radix only\n");
		if (prom_radix_disable) {
			/*
			 * If we __have__ to do radix, we're better off ignoring
			 * the command line rather than not booting.
			 */
			prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
		}
		support->radix_mmu = true;
		break;
	case OV5_FEAT(OV5_MMU_HASH):
		prom_debug("MMU - hash only\n");
		support->hash_mmu = true;
		break;
	default:
		prom_debug("Unknown mmu support option: 0x%x\n", val);
		break;
	}
}

static void __init prom_parse_xive_model(u8 val,
					 struct platform_support *support)
{
	switch (val) {
	case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
		prom_debug("XIVE - either mode supported\n");
		support->xive = !prom_xive_disable;
		break;
	case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
		prom_debug("XIVE - exploitation mode supported\n");
		if (prom_xive_disable) {
			/*
			 * If we __have__ to do XIVE, we're better off ignoring
			 * the command line rather than not booting.
			 */
			prom_printf("WARNING: Ignoring cmdline option xive=off\n");
		}
		support->xive = true;
		break;
	case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
		prom_debug("XIVE - legacy mode supported\n");
		break;
	default:
		prom_debug("Unknown xive support option: 0x%x\n", val);
		break;
	}
}

static void __init prom_parse_platform_support(u8 index, u8 val,
					       struct platform_support *support)
{
	switch (index) {
	case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
		prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
		break;
	case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
		if (val & OV5_FEAT(OV5_RADIX_GTSE))
			support->radix_gtse = !prom_radix_gtse_disable;
		break;
	case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
		prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
				      support);
		break;
	}
}

static void __init prom_check_platform_support(void)
{
	struct platform_support supported = {
		.hash_mmu = false,
		.radix_mmu = false,
		.radix_gtse = false,
		.xive = false
	};
	int prop_len = prom_getproplen(prom.chosen,
				       "ibm,arch-vec-5-platform-support");

	/*
	 * First copy the architecture vec template
	 *
	 * use memcpy() instead of *vec = *vec_template so that GCC replaces it
	 * by __memcpy() when KASAN is active
	 */
	memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template,
	       sizeof(ibm_architecture_vec));

	if (prop_len > 1) {
		int i;
		u8 vec[8];
		prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
			   prop_len);
		if (prop_len > sizeof(vec))
			prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
				    prop_len);
		prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
		for (i = 0; i < prop_len; i += 2) {
			prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
			prom_parse_platform_support(vec[i], vec[i + 1], &supported);
		}
	}

	if (supported.radix_mmu && IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
		/* Radix preferred - Check if GTSE is also supported */
		prom_debug("Asking for radix\n");
		ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
		if (supported.radix_gtse)
			ibm_architecture_vec.vec5.radix_ext =
					OV5_FEAT(OV5_RADIX_GTSE);
		else
			prom_debug("Radix GTSE isn't supported\n");
	} else if (supported.hash_mmu) {
		/* Default to hash mmu (if we can) */
		prom_debug("Asking for hash\n");
		ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
	} else {
		/* We're probably on a legacy hypervisor */
		prom_debug("Assuming legacy hash support\n");
	}

	if (supported.xive) {
		prom_debug("Asking for XIVE\n");
		ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
	}
}

static void __init prom_send_capabilities(void)
{
	ihandle root;
	prom_arg_t ret;
	u32 cores;

	/* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
	prom_check_platform_support();

	root = call_prom("open", 1, 1, ADDR("/"));
	if (root != 0) {
		/* We need to tell the FW about the number of cores we support.
		 *
		 * To do that, we count the number of threads on the first core
		 * (we assume this is the same for all cores) and use it to
		 * divide NR_CPUS.
		 */

		cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
		prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
			    cores, NR_CPUS);

		ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);

		/* try calling the ibm,client-architecture-support method */
		prom_printf("Calling ibm,client-architecture-support...");
		if (call_prom_ret("call-method", 3, 2, &ret,
				  ADDR("ibm,client-architecture-support"),
				  root,
				  ADDR(&ibm_architecture_vec)) == 0) {
			/* the call exists... */
			if (ret)
				prom_printf("\nWARNING: ibm,client-architecture"
					    "-support call FAILED!\n");
			call_prom("close", 1, 0, root);
			prom_printf(" done\n");
			return;
		}
		call_prom("close", 1, 0, root);
		prom_printf(" not implemented\n");
	}

#ifdef __BIG_ENDIAN__
	{
		ihandle elfloader;

		/* no ibm,client-architecture-support call, try the old way */
		elfloader = call_prom("open", 1, 1,
				      ADDR("/packages/elf-loader"));
		if (elfloader == 0) {
			prom_printf("couldn't open /packages/elf-loader\n");
			return;
		}
		call_prom("call-method", 3, 1, ADDR("process-elf-header"),
			  elfloader, ADDR(&fake_elf));
		call_prom("close", 1, 0, elfloader);
	}
#endif /* __BIG_ENDIAN__ */
}
#endif /* CONFIG_PPC_PSERIES */

/*
 * Memory allocation strategy... our layout is normally:
 *
 *  at 14Mb or more we have vmlinux, then a gap and initrd.  In some
 *  rare cases, initrd might end up being before the kernel though.
 *  We assume this won't override the final kernel at 0, we have no
 *  provision to handle that in this version, but it should hopefully
 *  never happen.
 *
 *  alloc_top is set to the top of RMO, eventually shrink down if the
 *  TCEs overlap
 *
 *  alloc_bottom is set to the top of kernel/initrd
 *
 *  from there, allocations are done this way : rtas is allocated
 *  topmost, and the device-tree is allocated from the bottom. We try
 *  to grow the device-tree allocation as we progress. If we can't,
 *  then we fail, we don't currently have a facility to restart
 *  elsewhere, but that shouldn't be necessary.
 *
 *  Note that calls to reserve_mem have to be done explicitly, memory
 *  allocated with either alloc_up or alloc_down isn't automatically
 *  reserved.
 */
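
/*
 * Illustration of the resulting layout (values are hypothetical, assuming
 * the usual 768MB RMO cap applied below in prom_init_mem()):
 *
 *	0 .. vmlinux @ ~14MB .. initrd .. alloc_bottom   (flat DT grows up)
 *	alloc_top = rmo_top (768MB)          (RTAS etc. allocated downward)
 *	alloc_top_high = ram_top             (TCE tables carved from the top)
 */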

/*
 * Allocates memory in the RMO upward from the kernel/initrd
 *
 * When align is 0, this is a special case, it means to allocate in place
 * at the current location of alloc_bottom or fail (that is basically
 * extending the previous allocation). Used for the device-tree flattening
 */
static unsigned long __init alloc_up(unsigned long size, unsigned long align)
{
	unsigned long base = alloc_bottom;
	unsigned long addr = 0;

	if (align)
		base = ALIGN(base, align);
	prom_debug("%s(%lx, %lx)\n", __func__, size, align);
	if (ram_top == 0)
		prom_panic("alloc_up() called with mem not initialized\n");

	if (align)
		base = ALIGN(alloc_bottom, align);
	else
		base = alloc_bottom;

	for(; (base + size) <= alloc_top;
	    base = ALIGN(base + 0x100000, align)) {
		prom_debug("    trying: 0x%lx\n\r", base);
		addr = (unsigned long)prom_claim(base, size, 0);
		if (addr != PROM_ERROR && addr != 0)
			break;
		addr = 0;
		if (align == 0)
			break;
	}
	if (addr == 0)
		return 0;
	alloc_bottom = addr + size;

	prom_debug(" -> %lx\n", addr);
	prom_debug("  alloc_bottom : %lx\n", alloc_bottom);
	prom_debug("  alloc_top    : %lx\n", alloc_top);
	prom_debug("  alloc_top_hi : %lx\n", alloc_top_high);
	prom_debug("  rmo_top      : %lx\n", rmo_top);
	prom_debug("  ram_top      : %lx\n", ram_top);

	return addr;
}

/*
 * Allocates memory downward, either from top of RMO, or if highmem
 * is set, from the top of RAM.  Note that this one doesn't handle
 * failures.  It does claim memory if highmem is not set.
 */
static unsigned long __init alloc_down(unsigned long size, unsigned long align,
				       int highmem)
{
	unsigned long base, addr = 0;

	prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
		   highmem ? "(high)" : "(low)");
	if (ram_top == 0)
		prom_panic("alloc_down() called with mem not initialized\n");

	if (highmem) {
		/* Carve out storage for the TCE table. */
		addr = ALIGN_DOWN(alloc_top_high - size, align);
		if (addr <= alloc_bottom)
			return 0;
		/* Will we bump into the RMO ? If yes, check out that we
		 * didn't overlap existing allocations there, if we did,
		 * we are dead, we must be the first in town !
		 */
		if (addr < rmo_top) {
			/* Good, we are first */
			if (alloc_top == rmo_top)
				alloc_top = rmo_top = addr;
			else
				return 0;
		}
		alloc_top_high = addr;
		goto bail;
	}

	base = ALIGN_DOWN(alloc_top - size, align);
	for (; base > alloc_bottom;
	     base = ALIGN_DOWN(base - 0x100000, align))  {
		prom_debug("    trying: 0x%lx\n\r", base);
		addr = (unsigned long)prom_claim(base, size, 0);
		if (addr != PROM_ERROR && addr != 0)
			break;
		addr = 0;
	}
	if (addr == 0)
		return 0;
	alloc_top = addr;

 bail:
	prom_debug(" -> %lx\n", addr);
	prom_debug("  alloc_bottom : %lx\n", alloc_bottom);
	prom_debug("  alloc_top    : %lx\n", alloc_top);
	prom_debug("  alloc_top_hi : %lx\n", alloc_top_high);
	prom_debug("  rmo_top      : %lx\n", rmo_top);
	prom_debug("  ram_top      : %lx\n", ram_top);

	return addr;
}

/*
 * Parse a "reg" cell
 */
static unsigned long __init prom_next_cell(int s, cell_t **cellp)
{
	cell_t *p = *cellp;
	unsigned long r = 0;

	/* Ignore more than 2 cells */
	while (s > sizeof(unsigned long) / 4) {
		p++;
		s--;
	}
	r = be32_to_cpu(*p++);
#ifdef CONFIG_PPC64
	if (s > 1) {
		r <<= 32;
		r |= be32_to_cpu(*(p++));
	}
#endif
	*cellp = p;
	return r;
}

/*
 * Very dumb function for adding to the memory reserve list, but
 * we don't need anything smarter at this point
 *
 * XXX Eventually check for collisions.  They should NEVER happen.
 * If problems seem to show up, it would be a good start to track
 * them down.
 */
static void __init reserve_mem(u64 base, u64 size)
{
	u64 top = base + size;
	unsigned long cnt = mem_reserve_cnt;

	if (size == 0)
		return;

	/* We need to always keep one empty entry so that we
	 * have our terminator with "size" set to 0 since we are
	 * dumb and just copy this entire array to the boot params
	 */
	base = ALIGN_DOWN(base, PAGE_SIZE);
	top = ALIGN(top, PAGE_SIZE);
	size = top - base;

	if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
		prom_panic("Memory reserve map exhausted !\n");
	mem_reserve_map[cnt].base = cpu_to_be64(base);
	mem_reserve_map[cnt].size = cpu_to_be64(size);
	mem_reserve_cnt = cnt + 1;
}

/*
 * Initialize memory allocation mechanism, parse "memory" nodes and
 * obtain that way the top of memory and RMO to set up our local allocator
 */
static void __init prom_init_mem(void)
{
	phandle node;
	char type[64];
	unsigned int plen;
	cell_t *p, *endp;
	__be32 val;
	u32 rac, rsc;

	/*
	 * We iterate the memory nodes to find
	 * 1) top of RMO (first node)
	 * 2) top of memory
	 */
	val = cpu_to_be32(2);
	prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
	rac = be32_to_cpu(val);
	val = cpu_to_be32(1);
	prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc));
	rsc = be32_to_cpu(val);
	prom_debug("root_addr_cells: %x\n", rac);
	prom_debug("root_size_cells: %x\n", rsc);

	prom_debug("scanning memory:\n");

	for (node = 0; prom_next_node(&node); ) {
		type[0] = 0;
		prom_getprop(node, "device_type", type, sizeof(type));

		if (type[0] == 0) {
			/*
			 * CHRP Longtrail machines have no device_type
			 * on the memory node, so check the name instead...
			 */
			prom_getprop(node, "name", type, sizeof(type));
		}
		if (prom_strcmp(type, "memory"))
			continue;

		plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
		if (plen > sizeof(regbuf)) {
			prom_printf("memory node too large for buffer !\n");
			plen = sizeof(regbuf);
		}
		p = regbuf;
		endp = p + (plen / sizeof(cell_t));

#ifdef DEBUG_PROM
		memset(prom_scratch, 0, sizeof(prom_scratch));
		call_prom("package-to-path", 3, 1, node, prom_scratch,
			  sizeof(prom_scratch) - 1);
		prom_debug("  node %s :\n", prom_scratch);
#endif /* DEBUG_PROM */

		while ((endp - p) >= (rac + rsc)) {
			unsigned long base, size;

			base = prom_next_cell(rac, &p);
			size = prom_next_cell(rsc, &p);

			if (size == 0)
				continue;
			prom_debug("    %lx %lx\n", base, size);
			if (base == 0 && (of_platform & PLATFORM_LPAR))
				rmo_top = size;
			if ((base + size) > ram_top)
				ram_top = base + size;
		}
	}

	alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);

	/*
	 * If prom_memory_limit is set we reduce the upper limits *except* for
	 * alloc_top_high. This must be the real top of RAM so we can put
	 * TCE's up there.
	 */

	alloc_top_high = ram_top;

	if (prom_memory_limit) {
		if (prom_memory_limit <= alloc_bottom) {
			prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
				    prom_memory_limit);
			prom_memory_limit = 0;
		} else if (prom_memory_limit >= ram_top) {
			prom_printf("Ignoring mem=%lx >= ram_top.\n",
				    prom_memory_limit);
			prom_memory_limit = 0;
		} else {
			ram_top = prom_memory_limit;
			rmo_top = min(rmo_top, prom_memory_limit);
		}
	}

	/*
	 * Setup our top alloc point, that is top of RMO or top of
	 * segment 0 when running non-LPAR.
	 * Some RS64 machines have buggy firmware where claims up at
	 * 1GB fail.  Cap at 768MB as a workaround.
	 * Since 768MB is plenty of room, and we need to cap to something
	 * reasonable on 32-bit, cap at 768MB on all machines.
	 */
	if (!rmo_top)
		rmo_top = ram_top;
	rmo_top = min(0x30000000ul, rmo_top);
	alloc_top = rmo_top;
	alloc_top_high = ram_top;

	/*
	 * Check if we have an initrd after the kernel but still inside
	 * the RMO.  If we do move our bottom point to after it.
	 */
	if (prom_initrd_start &&
	    prom_initrd_start < rmo_top &&
	    prom_initrd_end > alloc_bottom)
		alloc_bottom = PAGE_ALIGN(prom_initrd_end);

	prom_printf("memory layout at init:\n");
	prom_printf("  memory_limit : %lx (16 MB aligned)\n",
		    prom_memory_limit);
	prom_printf("  alloc_bottom : %lx\n", alloc_bottom);
	prom_printf("  alloc_top    : %lx\n", alloc_top);
	prom_printf("  alloc_top_hi : %lx\n", alloc_top_high);
	prom_printf("  rmo_top      : %lx\n", rmo_top);
	prom_printf("  ram_top      : %lx\n", ram_top);
}

static void __init prom_close_stdin(void)
{
	__be32 val;
	ihandle stdin;

	if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
		stdin = be32_to_cpu(val);
		call_prom("close", 1, 0, stdin);
	}
}

#ifdef CONFIG_PPC_SVM
static int prom_rtas_hcall(uint64_t args)
{
	register uint64_t arg1 asm("r3") = H_RTAS;
	register uint64_t arg2 asm("r4") = args;

	asm volatile("sc 1\n" : "=r" (arg1) :
			"r" (arg1),
			"r" (arg2) :);
	return arg1;
}

static struct rtas_args __prombss os_term_args;

static void __init prom_rtas_os_term(char *str)
{
	phandle rtas_node;
	__be32 val;
	u32 token;

	prom_debug("%s: start...\n", __func__);
	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
	prom_debug("rtas_node: %x\n", rtas_node);
	if (!PHANDLE_VALID(rtas_node))
		return;

	val = 0;
	prom_getprop(rtas_node, "ibm,os-term", &val, sizeof(val));
	token = be32_to_cpu(val);
	prom_debug("ibm,os-term: %x\n", token);
	if (token == 0)
		prom_panic("Could not get token for ibm,os-term\n");
	os_term_args.token = cpu_to_be32(token);
	os_term_args.nargs = cpu_to_be32(1);
	os_term_args.nret = cpu_to_be32(1);
	os_term_args.args[0] = cpu_to_be32(__pa(str));
	prom_rtas_hcall((uint64_t)&os_term_args);
}
#endif /* CONFIG_PPC_SVM */

/*
 * Allocate room for and instantiate RTAS
 */
static void __init prom_instantiate_rtas(void)
{
	phandle rtas_node;
	ihandle rtas_inst;
	u32 base, entry = 0;
	__be32 val;
	u32 size = 0;

	prom_debug("prom_instantiate_rtas: start...\n");

	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
	prom_debug("rtas_node: %x\n", rtas_node);
	if (!PHANDLE_VALID(rtas_node))
		return;

	val = 0;
	prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
	size = be32_to_cpu(val);
	if (size == 0)
		return;

	base = alloc_down(size, PAGE_SIZE, 0);
	if (base == 0)
		prom_panic("Could not allocate memory for RTAS\n");

	rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
	if (!IHANDLE_VALID(rtas_inst)) {
		prom_printf("opening rtas package failed (%x)\n", rtas_inst);
		return;
	}

	prom_printf("instantiating rtas at 0x%x...", base);

	if (call_prom_ret("call-method", 3, 2, &entry,
			  ADDR("instantiate-rtas"),
			  rtas_inst, base) != 0
	    || entry == 0) {
		prom_printf(" failed\n");
		return;
	}
	prom_printf(" done\n");

	reserve_mem(base, size);

	val = cpu_to_be32(base);
	prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
		     &val, sizeof(val));
	val = cpu_to_be32(entry);
	prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
		     &val, sizeof(val));

	/* Check if it supports "query-cpu-stopped-state" */
	if (prom_getprop(rtas_node, "query-cpu-stopped-state",
			 &val, sizeof(val)) != PROM_ERROR)
		rtas_has_query_cpu_stopped = true;

	prom_debug("rtas base     = 0x%x\n", base);
	prom_debug("rtas entry    = 0x%x\n", entry);
	prom_debug("rtas size     = 0x%x\n", size);

	prom_debug("prom_instantiate_rtas: end...\n");
}

#ifdef CONFIG_PPC64
/*
 * Allocate room for and instantiate Stored Measurement Log (SML)
 */
static void __init prom_instantiate_sml(void)
{
	phandle ibmvtpm_node;
	ihandle ibmvtpm_inst;
	u32 entry = 0, size = 0, succ = 0;
	u64 base;
	__be32 val;

	prom_debug("prom_instantiate_sml: start...\n");

	ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
	prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
	if (!PHANDLE_VALID(ibmvtpm_node))
		return;

	ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
	if (!IHANDLE_VALID(ibmvtpm_inst)) {
		prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
		return;
	}

	if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
			 &val, sizeof(val)) != PROM_ERROR) {
		if (call_prom_ret("call-method", 2, 2, &succ,
				  ADDR("reformat-sml-to-efi-alignment"),
				  ibmvtpm_inst) != 0 || succ == 0) {
			prom_printf("Reformat SML to EFI alignment failed\n");
			return;
		}

		if (call_prom_ret("call-method", 2, 2, &size,
				  ADDR("sml-get-allocated-size"),
				  ibmvtpm_inst) != 0 || size == 0) {
			prom_printf("SML get allocated size failed\n");
			return;
		}
	} else {
		if (call_prom_ret("call-method", 2, 2, &size,
				  ADDR("sml-get-handover-size"),
				  ibmvtpm_inst) != 0 || size == 0) {
			prom_printf("SML get handover size failed\n");
			return;
		}
	}

	base = alloc_down(size, PAGE_SIZE, 0);
	if (base == 0)
		prom_panic("Could not allocate memory for sml\n");

	prom_printf("instantiating sml at 0x%llx...", base);

	memset((void *)base, 0, size);

	if (call_prom_ret("call-method", 4, 2, &entry,
			  ADDR("sml-handover"),
			  ibmvtpm_inst, size, base) != 0 || entry == 0) {
		prom_printf("SML handover failed\n");
		return;
	}
	prom_printf(" done\n");

	reserve_mem(base, size);

	prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
		     &base, sizeof(base));
	prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
		     &size, sizeof(size));

	prom_debug("sml base     = 0x%llx\n", base);
	prom_debug("sml size     = 0x%x\n", size);

	prom_debug("prom_instantiate_sml: end...\n");
}

/*
 * Allocate room for and initialize TCE tables
 */
#ifdef __BIG_ENDIAN__
static void __init prom_initialize_tce_table(void)
{
	phandle node;
	ihandle phb_node;
	char compatible[64], type[64], model[64];
	char *path = prom_scratch;
	u64 base, align;
	u32 minalign, minsize;
	u64 tce_entry, *tce_entryp;
	u64 local_alloc_top, local_alloc_bottom;
	u64 i;

	if (prom_iommu_off)
		return;

	prom_debug("starting prom_initialize_tce_table\n");

	/* Cache current top of allocs so we reserve a single block */
	local_alloc_top = alloc_top_high;
	local_alloc_bottom = local_alloc_top;

	/* Search all nodes looking for PHBs. */
	for (node = 0; prom_next_node(&node); ) {
		compatible[0] = 0;
		type[0] = 0;
		model[0] = 0;
		prom_getprop(node, "compatible",
			     compatible, sizeof(compatible));
		prom_getprop(node, "device_type", type, sizeof(type));
		prom_getprop(node, "model", model, sizeof(model));

		if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL))
			continue;

		/* Keep the old logic intact to avoid regression. */
		if (compatible[0] != 0) {
			if ((prom_strstr(compatible, "python") == NULL) &&
			    (prom_strstr(compatible, "Speedwagon") == NULL) &&
			    (prom_strstr(compatible, "Winnipeg") == NULL))
				continue;
		} else if (model[0] != 0) {
			if ((prom_strstr(model, "ython") == NULL) &&
			    (prom_strstr(model, "peedwagon") == NULL) &&
			    (prom_strstr(model, "innipeg") == NULL))
				continue;
		}

		if (prom_getprop(node, "tce-table-minalign", &minalign,
				 sizeof(minalign)) == PROM_ERROR)
			minalign = 0;
		if (prom_getprop(node, "tce-table-minsize", &minsize,
				 sizeof(minsize)) == PROM_ERROR)
			minsize = 4UL << 20;

		/*
		 * Even though we read what OF wants, we just set the table
		 * size to 4 MB.  This is enough to map 2GB of PCI DMA space.
		 * By doing this, we avoid the pitfalls of trying to DMA to
		 * MMIO space and the DMA alias hole.
		 */
		minsize = 4UL << 20;

		/* Align to the greater of the align or size */
		align = max(minalign, minsize);
		base = alloc_down(minsize, align, 1);
		if (base == 0)
			prom_panic("ERROR, cannot find space for TCE table.\n");
		if (base < local_alloc_bottom)
			local_alloc_bottom = base;

		/* It seems OF doesn't null-terminate the path :-( */
		memset(path, 0, sizeof(prom_scratch));
		/* Call OF to setup the TCE hardware */
		if (call_prom("package-to-path", 3, 1, node,
			      path, sizeof(prom_scratch) - 1) == PROM_ERROR) {
			prom_printf("package-to-path failed\n");
		}

		/* Save away the TCE table attributes for later use. */
		prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
		prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));

		prom_debug("TCE table: %s\n", path);
		prom_debug("\tnode = 0x%x\n", node);
		prom_debug("\tbase = 0x%llx\n", base);
		prom_debug("\tsize = 0x%x\n", minsize);

		/* Initialize the table to have a one-to-one mapping
		 * over the allocated size.
		 */
		tce_entryp = (u64 *)base;
		for (i = 0; i < (minsize >> 3); tce_entryp++, i++) {
			tce_entry = (i << PAGE_SHIFT);
			tce_entry |= 0x3;
			*tce_entryp = tce_entry;
		}

		prom_printf("opening PHB %s", path);
		phb_node = call_prom("open", 1, 1, path);
		if (phb_node == 0)
			prom_printf("... failed\n");
		else
			prom_printf("... done\n");

		call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
			  phb_node, -1, minsize,
			  (u32) base, (u32) (base >> 32));
		call_prom("close", 1, 0, phb_node);
	}

	reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);

	/* These are only really needed if there is a memory limit in
	 * effect, but we don't know so export them always.
*/ 2057 prom_tce_alloc_start = local_alloc_bottom; 2058 prom_tce_alloc_end = local_alloc_top; 2059 2060 /* Flag the first invalid entry */
2061 prom_debug("ending prom_initialize_tce_table\n"); 2062 } 2063 #endif /* __BIG_ENDIAN__ */ 2064 #endif /* CONFIG_PPC64 */ 2065 2066 /*
2067 * With CHRP SMP we need to use the OF to start the other processors. 2068 * We can't wait until smp_boot_cpus (the OF is trashed by then)
2069 * so we have to put the processors into a holding pattern controlled 2070 * by the kernel (not OF) before we destroy the OF. 2071 *
2072 * This uses a chunk of low memory, puts some holding pattern 2073 * code there and sends the other processors off to there until
2074 * smp_boot_cpus tells them to do something. The holding pattern 2075 * checks that address until its cpu # is there; when it is, that
2076 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care 2077 * of setting those values. 2078 *
2079 * We also use physical address 0x4 here to tell when a cpu 2080 * is in its holding pattern code. 2081 * 2082 * -- Cort 2083 */ 2084 /*
2085 * We want to reference the copy of __secondary_hold_* in the 2086 * 0 - 0x100 address range 2087 */
2088 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff) 2089 2090 static void __init prom_hold_cpus(void) 2091 { 2092 unsigned long i; 2093 phandle node;
2094 char type[64]; 2095 unsigned long *spinloop 2096 = (void *) LOW_ADDR(__secondary_hold_spinloop); 2097 unsigned long *acknowledge
2098 = (void *) LOW_ADDR(__secondary_hold_acknowledge); 2099 unsigned long secondary_hold = LOW_ADDR(__secondary_hold); 2100 2101 /*
2102 * On pseries, if RTAS supports "query-cpu-stopped-state", 2103 * we skip this stage, the CPUs will be started by the 2104 * kernel using RTAS.
2105 */ 2106 if ((of_platform == PLATFORM_PSERIES || 2107 of_platform == PLATFORM_PSERIES_LPAR) && 2108 rtas_has_query_cpu_stopped) {
2109 prom_printf("prom_hold_cpus: skipped\n"); 2110 return; 2111 } 2112 2113 prom_debug("prom_hold_cpus: start...\n");
2114 prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop); 2115 prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop);
2116 prom_debug(" 1) acknowledge = 0x%lx\n", 2117 (unsigned long)acknowledge); 2118 prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge);
2119 prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold); 2120 2121 /* Set the common spinloop variable, so all of the secondary cpus
2122 * will block when they are awakened from their OF spinloop. 2123 * This must occur for both SMP and non SMP kernels, since OF will
2124 * be trashed when we move the kernel. 2125 */ 2126 *spinloop = 0; 2127 2128 /* look for cpus */ 2129 for (node = 0; prom_next_node(&node); ) {
2130 unsigned int cpu_no; 2131 __be32 reg; 2132 2133 type[0] = 0; 2134 prom_getprop(node, "device_type", type, sizeof(type));
2135 if (prom_strcmp(type, "cpu") != 0) 2136 continue; 2137 2138 /* Skip non-configured cpus. */
2139 if (prom_getprop(node, "status", type, sizeof(type)) > 0) 2140 if (prom_strcmp(type, "okay") != 0) 2141 continue; 2142
2143 reg = cpu_to_be32(-1); /* make sparse happy */ 2144 prom_getprop(node, "reg", &reg, sizeof(reg)); 2145 cpu_no = be32_to_cpu(reg); 2146
2147 prom_debug("cpu hw idx = %u\n", cpu_no); 2148 2149 /* Init the acknowledge var which will be reset by
2150 * the secondary cpu when it awakens from its OF 2151 * spinloop. 2152 */ 2153 *acknowledge = (unsigned long)-1; 2154
2155 if (cpu_no != prom.cpu) { 2156 /* Primary Thread of non-boot cpu or any thread */ 2157 prom_printf("starting cpu hw idx %u...
", cpu_no); 2158 call_prom("start-cpu", 3, 0, node, 2159 secondary_hold, cpu_no); 2160 2161 for (i = 0; (i < 100000000) && 2162 (*acknowledge == ((unsigned long)-1)); i++ ) 2163 mb(); 2164 2165 if (*acknowledge == cpu_no) 2166 prom_printf("done\n"); 2167 else 2168 prom_printf("failed: %lx\n", *acknowledge); 2169 } 2170 #ifdef CONFIG_SMP 2171 else 2172 prom_printf("boot cpu hw idx %u\n", cpu_no); 2173 #endif /* CONFIG_SMP */ 2174 } 2175 2176 prom_debug("prom_hold_cpus: end...\n"); 2177 } 2178 2179 2180 static void __init prom_init_client_services(unsigned long pp) 2181 { 2182 /* Get a handle to the prom entry point before anything else */ 2183 prom_entry = pp; 2184 2185 /* get a handle for the stdout device */ 2186 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen")); 2187 if (!PHANDLE_VALID(prom.chosen)) 2188 prom_panic("cannot find chosen"); /* msg won't be printed :( */ 2189 2190 /* get device tree root */ 2191 prom.root = call_prom("finddevice", 1, 1, ADDR("/")); 2192 if (!PHANDLE_VALID(prom.root)) 2193 prom_panic("cannot find device tree root"); /* msg won't be printed :( */ 2194 2195 prom.mmumap = 0; 2196 } 2197 2198 #ifdef CONFIG_PPC32 2199 /* 2200 * For really old powermacs, we need to map things we claim. 2201 * For that, we need the ihandle of the mmu. 2202 * Also, on the longtrail, we need to work around other bugs. 2203 */ 2204 static void __init prom_find_mmu(void) 2205 { 2206 phandle oprom; 2207 char version[64]; 2208 2209 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom")); 2210 if (!PHANDLE_VALID(oprom)) 2211 return; 2212 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0) 2213 return; 2214 version[sizeof(version) - 1] = 0; 2215 /* XXX might need to add other versions here */ 2216 if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0) 2217 of_workarounds = OF_WA_CLAIM; 2218 else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) { 2219 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL; 2220 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim"); 2221 } else 2222 return; 2223 prom.memory = call_prom("open", 1, 1, ADDR("/memory")); 2224 prom_getprop(prom.chosen, "mmu", &prom.mmumap, 2225 sizeof(prom.mmumap)); 2226 prom.mmumap = be32_to_cpu(prom.mmumap); 2227 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap)) 2228 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */ 2229 } 2230 #else 2231 #define prom_find_mmu() 2232 #endif 2233 2234 static void __init prom_init_stdout(void) 2235 { 2236 char *path = of_stdout_device; 2237 char type[16]; 2238 phandle stdout_node; 2239 __be32 val; 2240 2241 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0) 2242 prom_panic("cannot find stdout"); 2243 2244 prom.stdout = be32_to_cpu(val); 2245 2246 /* Get the full OF pathname of the stdout device */ 2247 memset(path, 0, 256); 2248 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255); 2249 prom_printf("OF stdout device is: %s\n", of_stdout_device); 2250 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path", 2251 path, prom_strlen(path) + 1); 2252 2253 /* instance-to-package fails on PA-Semi */ 2254 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); 2255 if (stdout_node != PROM_ERROR) { 2256 val = cpu_to_be32(stdout_node); 2257 2258 /* If it's a display, note it */ 2259 memset(type, 0, sizeof(type)); 2260 prom_getprop(stdout_node, "device_type", type, sizeof(type)); 2261 if (prom_strcmp(type, "display") == 0) 2262 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0); 2263 } 2264 } 2265 2266 static int __init 
prom_find_machine_type(void) 2267 { 2268 char compat[256]; 2269 int len, i = 0; 2270 #ifdef CONFIG_PPC64 2271 phandle rtas; 2272 int x; 2273 #endif 2274
2275 /* Look for a PowerMac or a Cell */ 2276 len = prom_getprop(prom.root, "compatible", 2277 compat, sizeof(compat)-1); 2278 if (len > 0) {
2279 compat[len] = 0; 2280 while (i < len) { 2281 char *p = &compat[i]; 2282 int sl = prom_strlen(p); 2283 if (sl == 0) 2284 break;
2285 if (prom_strstr(p, "Power Macintosh") || 2286 prom_strstr(p, "MacRISC")) 2287 return PLATFORM_POWERMAC; 2288 #ifdef CONFIG_PPC64
2289 /* We must make sure we don't detect the IBM Cell 2290 * blades as pSeries due to some firmware issues, 2291 * so we do it here. 2292 */
2293 if (prom_strstr(p, "IBM,CBEA") || 2294 prom_strstr(p, "IBM,CPBW-1.0")) 2295 return PLATFORM_GENERIC; 2296 #endif /* CONFIG_PPC64 */
2297 i += sl + 1; 2298 } 2299 } 2300 #ifdef CONFIG_PPC64 2301 /* Try to figure out if it's an IBM pSeries or any other
2302 * PAPR compliant platform. We assume it is if: 2303 * - /device_type is "chrp" (please do NOT use that for future
2304 * non-IBM designs!) 2305 * - it has /rtas 2306 */ 2307 len = prom_getprop(prom.root, "device_type", 2308 compat, sizeof(compat)-1);
2309 if (len <= 0) 2310 return PLATFORM_GENERIC; 2311 if (prom_strcmp(compat, "chrp")) 2312 return PLATFORM_GENERIC; 2313
2314 /* Default to pSeries. We need to know if we are running LPAR */ 2315 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2316 if (!PHANDLE_VALID(rtas)) 2317 return PLATFORM_GENERIC; 2318 x = prom_getproplen(rtas, "ibm,hypertas-functions"); 2319 if (x != PROM_ERROR) {
2320 prom_debug("Hypertas detected, assuming LPAR !\n"); 2321 return PLATFORM_PSERIES_LPAR; 2322 } 2323 return PLATFORM_PSERIES; 2324 #else
2325 return PLATFORM_GENERIC; 2326 #endif 2327 } 2328 2329 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b) 2330 {
2331 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r); 2332 } 2333 2334 /* 2335 * If we have a display that we don't know how to drive,
2336 * we will want to try to execute OF's open method for it 2337 * later. However, OF will probably fall over if we do that once
2338 * we've taken over the MMU. 2339 * So we check whether we will need to open the display, 2340 * and if so, open it now.
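* Displays we manage to open are tagged with a linux,opened property so the
* kernel can find them again once it takes over.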
2341 */ 2342 static void __init prom_check_displays(void) 2343 { 2344 char type[16], *path; 2345 phandle node; 2346 ihandle ih; 2347 int i; 2348 2349 static const unsigned char default_colors[] __initconst = { 2350 0x00, 0x00, 0x00, 2351 0x00, 0x00, 0xaa, 2352 0x00, 0xaa, 0x00, 2353 0x00, 0xaa, 0xaa, 2354 0xaa, 0x00, 0x00, 2355 0xaa, 0x00, 0xaa, 2356 0xaa, 0xaa, 0x00, 2357 0xaa, 0xaa, 0xaa, 2358 0x55, 0x55, 0x55, 2359 0x55, 0x55, 0xff, 2360 0x55, 0xff, 0x55, 2361 0x55, 0xff, 0xff, 2362 0xff, 0x55, 0x55, 2363 0xff, 0x55, 0xff, 2364 0xff, 0xff, 0x55, 2365 0xff, 0xff, 0xff 2366 }; 2367 const unsigned char *clut; 2368 2369 prom_debug("Looking for displays\n"); 2370 for (node = 0; prom_next_node(&node); ) { 2371 memset(type, 0, sizeof(type)); 2372 prom_getprop(node, "device_type", type, sizeof(type)); 2373 if (prom_strcmp(type, "display") != 0) 2374 continue; 2375 2376 /* It seems OF doesn't null-terminate the path :-( */ 2377 path = prom_scratch; 2378 memset(path, 0, sizeof(prom_scratch)); 2379 2380 /* 2381 * leave some room at the end of the path for appending extra 2382 * arguments 2383 */ 2384 if (call_prom("package-to-path", 3, 1, node, path, 2385 sizeof(prom_scratch) - 10) == PROM_ERROR) 2386 continue; 2387 prom_printf("found display : %s, opening... ", path); 2388 2389 ih = call_prom("open", 1, 1, path); 2390 if (ih == 0) { 2391 prom_printf("failed\n"); 2392 continue; 2393 } 2394 2395 /* Success */ 2396 prom_printf("done\n"); 2397 prom_setprop(node, path, "linux,opened", NULL, 0); 2398 2399 /* Setup a usable color table when the appropriate 2400 * method is available. Should update this to set-colors */ 2401 clut = default_colors; 2402 for (i = 0; i < 16; i++, clut += 3) 2403 if (prom_set_color(ih, i, clut[0], clut[1], 2404 clut[2]) != 0) 2405 break; 2406 2407 #ifdef CONFIG_LOGO_LINUX_CLUT224 2408 clut = PTRRELOC(logo_linux_clut224.clut); 2409 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3) 2410 if (prom_set_color(ih, i + 32, clut[0], clut[1], 2411 clut[2]) != 0) 2412 break; 2413 #endif /* CONFIG_LOGO_LINUX_CLUT224 */ 2414 2415 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX 2416 if (prom_getprop(node, "linux,boot-display", NULL, 0) != 2417 PROM_ERROR) { 2418 u32 width, height, pitch, addr; 2419 2420 prom_printf("Setting btext !\n"); 2421 2422 if (prom_getprop(node, "width", &width, 4) == PROM_ERROR) 2423 return; 2424 2425 if (prom_getprop(node, "height", &height, 4) == PROM_ERROR) 2426 return; 2427 2428 if (prom_getprop(node, "linebytes", &pitch, 4) == PROM_ERROR) 2429 return; 2430 2431 if (prom_getprop(node, "address", &addr, 4) == PROM_ERROR) 2432 return; 2433 2434 prom_printf("W=%d H=%d LB=%d addr=0x%x\n", 2435 width, height, pitch, addr); 2436 btext_setup_display(width, height, 8, pitch, addr); 2437 btext_prepare_BAT(); 2438 } 2439 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */ 2440 } 2441 } 2442 2443 2444 /* Return (relocated) pointer to this much memory: moves initrd if reqd. 
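* Space is claimed from Open Firmware in DEVTREE_CHUNK_SIZE pieces as the flattened tree grows;
* for example, flatten_device_tree() below grabs room for its header with
* make_room(&mem_start, &mem_end, sizeof(struct boot_param_header), 4).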
*/ 2445 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end, 2446 unsigned long needed, unsigned long align) 2447 { 2448 void *ret; 2449 2450 *mem_start = ALIGN(*mem_start, align); 2451 while ((*mem_start + needed) > *mem_end) { 2452 unsigned long room, chunk; 2453 2454 prom_debug("Chunk exhausted, claiming more at %lx...\n", 2455 alloc_bottom); 2456 room = alloc_top - alloc_bottom; 2457 if (room > DEVTREE_CHUNK_SIZE) 2458 room = DEVTREE_CHUNK_SIZE; 2459 if (room < PAGE_SIZE) 2460 prom_panic("No memory for flatten_device_tree " 2461 "(no room)\n"); 2462 chunk = alloc_up(room, 0); 2463 if (chunk == 0) 2464 prom_panic("No memory for flatten_device_tree " 2465 "(claim failed)\n"); 2466 *mem_end = chunk + room; 2467 } 2468 2469 ret = (void *)*mem_start; 2470 *mem_start += needed; 2471 2472 return ret; 2473 } 2474 2475 #define dt_push_token(token, mem_start, mem_end) do { \ 2476 void *room = make_room(mem_start, mem_end, 4, 4); \ 2477 *(__be32 *)room = cpu_to_be32(token); \ 2478 } while(0) 2479 2480 static unsigned long __init dt_find_string(char *str) 2481 { 2482 char *s, *os; 2483 2484 s = os = (char *)dt_string_start; 2485 s += 4; 2486 while (s < (char *)dt_string_end) { 2487 if (prom_strcmp(s, str) == 0) 2488 return s - os; 2489 s += prom_strlen(s) + 1; 2490 } 2491 return 0; 2492 } 2493 2494 /* 2495 * The Open Firmware 1275 specification states properties must be 31 bytes or 2496 * less, however not all firmwares obey this. Make it 64 bytes to be safe. 2497 */ 2498 #define MAX_PROPERTY_NAME 64 2499 2500 static void __init scan_dt_build_strings(phandle node, 2501 unsigned long *mem_start, 2502 unsigned long *mem_end) 2503 { 2504 char *prev_name, *namep, *sstart; 2505 unsigned long soff; 2506 phandle child; 2507 2508 sstart = (char *)dt_string_start; 2509 2510 /* get and store all property names */ 2511 prev_name = ""; 2512 for (;;) { 2513 /* 64 is max len of name including nul. */ 2514 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1); 2515 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) { 2516 /* No more nodes: unwind alloc */ 2517 *mem_start = (unsigned long)namep; 2518 break; 2519 } 2520 2521 /* skip "name" */ 2522 if (prom_strcmp(namep, "name") == 0) { 2523 *mem_start = (unsigned long)namep; 2524 prev_name = "name"; 2525 continue; 2526 } 2527 /* get/create string entry */ 2528 soff = dt_find_string(namep); 2529 if (soff != 0) { 2530 *mem_start = (unsigned long)namep; 2531 namep = sstart + soff; 2532 } else { 2533 /* Trim off some if we can */ 2534 *mem_start = (unsigned long)namep + prom_strlen(namep) + 1; 2535 dt_string_end = *mem_start; 2536 } 2537 prev_name = namep; 2538 } 2539 2540 /* do all our children */ 2541 child = call_prom("child", 1, 1, node); 2542 while (child != 0) { 2543 scan_dt_build_strings(child, mem_start, mem_end); 2544 child = call_prom("peer", 1, 1, child); 2545 } 2546 } 2547 2548 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, 2549 unsigned long *mem_end) 2550 { 2551 phandle child; 2552 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path; 2553 unsigned long soff; 2554 unsigned char *valp; 2555 static char pname[MAX_PROPERTY_NAME] __prombss; 2556 int l, room, has_phandle = 0; 2557 2558 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end); 2559 2560 /* get the node's full name */ 2561 namep = (char *)*mem_start; 2562 room = *mem_end - *mem_start; 2563 if (room > 255) 2564 room = 255; 2565 l = call_prom("package-to-path", 3, 1, node, namep, room); 2566 if (l >= 0) { 2567 /* Didn't fit? 
Get more room. */ 2568 if (l >= room) { 2569 if (l >= *mem_end - *mem_start) 2570 namep = make_room(mem_start, mem_end, l+1, 1); 2571 call_prom("package-to-path", 3, 1, node, namep, l); 2572 } 2573 namep[l] = '\0'; 2574 2575 /* Fixup an Apple bug where they have bogus \0 chars in the 2576 * middle of the path in some properties, and extract 2577 * the unit name (everything after the last '/'). 2578 */ 2579 for (lp = p = namep, ep = namep + l; p < ep; p++) { 2580 if (*p == '/') 2581 lp = namep; 2582 else if (*p != 0) 2583 *lp++ = *p; 2584 } 2585 *lp = 0; 2586 *mem_start = ALIGN((unsigned long)lp + 1, 4); 2587 } 2588 2589 /* get it again for debugging */ 2590 path = prom_scratch; 2591 memset(path, 0, sizeof(prom_scratch)); 2592 call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1); 2593 2594 /* get and store all properties */ 2595 prev_name = ""; 2596 sstart = (char *)dt_string_start; 2597 for (;;) { 2598 if (call_prom("nextprop", 3, 1, node, prev_name, 2599 pname) != 1) 2600 break; 2601 2602 /* skip "name" */ 2603 if (prom_strcmp(pname, "name") == 0) { 2604 prev_name = "name"; 2605 continue; 2606 } 2607 2608 /* find string offset */ 2609 soff = dt_find_string(pname); 2610 if (soff == 0) { 2611 prom_printf("WARNING: Can't find string index for" 2612 " <%s>, node %s\n", pname, path); 2613 break; 2614 } 2615 prev_name = sstart + soff; 2616 2617 /* get length */ 2618 l = call_prom("getproplen", 2, 1, node, pname); 2619 2620 /* sanity checks */ 2621 if (l == PROM_ERROR) 2622 continue; 2623 2624 /* push property head */ 2625 dt_push_token(OF_DT_PROP, mem_start, mem_end); 2626 dt_push_token(l, mem_start, mem_end); 2627 dt_push_token(soff, mem_start, mem_end); 2628 2629 /* push property content */ 2630 valp = make_room(mem_start, mem_end, l, 4); 2631 call_prom("getprop", 4, 1, node, pname, valp, l); 2632 *mem_start = ALIGN(*mem_start, 4); 2633 2634 if (!prom_strcmp(pname, "phandle")) 2635 has_phandle = 1; 2636 } 2637 2638 /* Add a "phandle" property if none already exist */ 2639 if (!has_phandle) { 2640 soff = dt_find_string("phandle"); 2641 if (soff == 0) 2642 prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path); 2643 else { 2644 dt_push_token(OF_DT_PROP, mem_start, mem_end); 2645 dt_push_token(4, mem_start, mem_end); 2646 dt_push_token(soff, mem_start, mem_end); 2647 valp = make_room(mem_start, mem_end, 4, 4); 2648 *(__be32 *)valp = cpu_to_be32(node); 2649 } 2650 } 2651 2652 /* do all our children */ 2653 child = call_prom("child", 1, 1, node); 2654 while (child != 0) { 2655 scan_dt_build_struct(child, mem_start, mem_end); 2656 child = call_prom("peer", 1, 1, child); 2657 } 2658 2659 dt_push_token(OF_DT_END_NODE, mem_start, mem_end); 2660 } 2661 2662 static void __init flatten_device_tree(void) 2663 { 2664 phandle root; 2665 unsigned long mem_start, mem_end, room; 2666 struct boot_param_header *hdr; 2667 char *namep; 2668 u64 *rsvmap; 2669 2670 /* 2671 * Check how much room we have between alloc top & bottom (+/- a 2672 * few pages), crop to 1MB, as this is our "chunk" size 2673 */ 2674 room = alloc_top - alloc_bottom - 0x4000; 2675 if (room > DEVTREE_CHUNK_SIZE) 2676 room = DEVTREE_CHUNK_SIZE; 2677 prom_debug("starting device tree allocs at %lx\n", alloc_bottom); 2678 2679 /* Now try to claim that */ 2680 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE); 2681 if (mem_start == 0) 2682 prom_panic("Can't allocate initial device-tree chunk\n"); 2683 mem_end = mem_start + room; 2684 2685 /* Get root of tree */ 2686 root = call_prom("peer", 1, 1, 
(phandle)0); 2687 if (root == (phandle)0) 2688 prom_panic ("couldn't get device tree root\n"); 2689 2690 /* Build header and make room for mem rsv map */ 2691 mem_start = ALIGN(mem_start, 4); 2692 hdr = make_room(&mem_start, &mem_end, 2693 sizeof(struct boot_param_header), 4); 2694 dt_header_start = (unsigned long)hdr; 2695 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8); 2696 2697 /* Start of strings */ 2698 mem_start = PAGE_ALIGN(mem_start); 2699 dt_string_start = mem_start; 2700 mem_start += 4; /* hole */ 2701 2702 /* Add "phandle" in there, we'll need it */ 2703 namep = make_room(&mem_start, &mem_end, 16, 1); 2704 prom_strcpy(namep, "phandle"); 2705 mem_start = (unsigned long)namep + prom_strlen(namep) + 1; 2706 2707 /* Build string array */ 2708 prom_printf("Building dt strings...\n"); 2709 scan_dt_build_strings(root, &mem_start, &mem_end); 2710 dt_string_end = mem_start; 2711 2712 /* Build structure */ 2713 mem_start = PAGE_ALIGN(mem_start); 2714 dt_struct_start = mem_start; 2715 prom_printf("Building dt structure...\n"); 2716 scan_dt_build_struct(root, &mem_start, &mem_end); 2717 dt_push_token(OF_DT_END, &mem_start, &mem_end); 2718 dt_struct_end = PAGE_ALIGN(mem_start); 2719 2720 /* Finish header */ 2721 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu); 2722 hdr->magic = cpu_to_be32(OF_DT_HEADER); 2723 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start); 2724 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start); 2725 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start); 2726 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start); 2727 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start); 2728 hdr->version = cpu_to_be32(OF_DT_VERSION); 2729 /* Version 16 is not backward compatible */ 2730 hdr->last_comp_version = cpu_to_be32(0x10); 2731 2732 /* Copy the reserve map in */ 2733 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map)); 2734 2735 #ifdef DEBUG_PROM 2736 { 2737 int i; 2738 prom_printf("reserved memory map:\n"); 2739 for (i = 0; i < mem_reserve_cnt; i++) 2740 prom_printf(" %llx - %llx\n", 2741 be64_to_cpu(mem_reserve_map[i].base), 2742 be64_to_cpu(mem_reserve_map[i].size)); 2743 } 2744 #endif 2745 /* Bump mem_reserve_cnt to cause further reservations to fail 2746 * since it's too late. 2747 */ 2748 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE; 2749 2750 prom_printf("Device tree strings 0x%lx -> 0x%lx\n", 2751 dt_string_start, dt_string_end); 2752 prom_printf("Device tree struct 0x%lx -> 0x%lx\n", 2753 dt_struct_start, dt_struct_end); 2754 } 2755 2756 #ifdef CONFIG_PPC_MAPLE 2757 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property. 2758 * The values are bad, and it doesn't even have the right number of cells. 
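* The fixup below rewrites it as a proper 6-cell entry that maps the 64k of ISA I/O space
* onto the bridge's PCI I/O space.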
*/ 2759 static void __init fixup_device_tree_maple(void) 2760 { 2761 phandle isa; 2762 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2763 u32 isa_ranges[6]; 2764 char *name; 2765 2766 name = "/ht@0/isa@4"; 2767 isa = call_prom("finddevice", 1, 1, ADDR(name));
2768 if (!PHANDLE_VALID(isa)) { 2769 name = "/ht@0/isa@6"; 2770 isa = call_prom("finddevice", 1, 1, ADDR(name));
2771 rloc = 0x01003000; /* IO space; PCI device = 6 */ 2772 } 2773 if (!PHANDLE_VALID(isa)) 2774 return; 2775
2776 if (prom_getproplen(isa, "ranges") != 12) 2777 return; 2778 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2779 == PROM_ERROR) 2780 return; 2781 2782 if (isa_ranges[0] != 0x1 || 2783 isa_ranges[1] != 0xf4000000 ||
2784 isa_ranges[2] != 0x00010000) 2785 return; 2786 2787 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n"); 2788
2789 isa_ranges[0] = 0x1; 2790 isa_ranges[1] = 0x0; 2791 isa_ranges[2] = rloc; 2792 isa_ranges[3] = 0x0; 2793 isa_ranges[4] = 0x0;
2794 isa_ranges[5] = 0x00010000; 2795 prom_setprop(isa, name, "ranges", 2796 isa_ranges, sizeof(isa_ranges)); 2797 } 2798
2799 #define CPC925_MC_START 0xf8000000 2800 #define CPC925_MC_LENGTH 0x1000000
2801 /* The values for memory-controller don't have the right number of cells */
2802 static void __init fixup_device_tree_maple_memory_controller(void) 2803 { 2804 phandle mc; 2805 u32 mc_reg[4];
2806 char *name = "/hostbridge@f8000000"; 2807 u32 ac, sc; 2808 2809 mc = call_prom("finddevice", 1, 1, ADDR(name));
2810 if (!PHANDLE_VALID(mc)) 2811 return; 2812 2813 if (prom_getproplen(mc, "reg") != 8) 2814 return; 2815
2816 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac)); 2817 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2818 if ((ac != 2) || (sc != 2)) 2819 return; 2820 2821 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR) 2822 return; 2823
2824 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH) 2825 return; 2826
2827 prom_printf("Fixing up bogus hostbridge on Maple...\n"); 2828 2829 mc_reg[0] = 0x0; 2830 mc_reg[1] = CPC925_MC_START; 2831 mc_reg[2] = 0x0;
2832 mc_reg[3] = CPC925_MC_LENGTH; 2833 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg)); 2834 } 2835 #else
2836 #define fixup_device_tree_maple() 2837 #define fixup_device_tree_maple_memory_controller() 2838 #endif 2839 2840 #ifdef CONFIG_PPC_CHRP 2841 /*
2842 * Pegasos and BriQ lack the "ranges" property in the isa node 2843 * Pegasos needs decimal IRQ 14/15, not hexadecimal
2844 * Pegasos has the IDE configured in legacy mode, but advertised as native 2845 */ 2846 static void __init fixup_device_tree_chrp(void) 2847 {
2848 phandle ph; 2849 u32 prop[6]; 2850 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */ 2851 char *name; 2852 int rc; 2853
2854 name = "/pci@80000000/isa@c"; 2855 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2856 if (!PHANDLE_VALID(ph)) {
2857 name = "/pci@ff500000/isa@6"; 2858 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2859 rloc = 0x01003000; /* IO space; PCI device = 6 */ 2860 }
2861 if (PHANDLE_VALID(ph)) { 2862 rc = prom_getproplen(ph, "ranges"); 2863 if (rc == 0 || rc == PROM_ERROR) {
2864 prom_printf("Fixing up missing ISA range on Pegasos...\n"); 2865 2866 prop[0] = 0x1; 2867 prop[1] = 0x0; 2868 prop[2] = rloc;
2869 prop[3] = 0x0; 2870 prop[4] = 0x0; 2871 prop[5] = 0x00010000; 2872 prom_setprop(ph, name, "ranges", prop, sizeof(prop)); 2873 } 2874 }
2875 2876 name = "/pci@80000000/ide@C,1"; 2877 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2878 if (PHANDLE_VALID(ph)) { 2879
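/* As noted in the comment above, the firmware advertises the IDE controller as
 * native mode with hexadecimal interrupt numbers: force legacy IRQ 14 and clear
 * the native-mode bits in the class-code programming interface. */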
prom_printf("Fixing up IDE interrupt on Pegasos...\n"); 2880 prop[0] = 14; 2881 prop[1] = 0x0; 2882 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32)); 2883 prom_printf("Fixing up IDE class-code on Pegasos...\n"); 2884 rc = prom_getprop(ph, "class-code", prop, sizeof(u32)); 2885 if (rc == sizeof(u32)) { 2886 prop[0] &= ~0x5; 2887 prom_setprop(ph, name, "class-code", prop, sizeof(u32)); 2888 } 2889 } 2890 } 2891 #else 2892 #define fixup_device_tree_chrp() 2893 #endif 2894 2895 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC) 2896 static void __init fixup_device_tree_pmac(void) 2897 { 2898 phandle u3, i2c, mpic; 2899 u32 u3_rev; 2900 u32 interrupts[2]; 2901 u32 parent; 2902 2903 /* Some G5s have a missing interrupt definition, fix it up here */ 2904 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000")); 2905 if (!PHANDLE_VALID(u3)) 2906 return; 2907 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000")); 2908 if (!PHANDLE_VALID(i2c)) 2909 return; 2910 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000")); 2911 if (!PHANDLE_VALID(mpic)) 2912 return; 2913 2914 /* check if proper rev of u3 */ 2915 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev)) 2916 == PROM_ERROR) 2917 return; 2918 if (u3_rev < 0x35 || u3_rev > 0x39) 2919 return; 2920 /* does it need fixup ? */ 2921 if (prom_getproplen(i2c, "interrupts") > 0) 2922 return; 2923 2924 prom_printf("fixing up bogus interrupts for u3 i2c...\n"); 2925 2926 /* interrupt on this revision of u3 is number 0 and level */ 2927 interrupts[0] = 0; 2928 interrupts[1] = 1; 2929 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts", 2930 &interrupts, sizeof(interrupts)); 2931 parent = (u32)mpic; 2932 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent", 2933 &parent, sizeof(parent)); 2934 } 2935 #else 2936 #define fixup_device_tree_pmac() 2937 #endif 2938 2939 #ifdef CONFIG_PPC_EFIKA 2940 /* 2941 * The MPC5200 FEC driver requires an phy-handle property to tell it how 2942 * to talk to the phy. If the phy-handle property is missing, then this 2943 * function is called to add the appropriate nodes and link it to the 2944 * ethernet node. 2945 */ 2946 static void __init fixup_device_tree_efika_add_phy(void) 2947 { 2948 u32 node; 2949 char prop[64]; 2950 int rv; 2951 2952 /* Check if /builtin/ethernet exists - bail if it doesn't */ 2953 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet")); 2954 if (!PHANDLE_VALID(node)) 2955 return; 2956 2957 /* Check if the phy-handle property exists - bail if it does */ 2958 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop)); 2959 if (!rv) 2960 return; 2961 2962 /* 2963 * At this point the ethernet device doesn't have a phy described. 
2964 * Now we need to add the missing phy node and linkage 2965 */ 2966 2967 /* Check for an MDIO bus node - if missing then create one */
2968 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio")); 2969 if (!PHANDLE_VALID(node)) { 2970 prom_printf("Adding Ethernet MDIO node\n");
2971 call_prom("interpret", 1, 1, 2972 " s\" /builtin\" find-device" 2973 " new-device" 2974 " 1 encode-int s\" #address-cells\" property"
2975 " 0 encode-int s\" #size-cells\" property" 2976 " s\" mdio\" device-name" 2977 " s\" fsl,mpc5200b-mdio\" encode-string"
2978 " s\" compatible\" property" 2979 " 0xf0003000 0x400 reg" 2980 " 0x2 encode-int" 2981 " 0x5 encode-int encode+"
2982 " 0x3 encode-int encode+" 2983 " s\" interrupts\" property" 2984 " finish-device"); 2985 } 2986
2987 /* Check for a PHY device node - if missing then create one and 2988 * give its phandle to the ethernet node */
2989 node = call_prom("finddevice", 1, 1, 2990 ADDR("/builtin/mdio/ethernet-phy")); 2991 if (!PHANDLE_VALID(node)) {
2992 prom_printf("Adding Ethernet PHY node\n"); 2993 call_prom("interpret", 1, 1, 2994 " s\" /builtin/mdio\" find-device" 2995 " new-device"
2996 " s\" ethernet-phy\" device-name" 2997 " 0x10 encode-int s\" reg\" property" 2998 " my-self" 2999 " ihandle>phandle" 3000 " finish-device"
3001 " s\" /builtin/ethernet\" find-device" 3002 " encode-int" 3003 " s\" phy-handle\" property" 3004 " device-end"); 3005 } 3006 } 3007
3008 static void __init fixup_device_tree_efika(void) 3009 { 3010 int sound_irq[3] = { 2, 2, 0 };
3011 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0, 3012 3,4,0, 3,5,0, 3,6,0, 3,7,0, 3013 3,8,0, 3,9,0, 3,10,0, 3,11,0,
3014 3,12,0, 3,13,0, 3,14,0, 3,15,0 }; 3015 u32 node; 3016 char prop[64]; 3017 int rv, len; 3018
3019 /* Check if we're really running on an EFIKA */ 3020 node = call_prom("finddevice", 1, 1, ADDR("/")); 3021 if (!PHANDLE_VALID(node)) 3022 return; 3023
3024 rv = prom_getprop(node, "model", prop, sizeof(prop)); 3025 if (rv == PROM_ERROR) 3026 return; 3027 if (prom_strcmp(prop, "EFIKA5K2")) 3028 return; 3029
3030 prom_printf("Applying EFIKA device tree fixups\n"); 3031 3032 /* Claiming to be 'chrp' is death */
3033 node = call_prom("finddevice", 1, 1, ADDR("/")); 3034 rv = prom_getprop(node, "device_type", prop, sizeof(prop));
3035 if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0)) 3036 prom_setprop(node, "/", "device_type", "efika", sizeof("efika")); 3037
3038 /* CODEGEN,description is exposed in /proc/cpuinfo so 3039 fix that too */
3040 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop)); 3041 if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP")))
3042 prom_setprop(node, "/", "CODEGEN,description", 3043 "Efika 5200B PowerPC System", 3044 sizeof("Efika 5200B PowerPC System")); 3045
3046 /* Fixup bestcomm interrupts property */ 3047 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm")); 3048 if (PHANDLE_VALID(node)) {
3049 len = prom_getproplen(node, "interrupts"); 3050 if (len == 12) { 3051 prom_printf("Fixing bestcomm interrupts property\n");
3052 prom_setprop(node, "/builtin/bestcom", "interrupts", 3053 bcomm_irq, sizeof(bcomm_irq)); 3054 } 3055 } 3056
3057 /* Fixup sound interrupts property */ 3058 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound")); 3059 if (PHANDLE_VALID(node)) {
3060 rv = prom_getprop(node, "interrupts", prop, sizeof(prop)); 3061 if (rv == PROM_ERROR) { 3062 prom_printf("Adding sound interrupts property\n");
3063 prom_setprop(node, "/builtin/sound", "interrupts", 3064 sound_irq, sizeof(sound_irq)); 3065 } 3066 }
3067 3068 /* Make sure ethernet phy-handle property exists */ 3069 fixup_device_tree_efika_add_phy(); 3070 } 3071 #else
3072 #define fixup_device_tree_efika() 3073 #endif 3074 3075 #ifdef CONFIG_PPC_PASEMI_NEMO 3076 /*
3077 * CFE supplied on Nemo is broken in several ways; the biggest 3078 * problem is that it reassigns ISA interrupts to unused mpic ints.
3079 * Add an interrupt-controller property for the io-bridge to use 3080 * and correct the ints so we can attach them to an irq_domain. 3081 */
3082 static void __init fixup_device_tree_pasemi(void) 3083 { 3084 u32 interrupts[2], parent, rval, val = 0; 3085 char *name, *pci_name;
3086 phandle iob, node; 3087 3088 /* Find the root pci node */ 3089 name = "/pxp@0,e0000000"; 3090 iob = call_prom("finddevice", 1, 1, ADDR(name));
3091 if (!PHANDLE_VALID(iob)) 3092 return; 3093 3094 /* check if interrupt-controller node set yet */
3095 if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR) 3096 return; 3097
3098 prom_printf("adding interrupt-controller property for SB600...\n"); 3099 3100 prom_setprop(iob, name, "interrupt-controller", &val, 0); 3101
3102 pci_name = "/pxp@0,e0000000/pci@11"; 3103 node = call_prom("finddevice", 1, 1, ADDR(pci_name)); 3104 parent = ADDR(iob); 3105
3106 for ( ; prom_next_node(&node); ) { 3107 /* scan each node for one with an interrupt */ 3108 if (!PHANDLE_VALID(node)) 3109 continue; 3110
3111 rval = prom_getproplen(node, "interrupts"); 3112 if (rval == 0 || rval == PROM_ERROR) 3113 continue; 3114
3115 prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts)); 3116 if ((interrupts[0] < 212) || (interrupts[0] > 222)) 3117 continue; 3118
3119 /* found a node, update both interrupts and interrupt-parent */ 3120 if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
3121 interrupts[0] -= 203; 3122 if ((interrupts[0] >= 216) && (interrupts[0] <= 220)) 3123 interrupts[0] -= 213;
3124 if (interrupts[0] == 221) 3125 interrupts[0] = 14; 3126 if (interrupts[0] == 222) 3127 interrupts[0] = 8; 3128
3129 prom_setprop(node, pci_name, "interrupts", interrupts, 3130 sizeof(interrupts)); 3131 prom_setprop(node, pci_name, "interrupt-parent", &parent,
3132 sizeof(parent)); 3133 } 3134 3135 /* 3136 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
3137 * so that generic isa-bridge code can add the SB600 and its on-board 3138 * peripherals. 3139 */ 3140 name = "/pxp@0,e0000000/io-bridge@0";
3141 iob = call_prom("finddevice", 1, 1, ADDR(name)); 3142 if (!PHANDLE_VALID(iob)) 3143 return; 3144 3145 /* device_type is already set, just change it.
*/ 3146 3147 prom_printf("Changing device_type of SB600 node...\n"); 3148 3149 prom_setprop(iob, name, "device_type", "isa", sizeof("isa")); 3150 } 3151 #else /* !CONFIG_PPC_PASEMI_NEMO */ 3152 static inline void fixup_device_tree_pasemi(void) { } 3153 #endif 3154 3155 static void __init fixup_device_tree(void) 3156 { 3157 fixup_device_tree_maple(); 3158 fixup_device_tree_maple_memory_controller(); 3159 fixup_device_tree_chrp(); 3160 fixup_device_tree_pmac(); 3161 fixup_device_tree_efika(); 3162 fixup_device_tree_pasemi(); 3163 } 3164 3165 static void __init prom_find_boot_cpu(void) 3166 { 3167 __be32 rval; 3168 ihandle prom_cpu; 3169 phandle cpu_pkg; 3170 3171 rval = 0; 3172 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0) 3173 return; 3174 prom_cpu = be32_to_cpu(rval); 3175 3176 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu); 3177 3178 if (!PHANDLE_VALID(cpu_pkg)) 3179 return; 3180 3181 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval)); 3182 prom.cpu = be32_to_cpu(rval); 3183 3184 prom_debug("Booting CPU hw index = %d\n", prom.cpu); 3185 } 3186 3187 static void __init prom_check_initrd(unsigned long r3, unsigned long r4) 3188 { 3189 #ifdef CONFIG_BLK_DEV_INITRD 3190 if (r3 && r4 && r4 != 0xdeadbeef) { 3191 __be64 val; 3192 3193 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3; 3194 prom_initrd_end = prom_initrd_start + r4; 3195 3196 val = cpu_to_be64(prom_initrd_start); 3197 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start", 3198 &val, sizeof(val)); 3199 val = cpu_to_be64(prom_initrd_end); 3200 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end", 3201 &val, sizeof(val)); 3202 3203 reserve_mem(prom_initrd_start, 3204 prom_initrd_end - prom_initrd_start); 3205 3206 prom_debug("initrd_start=0x%lx\n", prom_initrd_start); 3207 prom_debug("initrd_end=0x%lx\n", prom_initrd_end); 3208 } 3209 #endif /* CONFIG_BLK_DEV_INITRD */ 3210 } 3211 3212 #ifdef CONFIG_PPC64 3213 #ifdef CONFIG_RELOCATABLE 3214 static void reloc_toc(void) 3215 { 3216 } 3217 3218 static void unreloc_toc(void) 3219 { 3220 } 3221 #else 3222 static void __reloc_toc(unsigned long offset, unsigned long nr_entries) 3223 { 3224 unsigned long i; 3225 unsigned long *toc_entry; 3226 3227 /* Get the start of the TOC by using r2 directly. */ 3228 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry)); 3229 3230 for (i = 0; i < nr_entries; i++) { 3231 *toc_entry = *toc_entry + offset; 3232 toc_entry++; 3233 } 3234 } 3235 3236 static void reloc_toc(void) 3237 { 3238 unsigned long offset = reloc_offset(); 3239 unsigned long nr_entries = 3240 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); 3241 3242 __reloc_toc(offset, nr_entries); 3243 3244 mb(); 3245 } 3246 3247 static void unreloc_toc(void) 3248 { 3249 unsigned long offset = reloc_offset(); 3250 unsigned long nr_entries = 3251 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); 3252 3253 mb(); 3254 3255 __reloc_toc(-offset, nr_entries); 3256 } 3257 #endif 3258 #endif 3259 3260 #ifdef CONFIG_PPC_SVM 3261 /* 3262 * Perform the Enter Secure Mode ultracall. 3263 */ 3264 static int enter_secure_mode(unsigned long kbase, unsigned long fdt) 3265 { 3266 register unsigned long r3 asm("r3") = UV_ESM; 3267 register unsigned long r4 asm("r4") = kbase; 3268 register unsigned long r5 asm("r5") = fdt; 3269 3270 asm volatile("sc 2" : "+r"(r3) : "r"(r4), "r"(r5)); 3271 3272 return r3; 3273 } 3274 3275 /* 3276 * Call the Ultravisor to transfer us to secure memory if we have an ESM blob. 
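* (The UV_ESM ultracall itself is issued by enter_secure_mode() above via the "sc 2" instruction.)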
3277 */ 3278 static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt) 3279 { 3280 int ret; 3281 3282 if (!prom_svm_enable) 3283 return; 3284 3285 /* Switch to secure mode. */ 3286 prom_printf("Switching to secure mode.\n"); 3287 3288 /* 3289 * The ultravisor will do an integrity check of the kernel image but we 3290 * relocated it so the check will fail. Restore the original image by 3291 * relocating it back to the kernel virtual base address. 3292 */ 3293 if (IS_ENABLED(CONFIG_RELOCATABLE)) 3294 relocate(KERNELBASE); 3295 3296 ret = enter_secure_mode(kbase, fdt); 3297 3298 /* Relocate the kernel again. */ 3299 if (IS_ENABLED(CONFIG_RELOCATABLE)) 3300 relocate(kbase); 3301 3302 if (ret != U_SUCCESS) { 3303 prom_printf("Returned %d from switching to secure mode.\n", ret); 3304 prom_rtas_os_term("Switch to secure mode failed.\n"); 3305 } 3306 } 3307 #else 3308 static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt) 3309 { 3310 } 3311 #endif /* CONFIG_PPC_SVM */ 3312 3313 /* 3314 * We enter here early on, when the Open Firmware prom is still 3315 * handling exceptions and the MMU hash table for us. 3316 */ 3317 3318 unsigned long __init prom_init(unsigned long r3, unsigned long r4, 3319 unsigned long pp, 3320 unsigned long r6, unsigned long r7, 3321 unsigned long kbase) 3322 { 3323 unsigned long hdr; 3324 3325 #ifdef CONFIG_PPC32 3326 unsigned long offset = reloc_offset(); 3327 reloc_got2(offset); 3328 #else 3329 reloc_toc(); 3330 #endif 3331 3332 /* 3333 * First zero the BSS 3334 */ 3335 memset(&__bss_start, 0, __bss_stop - __bss_start); 3336 3337 /* 3338 * Init interface to Open Firmware, get some node references, 3339 * like /chosen 3340 */ 3341 prom_init_client_services(pp); 3342 3343 /* 3344 * See if this OF is old enough that we need to do explicit maps 3345 * and other workarounds 3346 */ 3347 prom_find_mmu(); 3348 3349 /* 3350 * Init prom stdout device 3351 */ 3352 prom_init_stdout(); 3353 3354 prom_printf("Preparing to boot %s", linux_banner); 3355 3356 /* 3357 * Get default machine type. At this point, we do not differentiate 3358 * between pSeries SMP and pSeries LPAR 3359 */ 3360 of_platform = prom_find_machine_type(); 3361 prom_printf("Detected machine type: %x\n", of_platform); 3362 3363 #ifndef CONFIG_NONSTATIC_KERNEL 3364 /* Bail if this is a kdump kernel. */ 3365 if (PHYSICAL_START > 0) 3366 prom_panic("Error: You can't boot a kdump kernel from OF!\n"); 3367 #endif 3368 3369 /* 3370 * Check for an initrd 3371 */ 3372 prom_check_initrd(r3, r4); 3373 3374 /* 3375 * Do early parsing of command line 3376 */ 3377 early_cmdline_parse(); 3378 3379 #ifdef CONFIG_PPC_PSERIES 3380 /* 3381 * On pSeries, inform the firmware about our capabilities 3382 */ 3383 if (of_platform == PLATFORM_PSERIES || 3384 of_platform == PLATFORM_PSERIES_LPAR) 3385 prom_send_capabilities(); 3386 #endif 3387 3388 /* 3389 * Copy the CPU hold code 3390 */ 3391 if (of_platform != PLATFORM_POWERMAC) 3392 copy_and_flush(0, kbase, 0x100, 0); 3393 3394 /* 3395 * Initialize memory management within prom_init 3396 */ 3397 prom_init_mem(); 3398 3399 /* 3400 * Determine which cpu is actually running right _now_ 3401 */ 3402 prom_find_boot_cpu(); 3403 3404 /* 3405 * Initialize display devices 3406 */ 3407 prom_check_displays(); 3408 3409 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__) 3410 /* 3411 * Initialize IOMMU (TCE tables) on pSeries. 
Do that before anything else 3412 * that uses the allocator, as we need to make sure we get the top of memory 3413 * available for us here... 3414 */
3415 if (of_platform == PLATFORM_PSERIES) 3416 prom_initialize_tce_table(); 3417 #endif 3418 3419 /*
3420 * On non-powermacs, try to instantiate RTAS. PowerMacs don't 3421 * have a usable RTAS implementation. 3422 */
3423 if (of_platform != PLATFORM_POWERMAC) 3424 prom_instantiate_rtas(); 3425 3426 #ifdef CONFIG_PPC64 3427 /* instantiate sml */
3428 prom_instantiate_sml(); 3429 #endif 3430 3431 /* 3432 * On non-powermacs, put all CPUs in spin-loops. 3433 *
3434 * PowerMacs use a different mechanism to spin CPUs 3435 * 3436 * (This must be done after instantiating RTAS) 3437 */
3438 if (of_platform != PLATFORM_POWERMAC) 3439 prom_hold_cpus(); 3440 3441 /* 3442 * Fill in some info for use by the kernel later on 3443 */
3444 if (prom_memory_limit) { 3445 __be64 val = cpu_to_be64(prom_memory_limit); 3446 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3447 &val, sizeof(val)); 3448 } 3449 #ifdef CONFIG_PPC64 3450 if (prom_iommu_off) 3451 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3452 NULL, 0); 3453 3454 if (prom_iommu_force_on) 3455 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on", 3456 NULL, 0); 3457
3458 if (prom_tce_alloc_start) { 3459 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start", 3460 &prom_tce_alloc_start,
3461 sizeof(prom_tce_alloc_start)); 3462 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end", 3463 &prom_tce_alloc_end,
3464 sizeof(prom_tce_alloc_end)); 3465 } 3466 #endif 3467 3468 /* 3469 * Fixup any known bugs in the device-tree 3470 */ 3471 fixup_device_tree(); 3472
3473 /* 3474 * Now finally create the flattened device-tree 3475 */ 3476 prom_printf("copying OF device tree...\n"); 3477 flatten_device_tree(); 3478
3479 /* 3480 * in case stdin is USB and still active on IBM machines... 3481 * Unfortunately quiesce crashes on some powermacs if we have
3482 * closed stdin already (in particular the powerbook 101). 3483 */ 3484 if (of_platform != PLATFORM_POWERMAC) 3485 prom_close_stdin(); 3486
3487 /* 3488 * Call OF "quiesce" method to shut down pending DMAs from 3489 * devices etc... 3490 */
3491 prom_printf("Quiescing Open Firmware ...\n"); 3492 call_prom("quiesce", 0, 0); 3493 3494 /*
3495 * And finally, call the kernel passing it the flattened device 3496 * tree and NULL as r5, thus triggering the new entry point which
3497 * is common to us and kexec 3498 */ 3499 hdr = dt_header_start; 3500 3501 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
3502 prom_debug("->dt_header_start=0x%lx\n", hdr); 3503 3504 #ifdef CONFIG_PPC32 3505 reloc_got2(-offset); 3506 #else 3507 unreloc_toc(); 3508 #endif 3509
3510 /* Move to secure memory if we're supposed to be secure guests. */ 3511 setup_secure_guest(kbase, hdr); 3512
3513 __start(hdr, kbase, 0, 0, 0, 0, 0); 3514 3515 return 0; 3516 } 3517
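/*
 * Illustrative sketch only, not part of the original file: one way the header
 * filled in by flatten_device_tree() could be dumped on DEBUG_PROM builds.
 * It relies only on helpers and data already defined above; the function name
 * is hypothetical and nothing calls it (it could, for instance, be invoked
 * right after flatten_device_tree() in prom_init()).
 */
#ifdef DEBUG_PROM
static void __init prom_debug_dump_fdt_header(void)
{
	struct boot_param_header *hdr =
		(struct boot_param_header *)dt_header_start;

	/* All header fields are stored big-endian, convert before printing */
	prom_debug("FDT magic       = 0x%x\n", be32_to_cpu(hdr->magic));
	prom_debug("FDT total size  = 0x%x\n", be32_to_cpu(hdr->totalsize));
	prom_debug("FDT struct at   = 0x%x\n", be32_to_cpu(hdr->off_dt_struct));
	prom_debug("FDT strings at  = 0x%x\n", be32_to_cpu(hdr->off_dt_strings));
	prom_debug("FDT rsvmap at   = 0x%x\n", be32_to_cpu(hdr->off_mem_rsvmap));
	prom_debug("FDT boot cpu    = 0x%x\n", be32_to_cpu(hdr->boot_cpuid_phys));
}
#endif /* DEBUG_PROM */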