1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Procedures for interfacing to Open Firmware. 4 * 5 * Paul Mackerras August 1996. 6 * Copyright (C) 1996-2005 Paul Mackerras. 7 * 8 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. 9 * {engebret|bergner}@us.ibm.com 10 */ 11 12 #undef DEBUG_PROM 13 14 /* we cannot use FORTIFY as it brings in new symbols */ 15 #define __NO_FORTIFY 16 17 #include <stdarg.h> 18 #include <linux/kernel.h> 19 #include <linux/string.h> 20 #include <linux/init.h> 21 #include <linux/threads.h> 22 #include <linux/spinlock.h> 23 #include <linux/types.h> 24 #include <linux/pci.h> 25 #include <linux/proc_fs.h> 26 #include <linux/delay.h> 27 #include <linux/initrd.h> 28 #include <linux/bitops.h> 29 #include <linux/pgtable.h> 30 #include <asm/prom.h> 31 #include <asm/rtas.h> 32 #include <asm/page.h> 33 #include <asm/processor.h> 34 #include <asm/irq.h> 35 #include <asm/io.h> 36 #include <asm/smp.h> 37 #include <asm/mmu.h> 38 #include <asm/iommu.h> 39 #include <asm/btext.h> 40 #include <asm/sections.h> 41 #include <asm/machdep.h> 42 #include <asm/asm-prototypes.h> 43 #include <asm/ultravisor-api.h> 44 45 #include <linux/linux_logo.h> 46 47 /* All of prom_init bss lives here */ 48 #define __prombss __section(.bss.prominit) 49 50 /* 51 * Eventually bump that one up 52 */ 53 #define DEVTREE_CHUNK_SIZE 0x100000 54 55 /* 56 * This is the size of the local memory reserve map that gets copied 57 * into the boot params passed to the kernel. That size is totally 58 * flexible as the kernel just reads the list until it encounters an 59 * entry with size 0, so it can be changed without breaking binary 60 * compatibility 61 */ 62 #define MEM_RESERVE_MAP_SIZE 8 63 64 /* 65 * prom_init() is called very early on, before the kernel text 66 * and data have been mapped to KERNELBASE. At this point the code 67 * is running at whatever address it has been loaded at. 68 * On ppc32 we compile with -mrelocatable, which means that references 69 * to extern and static variables get relocated automatically. 70 * ppc64 objects are always relocatable, we just need to relocate the 71 * TOC. 72 * 73 * Because OF may have mapped I/O devices into the area starting at 74 * KERNELBASE, particularly on CHRP machines, we can't safely call 75 * OF once the kernel has been mapped to KERNELBASE. Therefore all 76 * OF calls must be done within prom_init(). 77 * 78 * ADDR is used in calls to call_prom. The 4th and following 79 * arguments to call_prom should be 32-bit values. 80 * On ppc64, 64 bit values are truncated to 32 bits (and 81 * fortunately don't get interpreted as two arguments). 82 */ 83 #define ADDR(x) (u32)(unsigned long)(x) 84 85 #ifdef CONFIG_PPC64 86 #define OF_WORKAROUNDS 0 87 #else 88 #define OF_WORKAROUNDS of_workarounds 89 static int of_workarounds __prombss; 90 #endif 91 92 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */ 93 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */ 94 95 #define PROM_BUG() do { \ 96 prom_printf("kernel BUG at %s line 0x%x!\n", \ 97 __FILE__, __LINE__); \ 98 __builtin_trap(); \ 99 } while (0) 100 101 #ifdef DEBUG_PROM 102 #define prom_debug(x...) prom_printf(x) 103 #else 104 #define prom_debug(x...) 
do { } while (0) 105 #endif 106 107 108 typedef u32 prom_arg_t; 109 110 struct prom_args { 111 __be32 service; 112 __be32 nargs; 113 __be32 nret; 114 __be32 args[10]; 115 }; 116 117 struct prom_t { 118 ihandle root; 119 phandle chosen; 120 int cpu; 121 ihandle stdout; 122 ihandle mmumap; 123 ihandle memory; 124 }; 125 126 struct mem_map_entry { 127 __be64 base; 128 __be64 size; 129 }; 130 131 typedef __be32 cell_t; 132 133 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5, 134 unsigned long r6, unsigned long r7, unsigned long r8, 135 unsigned long r9); 136 137 #ifdef CONFIG_PPC64 138 extern int enter_prom(struct prom_args *args, unsigned long entry); 139 #else 140 static inline int enter_prom(struct prom_args *args, unsigned long entry) 141 { 142 return ((int (*)(struct prom_args *))entry)(args); 143 } 144 #endif 145 146 extern void copy_and_flush(unsigned long dest, unsigned long src, 147 unsigned long size, unsigned long offset); 148 149 /* prom structure */ 150 static struct prom_t __prombss prom; 151 152 static unsigned long __prombss prom_entry; 153 154 static char __prombss of_stdout_device[256]; 155 static char __prombss prom_scratch[256]; 156 157 static unsigned long __prombss dt_header_start; 158 static unsigned long __prombss dt_struct_start, dt_struct_end; 159 static unsigned long __prombss dt_string_start, dt_string_end; 160 161 static unsigned long __prombss prom_initrd_start, prom_initrd_end; 162 163 #ifdef CONFIG_PPC64 164 static int __prombss prom_iommu_force_on; 165 static int __prombss prom_iommu_off; 166 static unsigned long __prombss prom_tce_alloc_start; 167 static unsigned long __prombss prom_tce_alloc_end; 168 #endif 169 170 #ifdef CONFIG_PPC_PSERIES 171 static bool __prombss prom_radix_disable; 172 static bool __prombss prom_xive_disable; 173 #endif 174 175 #ifdef CONFIG_PPC_SVM 176 static bool __prombss prom_svm_enable; 177 #endif 178 179 struct platform_support { 180 bool hash_mmu; 181 bool radix_mmu; 182 bool radix_gtse; 183 bool xive; 184 }; 185 186 /* Platforms codes are now obsolete in the kernel. Now only used within this 187 * file and ultimately gone too. Feel free to change them if you need, they 188 * are not shared with anything outside of this file anymore 189 */ 190 #define PLATFORM_PSERIES 0x0100 191 #define PLATFORM_PSERIES_LPAR 0x0101 192 #define PLATFORM_LPAR 0x0001 193 #define PLATFORM_POWERMAC 0x0400 194 #define PLATFORM_GENERIC 0x0500 195 196 static int __prombss of_platform; 197 198 static char __prombss prom_cmd_line[COMMAND_LINE_SIZE]; 199 200 static unsigned long __prombss prom_memory_limit; 201 202 static unsigned long __prombss alloc_top; 203 static unsigned long __prombss alloc_top_high; 204 static unsigned long __prombss alloc_bottom; 205 static unsigned long __prombss rmo_top; 206 static unsigned long __prombss ram_top; 207 208 static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE]; 209 static int __prombss mem_reserve_cnt; 210 211 static cell_t __prombss regbuf[1024]; 212 213 static bool __prombss rtas_has_query_cpu_stopped; 214 215 216 /* 217 * Error results ... some OF calls will return "-1" on error, some 218 * will return 0, some will return either. 
To simplify, here are 219 * macros to use with any ihandle or phandle return value to check if 220 * it is valid 221 */ 222 223 #define PROM_ERROR (-1u) 224 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR) 225 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR) 226 227 /* Copied from lib/string.c and lib/kstrtox.c */ 228 229 static int __init prom_strcmp(const char *cs, const char *ct) 230 { 231 unsigned char c1, c2; 232 233 while (1) { 234 c1 = *cs++; 235 c2 = *ct++; 236 if (c1 != c2) 237 return c1 < c2 ? -1 : 1; 238 if (!c1) 239 break; 240 } 241 return 0; 242 } 243 244 static char __init *prom_strcpy(char *dest, const char *src) 245 { 246 char *tmp = dest; 247 248 while ((*dest++ = *src++) != '\0') 249 /* nothing */; 250 return tmp; 251 } 252 253 static int __init prom_strncmp(const char *cs, const char *ct, size_t count) 254 { 255 unsigned char c1, c2; 256 257 while (count) { 258 c1 = *cs++; 259 c2 = *ct++; 260 if (c1 != c2) 261 return c1 < c2 ? -1 : 1; 262 if (!c1) 263 break; 264 count--; 265 } 266 return 0; 267 } 268 269 static size_t __init prom_strlen(const char *s) 270 { 271 const char *sc; 272 273 for (sc = s; *sc != '\0'; ++sc) 274 /* nothing */; 275 return sc - s; 276 } 277 278 static int __init prom_memcmp(const void *cs, const void *ct, size_t count) 279 { 280 const unsigned char *su1, *su2; 281 int res = 0; 282 283 for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) 284 if ((res = *su1 - *su2) != 0) 285 break; 286 return res; 287 } 288 289 static char __init *prom_strstr(const char *s1, const char *s2) 290 { 291 size_t l1, l2; 292 293 l2 = prom_strlen(s2); 294 if (!l2) 295 return (char *)s1; 296 l1 = prom_strlen(s1); 297 while (l1 >= l2) { 298 l1--; 299 if (!prom_memcmp(s1, s2, l2)) 300 return (char *)s1; 301 s1++; 302 } 303 return NULL; 304 } 305 306 static size_t __init prom_strlcat(char *dest, const char *src, size_t count) 307 { 308 size_t dsize = prom_strlen(dest); 309 size_t len = prom_strlen(src); 310 size_t res = dsize + len; 311 312 /* This would be a bug */ 313 if (dsize >= count) 314 return count; 315 316 dest += dsize; 317 count -= dsize; 318 if (len >= count) 319 len = count-1; 320 memcpy(dest, src, len); 321 dest[len] = 0; 322 return res; 323 324 } 325 326 #ifdef CONFIG_PPC_PSERIES 327 static int __init prom_strtobool(const char *s, bool *res) 328 { 329 if (!s) 330 return -EINVAL; 331 332 switch (s[0]) { 333 case 'y': 334 case 'Y': 335 case '1': 336 *res = true; 337 return 0; 338 case 'n': 339 case 'N': 340 case '0': 341 *res = false; 342 return 0; 343 case 'o': 344 case 'O': 345 switch (s[1]) { 346 case 'n': 347 case 'N': 348 *res = true; 349 return 0; 350 case 'f': 351 case 'F': 352 *res = false; 353 return 0; 354 default: 355 break; 356 } 357 default: 358 break; 359 } 360 361 return -EINVAL; 362 } 363 #endif 364 365 /* This is the one and *ONLY* place where we actually call open 366 * firmware. 367 */ 368 369 static int __init call_prom(const char *service, int nargs, int nret, ...) 370 { 371 int i; 372 struct prom_args args; 373 va_list list; 374 375 args.service = cpu_to_be32(ADDR(service)); 376 args.nargs = cpu_to_be32(nargs); 377 args.nret = cpu_to_be32(nret); 378 379 va_start(list, nret); 380 for (i = 0; i < nargs; i++) 381 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t)); 382 va_end(list); 383 384 for (i = 0; i < nret; i++) 385 args.args[nargs+i] = 0; 386 387 if (enter_prom(&args, prom_entry) < 0) 388 return PROM_ERROR; 389 390 return (nret > 0) ? 
be32_to_cpu(args.args[nargs]) : 0; 391 } 392 393 static int __init call_prom_ret(const char *service, int nargs, int nret, 394 prom_arg_t *rets, ...) 395 { 396 int i; 397 struct prom_args args; 398 va_list list; 399 400 args.service = cpu_to_be32(ADDR(service)); 401 args.nargs = cpu_to_be32(nargs); 402 args.nret = cpu_to_be32(nret); 403 404 va_start(list, rets); 405 for (i = 0; i < nargs; i++) 406 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t)); 407 va_end(list); 408 409 for (i = 0; i < nret; i++) 410 args.args[nargs+i] = 0; 411 412 if (enter_prom(&args, prom_entry) < 0) 413 return PROM_ERROR; 414 415 if (rets != NULL) 416 for (i = 1; i < nret; ++i) 417 rets[i-1] = be32_to_cpu(args.args[nargs+i]); 418 419 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0; 420 } 421 422 423 static void __init prom_print(const char *msg) 424 { 425 const char *p, *q; 426 427 if (prom.stdout == 0) 428 return; 429 430 for (p = msg; *p != 0; p = q) { 431 for (q = p; *q != 0 && *q != '\n'; ++q) 432 ; 433 if (q > p) 434 call_prom("write", 3, 1, prom.stdout, p, q - p); 435 if (*q == 0) 436 break; 437 ++q; 438 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2); 439 } 440 } 441 442 443 /* 444 * Both prom_print_hex & prom_print_dec takes an unsigned long as input so that 445 * we do not need __udivdi3 or __umoddi3 on 32bits. 446 */ 447 static void __init prom_print_hex(unsigned long val) 448 { 449 int i, nibbles = sizeof(val)*2; 450 char buf[sizeof(val)*2+1]; 451 452 for (i = nibbles-1; i >= 0; i--) { 453 buf[i] = (val & 0xf) + '0'; 454 if (buf[i] > '9') 455 buf[i] += ('a'-'0'-10); 456 val >>= 4; 457 } 458 buf[nibbles] = '\0'; 459 call_prom("write", 3, 1, prom.stdout, buf, nibbles); 460 } 461 462 /* max number of decimal digits in an unsigned long */ 463 #define UL_DIGITS 21 464 static void __init prom_print_dec(unsigned long val) 465 { 466 int i, size; 467 char buf[UL_DIGITS+1]; 468 469 for (i = UL_DIGITS-1; i >= 0; i--) { 470 buf[i] = (val % 10) + '0'; 471 val = val/10; 472 if (val == 0) 473 break; 474 } 475 /* shift stuff down */ 476 size = UL_DIGITS - i; 477 call_prom("write", 3, 1, prom.stdout, buf+i, size); 478 } 479 480 __printf(1, 2) 481 static void __init prom_printf(const char *format, ...) 
482 { 483 const char *p, *q, *s; 484 va_list args; 485 unsigned long v; 486 long vs; 487 int n = 0; 488 489 va_start(args, format); 490 for (p = format; *p != 0; p = q) { 491 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q) 492 ; 493 if (q > p) 494 call_prom("write", 3, 1, prom.stdout, p, q - p); 495 if (*q == 0) 496 break; 497 if (*q == '\n') { 498 ++q; 499 call_prom("write", 3, 1, prom.stdout, 500 ADDR("\r\n"), 2); 501 continue; 502 } 503 ++q; 504 if (*q == 0) 505 break; 506 while (*q == 'l') { 507 ++q; 508 ++n; 509 } 510 switch (*q) { 511 case 's': 512 ++q; 513 s = va_arg(args, const char *); 514 prom_print(s); 515 break; 516 case 'x': 517 ++q; 518 switch (n) { 519 case 0: 520 v = va_arg(args, unsigned int); 521 break; 522 case 1: 523 v = va_arg(args, unsigned long); 524 break; 525 case 2: 526 default: 527 v = va_arg(args, unsigned long long); 528 break; 529 } 530 prom_print_hex(v); 531 break; 532 case 'u': 533 ++q; 534 switch (n) { 535 case 0: 536 v = va_arg(args, unsigned int); 537 break; 538 case 1: 539 v = va_arg(args, unsigned long); 540 break; 541 case 2: 542 default: 543 v = va_arg(args, unsigned long long); 544 break; 545 } 546 prom_print_dec(v); 547 break; 548 case 'd': 549 ++q; 550 switch (n) { 551 case 0: 552 vs = va_arg(args, int); 553 break; 554 case 1: 555 vs = va_arg(args, long); 556 break; 557 case 2: 558 default: 559 vs = va_arg(args, long long); 560 break; 561 } 562 if (vs < 0) { 563 prom_print("-"); 564 vs = -vs; 565 } 566 prom_print_dec(vs); 567 break; 568 } 569 } 570 va_end(args); 571 } 572 573 574 static unsigned int __init prom_claim(unsigned long virt, unsigned long size, 575 unsigned long align) 576 { 577 578 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) { 579 /* 580 * Old OF requires we claim physical and virtual separately 581 * and then map explicitly (assuming virtual mode) 582 */ 583 int ret; 584 prom_arg_t result; 585 586 ret = call_prom_ret("call-method", 5, 2, &result, 587 ADDR("claim"), prom.memory, 588 align, size, virt); 589 if (ret != 0 || result == -1) 590 return -1; 591 ret = call_prom_ret("call-method", 5, 2, &result, 592 ADDR("claim"), prom.mmumap, 593 align, size, virt); 594 if (ret != 0) { 595 call_prom("call-method", 4, 1, ADDR("release"), 596 prom.memory, size, virt); 597 return -1; 598 } 599 /* the 0x12 is M (coherence) + PP == read/write */ 600 call_prom("call-method", 6, 1, 601 ADDR("map"), prom.mmumap, 0x12, size, virt, virt); 602 return virt; 603 } 604 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size, 605 (prom_arg_t)align); 606 } 607 608 static void __init __attribute__((noreturn)) prom_panic(const char *reason) 609 { 610 prom_print(reason); 611 /* Do not call exit because it clears the screen on pmac 612 * it also causes some sort of double-fault on early pmacs */ 613 if (of_platform == PLATFORM_POWERMAC) 614 asm("trap\n"); 615 616 /* ToDo: should put up an SRC here on pSeries */ 617 call_prom("exit", 0, 0); 618 619 for (;;) /* should never get here */ 620 ; 621 } 622 623 624 static int __init prom_next_node(phandle *nodep) 625 { 626 phandle node; 627 628 if ((node = *nodep) != 0 629 && (*nodep = call_prom("child", 1, 1, node)) != 0) 630 return 1; 631 if ((*nodep = call_prom("peer", 1, 1, node)) != 0) 632 return 1; 633 for (;;) { 634 if ((node = call_prom("parent", 1, 1, node)) == 0) 635 return 0; 636 if ((*nodep = call_prom("peer", 1, 1, node)) != 0) 637 return 1; 638 } 639 } 640 641 static inline int __init prom_getprop(phandle node, const char *pname, 642 void *value, size_t valuelen) 643 { 644 return 
call_prom("getprop", 4, 1, node, ADDR(pname), 645 (u32)(unsigned long) value, (u32) valuelen); 646 } 647 648 static inline int __init prom_getproplen(phandle node, const char *pname) 649 { 650 return call_prom("getproplen", 2, 1, node, ADDR(pname)); 651 } 652 653 static void add_string(char **str, const char *q) 654 { 655 char *p = *str; 656 657 while (*q) 658 *p++ = *q++; 659 *p++ = ' '; 660 *str = p; 661 } 662 663 static char *tohex(unsigned int x) 664 { 665 static const char digits[] __initconst = "0123456789abcdef"; 666 static char result[9] __prombss; 667 int i; 668 669 result[8] = 0; 670 i = 8; 671 do { 672 --i; 673 result[i] = digits[x & 0xf]; 674 x >>= 4; 675 } while (x != 0 && i > 0); 676 return &result[i]; 677 } 678 679 static int __init prom_setprop(phandle node, const char *nodename, 680 const char *pname, void *value, size_t valuelen) 681 { 682 char cmd[256], *p; 683 684 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL)) 685 return call_prom("setprop", 4, 1, node, ADDR(pname), 686 (u32)(unsigned long) value, (u32) valuelen); 687 688 /* gah... setprop doesn't work on longtrail, have to use interpret */ 689 p = cmd; 690 add_string(&p, "dev"); 691 add_string(&p, nodename); 692 add_string(&p, tohex((u32)(unsigned long) value)); 693 add_string(&p, tohex(valuelen)); 694 add_string(&p, tohex(ADDR(pname))); 695 add_string(&p, tohex(prom_strlen(pname))); 696 add_string(&p, "property"); 697 *p = 0; 698 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd); 699 } 700 701 /* We can't use the standard versions because of relocation headaches. */ 702 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \ 703 || ('a' <= (c) && (c) <= 'f') \ 704 || ('A' <= (c) && (c) <= 'F')) 705 706 #define isdigit(c) ('0' <= (c) && (c) <= '9') 707 #define islower(c) ('a' <= (c) && (c) <= 'z') 708 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c)) 709 710 static unsigned long prom_strtoul(const char *cp, const char **endp) 711 { 712 unsigned long result = 0, base = 10, value; 713 714 if (*cp == '0') { 715 base = 8; 716 cp++; 717 if (toupper(*cp) == 'X') { 718 cp++; 719 base = 16; 720 } 721 } 722 723 while (isxdigit(*cp) && 724 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) { 725 result = result * base + value; 726 cp++; 727 } 728 729 if (endp) 730 *endp = cp; 731 732 return result; 733 } 734 735 static unsigned long prom_memparse(const char *ptr, const char **retptr) 736 { 737 unsigned long ret = prom_strtoul(ptr, retptr); 738 int shift = 0; 739 740 /* 741 * We can't use a switch here because GCC *may* generate a 742 * jump table which won't work, because we're not running at 743 * the address we're linked at. 
744 */ 745 if ('G' == **retptr || 'g' == **retptr) 746 shift = 30; 747 748 if ('M' == **retptr || 'm' == **retptr) 749 shift = 20; 750 751 if ('K' == **retptr || 'k' == **retptr) 752 shift = 10; 753 754 if (shift) { 755 ret <<= shift; 756 (*retptr)++; 757 } 758 759 return ret; 760 } 761 762 /* 763 * Early parsing of the command line passed to the kernel, used for 764 * "mem=x" and the options that affect the iommu 765 */ 766 static void __init early_cmdline_parse(void) 767 { 768 const char *opt; 769 770 char *p; 771 int l = 0; 772 773 prom_cmd_line[0] = 0; 774 p = prom_cmd_line; 775 776 if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && (long)prom.chosen > 0) 777 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1); 778 779 if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || l <= 0 || p[0] == '\0') 780 prom_strlcat(prom_cmd_line, " " CONFIG_CMDLINE, 781 sizeof(prom_cmd_line)); 782 783 prom_printf("command line: %s\n", prom_cmd_line); 784 785 #ifdef CONFIG_PPC64 786 opt = prom_strstr(prom_cmd_line, "iommu="); 787 if (opt) { 788 prom_printf("iommu opt is: %s\n", opt); 789 opt += 6; 790 while (*opt && *opt == ' ') 791 opt++; 792 if (!prom_strncmp(opt, "off", 3)) 793 prom_iommu_off = 1; 794 else if (!prom_strncmp(opt, "force", 5)) 795 prom_iommu_force_on = 1; 796 } 797 #endif 798 opt = prom_strstr(prom_cmd_line, "mem="); 799 if (opt) { 800 opt += 4; 801 prom_memory_limit = prom_memparse(opt, (const char **)&opt); 802 #ifdef CONFIG_PPC64 803 /* Align to 16 MB == size of ppc64 large page */ 804 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000); 805 #endif 806 } 807 808 #ifdef CONFIG_PPC_PSERIES 809 prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT); 810 opt = prom_strstr(prom_cmd_line, "disable_radix"); 811 if (opt) { 812 opt += 13; 813 if (*opt && *opt == '=') { 814 bool val; 815 816 if (prom_strtobool(++opt, &val)) 817 prom_radix_disable = false; 818 else 819 prom_radix_disable = val; 820 } else 821 prom_radix_disable = true; 822 } 823 if (prom_radix_disable) 824 prom_debug("Radix disabled from cmdline\n"); 825 826 opt = prom_strstr(prom_cmd_line, "xive=off"); 827 if (opt) { 828 prom_xive_disable = true; 829 prom_debug("XIVE disabled from cmdline\n"); 830 } 831 #endif /* CONFIG_PPC_PSERIES */ 832 833 #ifdef CONFIG_PPC_SVM 834 opt = prom_strstr(prom_cmd_line, "svm="); 835 if (opt) { 836 bool val; 837 838 opt += sizeof("svm=") - 1; 839 if (!prom_strtobool(opt, &val)) 840 prom_svm_enable = val; 841 } 842 #endif /* CONFIG_PPC_SVM */ 843 } 844 845 #ifdef CONFIG_PPC_PSERIES 846 /* 847 * The architecture vector has an array of PVR mask/value pairs, 848 * followed by # option vectors - 1, followed by the option vectors. 849 * 850 * See prom.h for the definition of the bits specified in the 851 * architecture vector. 852 */ 853 854 /* Firmware expects the value to be n - 1, where n is the # of vectors */ 855 #define NUM_VECTORS(n) ((n) - 1) 856 857 /* 858 * Firmware expects 1 + n - 2, where n is the length of the option vector in 859 * bytes. The 1 accounts for the length byte itself, the - 2 .. ? 
860 */ 861 #define VECTOR_LENGTH(n) (1 + (n) - 2) 862 863 struct option_vector1 { 864 u8 byte1; 865 u8 arch_versions; 866 u8 arch_versions3; 867 } __packed; 868 869 struct option_vector2 { 870 u8 byte1; 871 __be16 reserved; 872 __be32 real_base; 873 __be32 real_size; 874 __be32 virt_base; 875 __be32 virt_size; 876 __be32 load_base; 877 __be32 min_rma; 878 __be32 min_load; 879 u8 min_rma_percent; 880 u8 max_pft_size; 881 } __packed; 882 883 struct option_vector3 { 884 u8 byte1; 885 u8 byte2; 886 } __packed; 887 888 struct option_vector4 { 889 u8 byte1; 890 u8 min_vp_cap; 891 } __packed; 892 893 struct option_vector5 { 894 u8 byte1; 895 u8 byte2; 896 u8 byte3; 897 u8 cmo; 898 u8 associativity; 899 u8 bin_opts; 900 u8 micro_checkpoint; 901 u8 reserved0; 902 __be32 max_cpus; 903 __be16 papr_level; 904 __be16 reserved1; 905 u8 platform_facilities; 906 u8 reserved2; 907 __be16 reserved3; 908 u8 subprocessors; 909 u8 byte22; 910 u8 intarch; 911 u8 mmu; 912 u8 hash_ext; 913 u8 radix_ext; 914 } __packed; 915 916 struct option_vector6 { 917 u8 reserved; 918 u8 secondary_pteg; 919 u8 os_name; 920 } __packed; 921 922 struct ibm_arch_vec { 923 struct { u32 mask, val; } pvrs[14]; 924 925 u8 num_vectors; 926 927 u8 vec1_len; 928 struct option_vector1 vec1; 929 930 u8 vec2_len; 931 struct option_vector2 vec2; 932 933 u8 vec3_len; 934 struct option_vector3 vec3; 935 936 u8 vec4_len; 937 struct option_vector4 vec4; 938 939 u8 vec5_len; 940 struct option_vector5 vec5; 941 942 u8 vec6_len; 943 struct option_vector6 vec6; 944 } __packed; 945 946 static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = { 947 .pvrs = { 948 { 949 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */ 950 .val = cpu_to_be32(0x003a0000), 951 }, 952 { 953 .mask = cpu_to_be32(0xffff0000), /* POWER6 */ 954 .val = cpu_to_be32(0x003e0000), 955 }, 956 { 957 .mask = cpu_to_be32(0xffff0000), /* POWER7 */ 958 .val = cpu_to_be32(0x003f0000), 959 }, 960 { 961 .mask = cpu_to_be32(0xffff0000), /* POWER8E */ 962 .val = cpu_to_be32(0x004b0000), 963 }, 964 { 965 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */ 966 .val = cpu_to_be32(0x004c0000), 967 }, 968 { 969 .mask = cpu_to_be32(0xffff0000), /* POWER8 */ 970 .val = cpu_to_be32(0x004d0000), 971 }, 972 { 973 .mask = cpu_to_be32(0xffff0000), /* POWER9 */ 974 .val = cpu_to_be32(0x004e0000), 975 }, 976 { 977 .mask = cpu_to_be32(0xffff0000), /* POWER10 */ 978 .val = cpu_to_be32(0x00800000), 979 }, 980 { 981 .mask = cpu_to_be32(0xffffffff), /* all 3.1-compliant */ 982 .val = cpu_to_be32(0x0f000006), 983 }, 984 { 985 .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */ 986 .val = cpu_to_be32(0x0f000005), 987 }, 988 { 989 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */ 990 .val = cpu_to_be32(0x0f000004), 991 }, 992 { 993 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */ 994 .val = cpu_to_be32(0x0f000003), 995 }, 996 { 997 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */ 998 .val = cpu_to_be32(0x0f000002), 999 }, 1000 { 1001 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */ 1002 .val = cpu_to_be32(0x0f000001), 1003 }, 1004 }, 1005 1006 .num_vectors = NUM_VECTORS(6), 1007 1008 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)), 1009 .vec1 = { 1010 .byte1 = 0, 1011 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 | 1012 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07, 1013 .arch_versions3 = OV1_PPC_3_00 | OV1_PPC_3_1, 1014 }, 1015 1016 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)), 
1017 /* option vector 2: Open Firmware options supported */ 1018 .vec2 = { 1019 .byte1 = OV2_REAL_MODE, 1020 .reserved = 0, 1021 .real_base = cpu_to_be32(0xffffffff), 1022 .real_size = cpu_to_be32(0xffffffff), 1023 .virt_base = cpu_to_be32(0xffffffff), 1024 .virt_size = cpu_to_be32(0xffffffff), 1025 .load_base = cpu_to_be32(0xffffffff), 1026 .min_rma = cpu_to_be32(512), /* 512MB min RMA */ 1027 .min_load = cpu_to_be32(0xffffffff), /* full client load */ 1028 .min_rma_percent = 0, /* min RMA percentage of total RAM */ 1029 .max_pft_size = 48, /* max log_2(hash table size) */ 1030 }, 1031 1032 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)), 1033 /* option vector 3: processor options supported */ 1034 .vec3 = { 1035 .byte1 = 0, /* don't ignore, don't halt */ 1036 .byte2 = OV3_FP | OV3_VMX | OV3_DFP, 1037 }, 1038 1039 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)), 1040 /* option vector 4: IBM PAPR implementation */ 1041 .vec4 = { 1042 .byte1 = 0, /* don't halt */ 1043 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */ 1044 }, 1045 1046 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)), 1047 /* option vector 5: PAPR/OF options */ 1048 .vec5 = { 1049 .byte1 = 0, /* don't ignore, don't halt */ 1050 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) | 1051 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) | 1052 #ifdef CONFIG_PCI_MSI 1053 /* PCIe/MSI support. Without MSI full PCIe is not supported */ 1054 OV5_FEAT(OV5_MSI), 1055 #else 1056 0, 1057 #endif 1058 .byte3 = 0, 1059 .cmo = 1060 #ifdef CONFIG_PPC_SMLPAR 1061 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO), 1062 #else 1063 0, 1064 #endif 1065 .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN), 1066 .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT), 1067 .micro_checkpoint = 0, 1068 .reserved0 = 0, 1069 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */ 1070 .papr_level = 0, 1071 .reserved1 = 0, 1072 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842), 1073 .reserved2 = 0, 1074 .reserved3 = 0, 1075 .subprocessors = 1, 1076 .byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO), 1077 .intarch = 0, 1078 .mmu = 0, 1079 .hash_ext = 0, 1080 .radix_ext = 0, 1081 }, 1082 1083 /* option vector 6: IBM PAPR hints */ 1084 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)), 1085 .vec6 = { 1086 .reserved = 0, 1087 .secondary_pteg = 0, 1088 .os_name = OV6_LINUX, 1089 }, 1090 }; 1091 1092 static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned; 1093 1094 /* Old method - ELF header with PT_NOTE sections only works on BE */ 1095 #ifdef __BIG_ENDIAN__ 1096 static const struct fake_elf { 1097 Elf32_Ehdr elfhdr; 1098 Elf32_Phdr phdr[2]; 1099 struct chrpnote { 1100 u32 namesz; 1101 u32 descsz; 1102 u32 type; 1103 char name[8]; /* "PowerPC" */ 1104 struct chrpdesc { 1105 u32 real_mode; 1106 u32 real_base; 1107 u32 real_size; 1108 u32 virt_base; 1109 u32 virt_size; 1110 u32 load_base; 1111 } chrpdesc; 1112 } chrpnote; 1113 struct rpanote { 1114 u32 namesz; 1115 u32 descsz; 1116 u32 type; 1117 char name[24]; /* "IBM,RPA-Client-Config" */ 1118 struct rpadesc { 1119 u32 lpar_affinity; 1120 u32 min_rmo_size; 1121 u32 min_rmo_percent; 1122 u32 max_pft_size; 1123 u32 splpar; 1124 u32 min_load; 1125 u32 new_mem_def; 1126 u32 ignore_me; 1127 } rpadesc; 1128 } rpanote; 1129 } fake_elf __initconst = { 1130 .elfhdr = { 1131 .e_ident = { 0x7f, 'E', 'L', 'F', 1132 ELFCLASS32, 
ELFDATA2MSB, EV_CURRENT }, 1133 .e_type = ET_EXEC, /* yeah right */ 1134 .e_machine = EM_PPC, 1135 .e_version = EV_CURRENT, 1136 .e_phoff = offsetof(struct fake_elf, phdr), 1137 .e_phentsize = sizeof(Elf32_Phdr), 1138 .e_phnum = 2 1139 }, 1140 .phdr = { 1141 [0] = { 1142 .p_type = PT_NOTE, 1143 .p_offset = offsetof(struct fake_elf, chrpnote), 1144 .p_filesz = sizeof(struct chrpnote) 1145 }, [1] = { 1146 .p_type = PT_NOTE, 1147 .p_offset = offsetof(struct fake_elf, rpanote), 1148 .p_filesz = sizeof(struct rpanote) 1149 } 1150 }, 1151 .chrpnote = { 1152 .namesz = sizeof("PowerPC"), 1153 .descsz = sizeof(struct chrpdesc), 1154 .type = 0x1275, 1155 .name = "PowerPC", 1156 .chrpdesc = { 1157 .real_mode = ~0U, /* ~0 means "don't care" */ 1158 .real_base = ~0U, 1159 .real_size = ~0U, 1160 .virt_base = ~0U, 1161 .virt_size = ~0U, 1162 .load_base = ~0U 1163 }, 1164 }, 1165 .rpanote = { 1166 .namesz = sizeof("IBM,RPA-Client-Config"), 1167 .descsz = sizeof(struct rpadesc), 1168 .type = 0x12759999, 1169 .name = "IBM,RPA-Client-Config", 1170 .rpadesc = { 1171 .lpar_affinity = 0, 1172 .min_rmo_size = 64, /* in megabytes */ 1173 .min_rmo_percent = 0, 1174 .max_pft_size = 48, /* 2^48 bytes max PFT size */ 1175 .splpar = 1, 1176 .min_load = ~0U, 1177 .new_mem_def = 0 1178 } 1179 } 1180 }; 1181 #endif /* __BIG_ENDIAN__ */ 1182 1183 static int __init prom_count_smt_threads(void) 1184 { 1185 phandle node; 1186 char type[64]; 1187 unsigned int plen; 1188 1189 /* Pick up th first CPU node we can find */ 1190 for (node = 0; prom_next_node(&node); ) { 1191 type[0] = 0; 1192 prom_getprop(node, "device_type", type, sizeof(type)); 1193 1194 if (prom_strcmp(type, "cpu")) 1195 continue; 1196 /* 1197 * There is an entry for each smt thread, each entry being 1198 * 4 bytes long. All cpus should have the same number of 1199 * smt threads, so return after finding the first. 1200 */ 1201 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s"); 1202 if (plen == PROM_ERROR) 1203 break; 1204 plen >>= 2; 1205 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen); 1206 1207 /* Sanity check */ 1208 if (plen < 1 || plen > 64) { 1209 prom_printf("Threads per core %lu out of bounds, assuming 1\n", 1210 (unsigned long)plen); 1211 return 1; 1212 } 1213 return plen; 1214 } 1215 prom_debug("No threads found, assuming 1 per core\n"); 1216 1217 return 1; 1218 1219 } 1220 1221 static void __init prom_parse_mmu_model(u8 val, 1222 struct platform_support *support) 1223 { 1224 switch (val) { 1225 case OV5_FEAT(OV5_MMU_DYNAMIC): 1226 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */ 1227 prom_debug("MMU - either supported\n"); 1228 support->radix_mmu = !prom_radix_disable; 1229 support->hash_mmu = true; 1230 break; 1231 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */ 1232 prom_debug("MMU - radix only\n"); 1233 if (prom_radix_disable) { 1234 /* 1235 * If we __have__ to do radix, we're better off ignoring 1236 * the command line rather than not booting. 
1237 */ 1238 prom_printf("WARNING: Ignoring cmdline option disable_radix\n"); 1239 } 1240 support->radix_mmu = true; 1241 break; 1242 case OV5_FEAT(OV5_MMU_HASH): 1243 prom_debug("MMU - hash only\n"); 1244 support->hash_mmu = true; 1245 break; 1246 default: 1247 prom_debug("Unknown mmu support option: 0x%x\n", val); 1248 break; 1249 } 1250 } 1251 1252 static void __init prom_parse_xive_model(u8 val, 1253 struct platform_support *support) 1254 { 1255 switch (val) { 1256 case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */ 1257 prom_debug("XIVE - either mode supported\n"); 1258 support->xive = !prom_xive_disable; 1259 break; 1260 case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */ 1261 prom_debug("XIVE - exploitation mode supported\n"); 1262 if (prom_xive_disable) { 1263 /* 1264 * If we __have__ to do XIVE, we're better off ignoring 1265 * the command line rather than not booting. 1266 */ 1267 prom_printf("WARNING: Ignoring cmdline option xive=off\n"); 1268 } 1269 support->xive = true; 1270 break; 1271 case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */ 1272 prom_debug("XIVE - legacy mode supported\n"); 1273 break; 1274 default: 1275 prom_debug("Unknown xive support option: 0x%x\n", val); 1276 break; 1277 } 1278 } 1279 1280 static void __init prom_parse_platform_support(u8 index, u8 val, 1281 struct platform_support *support) 1282 { 1283 switch (index) { 1284 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */ 1285 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support); 1286 break; 1287 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */ 1288 if (val & OV5_FEAT(OV5_RADIX_GTSE)) { 1289 prom_debug("Radix - GTSE supported\n"); 1290 support->radix_gtse = true; 1291 } 1292 break; 1293 case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */ 1294 prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT), 1295 support); 1296 break; 1297 } 1298 } 1299 1300 static void __init prom_check_platform_support(void) 1301 { 1302 struct platform_support supported = { 1303 .hash_mmu = false, 1304 .radix_mmu = false, 1305 .radix_gtse = false, 1306 .xive = false 1307 }; 1308 int prop_len = prom_getproplen(prom.chosen, 1309 "ibm,arch-vec-5-platform-support"); 1310 1311 /* 1312 * First copy the architecture vec template 1313 * 1314 * use memcpy() instead of *vec = *vec_template so that GCC replaces it 1315 * by __memcpy() when KASAN is active 1316 */ 1317 memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template, 1318 sizeof(ibm_architecture_vec)); 1319 1320 if (prop_len > 1) { 1321 int i; 1322 u8 vec[8]; 1323 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n", 1324 prop_len); 1325 if (prop_len > sizeof(vec)) 1326 prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n", 1327 prop_len); 1328 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", 1329 &vec, sizeof(vec)); 1330 for (i = 0; i < sizeof(vec); i += 2) { 1331 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2 1332 , vec[i] 1333 , vec[i + 1]); 1334 prom_parse_platform_support(vec[i], vec[i + 1], 1335 &supported); 1336 } 1337 } 1338 1339 if (supported.radix_mmu && supported.radix_gtse && 1340 IS_ENABLED(CONFIG_PPC_RADIX_MMU)) { 1341 /* Radix preferred - but we require GTSE for now */ 1342 prom_debug("Asking for radix with GTSE\n"); 1343 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX); 1344 ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE); 1345 } else if (supported.hash_mmu) { 1346 /* Default to hash mmu (if we can) */ 1347 prom_debug("Asking for hash\n"); 1348 
ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH); 1349 } else { 1350 /* We're probably on a legacy hypervisor */ 1351 prom_debug("Assuming legacy hash support\n"); 1352 } 1353 1354 if (supported.xive) { 1355 prom_debug("Asking for XIVE\n"); 1356 ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT); 1357 } 1358 } 1359 1360 static void __init prom_send_capabilities(void) 1361 { 1362 ihandle root; 1363 prom_arg_t ret; 1364 u32 cores; 1365 1366 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */ 1367 prom_check_platform_support(); 1368 1369 root = call_prom("open", 1, 1, ADDR("/")); 1370 if (root != 0) { 1371 /* We need to tell the FW about the number of cores we support. 1372 * 1373 * To do that, we count the number of threads on the first core 1374 * (we assume this is the same for all cores) and use it to 1375 * divide NR_CPUS. 1376 */ 1377 1378 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()); 1379 prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n", 1380 cores, NR_CPUS); 1381 1382 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores); 1383 1384 /* try calling the ibm,client-architecture-support method */ 1385 prom_printf("Calling ibm,client-architecture-support..."); 1386 if (call_prom_ret("call-method", 3, 2, &ret, 1387 ADDR("ibm,client-architecture-support"), 1388 root, 1389 ADDR(&ibm_architecture_vec)) == 0) { 1390 /* the call exists... */ 1391 if (ret) 1392 prom_printf("\nWARNING: ibm,client-architecture" 1393 "-support call FAILED!\n"); 1394 call_prom("close", 1, 0, root); 1395 prom_printf(" done\n"); 1396 return; 1397 } 1398 call_prom("close", 1, 0, root); 1399 prom_printf(" not implemented\n"); 1400 } 1401 1402 #ifdef __BIG_ENDIAN__ 1403 { 1404 ihandle elfloader; 1405 1406 /* no ibm,client-architecture-support call, try the old way */ 1407 elfloader = call_prom("open", 1, 1, 1408 ADDR("/packages/elf-loader")); 1409 if (elfloader == 0) { 1410 prom_printf("couldn't open /packages/elf-loader\n"); 1411 return; 1412 } 1413 call_prom("call-method", 3, 1, ADDR("process-elf-header"), 1414 elfloader, ADDR(&fake_elf)); 1415 call_prom("close", 1, 0, elfloader); 1416 } 1417 #endif /* __BIG_ENDIAN__ */ 1418 } 1419 #endif /* CONFIG_PPC_PSERIES */ 1420 1421 /* 1422 * Memory allocation strategy... our layout is normally: 1423 * 1424 * at 14Mb or more we have vmlinux, then a gap and initrd. In some 1425 * rare cases, initrd might end up being before the kernel though. 1426 * We assume this won't override the final kernel at 0, we have no 1427 * provision to handle that in this version, but it should hopefully 1428 * never happen. 1429 * 1430 * alloc_top is set to the top of RMO, eventually shrink down if the 1431 * TCEs overlap 1432 * 1433 * alloc_bottom is set to the top of kernel/initrd 1434 * 1435 * from there, allocations are done this way : rtas is allocated 1436 * topmost, and the device-tree is allocated from the bottom. We try 1437 * to grow the device-tree allocation as we progress. If we can't, 1438 * then we fail, we don't currently have a facility to restart 1439 * elsewhere, but that shouldn't be necessary. 1440 * 1441 * Note that calls to reserve_mem have to be done explicitly, memory 1442 * allocated with either alloc_up or alloc_down isn't automatically 1443 * reserved. 
1444 */ 1445 1446 1447 /* 1448 * Allocates memory in the RMO upward from the kernel/initrd 1449 * 1450 * When align is 0, this is a special case, it means to allocate in place 1451 * at the current location of alloc_bottom or fail (that is basically 1452 * extending the previous allocation). Used for the device-tree flattening 1453 */ 1454 static unsigned long __init alloc_up(unsigned long size, unsigned long align) 1455 { 1456 unsigned long base = alloc_bottom; 1457 unsigned long addr = 0; 1458 1459 if (align) 1460 base = ALIGN(base, align); 1461 prom_debug("%s(%lx, %lx)\n", __func__, size, align); 1462 if (ram_top == 0) 1463 prom_panic("alloc_up() called with mem not initialized\n"); 1464 1465 if (align) 1466 base = ALIGN(alloc_bottom, align); 1467 else 1468 base = alloc_bottom; 1469 1470 for(; (base + size) <= alloc_top; 1471 base = ALIGN(base + 0x100000, align)) { 1472 prom_debug(" trying: 0x%lx\n\r", base); 1473 addr = (unsigned long)prom_claim(base, size, 0); 1474 if (addr != PROM_ERROR && addr != 0) 1475 break; 1476 addr = 0; 1477 if (align == 0) 1478 break; 1479 } 1480 if (addr == 0) 1481 return 0; 1482 alloc_bottom = addr + size; 1483 1484 prom_debug(" -> %lx\n", addr); 1485 prom_debug(" alloc_bottom : %lx\n", alloc_bottom); 1486 prom_debug(" alloc_top : %lx\n", alloc_top); 1487 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); 1488 prom_debug(" rmo_top : %lx\n", rmo_top); 1489 prom_debug(" ram_top : %lx\n", ram_top); 1490 1491 return addr; 1492 } 1493 1494 /* 1495 * Allocates memory downward, either from top of RMO, or if highmem 1496 * is set, from the top of RAM. Note that this one doesn't handle 1497 * failures. It does claim memory if highmem is not set. 1498 */ 1499 static unsigned long __init alloc_down(unsigned long size, unsigned long align, 1500 int highmem) 1501 { 1502 unsigned long base, addr = 0; 1503 1504 prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align, 1505 highmem ? "(high)" : "(low)"); 1506 if (ram_top == 0) 1507 prom_panic("alloc_down() called with mem not initialized\n"); 1508 1509 if (highmem) { 1510 /* Carve out storage for the TCE table. */ 1511 addr = ALIGN_DOWN(alloc_top_high - size, align); 1512 if (addr <= alloc_bottom) 1513 return 0; 1514 /* Will we bump into the RMO ? If yes, check out that we 1515 * didn't overlap existing allocations there, if we did, 1516 * we are dead, we must be the first in town ! 
1517 */ 1518 if (addr < rmo_top) { 1519 /* Good, we are first */ 1520 if (alloc_top == rmo_top) 1521 alloc_top = rmo_top = addr; 1522 else 1523 return 0; 1524 } 1525 alloc_top_high = addr; 1526 goto bail; 1527 } 1528 1529 base = ALIGN_DOWN(alloc_top - size, align); 1530 for (; base > alloc_bottom; 1531 base = ALIGN_DOWN(base - 0x100000, align)) { 1532 prom_debug(" trying: 0x%lx\n\r", base); 1533 addr = (unsigned long)prom_claim(base, size, 0); 1534 if (addr != PROM_ERROR && addr != 0) 1535 break; 1536 addr = 0; 1537 } 1538 if (addr == 0) 1539 return 0; 1540 alloc_top = addr; 1541 1542 bail: 1543 prom_debug(" -> %lx\n", addr); 1544 prom_debug(" alloc_bottom : %lx\n", alloc_bottom); 1545 prom_debug(" alloc_top : %lx\n", alloc_top); 1546 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); 1547 prom_debug(" rmo_top : %lx\n", rmo_top); 1548 prom_debug(" ram_top : %lx\n", ram_top); 1549 1550 return addr; 1551 } 1552 1553 /* 1554 * Parse a "reg" cell 1555 */ 1556 static unsigned long __init prom_next_cell(int s, cell_t **cellp) 1557 { 1558 cell_t *p = *cellp; 1559 unsigned long r = 0; 1560 1561 /* Ignore more than 2 cells */ 1562 while (s > sizeof(unsigned long) / 4) { 1563 p++; 1564 s--; 1565 } 1566 r = be32_to_cpu(*p++); 1567 #ifdef CONFIG_PPC64 1568 if (s > 1) { 1569 r <<= 32; 1570 r |= be32_to_cpu(*(p++)); 1571 } 1572 #endif 1573 *cellp = p; 1574 return r; 1575 } 1576 1577 /* 1578 * Very dumb function for adding to the memory reserve list, but 1579 * we don't need anything smarter at this point 1580 * 1581 * XXX Eventually check for collisions. They should NEVER happen. 1582 * If problems seem to show up, it would be a good start to track 1583 * them down. 1584 */ 1585 static void __init reserve_mem(u64 base, u64 size) 1586 { 1587 u64 top = base + size; 1588 unsigned long cnt = mem_reserve_cnt; 1589 1590 if (size == 0) 1591 return; 1592 1593 /* We need to always keep one empty entry so that we 1594 * have our terminator with "size" set to 0 since we are 1595 * dumb and just copy this entire array to the boot params 1596 */ 1597 base = ALIGN_DOWN(base, PAGE_SIZE); 1598 top = ALIGN(top, PAGE_SIZE); 1599 size = top - base; 1600 1601 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1)) 1602 prom_panic("Memory reserve map exhausted !\n"); 1603 mem_reserve_map[cnt].base = cpu_to_be64(base); 1604 mem_reserve_map[cnt].size = cpu_to_be64(size); 1605 mem_reserve_cnt = cnt + 1; 1606 } 1607 1608 /* 1609 * Initialize memory allocation mechanism, parse "memory" nodes and 1610 * obtain that way the top of memory and RMO to setup out local allocator 1611 */ 1612 static void __init prom_init_mem(void) 1613 { 1614 phandle node; 1615 char type[64]; 1616 unsigned int plen; 1617 cell_t *p, *endp; 1618 __be32 val; 1619 u32 rac, rsc; 1620 1621 /* 1622 * We iterate the memory nodes to find 1623 * 1) top of RMO (first node) 1624 * 2) top of memory 1625 */ 1626 val = cpu_to_be32(2); 1627 prom_getprop(prom.root, "#address-cells", &val, sizeof(val)); 1628 rac = be32_to_cpu(val); 1629 val = cpu_to_be32(1); 1630 prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc)); 1631 rsc = be32_to_cpu(val); 1632 prom_debug("root_addr_cells: %x\n", rac); 1633 prom_debug("root_size_cells: %x\n", rsc); 1634 1635 prom_debug("scanning memory:\n"); 1636 1637 for (node = 0; prom_next_node(&node); ) { 1638 type[0] = 0; 1639 prom_getprop(node, "device_type", type, sizeof(type)); 1640 1641 if (type[0] == 0) { 1642 /* 1643 * CHRP Longtrail machines have no device_type 1644 * on the memory node, so check the name instead... 
1645 */ 1646 prom_getprop(node, "name", type, sizeof(type)); 1647 } 1648 if (prom_strcmp(type, "memory")) 1649 continue; 1650 1651 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf)); 1652 if (plen > sizeof(regbuf)) { 1653 prom_printf("memory node too large for buffer !\n"); 1654 plen = sizeof(regbuf); 1655 } 1656 p = regbuf; 1657 endp = p + (plen / sizeof(cell_t)); 1658 1659 #ifdef DEBUG_PROM 1660 memset(prom_scratch, 0, sizeof(prom_scratch)); 1661 call_prom("package-to-path", 3, 1, node, prom_scratch, 1662 sizeof(prom_scratch) - 1); 1663 prom_debug(" node %s :\n", prom_scratch); 1664 #endif /* DEBUG_PROM */ 1665 1666 while ((endp - p) >= (rac + rsc)) { 1667 unsigned long base, size; 1668 1669 base = prom_next_cell(rac, &p); 1670 size = prom_next_cell(rsc, &p); 1671 1672 if (size == 0) 1673 continue; 1674 prom_debug(" %lx %lx\n", base, size); 1675 if (base == 0 && (of_platform & PLATFORM_LPAR)) 1676 rmo_top = size; 1677 if ((base + size) > ram_top) 1678 ram_top = base + size; 1679 } 1680 } 1681 1682 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000); 1683 1684 /* 1685 * If prom_memory_limit is set we reduce the upper limits *except* for 1686 * alloc_top_high. This must be the real top of RAM so we can put 1687 * TCE's up there. 1688 */ 1689 1690 alloc_top_high = ram_top; 1691 1692 if (prom_memory_limit) { 1693 if (prom_memory_limit <= alloc_bottom) { 1694 prom_printf("Ignoring mem=%lx <= alloc_bottom.\n", 1695 prom_memory_limit); 1696 prom_memory_limit = 0; 1697 } else if (prom_memory_limit >= ram_top) { 1698 prom_printf("Ignoring mem=%lx >= ram_top.\n", 1699 prom_memory_limit); 1700 prom_memory_limit = 0; 1701 } else { 1702 ram_top = prom_memory_limit; 1703 rmo_top = min(rmo_top, prom_memory_limit); 1704 } 1705 } 1706 1707 /* 1708 * Setup our top alloc point, that is top of RMO or top of 1709 * segment 0 when running non-LPAR. 1710 * Some RS64 machines have buggy firmware where claims up at 1711 * 1GB fail. Cap at 768MB as a workaround. 1712 * Since 768MB is plenty of room, and we need to cap to something 1713 * reasonable on 32-bit, cap at 768MB on all machines. 1714 */ 1715 if (!rmo_top) 1716 rmo_top = ram_top; 1717 rmo_top = min(0x30000000ul, rmo_top); 1718 alloc_top = rmo_top; 1719 alloc_top_high = ram_top; 1720 1721 /* 1722 * Check if we have an initrd after the kernel but still inside 1723 * the RMO. If we do move our bottom point to after it. 
1724 */ 1725 if (prom_initrd_start && 1726 prom_initrd_start < rmo_top && 1727 prom_initrd_end > alloc_bottom) 1728 alloc_bottom = PAGE_ALIGN(prom_initrd_end); 1729 1730 prom_printf("memory layout at init:\n"); 1731 prom_printf(" memory_limit : %lx (16 MB aligned)\n", 1732 prom_memory_limit); 1733 prom_printf(" alloc_bottom : %lx\n", alloc_bottom); 1734 prom_printf(" alloc_top : %lx\n", alloc_top); 1735 prom_printf(" alloc_top_hi : %lx\n", alloc_top_high); 1736 prom_printf(" rmo_top : %lx\n", rmo_top); 1737 prom_printf(" ram_top : %lx\n", ram_top); 1738 } 1739 1740 static void __init prom_close_stdin(void) 1741 { 1742 __be32 val; 1743 ihandle stdin; 1744 1745 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) { 1746 stdin = be32_to_cpu(val); 1747 call_prom("close", 1, 0, stdin); 1748 } 1749 } 1750 1751 #ifdef CONFIG_PPC_SVM 1752 static int prom_rtas_hcall(uint64_t args) 1753 { 1754 register uint64_t arg1 asm("r3") = H_RTAS; 1755 register uint64_t arg2 asm("r4") = args; 1756 1757 asm volatile("sc 1\n" : "=r" (arg1) : 1758 "r" (arg1), 1759 "r" (arg2) :); 1760 return arg1; 1761 } 1762 1763 static struct rtas_args __prombss os_term_args; 1764 1765 static void __init prom_rtas_os_term(char *str) 1766 { 1767 phandle rtas_node; 1768 __be32 val; 1769 u32 token; 1770 1771 prom_debug("%s: start...\n", __func__); 1772 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas")); 1773 prom_debug("rtas_node: %x\n", rtas_node); 1774 if (!PHANDLE_VALID(rtas_node)) 1775 return; 1776 1777 val = 0; 1778 prom_getprop(rtas_node, "ibm,os-term", &val, sizeof(val)); 1779 token = be32_to_cpu(val); 1780 prom_debug("ibm,os-term: %x\n", token); 1781 if (token == 0) 1782 prom_panic("Could not get token for ibm,os-term\n"); 1783 os_term_args.token = cpu_to_be32(token); 1784 os_term_args.nargs = cpu_to_be32(1); 1785 os_term_args.nret = cpu_to_be32(1); 1786 os_term_args.args[0] = cpu_to_be32(__pa(str)); 1787 prom_rtas_hcall((uint64_t)&os_term_args); 1788 } 1789 #endif /* CONFIG_PPC_SVM */ 1790 1791 /* 1792 * Allocate room for and instantiate RTAS 1793 */ 1794 static void __init prom_instantiate_rtas(void) 1795 { 1796 phandle rtas_node; 1797 ihandle rtas_inst; 1798 u32 base, entry = 0; 1799 __be32 val; 1800 u32 size = 0; 1801 1802 prom_debug("prom_instantiate_rtas: start...\n"); 1803 1804 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas")); 1805 prom_debug("rtas_node: %x\n", rtas_node); 1806 if (!PHANDLE_VALID(rtas_node)) 1807 return; 1808 1809 val = 0; 1810 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size)); 1811 size = be32_to_cpu(val); 1812 if (size == 0) 1813 return; 1814 1815 base = alloc_down(size, PAGE_SIZE, 0); 1816 if (base == 0) 1817 prom_panic("Could not allocate memory for RTAS\n"); 1818 1819 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas")); 1820 if (!IHANDLE_VALID(rtas_inst)) { 1821 prom_printf("opening rtas package failed (%x)\n", rtas_inst); 1822 return; 1823 } 1824 1825 prom_printf("instantiating rtas at 0x%x...", base); 1826 1827 if (call_prom_ret("call-method", 3, 2, &entry, 1828 ADDR("instantiate-rtas"), 1829 rtas_inst, base) != 0 1830 || entry == 0) { 1831 prom_printf(" failed\n"); 1832 return; 1833 } 1834 prom_printf(" done\n"); 1835 1836 reserve_mem(base, size); 1837 1838 val = cpu_to_be32(base); 1839 prom_setprop(rtas_node, "/rtas", "linux,rtas-base", 1840 &val, sizeof(val)); 1841 val = cpu_to_be32(entry); 1842 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry", 1843 &val, sizeof(val)); 1844 1845 /* Check if it supports "query-cpu-stopped-state" */ 1846 if 
(prom_getprop(rtas_node, "query-cpu-stopped-state", 1847 &val, sizeof(val)) != PROM_ERROR) 1848 rtas_has_query_cpu_stopped = true; 1849 1850 prom_debug("rtas base = 0x%x\n", base); 1851 prom_debug("rtas entry = 0x%x\n", entry); 1852 prom_debug("rtas size = 0x%x\n", size); 1853 1854 prom_debug("prom_instantiate_rtas: end...\n"); 1855 } 1856 1857 #ifdef CONFIG_PPC64 1858 /* 1859 * Allocate room for and instantiate Stored Measurement Log (SML) 1860 */ 1861 static void __init prom_instantiate_sml(void) 1862 { 1863 phandle ibmvtpm_node; 1864 ihandle ibmvtpm_inst; 1865 u32 entry = 0, size = 0, succ = 0; 1866 u64 base; 1867 __be32 val; 1868 1869 prom_debug("prom_instantiate_sml: start...\n"); 1870 1871 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm")); 1872 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node); 1873 if (!PHANDLE_VALID(ibmvtpm_node)) 1874 return; 1875 1876 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm")); 1877 if (!IHANDLE_VALID(ibmvtpm_inst)) { 1878 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst); 1879 return; 1880 } 1881 1882 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported", 1883 &val, sizeof(val)) != PROM_ERROR) { 1884 if (call_prom_ret("call-method", 2, 2, &succ, 1885 ADDR("reformat-sml-to-efi-alignment"), 1886 ibmvtpm_inst) != 0 || succ == 0) { 1887 prom_printf("Reformat SML to EFI alignment failed\n"); 1888 return; 1889 } 1890 1891 if (call_prom_ret("call-method", 2, 2, &size, 1892 ADDR("sml-get-allocated-size"), 1893 ibmvtpm_inst) != 0 || size == 0) { 1894 prom_printf("SML get allocated size failed\n"); 1895 return; 1896 } 1897 } else { 1898 if (call_prom_ret("call-method", 2, 2, &size, 1899 ADDR("sml-get-handover-size"), 1900 ibmvtpm_inst) != 0 || size == 0) { 1901 prom_printf("SML get handover size failed\n"); 1902 return; 1903 } 1904 } 1905 1906 base = alloc_down(size, PAGE_SIZE, 0); 1907 if (base == 0) 1908 prom_panic("Could not allocate memory for sml\n"); 1909 1910 prom_printf("instantiating sml at 0x%llx...", base); 1911 1912 memset((void *)base, 0, size); 1913 1914 if (call_prom_ret("call-method", 4, 2, &entry, 1915 ADDR("sml-handover"), 1916 ibmvtpm_inst, size, base) != 0 || entry == 0) { 1917 prom_printf("SML handover failed\n"); 1918 return; 1919 } 1920 prom_printf(" done\n"); 1921 1922 reserve_mem(base, size); 1923 1924 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base", 1925 &base, sizeof(base)); 1926 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size", 1927 &size, sizeof(size)); 1928 1929 prom_debug("sml base = 0x%llx\n", base); 1930 prom_debug("sml size = 0x%x\n", size); 1931 1932 prom_debug("prom_instantiate_sml: end...\n"); 1933 } 1934 1935 /* 1936 * Allocate room for and initialize TCE tables 1937 */ 1938 #ifdef __BIG_ENDIAN__ 1939 static void __init prom_initialize_tce_table(void) 1940 { 1941 phandle node; 1942 ihandle phb_node; 1943 char compatible[64], type[64], model[64]; 1944 char *path = prom_scratch; 1945 u64 base, align; 1946 u32 minalign, minsize; 1947 u64 tce_entry, *tce_entryp; 1948 u64 local_alloc_top, local_alloc_bottom; 1949 u64 i; 1950 1951 if (prom_iommu_off) 1952 return; 1953 1954 prom_debug("starting prom_initialize_tce_table\n"); 1955 1956 /* Cache current top of allocs so we reserve a single block */ 1957 local_alloc_top = alloc_top_high; 1958 local_alloc_bottom = local_alloc_top; 1959 1960 /* Search all nodes looking for PHBs. 
*/ 1961 for (node = 0; prom_next_node(&node); ) { 1962 compatible[0] = 0; 1963 type[0] = 0; 1964 model[0] = 0; 1965 prom_getprop(node, "compatible", 1966 compatible, sizeof(compatible)); 1967 prom_getprop(node, "device_type", type, sizeof(type)); 1968 prom_getprop(node, "model", model, sizeof(model)); 1969 1970 if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL)) 1971 continue; 1972 1973 /* Keep the old logic intact to avoid regression. */ 1974 if (compatible[0] != 0) { 1975 if ((prom_strstr(compatible, "python") == NULL) && 1976 (prom_strstr(compatible, "Speedwagon") == NULL) && 1977 (prom_strstr(compatible, "Winnipeg") == NULL)) 1978 continue; 1979 } else if (model[0] != 0) { 1980 if ((prom_strstr(model, "ython") == NULL) && 1981 (prom_strstr(model, "peedwagon") == NULL) && 1982 (prom_strstr(model, "innipeg") == NULL)) 1983 continue; 1984 } 1985 1986 if (prom_getprop(node, "tce-table-minalign", &minalign, 1987 sizeof(minalign)) == PROM_ERROR) 1988 minalign = 0; 1989 if (prom_getprop(node, "tce-table-minsize", &minsize, 1990 sizeof(minsize)) == PROM_ERROR) 1991 minsize = 4UL << 20; 1992 1993 /* 1994 * Even though we read what OF wants, we just set the table 1995 * size to 4 MB. This is enough to map 2GB of PCI DMA space. 1996 * By doing this, we avoid the pitfalls of trying to DMA to 1997 * MMIO space and the DMA alias hole. 1998 */ 1999 minsize = 4UL << 20; 2000 2001 /* Align to the greater of the align or size */ 2002 align = max(minalign, minsize); 2003 base = alloc_down(minsize, align, 1); 2004 if (base == 0) 2005 prom_panic("ERROR, cannot find space for TCE table.\n"); 2006 if (base < local_alloc_bottom) 2007 local_alloc_bottom = base; 2008 2009 /* It seems OF doesn't null-terminate the path :-( */ 2010 memset(path, 0, sizeof(prom_scratch)); 2011 /* Call OF to setup the TCE hardware */ 2012 if (call_prom("package-to-path", 3, 1, node, 2013 path, sizeof(prom_scratch) - 1) == PROM_ERROR) { 2014 prom_printf("package-to-path failed\n"); 2015 } 2016 2017 /* Save away the TCE table attributes for later use. */ 2018 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base)); 2019 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize)); 2020 2021 prom_debug("TCE table: %s\n", path); 2022 prom_debug("\tnode = 0x%x\n", node); 2023 prom_debug("\tbase = 0x%llx\n", base); 2024 prom_debug("\tsize = 0x%x\n", minsize); 2025 2026 /* Initialize the table to have a one-to-one mapping 2027 * over the allocated size. 2028 */ 2029 tce_entryp = (u64 *)base; 2030 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) { 2031 tce_entry = (i << PAGE_SHIFT); 2032 tce_entry |= 0x3; 2033 *tce_entryp = tce_entry; 2034 } 2035 2036 prom_printf("opening PHB %s", path); 2037 phb_node = call_prom("open", 1, 1, path); 2038 if (phb_node == 0) 2039 prom_printf("... failed\n"); 2040 else 2041 prom_printf("... done\n"); 2042 2043 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"), 2044 phb_node, -1, minsize, 2045 (u32) base, (u32) (base >> 32)); 2046 call_prom("close", 1, 0, phb_node); 2047 } 2048 2049 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom); 2050 2051 /* These are only really needed if there is a memory limit in 2052 * effect, but we don't know so export them always. 
*/ 2053 prom_tce_alloc_start = local_alloc_bottom; 2054 prom_tce_alloc_end = local_alloc_top; 2055 2056 /* Flag the first invalid entry */ 2057 prom_debug("ending prom_initialize_tce_table\n"); 2058 } 2059 #endif /* __BIG_ENDIAN__ */ 2060 #endif /* CONFIG_PPC64 */ 2061 2062 /* 2063 * With CHRP SMP we need to use the OF to start the other processors. 2064 * We can't wait until smp_boot_cpus (the OF is trashed by then) 2065 * so we have to put the processors into a holding pattern controlled 2066 * by the kernel (not OF) before we destroy the OF. 2067 * 2068 * This uses a chunk of low memory, puts some holding pattern 2069 * code there and sends the other processors off to there until 2070 * smp_boot_cpus tells them to do something. The holding pattern 2071 * checks that address until its cpu # is there, when it is that 2072 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care 2073 * of setting those values. 2074 * 2075 * We also use physical address 0x4 here to tell when a cpu 2076 * is in its holding pattern code. 2077 * 2078 * -- Cort 2079 */ 2080 /* 2081 * We want to reference the copy of __secondary_hold_* in the 2082 * 0 - 0x100 address range 2083 */ 2084 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff) 2085 2086 static void __init prom_hold_cpus(void) 2087 { 2088 unsigned long i; 2089 phandle node; 2090 char type[64]; 2091 unsigned long *spinloop 2092 = (void *) LOW_ADDR(__secondary_hold_spinloop); 2093 unsigned long *acknowledge 2094 = (void *) LOW_ADDR(__secondary_hold_acknowledge); 2095 unsigned long secondary_hold = LOW_ADDR(__secondary_hold); 2096 2097 /* 2098 * On pseries, if RTAS supports "query-cpu-stopped-state", 2099 * we skip this stage, the CPUs will be started by the 2100 * kernel using RTAS. 2101 */ 2102 if ((of_platform == PLATFORM_PSERIES || 2103 of_platform == PLATFORM_PSERIES_LPAR) && 2104 rtas_has_query_cpu_stopped) { 2105 prom_printf("prom_hold_cpus: skipped\n"); 2106 return; 2107 } 2108 2109 prom_debug("prom_hold_cpus: start...\n"); 2110 prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop); 2111 prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop); 2112 prom_debug(" 1) acknowledge = 0x%lx\n", 2113 (unsigned long)acknowledge); 2114 prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge); 2115 prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold); 2116 2117 /* Set the common spinloop variable, so all of the secondary cpus 2118 * will block when they are awakened from their OF spinloop. 2119 * This must occur for both SMP and non SMP kernels, since OF will 2120 * be trashed when we move the kernel. 2121 */ 2122 *spinloop = 0; 2123 2124 /* look for cpus */ 2125 for (node = 0; prom_next_node(&node); ) { 2126 unsigned int cpu_no; 2127 __be32 reg; 2128 2129 type[0] = 0; 2130 prom_getprop(node, "device_type", type, sizeof(type)); 2131 if (prom_strcmp(type, "cpu") != 0) 2132 continue; 2133 2134 /* Skip non-configured cpus. */ 2135 if (prom_getprop(node, "status", type, sizeof(type)) > 0) 2136 if (prom_strcmp(type, "okay") != 0) 2137 continue; 2138 2139 reg = cpu_to_be32(-1); /* make sparse happy */ 2140 prom_getprop(node, "reg", ®, sizeof(reg)); 2141 cpu_no = be32_to_cpu(reg); 2142 2143 prom_debug("cpu hw idx = %u\n", cpu_no); 2144 2145 /* Init the acknowledge var which will be reset by 2146 * the secondary cpu when it awakens from its OF 2147 * spinloop. 2148 */ 2149 *acknowledge = (unsigned long)-1; 2150 2151 if (cpu_no != prom.cpu) { 2152 /* Primary Thread of non-boot cpu or any thread */ 2153 prom_printf("starting cpu hw idx %u... 
", cpu_no); 2154 call_prom("start-cpu", 3, 0, node, 2155 secondary_hold, cpu_no); 2156 2157 for (i = 0; (i < 100000000) && 2158 (*acknowledge == ((unsigned long)-1)); i++ ) 2159 mb(); 2160 2161 if (*acknowledge == cpu_no) 2162 prom_printf("done\n"); 2163 else 2164 prom_printf("failed: %lx\n", *acknowledge); 2165 } 2166 #ifdef CONFIG_SMP 2167 else 2168 prom_printf("boot cpu hw idx %u\n", cpu_no); 2169 #endif /* CONFIG_SMP */ 2170 } 2171 2172 prom_debug("prom_hold_cpus: end...\n"); 2173 } 2174 2175 2176 static void __init prom_init_client_services(unsigned long pp) 2177 { 2178 /* Get a handle to the prom entry point before anything else */ 2179 prom_entry = pp; 2180 2181 /* get a handle for the stdout device */ 2182 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen")); 2183 if (!PHANDLE_VALID(prom.chosen)) 2184 prom_panic("cannot find chosen"); /* msg won't be printed :( */ 2185 2186 /* get device tree root */ 2187 prom.root = call_prom("finddevice", 1, 1, ADDR("/")); 2188 if (!PHANDLE_VALID(prom.root)) 2189 prom_panic("cannot find device tree root"); /* msg won't be printed :( */ 2190 2191 prom.mmumap = 0; 2192 } 2193 2194 #ifdef CONFIG_PPC32 2195 /* 2196 * For really old powermacs, we need to map things we claim. 2197 * For that, we need the ihandle of the mmu. 2198 * Also, on the longtrail, we need to work around other bugs. 2199 */ 2200 static void __init prom_find_mmu(void) 2201 { 2202 phandle oprom; 2203 char version[64]; 2204 2205 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom")); 2206 if (!PHANDLE_VALID(oprom)) 2207 return; 2208 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0) 2209 return; 2210 version[sizeof(version) - 1] = 0; 2211 /* XXX might need to add other versions here */ 2212 if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0) 2213 of_workarounds = OF_WA_CLAIM; 2214 else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) { 2215 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL; 2216 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim"); 2217 } else 2218 return; 2219 prom.memory = call_prom("open", 1, 1, ADDR("/memory")); 2220 prom_getprop(prom.chosen, "mmu", &prom.mmumap, 2221 sizeof(prom.mmumap)); 2222 prom.mmumap = be32_to_cpu(prom.mmumap); 2223 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap)) 2224 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */ 2225 } 2226 #else 2227 #define prom_find_mmu() 2228 #endif 2229 2230 static void __init prom_init_stdout(void) 2231 { 2232 char *path = of_stdout_device; 2233 char type[16]; 2234 phandle stdout_node; 2235 __be32 val; 2236 2237 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0) 2238 prom_panic("cannot find stdout"); 2239 2240 prom.stdout = be32_to_cpu(val); 2241 2242 /* Get the full OF pathname of the stdout device */ 2243 memset(path, 0, 256); 2244 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255); 2245 prom_printf("OF stdout device is: %s\n", of_stdout_device); 2246 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path", 2247 path, prom_strlen(path) + 1); 2248 2249 /* instance-to-package fails on PA-Semi */ 2250 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); 2251 if (stdout_node != PROM_ERROR) { 2252 val = cpu_to_be32(stdout_node); 2253 2254 /* If it's a display, note it */ 2255 memset(type, 0, sizeof(type)); 2256 prom_getprop(stdout_node, "device_type", type, sizeof(type)); 2257 if (prom_strcmp(type, "display") == 0) 2258 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0); 2259 } 2260 } 2261 2262 static int __init 
prom_find_machine_type(void)
2263 {
2264 char compat[256];
2265 int len, i = 0;
2266 #ifdef CONFIG_PPC64
2267 phandle rtas;
2268 int x;
2269 #endif
2270
2271 /* Look for a PowerMac or a Cell */
2272 len = prom_getprop(prom.root, "compatible",
2273 compat, sizeof(compat)-1);
2274 if (len > 0) {
2275 compat[len] = 0;
2276 while (i < len) {
2277 char *p = &compat[i];
2278 int sl = prom_strlen(p);
2279 if (sl == 0)
2280 break;
2281 if (prom_strstr(p, "Power Macintosh") ||
2282 prom_strstr(p, "MacRISC"))
2283 return PLATFORM_POWERMAC;
2284 #ifdef CONFIG_PPC64
2285 /* We must make sure we don't detect the IBM Cell
2286 * blades as pSeries due to some firmware issues,
2287 * so we do it here.
2288 */
2289 if (prom_strstr(p, "IBM,CBEA") ||
2290 prom_strstr(p, "IBM,CPBW-1.0"))
2291 return PLATFORM_GENERIC;
2292 #endif /* CONFIG_PPC64 */
2293 i += sl + 1;
2294 }
2295 }
2296 #ifdef CONFIG_PPC64
2297 /* Try to figure out if it's an IBM pSeries or any other
2298 * PAPR compliant platform. We assume it is if:
2299 * - /device_type is "chrp" (please, do NOT use that for future
2300 * non-IBM designs!)
2301 * - it has /rtas
2302 */
2303 len = prom_getprop(prom.root, "device_type",
2304 compat, sizeof(compat)-1);
2305 if (len <= 0)
2306 return PLATFORM_GENERIC;
2307 if (prom_strcmp(compat, "chrp"))
2308 return PLATFORM_GENERIC;
2309
2310 /* Default to pSeries. We need to know if we are running LPAR */
2311 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2312 if (!PHANDLE_VALID(rtas))
2313 return PLATFORM_GENERIC;
2314 x = prom_getproplen(rtas, "ibm,hypertas-functions");
2315 if (x != PROM_ERROR) {
2316 prom_debug("Hypertas detected, assuming LPAR !\n");
2317 return PLATFORM_PSERIES_LPAR;
2318 }
2319 return PLATFORM_PSERIES;
2320 #else
2321 return PLATFORM_GENERIC;
2322 #endif
2323 }
2324
2325 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2326 {
2327 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2328 }
2329
2330 /*
2331 * If we have a display that we don't know how to drive,
2332 * we will want to try to execute OF's open method for it
2333 * later. However, OF will probably fall over if we do that
2334 * once we've taken over the MMU.
2335 * So we check whether we will need to open the display,
2336 * and if so, open it now.
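* Any display we manage to open is tagged with a "linux,opened"
* property so it can be found again later; the one that carries
* "linux,boot-display" (set in prom_init_stdout()) is also used for
* early btext output when CONFIG_PPC_EARLY_DEBUG_BOOTX is enabled.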
2337 */ 2338 static void __init prom_check_displays(void) 2339 { 2340 char type[16], *path; 2341 phandle node; 2342 ihandle ih; 2343 int i; 2344 2345 static const unsigned char default_colors[] __initconst = { 2346 0x00, 0x00, 0x00, 2347 0x00, 0x00, 0xaa, 2348 0x00, 0xaa, 0x00, 2349 0x00, 0xaa, 0xaa, 2350 0xaa, 0x00, 0x00, 2351 0xaa, 0x00, 0xaa, 2352 0xaa, 0xaa, 0x00, 2353 0xaa, 0xaa, 0xaa, 2354 0x55, 0x55, 0x55, 2355 0x55, 0x55, 0xff, 2356 0x55, 0xff, 0x55, 2357 0x55, 0xff, 0xff, 2358 0xff, 0x55, 0x55, 2359 0xff, 0x55, 0xff, 2360 0xff, 0xff, 0x55, 2361 0xff, 0xff, 0xff 2362 }; 2363 const unsigned char *clut; 2364 2365 prom_debug("Looking for displays\n"); 2366 for (node = 0; prom_next_node(&node); ) { 2367 memset(type, 0, sizeof(type)); 2368 prom_getprop(node, "device_type", type, sizeof(type)); 2369 if (prom_strcmp(type, "display") != 0) 2370 continue; 2371 2372 /* It seems OF doesn't null-terminate the path :-( */ 2373 path = prom_scratch; 2374 memset(path, 0, sizeof(prom_scratch)); 2375 2376 /* 2377 * leave some room at the end of the path for appending extra 2378 * arguments 2379 */ 2380 if (call_prom("package-to-path", 3, 1, node, path, 2381 sizeof(prom_scratch) - 10) == PROM_ERROR) 2382 continue; 2383 prom_printf("found display : %s, opening... ", path); 2384 2385 ih = call_prom("open", 1, 1, path); 2386 if (ih == 0) { 2387 prom_printf("failed\n"); 2388 continue; 2389 } 2390 2391 /* Success */ 2392 prom_printf("done\n"); 2393 prom_setprop(node, path, "linux,opened", NULL, 0); 2394 2395 /* Setup a usable color table when the appropriate 2396 * method is available. Should update this to set-colors */ 2397 clut = default_colors; 2398 for (i = 0; i < 16; i++, clut += 3) 2399 if (prom_set_color(ih, i, clut[0], clut[1], 2400 clut[2]) != 0) 2401 break; 2402 2403 #ifdef CONFIG_LOGO_LINUX_CLUT224 2404 clut = PTRRELOC(logo_linux_clut224.clut); 2405 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3) 2406 if (prom_set_color(ih, i + 32, clut[0], clut[1], 2407 clut[2]) != 0) 2408 break; 2409 #endif /* CONFIG_LOGO_LINUX_CLUT224 */ 2410 2411 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX 2412 if (prom_getprop(node, "linux,boot-display", NULL, 0) != 2413 PROM_ERROR) { 2414 u32 width, height, pitch, addr; 2415 2416 prom_printf("Setting btext !\n"); 2417 prom_getprop(node, "width", &width, 4); 2418 prom_getprop(node, "height", &height, 4); 2419 prom_getprop(node, "linebytes", &pitch, 4); 2420 prom_getprop(node, "address", &addr, 4); 2421 prom_printf("W=%d H=%d LB=%d addr=0x%x\n", 2422 width, height, pitch, addr); 2423 btext_setup_display(width, height, 8, pitch, addr); 2424 btext_prepare_BAT(); 2425 } 2426 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */ 2427 } 2428 } 2429 2430 2431 /* Return (relocated) pointer to this much memory: moves initrd if reqd. 
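* When the current chunk is exhausted, another chunk of up to
* DEVTREE_CHUNK_SIZE is claimed with alloc_up(); if that claim fails
* we panic, as the flattened device tree cannot be built.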
*/ 2432 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end, 2433 unsigned long needed, unsigned long align) 2434 { 2435 void *ret; 2436 2437 *mem_start = ALIGN(*mem_start, align); 2438 while ((*mem_start + needed) > *mem_end) { 2439 unsigned long room, chunk; 2440 2441 prom_debug("Chunk exhausted, claiming more at %lx...\n", 2442 alloc_bottom); 2443 room = alloc_top - alloc_bottom; 2444 if (room > DEVTREE_CHUNK_SIZE) 2445 room = DEVTREE_CHUNK_SIZE; 2446 if (room < PAGE_SIZE) 2447 prom_panic("No memory for flatten_device_tree " 2448 "(no room)\n"); 2449 chunk = alloc_up(room, 0); 2450 if (chunk == 0) 2451 prom_panic("No memory for flatten_device_tree " 2452 "(claim failed)\n"); 2453 *mem_end = chunk + room; 2454 } 2455 2456 ret = (void *)*mem_start; 2457 *mem_start += needed; 2458 2459 return ret; 2460 } 2461 2462 #define dt_push_token(token, mem_start, mem_end) do { \ 2463 void *room = make_room(mem_start, mem_end, 4, 4); \ 2464 *(__be32 *)room = cpu_to_be32(token); \ 2465 } while(0) 2466 2467 static unsigned long __init dt_find_string(char *str) 2468 { 2469 char *s, *os; 2470 2471 s = os = (char *)dt_string_start; 2472 s += 4; 2473 while (s < (char *)dt_string_end) { 2474 if (prom_strcmp(s, str) == 0) 2475 return s - os; 2476 s += prom_strlen(s) + 1; 2477 } 2478 return 0; 2479 } 2480 2481 /* 2482 * The Open Firmware 1275 specification states properties must be 31 bytes or 2483 * less, however not all firmwares obey this. Make it 64 bytes to be safe. 2484 */ 2485 #define MAX_PROPERTY_NAME 64 2486 2487 static void __init scan_dt_build_strings(phandle node, 2488 unsigned long *mem_start, 2489 unsigned long *mem_end) 2490 { 2491 char *prev_name, *namep, *sstart; 2492 unsigned long soff; 2493 phandle child; 2494 2495 sstart = (char *)dt_string_start; 2496 2497 /* get and store all property names */ 2498 prev_name = ""; 2499 for (;;) { 2500 /* 64 is max len of name including nul. */ 2501 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1); 2502 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) { 2503 /* No more nodes: unwind alloc */ 2504 *mem_start = (unsigned long)namep; 2505 break; 2506 } 2507 2508 /* skip "name" */ 2509 if (prom_strcmp(namep, "name") == 0) { 2510 *mem_start = (unsigned long)namep; 2511 prev_name = "name"; 2512 continue; 2513 } 2514 /* get/create string entry */ 2515 soff = dt_find_string(namep); 2516 if (soff != 0) { 2517 *mem_start = (unsigned long)namep; 2518 namep = sstart + soff; 2519 } else { 2520 /* Trim off some if we can */ 2521 *mem_start = (unsigned long)namep + prom_strlen(namep) + 1; 2522 dt_string_end = *mem_start; 2523 } 2524 prev_name = namep; 2525 } 2526 2527 /* do all our children */ 2528 child = call_prom("child", 1, 1, node); 2529 while (child != 0) { 2530 scan_dt_build_strings(child, mem_start, mem_end); 2531 child = call_prom("peer", 1, 1, child); 2532 } 2533 } 2534 2535 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, 2536 unsigned long *mem_end) 2537 { 2538 phandle child; 2539 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path; 2540 unsigned long soff; 2541 unsigned char *valp; 2542 static char pname[MAX_PROPERTY_NAME] __prombss; 2543 int l, room, has_phandle = 0; 2544 2545 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end); 2546 2547 /* get the node's full name */ 2548 namep = (char *)*mem_start; 2549 room = *mem_end - *mem_start; 2550 if (room > 255) 2551 room = 255; 2552 l = call_prom("package-to-path", 3, 1, node, namep, room); 2553 if (l >= 0) { 2554 /* Didn't fit? 
Get more room. */ 2555 if (l >= room) { 2556 if (l >= *mem_end - *mem_start) 2557 namep = make_room(mem_start, mem_end, l+1, 1); 2558 call_prom("package-to-path", 3, 1, node, namep, l); 2559 } 2560 namep[l] = '\0'; 2561 2562 /* Fixup an Apple bug where they have bogus \0 chars in the 2563 * middle of the path in some properties, and extract 2564 * the unit name (everything after the last '/'). 2565 */ 2566 for (lp = p = namep, ep = namep + l; p < ep; p++) { 2567 if (*p == '/') 2568 lp = namep; 2569 else if (*p != 0) 2570 *lp++ = *p; 2571 } 2572 *lp = 0; 2573 *mem_start = ALIGN((unsigned long)lp + 1, 4); 2574 } 2575 2576 /* get it again for debugging */ 2577 path = prom_scratch; 2578 memset(path, 0, sizeof(prom_scratch)); 2579 call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1); 2580 2581 /* get and store all properties */ 2582 prev_name = ""; 2583 sstart = (char *)dt_string_start; 2584 for (;;) { 2585 if (call_prom("nextprop", 3, 1, node, prev_name, 2586 pname) != 1) 2587 break; 2588 2589 /* skip "name" */ 2590 if (prom_strcmp(pname, "name") == 0) { 2591 prev_name = "name"; 2592 continue; 2593 } 2594 2595 /* find string offset */ 2596 soff = dt_find_string(pname); 2597 if (soff == 0) { 2598 prom_printf("WARNING: Can't find string index for" 2599 " <%s>, node %s\n", pname, path); 2600 break; 2601 } 2602 prev_name = sstart + soff; 2603 2604 /* get length */ 2605 l = call_prom("getproplen", 2, 1, node, pname); 2606 2607 /* sanity checks */ 2608 if (l == PROM_ERROR) 2609 continue; 2610 2611 /* push property head */ 2612 dt_push_token(OF_DT_PROP, mem_start, mem_end); 2613 dt_push_token(l, mem_start, mem_end); 2614 dt_push_token(soff, mem_start, mem_end); 2615 2616 /* push property content */ 2617 valp = make_room(mem_start, mem_end, l, 4); 2618 call_prom("getprop", 4, 1, node, pname, valp, l); 2619 *mem_start = ALIGN(*mem_start, 4); 2620 2621 if (!prom_strcmp(pname, "phandle")) 2622 has_phandle = 1; 2623 } 2624 2625 /* Add a "phandle" property if none already exist */ 2626 if (!has_phandle) { 2627 soff = dt_find_string("phandle"); 2628 if (soff == 0) 2629 prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path); 2630 else { 2631 dt_push_token(OF_DT_PROP, mem_start, mem_end); 2632 dt_push_token(4, mem_start, mem_end); 2633 dt_push_token(soff, mem_start, mem_end); 2634 valp = make_room(mem_start, mem_end, 4, 4); 2635 *(__be32 *)valp = cpu_to_be32(node); 2636 } 2637 } 2638 2639 /* do all our children */ 2640 child = call_prom("child", 1, 1, node); 2641 while (child != 0) { 2642 scan_dt_build_struct(child, mem_start, mem_end); 2643 child = call_prom("peer", 1, 1, child); 2644 } 2645 2646 dt_push_token(OF_DT_END_NODE, mem_start, mem_end); 2647 } 2648 2649 static void __init flatten_device_tree(void) 2650 { 2651 phandle root; 2652 unsigned long mem_start, mem_end, room; 2653 struct boot_param_header *hdr; 2654 char *namep; 2655 u64 *rsvmap; 2656 2657 /* 2658 * Check how much room we have between alloc top & bottom (+/- a 2659 * few pages), crop to 1MB, as this is our "chunk" size 2660 */ 2661 room = alloc_top - alloc_bottom - 0x4000; 2662 if (room > DEVTREE_CHUNK_SIZE) 2663 room = DEVTREE_CHUNK_SIZE; 2664 prom_debug("starting device tree allocs at %lx\n", alloc_bottom); 2665 2666 /* Now try to claim that */ 2667 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE); 2668 if (mem_start == 0) 2669 prom_panic("Can't allocate initial device-tree chunk\n"); 2670 mem_end = mem_start + room; 2671 2672 /* Get root of tree */ 2673 root = call_prom("peer", 1, 1, 
(phandle)0); 2674 if (root == (phandle)0) 2675 prom_panic ("couldn't get device tree root\n"); 2676 2677 /* Build header and make room for mem rsv map */ 2678 mem_start = ALIGN(mem_start, 4); 2679 hdr = make_room(&mem_start, &mem_end, 2680 sizeof(struct boot_param_header), 4); 2681 dt_header_start = (unsigned long)hdr; 2682 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8); 2683 2684 /* Start of strings */ 2685 mem_start = PAGE_ALIGN(mem_start); 2686 dt_string_start = mem_start; 2687 mem_start += 4; /* hole */ 2688 2689 /* Add "phandle" in there, we'll need it */ 2690 namep = make_room(&mem_start, &mem_end, 16, 1); 2691 prom_strcpy(namep, "phandle"); 2692 mem_start = (unsigned long)namep + prom_strlen(namep) + 1; 2693 2694 /* Build string array */ 2695 prom_printf("Building dt strings...\n"); 2696 scan_dt_build_strings(root, &mem_start, &mem_end); 2697 dt_string_end = mem_start; 2698 2699 /* Build structure */ 2700 mem_start = PAGE_ALIGN(mem_start); 2701 dt_struct_start = mem_start; 2702 prom_printf("Building dt structure...\n"); 2703 scan_dt_build_struct(root, &mem_start, &mem_end); 2704 dt_push_token(OF_DT_END, &mem_start, &mem_end); 2705 dt_struct_end = PAGE_ALIGN(mem_start); 2706 2707 /* Finish header */ 2708 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu); 2709 hdr->magic = cpu_to_be32(OF_DT_HEADER); 2710 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start); 2711 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start); 2712 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start); 2713 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start); 2714 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start); 2715 hdr->version = cpu_to_be32(OF_DT_VERSION); 2716 /* Version 16 is not backward compatible */ 2717 hdr->last_comp_version = cpu_to_be32(0x10); 2718 2719 /* Copy the reserve map in */ 2720 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map)); 2721 2722 #ifdef DEBUG_PROM 2723 { 2724 int i; 2725 prom_printf("reserved memory map:\n"); 2726 for (i = 0; i < mem_reserve_cnt; i++) 2727 prom_printf(" %llx - %llx\n", 2728 be64_to_cpu(mem_reserve_map[i].base), 2729 be64_to_cpu(mem_reserve_map[i].size)); 2730 } 2731 #endif 2732 /* Bump mem_reserve_cnt to cause further reservations to fail 2733 * since it's too late. 2734 */ 2735 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE; 2736 2737 prom_printf("Device tree strings 0x%lx -> 0x%lx\n", 2738 dt_string_start, dt_string_end); 2739 prom_printf("Device tree struct 0x%lx -> 0x%lx\n", 2740 dt_struct_start, dt_struct_end); 2741 } 2742 2743 #ifdef CONFIG_PPC_MAPLE 2744 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property. 2745 * The values are bad, and it doesn't even have the right number of cells. 
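* The fixup below only fires on the known-bad three-cell value and
* replaces it with a proper six-cell entry pointing at the I/O space
* of the ISA bridge, i.e. { 0x1, 0x0, rloc, 0x0, 0x0, 0x00010000 }.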
*/ 2746 static void __init fixup_device_tree_maple(void) 2747 { 2748 phandle isa; 2749 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */ 2750 u32 isa_ranges[6]; 2751 char *name; 2752 2753 name = "/ht@0/isa@4"; 2754 isa = call_prom("finddevice", 1, 1, ADDR(name)); 2755 if (!PHANDLE_VALID(isa)) { 2756 name = "/ht@0/isa@6"; 2757 isa = call_prom("finddevice", 1, 1, ADDR(name)); 2758 rloc = 0x01003000; /* IO space; PCI device = 6 */ 2759 } 2760 if (!PHANDLE_VALID(isa)) 2761 return; 2762 2763 if (prom_getproplen(isa, "ranges") != 12) 2764 return; 2765 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges)) 2766 == PROM_ERROR) 2767 return; 2768 2769 if (isa_ranges[0] != 0x1 || 2770 isa_ranges[1] != 0xf4000000 || 2771 isa_ranges[2] != 0x00010000) 2772 return; 2773 2774 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n"); 2775 2776 isa_ranges[0] = 0x1; 2777 isa_ranges[1] = 0x0; 2778 isa_ranges[2] = rloc; 2779 isa_ranges[3] = 0x0; 2780 isa_ranges[4] = 0x0; 2781 isa_ranges[5] = 0x00010000; 2782 prom_setprop(isa, name, "ranges", 2783 isa_ranges, sizeof(isa_ranges)); 2784 } 2785 2786 #define CPC925_MC_START 0xf8000000 2787 #define CPC925_MC_LENGTH 0x1000000 2788 /* The values for memory-controller don't have right number of cells */ 2789 static void __init fixup_device_tree_maple_memory_controller(void) 2790 { 2791 phandle mc; 2792 u32 mc_reg[4]; 2793 char *name = "/hostbridge@f8000000"; 2794 u32 ac, sc; 2795 2796 mc = call_prom("finddevice", 1, 1, ADDR(name)); 2797 if (!PHANDLE_VALID(mc)) 2798 return; 2799 2800 if (prom_getproplen(mc, "reg") != 8) 2801 return; 2802 2803 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac)); 2804 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc)); 2805 if ((ac != 2) || (sc != 2)) 2806 return; 2807 2808 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR) 2809 return; 2810 2811 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH) 2812 return; 2813 2814 prom_printf("Fixing up bogus hostbridge on Maple...\n"); 2815 2816 mc_reg[0] = 0x0; 2817 mc_reg[1] = CPC925_MC_START; 2818 mc_reg[2] = 0x0; 2819 mc_reg[3] = CPC925_MC_LENGTH; 2820 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg)); 2821 } 2822 #else 2823 #define fixup_device_tree_maple() 2824 #define fixup_device_tree_maple_memory_controller() 2825 #endif 2826 2827 #ifdef CONFIG_PPC_CHRP 2828 /* 2829 * Pegasos and BriQ lacks the "ranges" property in the isa node 2830 * Pegasos needs decimal IRQ 14/15, not hexadecimal 2831 * Pegasos has the IDE configured in legacy mode, but advertised as native 2832 */ 2833 static void __init fixup_device_tree_chrp(void) 2834 { 2835 phandle ph; 2836 u32 prop[6]; 2837 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */ 2838 char *name; 2839 int rc; 2840 2841 name = "/pci@80000000/isa@c"; 2842 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2843 if (!PHANDLE_VALID(ph)) { 2844 name = "/pci@ff500000/isa@6"; 2845 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2846 rloc = 0x01003000; /* IO space; PCI device = 6 */ 2847 } 2848 if (PHANDLE_VALID(ph)) { 2849 rc = prom_getproplen(ph, "ranges"); 2850 if (rc == 0 || rc == PROM_ERROR) { 2851 prom_printf("Fixing up missing ISA range on Pegasos...\n"); 2852 2853 prop[0] = 0x1; 2854 prop[1] = 0x0; 2855 prop[2] = rloc; 2856 prop[3] = 0x0; 2857 prop[4] = 0x0; 2858 prop[5] = 0x00010000; 2859 prom_setprop(ph, name, "ranges", prop, sizeof(prop)); 2860 } 2861 } 2862 2863 name = "/pci@80000000/ide@C,1"; 2864 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2865 if (PHANDLE_VALID(ph)) { 2866 
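/* Pegasos advertises the IDE as native but wires it up in legacy
 * mode: force decimal IRQ 14 and clear the native-mode bits (0x5)
 * from the class-code programming interface, per the comment above.
 */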
prom_printf("Fixing up IDE interrupt on Pegasos...\n"); 2867 prop[0] = 14; 2868 prop[1] = 0x0; 2869 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32)); 2870 prom_printf("Fixing up IDE class-code on Pegasos...\n"); 2871 rc = prom_getprop(ph, "class-code", prop, sizeof(u32)); 2872 if (rc == sizeof(u32)) { 2873 prop[0] &= ~0x5; 2874 prom_setprop(ph, name, "class-code", prop, sizeof(u32)); 2875 } 2876 } 2877 } 2878 #else 2879 #define fixup_device_tree_chrp() 2880 #endif 2881 2882 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC) 2883 static void __init fixup_device_tree_pmac(void) 2884 { 2885 phandle u3, i2c, mpic; 2886 u32 u3_rev; 2887 u32 interrupts[2]; 2888 u32 parent; 2889 2890 /* Some G5s have a missing interrupt definition, fix it up here */ 2891 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000")); 2892 if (!PHANDLE_VALID(u3)) 2893 return; 2894 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000")); 2895 if (!PHANDLE_VALID(i2c)) 2896 return; 2897 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000")); 2898 if (!PHANDLE_VALID(mpic)) 2899 return; 2900 2901 /* check if proper rev of u3 */ 2902 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev)) 2903 == PROM_ERROR) 2904 return; 2905 if (u3_rev < 0x35 || u3_rev > 0x39) 2906 return; 2907 /* does it need fixup ? */ 2908 if (prom_getproplen(i2c, "interrupts") > 0) 2909 return; 2910 2911 prom_printf("fixing up bogus interrupts for u3 i2c...\n"); 2912 2913 /* interrupt on this revision of u3 is number 0 and level */ 2914 interrupts[0] = 0; 2915 interrupts[1] = 1; 2916 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts", 2917 &interrupts, sizeof(interrupts)); 2918 parent = (u32)mpic; 2919 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent", 2920 &parent, sizeof(parent)); 2921 } 2922 #else 2923 #define fixup_device_tree_pmac() 2924 #endif 2925 2926 #ifdef CONFIG_PPC_EFIKA 2927 /* 2928 * The MPC5200 FEC driver requires an phy-handle property to tell it how 2929 * to talk to the phy. If the phy-handle property is missing, then this 2930 * function is called to add the appropriate nodes and link it to the 2931 * ethernet node. 2932 */ 2933 static void __init fixup_device_tree_efika_add_phy(void) 2934 { 2935 u32 node; 2936 char prop[64]; 2937 int rv; 2938 2939 /* Check if /builtin/ethernet exists - bail if it doesn't */ 2940 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet")); 2941 if (!PHANDLE_VALID(node)) 2942 return; 2943 2944 /* Check if the phy-handle property exists - bail if it does */ 2945 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop)); 2946 if (!rv) 2947 return; 2948 2949 /* 2950 * At this point the ethernet device doesn't have a phy described. 
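* The fixups below create the missing nodes by feeding small Forth
* fragments to the firmware through the "interpret" client service.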
2951 * Now we need to add the missing phy node and linkage 2952 */ 2953 2954 /* Check for an MDIO bus node - if missing then create one */ 2955 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio")); 2956 if (!PHANDLE_VALID(node)) { 2957 prom_printf("Adding Ethernet MDIO node\n"); 2958 call_prom("interpret", 1, 1, 2959 " s\" /builtin\" find-device" 2960 " new-device" 2961 " 1 encode-int s\" #address-cells\" property" 2962 " 0 encode-int s\" #size-cells\" property" 2963 " s\" mdio\" device-name" 2964 " s\" fsl,mpc5200b-mdio\" encode-string" 2965 " s\" compatible\" property" 2966 " 0xf0003000 0x400 reg" 2967 " 0x2 encode-int" 2968 " 0x5 encode-int encode+" 2969 " 0x3 encode-int encode+" 2970 " s\" interrupts\" property" 2971 " finish-device"); 2972 }; 2973 2974 /* Check for a PHY device node - if missing then create one and 2975 * give it's phandle to the ethernet node */ 2976 node = call_prom("finddevice", 1, 1, 2977 ADDR("/builtin/mdio/ethernet-phy")); 2978 if (!PHANDLE_VALID(node)) { 2979 prom_printf("Adding Ethernet PHY node\n"); 2980 call_prom("interpret", 1, 1, 2981 " s\" /builtin/mdio\" find-device" 2982 " new-device" 2983 " s\" ethernet-phy\" device-name" 2984 " 0x10 encode-int s\" reg\" property" 2985 " my-self" 2986 " ihandle>phandle" 2987 " finish-device" 2988 " s\" /builtin/ethernet\" find-device" 2989 " encode-int" 2990 " s\" phy-handle\" property" 2991 " device-end"); 2992 } 2993 } 2994 2995 static void __init fixup_device_tree_efika(void) 2996 { 2997 int sound_irq[3] = { 2, 2, 0 }; 2998 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0, 2999 3,4,0, 3,5,0, 3,6,0, 3,7,0, 3000 3,8,0, 3,9,0, 3,10,0, 3,11,0, 3001 3,12,0, 3,13,0, 3,14,0, 3,15,0 }; 3002 u32 node; 3003 char prop[64]; 3004 int rv, len; 3005 3006 /* Check if we're really running on a EFIKA */ 3007 node = call_prom("finddevice", 1, 1, ADDR("/")); 3008 if (!PHANDLE_VALID(node)) 3009 return; 3010 3011 rv = prom_getprop(node, "model", prop, sizeof(prop)); 3012 if (rv == PROM_ERROR) 3013 return; 3014 if (prom_strcmp(prop, "EFIKA5K2")) 3015 return; 3016 3017 prom_printf("Applying EFIKA device tree fixups\n"); 3018 3019 /* Claiming to be 'chrp' is death */ 3020 node = call_prom("finddevice", 1, 1, ADDR("/")); 3021 rv = prom_getprop(node, "device_type", prop, sizeof(prop)); 3022 if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0)) 3023 prom_setprop(node, "/", "device_type", "efika", sizeof("efika")); 3024 3025 /* CODEGEN,description is exposed in /proc/cpuinfo so 3026 fix that too */ 3027 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop)); 3028 if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP"))) 3029 prom_setprop(node, "/", "CODEGEN,description", 3030 "Efika 5200B PowerPC System", 3031 sizeof("Efika 5200B PowerPC System")); 3032 3033 /* Fixup bestcomm interrupts property */ 3034 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm")); 3035 if (PHANDLE_VALID(node)) { 3036 len = prom_getproplen(node, "interrupts"); 3037 if (len == 12) { 3038 prom_printf("Fixing bestcomm interrupts property\n"); 3039 prom_setprop(node, "/builtin/bestcom", "interrupts", 3040 bcomm_irq, sizeof(bcomm_irq)); 3041 } 3042 } 3043 3044 /* Fixup sound interrupts property */ 3045 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound")); 3046 if (PHANDLE_VALID(node)) { 3047 rv = prom_getprop(node, "interrupts", prop, sizeof(prop)); 3048 if (rv == PROM_ERROR) { 3049 prom_printf("Adding sound interrupts property\n"); 3050 prom_setprop(node, "/builtin/sound", "interrupts", 3051 sound_irq, sizeof(sound_irq)); 3052 } 3053 } 
3054 3055 /* Make sure ethernet phy-handle property exists */ 3056 fixup_device_tree_efika_add_phy(); 3057 } 3058 #else 3059 #define fixup_device_tree_efika() 3060 #endif 3061 3062 #ifdef CONFIG_PPC_PASEMI_NEMO 3063 /* 3064 * CFE supplied on Nemo is broken in several ways, biggest 3065 * problem is that it reassigns ISA interrupts to unused mpic ints. 3066 * Add an interrupt-controller property for the io-bridge to use 3067 * and correct the ints so we can attach them to an irq_domain 3068 */ 3069 static void __init fixup_device_tree_pasemi(void) 3070 { 3071 u32 interrupts[2], parent, rval, val = 0; 3072 char *name, *pci_name; 3073 phandle iob, node; 3074 3075 /* Find the root pci node */ 3076 name = "/pxp@0,e0000000"; 3077 iob = call_prom("finddevice", 1, 1, ADDR(name)); 3078 if (!PHANDLE_VALID(iob)) 3079 return; 3080 3081 /* check if interrupt-controller node set yet */ 3082 if (prom_getproplen(iob, "interrupt-controller") !=PROM_ERROR) 3083 return; 3084 3085 prom_printf("adding interrupt-controller property for SB600...\n"); 3086 3087 prom_setprop(iob, name, "interrupt-controller", &val, 0); 3088 3089 pci_name = "/pxp@0,e0000000/pci@11"; 3090 node = call_prom("finddevice", 1, 1, ADDR(pci_name)); 3091 parent = ADDR(iob); 3092 3093 for( ; prom_next_node(&node); ) { 3094 /* scan each node for one with an interrupt */ 3095 if (!PHANDLE_VALID(node)) 3096 continue; 3097 3098 rval = prom_getproplen(node, "interrupts"); 3099 if (rval == 0 || rval == PROM_ERROR) 3100 continue; 3101 3102 prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts)); 3103 if ((interrupts[0] < 212) || (interrupts[0] > 222)) 3104 continue; 3105 3106 /* found a node, update both interrupts and interrupt-parent */ 3107 if ((interrupts[0] >= 212) && (interrupts[0] <= 215)) 3108 interrupts[0] -= 203; 3109 if ((interrupts[0] >= 216) && (interrupts[0] <= 220)) 3110 interrupts[0] -= 213; 3111 if (interrupts[0] == 221) 3112 interrupts[0] = 14; 3113 if (interrupts[0] == 222) 3114 interrupts[0] = 8; 3115 3116 prom_setprop(node, pci_name, "interrupts", interrupts, 3117 sizeof(interrupts)); 3118 prom_setprop(node, pci_name, "interrupt-parent", &parent, 3119 sizeof(parent)); 3120 } 3121 3122 /* 3123 * The io-bridge has device_type set to 'io-bridge' change it to 'isa' 3124 * so that generic isa-bridge code can add the SB600 and its on-board 3125 * peripherals. 3126 */ 3127 name = "/pxp@0,e0000000/io-bridge@0"; 3128 iob = call_prom("finddevice", 1, 1, ADDR(name)); 3129 if (!PHANDLE_VALID(iob)) 3130 return; 3131 3132 /* device_type is already set, just change it. 
*/ 3133 3134 prom_printf("Changing device_type of SB600 node...\n"); 3135 3136 prom_setprop(iob, name, "device_type", "isa", sizeof("isa")); 3137 } 3138 #else /* !CONFIG_PPC_PASEMI_NEMO */ 3139 static inline void fixup_device_tree_pasemi(void) { } 3140 #endif 3141 3142 static void __init fixup_device_tree(void) 3143 { 3144 fixup_device_tree_maple(); 3145 fixup_device_tree_maple_memory_controller(); 3146 fixup_device_tree_chrp(); 3147 fixup_device_tree_pmac(); 3148 fixup_device_tree_efika(); 3149 fixup_device_tree_pasemi(); 3150 } 3151 3152 static void __init prom_find_boot_cpu(void) 3153 { 3154 __be32 rval; 3155 ihandle prom_cpu; 3156 phandle cpu_pkg; 3157 3158 rval = 0; 3159 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0) 3160 return; 3161 prom_cpu = be32_to_cpu(rval); 3162 3163 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu); 3164 3165 if (!PHANDLE_VALID(cpu_pkg)) 3166 return; 3167 3168 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval)); 3169 prom.cpu = be32_to_cpu(rval); 3170 3171 prom_debug("Booting CPU hw index = %d\n", prom.cpu); 3172 } 3173 3174 static void __init prom_check_initrd(unsigned long r3, unsigned long r4) 3175 { 3176 #ifdef CONFIG_BLK_DEV_INITRD 3177 if (r3 && r4 && r4 != 0xdeadbeef) { 3178 __be64 val; 3179 3180 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3; 3181 prom_initrd_end = prom_initrd_start + r4; 3182 3183 val = cpu_to_be64(prom_initrd_start); 3184 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start", 3185 &val, sizeof(val)); 3186 val = cpu_to_be64(prom_initrd_end); 3187 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end", 3188 &val, sizeof(val)); 3189 3190 reserve_mem(prom_initrd_start, 3191 prom_initrd_end - prom_initrd_start); 3192 3193 prom_debug("initrd_start=0x%lx\n", prom_initrd_start); 3194 prom_debug("initrd_end=0x%lx\n", prom_initrd_end); 3195 } 3196 #endif /* CONFIG_BLK_DEV_INITRD */ 3197 } 3198 3199 #ifdef CONFIG_PPC64 3200 #ifdef CONFIG_RELOCATABLE 3201 static void reloc_toc(void) 3202 { 3203 } 3204 3205 static void unreloc_toc(void) 3206 { 3207 } 3208 #else 3209 static void __reloc_toc(unsigned long offset, unsigned long nr_entries) 3210 { 3211 unsigned long i; 3212 unsigned long *toc_entry; 3213 3214 /* Get the start of the TOC by using r2 directly. */ 3215 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry)); 3216 3217 for (i = 0; i < nr_entries; i++) { 3218 *toc_entry = *toc_entry + offset; 3219 toc_entry++; 3220 } 3221 } 3222 3223 static void reloc_toc(void) 3224 { 3225 unsigned long offset = reloc_offset(); 3226 unsigned long nr_entries = 3227 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); 3228 3229 __reloc_toc(offset, nr_entries); 3230 3231 mb(); 3232 } 3233 3234 static void unreloc_toc(void) 3235 { 3236 unsigned long offset = reloc_offset(); 3237 unsigned long nr_entries = 3238 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); 3239 3240 mb(); 3241 3242 __reloc_toc(-offset, nr_entries); 3243 } 3244 #endif 3245 #endif 3246 3247 #ifdef CONFIG_PPC_SVM 3248 /* 3249 * Perform the Enter Secure Mode ultracall. 3250 */ 3251 static int enter_secure_mode(unsigned long kbase, unsigned long fdt) 3252 { 3253 register unsigned long r3 asm("r3") = UV_ESM; 3254 register unsigned long r4 asm("r4") = kbase; 3255 register unsigned long r5 asm("r5") = fdt; 3256 3257 asm volatile("sc 2" : "+r"(r3) : "r"(r4), "r"(r5)); 3258 3259 return r3; 3260 } 3261 3262 /* 3263 * Call the Ultravisor to transfer us to secure memory if we have an ESM blob. 
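* If the ultracall fails, the boot is terminated through RTAS os-term
* rather than continuing as an ordinary (non-secure) guest.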
3264 */ 3265 static void setup_secure_guest(unsigned long kbase, unsigned long fdt) 3266 { 3267 int ret; 3268 3269 if (!prom_svm_enable) 3270 return; 3271 3272 /* Switch to secure mode. */ 3273 prom_printf("Switching to secure mode.\n"); 3274 3275 /* 3276 * The ultravisor will do an integrity check of the kernel image but we 3277 * relocated it so the check will fail. Restore the original image by 3278 * relocating it back to the kernel virtual base address. 3279 */ 3280 if (IS_ENABLED(CONFIG_RELOCATABLE)) 3281 relocate(KERNELBASE); 3282 3283 ret = enter_secure_mode(kbase, fdt); 3284 3285 /* Relocate the kernel again. */ 3286 if (IS_ENABLED(CONFIG_RELOCATABLE)) 3287 relocate(kbase); 3288 3289 if (ret != U_SUCCESS) { 3290 prom_printf("Returned %d from switching to secure mode.\n", ret); 3291 prom_rtas_os_term("Switch to secure mode failed.\n"); 3292 } 3293 } 3294 #else 3295 static void setup_secure_guest(unsigned long kbase, unsigned long fdt) 3296 { 3297 } 3298 #endif /* CONFIG_PPC_SVM */ 3299 3300 /* 3301 * We enter here early on, when the Open Firmware prom is still 3302 * handling exceptions and the MMU hash table for us. 3303 */ 3304 3305 unsigned long __init prom_init(unsigned long r3, unsigned long r4, 3306 unsigned long pp, 3307 unsigned long r6, unsigned long r7, 3308 unsigned long kbase) 3309 { 3310 unsigned long hdr; 3311 3312 #ifdef CONFIG_PPC32 3313 unsigned long offset = reloc_offset(); 3314 reloc_got2(offset); 3315 #else 3316 reloc_toc(); 3317 #endif 3318 3319 /* 3320 * First zero the BSS 3321 */ 3322 memset(&__bss_start, 0, __bss_stop - __bss_start); 3323 3324 /* 3325 * Init interface to Open Firmware, get some node references, 3326 * like /chosen 3327 */ 3328 prom_init_client_services(pp); 3329 3330 /* 3331 * See if this OF is old enough that we need to do explicit maps 3332 * and other workarounds 3333 */ 3334 prom_find_mmu(); 3335 3336 /* 3337 * Init prom stdout device 3338 */ 3339 prom_init_stdout(); 3340 3341 prom_printf("Preparing to boot %s", linux_banner); 3342 3343 /* 3344 * Get default machine type. At this point, we do not differentiate 3345 * between pSeries SMP and pSeries LPAR 3346 */ 3347 of_platform = prom_find_machine_type(); 3348 prom_printf("Detected machine type: %x\n", of_platform); 3349 3350 #ifndef CONFIG_NONSTATIC_KERNEL 3351 /* Bail if this is a kdump kernel. */ 3352 if (PHYSICAL_START > 0) 3353 prom_panic("Error: You can't boot a kdump kernel from OF!\n"); 3354 #endif 3355 3356 /* 3357 * Check for an initrd 3358 */ 3359 prom_check_initrd(r3, r4); 3360 3361 /* 3362 * Do early parsing of command line 3363 */ 3364 early_cmdline_parse(); 3365 3366 #ifdef CONFIG_PPC_PSERIES 3367 /* 3368 * On pSeries, inform the firmware about our capabilities 3369 */ 3370 if (of_platform == PLATFORM_PSERIES || 3371 of_platform == PLATFORM_PSERIES_LPAR) 3372 prom_send_capabilities(); 3373 #endif 3374 3375 /* 3376 * Copy the CPU hold code 3377 */ 3378 if (of_platform != PLATFORM_POWERMAC) 3379 copy_and_flush(0, kbase, 0x100, 0); 3380 3381 /* 3382 * Initialize memory management within prom_init 3383 */ 3384 prom_init_mem(); 3385 3386 /* 3387 * Determine which cpu is actually running right _now_ 3388 */ 3389 prom_find_boot_cpu(); 3390 3391 /* 3392 * Initialize display devices 3393 */ 3394 prom_check_displays(); 3395 3396 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__) 3397 /* 3398 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else 3399 * that uses the allocator, we need to make sure we get the top of memory 3400 * available for us here... 
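* (prom_initialize_tce_table() carves its tables out of the highest
* usable memory with alloc_down(), which is why it must run before
* the other allocations below.)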
3401 */ 3402 if (of_platform == PLATFORM_PSERIES) 3403 prom_initialize_tce_table(); 3404 #endif 3405 3406 /* 3407 * On non-powermacs, try to instantiate RTAS. PowerMacs don't 3408 * have a usable RTAS implementation. 3409 */ 3410 if (of_platform != PLATFORM_POWERMAC) 3411 prom_instantiate_rtas(); 3412 3413 #ifdef CONFIG_PPC64 3414 /* instantiate sml */ 3415 prom_instantiate_sml(); 3416 #endif 3417 3418 /* 3419 * On non-powermacs, put all CPUs in spin-loops. 3420 * 3421 * PowerMacs use a different mechanism to spin CPUs 3422 * 3423 * (This must be done after instanciating RTAS) 3424 */ 3425 if (of_platform != PLATFORM_POWERMAC) 3426 prom_hold_cpus(); 3427 3428 /* 3429 * Fill in some infos for use by the kernel later on 3430 */ 3431 if (prom_memory_limit) { 3432 __be64 val = cpu_to_be64(prom_memory_limit); 3433 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit", 3434 &val, sizeof(val)); 3435 } 3436 #ifdef CONFIG_PPC64 3437 if (prom_iommu_off) 3438 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off", 3439 NULL, 0); 3440 3441 if (prom_iommu_force_on) 3442 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on", 3443 NULL, 0); 3444 3445 if (prom_tce_alloc_start) { 3446 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start", 3447 &prom_tce_alloc_start, 3448 sizeof(prom_tce_alloc_start)); 3449 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end", 3450 &prom_tce_alloc_end, 3451 sizeof(prom_tce_alloc_end)); 3452 } 3453 #endif 3454 3455 /* 3456 * Fixup any known bugs in the device-tree 3457 */ 3458 fixup_device_tree(); 3459 3460 /* 3461 * Now finally create the flattened device-tree 3462 */ 3463 prom_printf("copying OF device tree...\n"); 3464 flatten_device_tree(); 3465 3466 /* 3467 * in case stdin is USB and still active on IBM machines... 3468 * Unfortunately quiesce crashes on some powermacs if we have 3469 * closed stdin already (in particular the powerbook 101). 3470 */ 3471 if (of_platform != PLATFORM_POWERMAC) 3472 prom_close_stdin(); 3473 3474 /* 3475 * Call OF "quiesce" method to shut down pending DMA's from 3476 * devices etc... 3477 */ 3478 prom_printf("Quiescing Open Firmware ...\n"); 3479 call_prom("quiesce", 0, 0); 3480 3481 /* 3482 * And finally, call the kernel passing it the flattened device 3483 * tree and NULL as r5, thus triggering the new entry point which 3484 * is common to us and kexec 3485 */ 3486 hdr = dt_header_start; 3487 3488 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase); 3489 prom_debug("->dt_header_start=0x%lx\n", hdr); 3490 3491 #ifdef CONFIG_PPC32 3492 reloc_got2(-offset); 3493 #else 3494 unreloc_toc(); 3495 #endif 3496 3497 /* Move to secure memory if we're supposed to be secure guests. */ 3498 setup_secure_guest(kbase, hdr); 3499 3500 __start(hdr, kbase, 0, 0, 0, 0, 0); 3501 3502 return 0; 3503 } 3504