1 /* 2 * Procedures for interfacing to Open Firmware. 3 * 4 * Paul Mackerras August 1996. 5 * Copyright (C) 1996-2005 Paul Mackerras. 6 * 7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. 8 * {engebret|bergner}@us.ibm.com 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public License 12 * as published by the Free Software Foundation; either version 13 * 2 of the License, or (at your option) any later version. 14 */ 15 16 #undef DEBUG_PROM 17 18 /* we cannot use FORTIFY as it brings in new symbols */ 19 #define __NO_FORTIFY 20 21 #include <stdarg.h> 22 #include <linux/kernel.h> 23 #include <linux/string.h> 24 #include <linux/init.h> 25 #include <linux/threads.h> 26 #include <linux/spinlock.h> 27 #include <linux/types.h> 28 #include <linux/pci.h> 29 #include <linux/proc_fs.h> 30 #include <linux/stringify.h> 31 #include <linux/delay.h> 32 #include <linux/initrd.h> 33 #include <linux/bitops.h> 34 #include <asm/prom.h> 35 #include <asm/rtas.h> 36 #include <asm/page.h> 37 #include <asm/processor.h> 38 #include <asm/irq.h> 39 #include <asm/io.h> 40 #include <asm/smp.h> 41 #include <asm/mmu.h> 42 #include <asm/pgtable.h> 43 #include <asm/iommu.h> 44 #include <asm/btext.h> 45 #include <asm/sections.h> 46 #include <asm/machdep.h> 47 #include <asm/opal.h> 48 #include <asm/asm-prototypes.h> 49 50 #include <linux/linux_logo.h> 51 52 /* 53 * Eventually bump that one up 54 */ 55 #define DEVTREE_CHUNK_SIZE 0x100000 56 57 /* 58 * This is the size of the local memory reserve map that gets copied 59 * into the boot params passed to the kernel. That size is totally 60 * flexible as the kernel just reads the list until it encounters an 61 * entry with size 0, so it can be changed without breaking binary 62 * compatibility 63 */ 64 #define MEM_RESERVE_MAP_SIZE 8 65 66 /* 67 * prom_init() is called very early on, before the kernel text 68 * and data have been mapped to KERNELBASE. At this point the code 69 * is running at whatever address it has been loaded at. 70 * On ppc32 we compile with -mrelocatable, which means that references 71 * to extern and static variables get relocated automatically. 72 * ppc64 objects are always relocatable, we just need to relocate the 73 * TOC. 74 * 75 * Because OF may have mapped I/O devices into the area starting at 76 * KERNELBASE, particularly on CHRP machines, we can't safely call 77 * OF once the kernel has been mapped to KERNELBASE. Therefore all 78 * OF calls must be done within prom_init(). 79 * 80 * ADDR is used in calls to call_prom. The 4th and following 81 * arguments to call_prom should be 32-bit values. 82 * On ppc64, 64 bit values are truncated to 32 bits (and 83 * fortunately don't get interpreted as two arguments). 84 */ 85 #define ADDR(x) (u32)(unsigned long)(x) 86 87 #ifdef CONFIG_PPC64 88 #define OF_WORKAROUNDS 0 89 #else 90 #define OF_WORKAROUNDS of_workarounds 91 int of_workarounds; 92 #endif 93 94 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */ 95 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */ 96 97 #define PROM_BUG() do { \ 98 prom_printf("kernel BUG at %s line 0x%x!\n", \ 99 __FILE__, __LINE__); \ 100 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \ 101 } while (0) 102 103 #ifdef DEBUG_PROM 104 #define prom_debug(x...) prom_printf(x) 105 #else 106 #define prom_debug(x...) do { } while (0) 107 #endif 108 109 110 typedef u32 prom_arg_t; 111 112 struct prom_args { 113 __be32 service; 114 __be32 nargs; 115 __be32 nret; 116 __be32 args[10]; 117 }; 118 119 struct prom_t { 120 ihandle root; 121 phandle chosen; 122 int cpu; 123 ihandle stdout; 124 ihandle mmumap; 125 ihandle memory; 126 }; 127 128 struct mem_map_entry { 129 __be64 base; 130 __be64 size; 131 }; 132 133 typedef __be32 cell_t; 134 135 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5, 136 unsigned long r6, unsigned long r7, unsigned long r8, 137 unsigned long r9); 138 139 #ifdef CONFIG_PPC64 140 extern int enter_prom(struct prom_args *args, unsigned long entry); 141 #else 142 static inline int enter_prom(struct prom_args *args, unsigned long entry) 143 { 144 return ((int (*)(struct prom_args *))entry)(args); 145 } 146 #endif 147 148 extern void copy_and_flush(unsigned long dest, unsigned long src, 149 unsigned long size, unsigned long offset); 150 151 /* prom structure */ 152 static struct prom_t __initdata prom; 153 154 static unsigned long prom_entry __initdata; 155 156 #define PROM_SCRATCH_SIZE 256 157 158 static char __initdata of_stdout_device[256]; 159 static char __initdata prom_scratch[PROM_SCRATCH_SIZE]; 160 161 static unsigned long __initdata dt_header_start; 162 static unsigned long __initdata dt_struct_start, dt_struct_end; 163 static unsigned long __initdata dt_string_start, dt_string_end; 164 165 static unsigned long __initdata prom_initrd_start, prom_initrd_end; 166 167 #ifdef CONFIG_PPC64 168 static int __initdata prom_iommu_force_on; 169 static int __initdata prom_iommu_off; 170 static unsigned long __initdata prom_tce_alloc_start; 171 static unsigned long __initdata prom_tce_alloc_end; 172 #endif 173 174 static bool prom_radix_disable __initdata = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT); 175 176 struct platform_support { 177 bool hash_mmu; 178 bool radix_mmu; 179 bool radix_gtse; 180 bool xive; 181 }; 182 183 /* Platforms codes are now obsolete in the kernel. Now only used within this 184 * file and ultimately gone too. Feel free to change them if you need, they 185 * are not shared with anything outside of this file anymore 186 */ 187 #define PLATFORM_PSERIES 0x0100 188 #define PLATFORM_PSERIES_LPAR 0x0101 189 #define PLATFORM_LPAR 0x0001 190 #define PLATFORM_POWERMAC 0x0400 191 #define PLATFORM_GENERIC 0x0500 192 #define PLATFORM_OPAL 0x0600 193 194 static int __initdata of_platform; 195 196 static char __initdata prom_cmd_line[COMMAND_LINE_SIZE]; 197 198 static unsigned long __initdata prom_memory_limit; 199 200 static unsigned long __initdata alloc_top; 201 static unsigned long __initdata alloc_top_high; 202 static unsigned long __initdata alloc_bottom; 203 static unsigned long __initdata rmo_top; 204 static unsigned long __initdata ram_top; 205 206 static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE]; 207 static int __initdata mem_reserve_cnt; 208 209 static cell_t __initdata regbuf[1024]; 210 211 static bool rtas_has_query_cpu_stopped; 212 213 214 /* 215 * Error results ... some OF calls will return "-1" on error, some 216 * will return 0, some will return either. To simplify, here are 217 * macros to use with any ihandle or phandle return value to check if 218 * it is valid 219 */ 220 221 #define PROM_ERROR (-1u) 222 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR) 223 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR) 224 225 226 /* This is the one and *ONLY* place where we actually call open 227 * firmware. 228 */ 229 230 static int __init call_prom(const char *service, int nargs, int nret, ...) 231 { 232 int i; 233 struct prom_args args; 234 va_list list; 235 236 args.service = cpu_to_be32(ADDR(service)); 237 args.nargs = cpu_to_be32(nargs); 238 args.nret = cpu_to_be32(nret); 239 240 va_start(list, nret); 241 for (i = 0; i < nargs; i++) 242 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t)); 243 va_end(list); 244 245 for (i = 0; i < nret; i++) 246 args.args[nargs+i] = 0; 247 248 if (enter_prom(&args, prom_entry) < 0) 249 return PROM_ERROR; 250 251 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0; 252 } 253 254 static int __init call_prom_ret(const char *service, int nargs, int nret, 255 prom_arg_t *rets, ...) 256 { 257 int i; 258 struct prom_args args; 259 va_list list; 260 261 args.service = cpu_to_be32(ADDR(service)); 262 args.nargs = cpu_to_be32(nargs); 263 args.nret = cpu_to_be32(nret); 264 265 va_start(list, rets); 266 for (i = 0; i < nargs; i++) 267 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t)); 268 va_end(list); 269 270 for (i = 0; i < nret; i++) 271 args.args[nargs+i] = 0; 272 273 if (enter_prom(&args, prom_entry) < 0) 274 return PROM_ERROR; 275 276 if (rets != NULL) 277 for (i = 1; i < nret; ++i) 278 rets[i-1] = be32_to_cpu(args.args[nargs+i]); 279 280 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0; 281 } 282 283 284 static void __init prom_print(const char *msg) 285 { 286 const char *p, *q; 287 288 if (prom.stdout == 0) 289 return; 290 291 for (p = msg; *p != 0; p = q) { 292 for (q = p; *q != 0 && *q != '\n'; ++q) 293 ; 294 if (q > p) 295 call_prom("write", 3, 1, prom.stdout, p, q - p); 296 if (*q == 0) 297 break; 298 ++q; 299 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2); 300 } 301 } 302 303 304 /* 305 * Both prom_print_hex & prom_print_dec takes an unsigned long as input so that 306 * we do not need __udivdi3 or __umoddi3 on 32bits. 307 */ 308 static void __init prom_print_hex(unsigned long val) 309 { 310 int i, nibbles = sizeof(val)*2; 311 char buf[sizeof(val)*2+1]; 312 313 for (i = nibbles-1; i >= 0; i--) { 314 buf[i] = (val & 0xf) + '0'; 315 if (buf[i] > '9') 316 buf[i] += ('a'-'0'-10); 317 val >>= 4; 318 } 319 buf[nibbles] = '\0'; 320 call_prom("write", 3, 1, prom.stdout, buf, nibbles); 321 } 322 323 /* max number of decimal digits in an unsigned long */ 324 #define UL_DIGITS 21 325 static void __init prom_print_dec(unsigned long val) 326 { 327 int i, size; 328 char buf[UL_DIGITS+1]; 329 330 for (i = UL_DIGITS-1; i >= 0; i--) { 331 buf[i] = (val % 10) + '0'; 332 val = val/10; 333 if (val == 0) 334 break; 335 } 336 /* shift stuff down */ 337 size = UL_DIGITS - i; 338 call_prom("write", 3, 1, prom.stdout, buf+i, size); 339 } 340 341 __printf(1, 2) 342 static void __init prom_printf(const char *format, ...) 343 { 344 const char *p, *q, *s; 345 va_list args; 346 unsigned long v; 347 long vs; 348 int n = 0; 349 350 va_start(args, format); 351 for (p = format; *p != 0; p = q) { 352 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q) 353 ; 354 if (q > p) 355 call_prom("write", 3, 1, prom.stdout, p, q - p); 356 if (*q == 0) 357 break; 358 if (*q == '\n') { 359 ++q; 360 call_prom("write", 3, 1, prom.stdout, 361 ADDR("\r\n"), 2); 362 continue; 363 } 364 ++q; 365 if (*q == 0) 366 break; 367 while (*q == 'l') { 368 ++q; 369 ++n; 370 } 371 switch (*q) { 372 case 's': 373 ++q; 374 s = va_arg(args, const char *); 375 prom_print(s); 376 break; 377 case 'x': 378 ++q; 379 switch (n) { 380 case 0: 381 v = va_arg(args, unsigned int); 382 break; 383 case 1: 384 v = va_arg(args, unsigned long); 385 break; 386 case 2: 387 default: 388 v = va_arg(args, unsigned long long); 389 break; 390 } 391 prom_print_hex(v); 392 break; 393 case 'u': 394 ++q; 395 switch (n) { 396 case 0: 397 v = va_arg(args, unsigned int); 398 break; 399 case 1: 400 v = va_arg(args, unsigned long); 401 break; 402 case 2: 403 default: 404 v = va_arg(args, unsigned long long); 405 break; 406 } 407 prom_print_dec(v); 408 break; 409 case 'd': 410 ++q; 411 switch (n) { 412 case 0: 413 vs = va_arg(args, int); 414 break; 415 case 1: 416 vs = va_arg(args, long); 417 break; 418 case 2: 419 default: 420 vs = va_arg(args, long long); 421 break; 422 } 423 if (vs < 0) { 424 prom_print("-"); 425 vs = -vs; 426 } 427 prom_print_dec(vs); 428 break; 429 } 430 } 431 va_end(args); 432 } 433 434 435 static unsigned int __init prom_claim(unsigned long virt, unsigned long size, 436 unsigned long align) 437 { 438 439 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) { 440 /* 441 * Old OF requires we claim physical and virtual separately 442 * and then map explicitly (assuming virtual mode) 443 */ 444 int ret; 445 prom_arg_t result; 446 447 ret = call_prom_ret("call-method", 5, 2, &result, 448 ADDR("claim"), prom.memory, 449 align, size, virt); 450 if (ret != 0 || result == -1) 451 return -1; 452 ret = call_prom_ret("call-method", 5, 2, &result, 453 ADDR("claim"), prom.mmumap, 454 align, size, virt); 455 if (ret != 0) { 456 call_prom("call-method", 4, 1, ADDR("release"), 457 prom.memory, size, virt); 458 return -1; 459 } 460 /* the 0x12 is M (coherence) + PP == read/write */ 461 call_prom("call-method", 6, 1, 462 ADDR("map"), prom.mmumap, 0x12, size, virt, virt); 463 return virt; 464 } 465 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size, 466 (prom_arg_t)align); 467 } 468 469 static void __init __attribute__((noreturn)) prom_panic(const char *reason) 470 { 471 prom_print(reason); 472 /* Do not call exit because it clears the screen on pmac 473 * it also causes some sort of double-fault on early pmacs */ 474 if (of_platform == PLATFORM_POWERMAC) 475 asm("trap\n"); 476 477 /* ToDo: should put up an SRC here on pSeries */ 478 call_prom("exit", 0, 0); 479 480 for (;;) /* should never get here */ 481 ; 482 } 483 484 485 static int __init prom_next_node(phandle *nodep) 486 { 487 phandle node; 488 489 if ((node = *nodep) != 0 490 && (*nodep = call_prom("child", 1, 1, node)) != 0) 491 return 1; 492 if ((*nodep = call_prom("peer", 1, 1, node)) != 0) 493 return 1; 494 for (;;) { 495 if ((node = call_prom("parent", 1, 1, node)) == 0) 496 return 0; 497 if ((*nodep = call_prom("peer", 1, 1, node)) != 0) 498 return 1; 499 } 500 } 501 502 static inline int prom_getprop(phandle node, const char *pname, 503 void *value, size_t valuelen) 504 { 505 return call_prom("getprop", 4, 1, node, ADDR(pname), 506 (u32)(unsigned long) value, (u32) valuelen); 507 } 508 509 static inline int prom_getproplen(phandle node, const char *pname) 510 { 511 return call_prom("getproplen", 2, 1, node, ADDR(pname)); 512 } 513 514 static void add_string(char **str, const char *q) 515 { 516 char *p = *str; 517 518 while (*q) 519 *p++ = *q++; 520 *p++ = ' '; 521 *str = p; 522 } 523 524 static char *tohex(unsigned int x) 525 { 526 static char digits[] = "0123456789abcdef"; 527 static char result[9]; 528 int i; 529 530 result[8] = 0; 531 i = 8; 532 do { 533 --i; 534 result[i] = digits[x & 0xf]; 535 x >>= 4; 536 } while (x != 0 && i > 0); 537 return &result[i]; 538 } 539 540 static int __init prom_setprop(phandle node, const char *nodename, 541 const char *pname, void *value, size_t valuelen) 542 { 543 char cmd[256], *p; 544 545 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL)) 546 return call_prom("setprop", 4, 1, node, ADDR(pname), 547 (u32)(unsigned long) value, (u32) valuelen); 548 549 /* gah... setprop doesn't work on longtrail, have to use interpret */ 550 p = cmd; 551 add_string(&p, "dev"); 552 add_string(&p, nodename); 553 add_string(&p, tohex((u32)(unsigned long) value)); 554 add_string(&p, tohex(valuelen)); 555 add_string(&p, tohex(ADDR(pname))); 556 add_string(&p, tohex(strlen(pname))); 557 add_string(&p, "property"); 558 *p = 0; 559 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd); 560 } 561 562 /* We can't use the standard versions because of relocation headaches. */ 563 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \ 564 || ('a' <= (c) && (c) <= 'f') \ 565 || ('A' <= (c) && (c) <= 'F')) 566 567 #define isdigit(c) ('0' <= (c) && (c) <= '9') 568 #define islower(c) ('a' <= (c) && (c) <= 'z') 569 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c)) 570 571 static unsigned long prom_strtoul(const char *cp, const char **endp) 572 { 573 unsigned long result = 0, base = 10, value; 574 575 if (*cp == '0') { 576 base = 8; 577 cp++; 578 if (toupper(*cp) == 'X') { 579 cp++; 580 base = 16; 581 } 582 } 583 584 while (isxdigit(*cp) && 585 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) { 586 result = result * base + value; 587 cp++; 588 } 589 590 if (endp) 591 *endp = cp; 592 593 return result; 594 } 595 596 static unsigned long prom_memparse(const char *ptr, const char **retptr) 597 { 598 unsigned long ret = prom_strtoul(ptr, retptr); 599 int shift = 0; 600 601 /* 602 * We can't use a switch here because GCC *may* generate a 603 * jump table which won't work, because we're not running at 604 * the address we're linked at. 605 */ 606 if ('G' == **retptr || 'g' == **retptr) 607 shift = 30; 608 609 if ('M' == **retptr || 'm' == **retptr) 610 shift = 20; 611 612 if ('K' == **retptr || 'k' == **retptr) 613 shift = 10; 614 615 if (shift) { 616 ret <<= shift; 617 (*retptr)++; 618 } 619 620 return ret; 621 } 622 623 /* 624 * Early parsing of the command line passed to the kernel, used for 625 * "mem=x" and the options that affect the iommu 626 */ 627 static void __init early_cmdline_parse(void) 628 { 629 const char *opt; 630 631 char *p; 632 int l = 0; 633 634 prom_cmd_line[0] = 0; 635 p = prom_cmd_line; 636 if ((long)prom.chosen > 0) 637 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1); 638 #ifdef CONFIG_CMDLINE 639 if (l <= 0 || p[0] == '\0') /* dbl check */ 640 strlcpy(prom_cmd_line, 641 CONFIG_CMDLINE, sizeof(prom_cmd_line)); 642 #endif /* CONFIG_CMDLINE */ 643 prom_printf("command line: %s\n", prom_cmd_line); 644 645 #ifdef CONFIG_PPC64 646 opt = strstr(prom_cmd_line, "iommu="); 647 if (opt) { 648 prom_printf("iommu opt is: %s\n", opt); 649 opt += 6; 650 while (*opt && *opt == ' ') 651 opt++; 652 if (!strncmp(opt, "off", 3)) 653 prom_iommu_off = 1; 654 else if (!strncmp(opt, "force", 5)) 655 prom_iommu_force_on = 1; 656 } 657 #endif 658 opt = strstr(prom_cmd_line, "mem="); 659 if (opt) { 660 opt += 4; 661 prom_memory_limit = prom_memparse(opt, (const char **)&opt); 662 #ifdef CONFIG_PPC64 663 /* Align to 16 MB == size of ppc64 large page */ 664 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000); 665 #endif 666 } 667 668 opt = strstr(prom_cmd_line, "disable_radix"); 669 if (opt) { 670 opt += 13; 671 if (*opt && *opt == '=') { 672 bool val; 673 674 if (kstrtobool(++opt, &val)) 675 prom_radix_disable = false; 676 else 677 prom_radix_disable = val; 678 } else 679 prom_radix_disable = true; 680 } 681 if (prom_radix_disable) 682 prom_debug("Radix disabled from cmdline\n"); 683 } 684 685 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) 686 /* 687 * The architecture vector has an array of PVR mask/value pairs, 688 * followed by # option vectors - 1, followed by the option vectors. 689 * 690 * See prom.h for the definition of the bits specified in the 691 * architecture vector. 692 */ 693 694 /* Firmware expects the value to be n - 1, where n is the # of vectors */ 695 #define NUM_VECTORS(n) ((n) - 1) 696 697 /* 698 * Firmware expects 1 + n - 2, where n is the length of the option vector in 699 * bytes. The 1 accounts for the length byte itself, the - 2 .. ? 700 */ 701 #define VECTOR_LENGTH(n) (1 + (n) - 2) 702 703 struct option_vector1 { 704 u8 byte1; 705 u8 arch_versions; 706 u8 arch_versions3; 707 } __packed; 708 709 struct option_vector2 { 710 u8 byte1; 711 __be16 reserved; 712 __be32 real_base; 713 __be32 real_size; 714 __be32 virt_base; 715 __be32 virt_size; 716 __be32 load_base; 717 __be32 min_rma; 718 __be32 min_load; 719 u8 min_rma_percent; 720 u8 max_pft_size; 721 } __packed; 722 723 struct option_vector3 { 724 u8 byte1; 725 u8 byte2; 726 } __packed; 727 728 struct option_vector4 { 729 u8 byte1; 730 u8 min_vp_cap; 731 } __packed; 732 733 struct option_vector5 { 734 u8 byte1; 735 u8 byte2; 736 u8 byte3; 737 u8 cmo; 738 u8 associativity; 739 u8 bin_opts; 740 u8 micro_checkpoint; 741 u8 reserved0; 742 __be32 max_cpus; 743 __be16 papr_level; 744 __be16 reserved1; 745 u8 platform_facilities; 746 u8 reserved2; 747 __be16 reserved3; 748 u8 subprocessors; 749 u8 byte22; 750 u8 intarch; 751 u8 mmu; 752 u8 hash_ext; 753 u8 radix_ext; 754 } __packed; 755 756 struct option_vector6 { 757 u8 reserved; 758 u8 secondary_pteg; 759 u8 os_name; 760 } __packed; 761 762 struct ibm_arch_vec { 763 struct { u32 mask, val; } pvrs[12]; 764 765 u8 num_vectors; 766 767 u8 vec1_len; 768 struct option_vector1 vec1; 769 770 u8 vec2_len; 771 struct option_vector2 vec2; 772 773 u8 vec3_len; 774 struct option_vector3 vec3; 775 776 u8 vec4_len; 777 struct option_vector4 vec4; 778 779 u8 vec5_len; 780 struct option_vector5 vec5; 781 782 u8 vec6_len; 783 struct option_vector6 vec6; 784 } __packed; 785 786 struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = { 787 .pvrs = { 788 { 789 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */ 790 .val = cpu_to_be32(0x003a0000), 791 }, 792 { 793 .mask = cpu_to_be32(0xffff0000), /* POWER6 */ 794 .val = cpu_to_be32(0x003e0000), 795 }, 796 { 797 .mask = cpu_to_be32(0xffff0000), /* POWER7 */ 798 .val = cpu_to_be32(0x003f0000), 799 }, 800 { 801 .mask = cpu_to_be32(0xffff0000), /* POWER8E */ 802 .val = cpu_to_be32(0x004b0000), 803 }, 804 { 805 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */ 806 .val = cpu_to_be32(0x004c0000), 807 }, 808 { 809 .mask = cpu_to_be32(0xffff0000), /* POWER8 */ 810 .val = cpu_to_be32(0x004d0000), 811 }, 812 { 813 .mask = cpu_to_be32(0xffff0000), /* POWER9 */ 814 .val = cpu_to_be32(0x004e0000), 815 }, 816 { 817 .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */ 818 .val = cpu_to_be32(0x0f000005), 819 }, 820 { 821 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */ 822 .val = cpu_to_be32(0x0f000004), 823 }, 824 { 825 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */ 826 .val = cpu_to_be32(0x0f000003), 827 }, 828 { 829 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */ 830 .val = cpu_to_be32(0x0f000002), 831 }, 832 { 833 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */ 834 .val = cpu_to_be32(0x0f000001), 835 }, 836 }, 837 838 .num_vectors = NUM_VECTORS(6), 839 840 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)), 841 .vec1 = { 842 .byte1 = 0, 843 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 | 844 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07, 845 .arch_versions3 = OV1_PPC_3_00, 846 }, 847 848 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)), 849 /* option vector 2: Open Firmware options supported */ 850 .vec2 = { 851 .byte1 = OV2_REAL_MODE, 852 .reserved = 0, 853 .real_base = cpu_to_be32(0xffffffff), 854 .real_size = cpu_to_be32(0xffffffff), 855 .virt_base = cpu_to_be32(0xffffffff), 856 .virt_size = cpu_to_be32(0xffffffff), 857 .load_base = cpu_to_be32(0xffffffff), 858 .min_rma = cpu_to_be32(512), /* 512MB min RMA */ 859 .min_load = cpu_to_be32(0xffffffff), /* full client load */ 860 .min_rma_percent = 0, /* min RMA percentage of total RAM */ 861 .max_pft_size = 48, /* max log_2(hash table size) */ 862 }, 863 864 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)), 865 /* option vector 3: processor options supported */ 866 .vec3 = { 867 .byte1 = 0, /* don't ignore, don't halt */ 868 .byte2 = OV3_FP | OV3_VMX | OV3_DFP, 869 }, 870 871 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)), 872 /* option vector 4: IBM PAPR implementation */ 873 .vec4 = { 874 .byte1 = 0, /* don't halt */ 875 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */ 876 }, 877 878 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)), 879 /* option vector 5: PAPR/OF options */ 880 .vec5 = { 881 .byte1 = 0, /* don't ignore, don't halt */ 882 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) | 883 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) | 884 #ifdef CONFIG_PCI_MSI 885 /* PCIe/MSI support. Without MSI full PCIe is not supported */ 886 OV5_FEAT(OV5_MSI), 887 #else 888 0, 889 #endif 890 .byte3 = 0, 891 .cmo = 892 #ifdef CONFIG_PPC_SMLPAR 893 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO), 894 #else 895 0, 896 #endif 897 .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN), 898 .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT), 899 .micro_checkpoint = 0, 900 .reserved0 = 0, 901 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */ 902 .papr_level = 0, 903 .reserved1 = 0, 904 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842), 905 .reserved2 = 0, 906 .reserved3 = 0, 907 .subprocessors = 1, 908 .byte22 = OV5_FEAT(OV5_DRMEM_V2), 909 .intarch = 0, 910 .mmu = 0, 911 .hash_ext = 0, 912 .radix_ext = 0, 913 }, 914 915 /* option vector 6: IBM PAPR hints */ 916 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)), 917 .vec6 = { 918 .reserved = 0, 919 .secondary_pteg = 0, 920 .os_name = OV6_LINUX, 921 }, 922 }; 923 924 /* Old method - ELF header with PT_NOTE sections only works on BE */ 925 #ifdef __BIG_ENDIAN__ 926 static struct fake_elf { 927 Elf32_Ehdr elfhdr; 928 Elf32_Phdr phdr[2]; 929 struct chrpnote { 930 u32 namesz; 931 u32 descsz; 932 u32 type; 933 char name[8]; /* "PowerPC" */ 934 struct chrpdesc { 935 u32 real_mode; 936 u32 real_base; 937 u32 real_size; 938 u32 virt_base; 939 u32 virt_size; 940 u32 load_base; 941 } chrpdesc; 942 } chrpnote; 943 struct rpanote { 944 u32 namesz; 945 u32 descsz; 946 u32 type; 947 char name[24]; /* "IBM,RPA-Client-Config" */ 948 struct rpadesc { 949 u32 lpar_affinity; 950 u32 min_rmo_size; 951 u32 min_rmo_percent; 952 u32 max_pft_size; 953 u32 splpar; 954 u32 min_load; 955 u32 new_mem_def; 956 u32 ignore_me; 957 } rpadesc; 958 } rpanote; 959 } fake_elf = { 960 .elfhdr = { 961 .e_ident = { 0x7f, 'E', 'L', 'F', 962 ELFCLASS32, ELFDATA2MSB, EV_CURRENT }, 963 .e_type = ET_EXEC, /* yeah right */ 964 .e_machine = EM_PPC, 965 .e_version = EV_CURRENT, 966 .e_phoff = offsetof(struct fake_elf, phdr), 967 .e_phentsize = sizeof(Elf32_Phdr), 968 .e_phnum = 2 969 }, 970 .phdr = { 971 [0] = { 972 .p_type = PT_NOTE, 973 .p_offset = offsetof(struct fake_elf, chrpnote), 974 .p_filesz = sizeof(struct chrpnote) 975 }, [1] = { 976 .p_type = PT_NOTE, 977 .p_offset = offsetof(struct fake_elf, rpanote), 978 .p_filesz = sizeof(struct rpanote) 979 } 980 }, 981 .chrpnote = { 982 .namesz = sizeof("PowerPC"), 983 .descsz = sizeof(struct chrpdesc), 984 .type = 0x1275, 985 .name = "PowerPC", 986 .chrpdesc = { 987 .real_mode = ~0U, /* ~0 means "don't care" */ 988 .real_base = ~0U, 989 .real_size = ~0U, 990 .virt_base = ~0U, 991 .virt_size = ~0U, 992 .load_base = ~0U 993 }, 994 }, 995 .rpanote = { 996 .namesz = sizeof("IBM,RPA-Client-Config"), 997 .descsz = sizeof(struct rpadesc), 998 .type = 0x12759999, 999 .name = "IBM,RPA-Client-Config", 1000 .rpadesc = { 1001 .lpar_affinity = 0, 1002 .min_rmo_size = 64, /* in megabytes */ 1003 .min_rmo_percent = 0, 1004 .max_pft_size = 48, /* 2^48 bytes max PFT size */ 1005 .splpar = 1, 1006 .min_load = ~0U, 1007 .new_mem_def = 0 1008 } 1009 } 1010 }; 1011 #endif /* __BIG_ENDIAN__ */ 1012 1013 static int __init prom_count_smt_threads(void) 1014 { 1015 phandle node; 1016 char type[64]; 1017 unsigned int plen; 1018 1019 /* Pick up th first CPU node we can find */ 1020 for (node = 0; prom_next_node(&node); ) { 1021 type[0] = 0; 1022 prom_getprop(node, "device_type", type, sizeof(type)); 1023 1024 if (strcmp(type, "cpu")) 1025 continue; 1026 /* 1027 * There is an entry for each smt thread, each entry being 1028 * 4 bytes long. All cpus should have the same number of 1029 * smt threads, so return after finding the first. 1030 */ 1031 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s"); 1032 if (plen == PROM_ERROR) 1033 break; 1034 plen >>= 2; 1035 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen); 1036 1037 /* Sanity check */ 1038 if (plen < 1 || plen > 64) { 1039 prom_printf("Threads per core %lu out of bounds, assuming 1\n", 1040 (unsigned long)plen); 1041 return 1; 1042 } 1043 return plen; 1044 } 1045 prom_debug("No threads found, assuming 1 per core\n"); 1046 1047 return 1; 1048 1049 } 1050 1051 static void __init prom_parse_mmu_model(u8 val, 1052 struct platform_support *support) 1053 { 1054 switch (val) { 1055 case OV5_FEAT(OV5_MMU_DYNAMIC): 1056 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */ 1057 prom_debug("MMU - either supported\n"); 1058 support->radix_mmu = !prom_radix_disable; 1059 support->hash_mmu = true; 1060 break; 1061 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */ 1062 prom_debug("MMU - radix only\n"); 1063 if (prom_radix_disable) { 1064 /* 1065 * If we __have__ to do radix, we're better off ignoring 1066 * the command line rather than not booting. 1067 */ 1068 prom_printf("WARNING: Ignoring cmdline option disable_radix\n"); 1069 } 1070 support->radix_mmu = true; 1071 break; 1072 case OV5_FEAT(OV5_MMU_HASH): 1073 prom_debug("MMU - hash only\n"); 1074 support->hash_mmu = true; 1075 break; 1076 default: 1077 prom_debug("Unknown mmu support option: 0x%x\n", val); 1078 break; 1079 } 1080 } 1081 1082 static void __init prom_parse_xive_model(u8 val, 1083 struct platform_support *support) 1084 { 1085 switch (val) { 1086 case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */ 1087 prom_debug("XIVE - either mode supported\n"); 1088 support->xive = true; 1089 break; 1090 case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */ 1091 prom_debug("XIVE - exploitation mode supported\n"); 1092 support->xive = true; 1093 break; 1094 case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */ 1095 prom_debug("XIVE - legacy mode supported\n"); 1096 break; 1097 default: 1098 prom_debug("Unknown xive support option: 0x%x\n", val); 1099 break; 1100 } 1101 } 1102 1103 static void __init prom_parse_platform_support(u8 index, u8 val, 1104 struct platform_support *support) 1105 { 1106 switch (index) { 1107 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */ 1108 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support); 1109 break; 1110 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */ 1111 if (val & OV5_FEAT(OV5_RADIX_GTSE)) { 1112 prom_debug("Radix - GTSE supported\n"); 1113 support->radix_gtse = true; 1114 } 1115 break; 1116 case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */ 1117 prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT), 1118 support); 1119 break; 1120 } 1121 } 1122 1123 static void __init prom_check_platform_support(void) 1124 { 1125 struct platform_support supported = { 1126 .hash_mmu = false, 1127 .radix_mmu = false, 1128 .radix_gtse = false, 1129 .xive = false 1130 }; 1131 int prop_len = prom_getproplen(prom.chosen, 1132 "ibm,arch-vec-5-platform-support"); 1133 if (prop_len > 1) { 1134 int i; 1135 u8 vec[prop_len]; 1136 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n", 1137 prop_len); 1138 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", 1139 &vec, sizeof(vec)); 1140 for (i = 0; i < prop_len; i += 2) { 1141 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2 1142 , vec[i] 1143 , vec[i + 1]); 1144 prom_parse_platform_support(vec[i], vec[i + 1], 1145 &supported); 1146 } 1147 } 1148 1149 if (supported.radix_mmu && supported.radix_gtse && 1150 IS_ENABLED(CONFIG_PPC_RADIX_MMU)) { 1151 /* Radix preferred - but we require GTSE for now */ 1152 prom_debug("Asking for radix with GTSE\n"); 1153 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX); 1154 ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE); 1155 } else if (supported.hash_mmu) { 1156 /* Default to hash mmu (if we can) */ 1157 prom_debug("Asking for hash\n"); 1158 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH); 1159 } else { 1160 /* We're probably on a legacy hypervisor */ 1161 prom_debug("Assuming legacy hash support\n"); 1162 } 1163 1164 if (supported.xive) { 1165 prom_debug("Asking for XIVE\n"); 1166 ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT); 1167 } 1168 } 1169 1170 static void __init prom_send_capabilities(void) 1171 { 1172 ihandle root; 1173 prom_arg_t ret; 1174 u32 cores; 1175 1176 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */ 1177 prom_check_platform_support(); 1178 1179 root = call_prom("open", 1, 1, ADDR("/")); 1180 if (root != 0) { 1181 /* We need to tell the FW about the number of cores we support. 1182 * 1183 * To do that, we count the number of threads on the first core 1184 * (we assume this is the same for all cores) and use it to 1185 * divide NR_CPUS. 1186 */ 1187 1188 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()); 1189 prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n", 1190 cores, NR_CPUS); 1191 1192 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores); 1193 1194 /* try calling the ibm,client-architecture-support method */ 1195 prom_printf("Calling ibm,client-architecture-support..."); 1196 if (call_prom_ret("call-method", 3, 2, &ret, 1197 ADDR("ibm,client-architecture-support"), 1198 root, 1199 ADDR(&ibm_architecture_vec)) == 0) { 1200 /* the call exists... */ 1201 if (ret) 1202 prom_printf("\nWARNING: ibm,client-architecture" 1203 "-support call FAILED!\n"); 1204 call_prom("close", 1, 0, root); 1205 prom_printf(" done\n"); 1206 return; 1207 } 1208 call_prom("close", 1, 0, root); 1209 prom_printf(" not implemented\n"); 1210 } 1211 1212 #ifdef __BIG_ENDIAN__ 1213 { 1214 ihandle elfloader; 1215 1216 /* no ibm,client-architecture-support call, try the old way */ 1217 elfloader = call_prom("open", 1, 1, 1218 ADDR("/packages/elf-loader")); 1219 if (elfloader == 0) { 1220 prom_printf("couldn't open /packages/elf-loader\n"); 1221 return; 1222 } 1223 call_prom("call-method", 3, 1, ADDR("process-elf-header"), 1224 elfloader, ADDR(&fake_elf)); 1225 call_prom("close", 1, 0, elfloader); 1226 } 1227 #endif /* __BIG_ENDIAN__ */ 1228 } 1229 #endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */ 1230 1231 /* 1232 * Memory allocation strategy... our layout is normally: 1233 * 1234 * at 14Mb or more we have vmlinux, then a gap and initrd. In some 1235 * rare cases, initrd might end up being before the kernel though. 1236 * We assume this won't override the final kernel at 0, we have no 1237 * provision to handle that in this version, but it should hopefully 1238 * never happen. 1239 * 1240 * alloc_top is set to the top of RMO, eventually shrink down if the 1241 * TCEs overlap 1242 * 1243 * alloc_bottom is set to the top of kernel/initrd 1244 * 1245 * from there, allocations are done this way : rtas is allocated 1246 * topmost, and the device-tree is allocated from the bottom. We try 1247 * to grow the device-tree allocation as we progress. If we can't, 1248 * then we fail, we don't currently have a facility to restart 1249 * elsewhere, but that shouldn't be necessary. 1250 * 1251 * Note that calls to reserve_mem have to be done explicitly, memory 1252 * allocated with either alloc_up or alloc_down isn't automatically 1253 * reserved. 1254 */ 1255 1256 1257 /* 1258 * Allocates memory in the RMO upward from the kernel/initrd 1259 * 1260 * When align is 0, this is a special case, it means to allocate in place 1261 * at the current location of alloc_bottom or fail (that is basically 1262 * extending the previous allocation). Used for the device-tree flattening 1263 */ 1264 static unsigned long __init alloc_up(unsigned long size, unsigned long align) 1265 { 1266 unsigned long base = alloc_bottom; 1267 unsigned long addr = 0; 1268 1269 if (align) 1270 base = _ALIGN_UP(base, align); 1271 prom_debug("%s(%lx, %lx)\n", __func__, size, align); 1272 if (ram_top == 0) 1273 prom_panic("alloc_up() called with mem not initialized\n"); 1274 1275 if (align) 1276 base = _ALIGN_UP(alloc_bottom, align); 1277 else 1278 base = alloc_bottom; 1279 1280 for(; (base + size) <= alloc_top; 1281 base = _ALIGN_UP(base + 0x100000, align)) { 1282 prom_debug(" trying: 0x%lx\n\r", base); 1283 addr = (unsigned long)prom_claim(base, size, 0); 1284 if (addr != PROM_ERROR && addr != 0) 1285 break; 1286 addr = 0; 1287 if (align == 0) 1288 break; 1289 } 1290 if (addr == 0) 1291 return 0; 1292 alloc_bottom = addr + size; 1293 1294 prom_debug(" -> %lx\n", addr); 1295 prom_debug(" alloc_bottom : %lx\n", alloc_bottom); 1296 prom_debug(" alloc_top : %lx\n", alloc_top); 1297 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); 1298 prom_debug(" rmo_top : %lx\n", rmo_top); 1299 prom_debug(" ram_top : %lx\n", ram_top); 1300 1301 return addr; 1302 } 1303 1304 /* 1305 * Allocates memory downward, either from top of RMO, or if highmem 1306 * is set, from the top of RAM. Note that this one doesn't handle 1307 * failures. It does claim memory if highmem is not set. 1308 */ 1309 static unsigned long __init alloc_down(unsigned long size, unsigned long align, 1310 int highmem) 1311 { 1312 unsigned long base, addr = 0; 1313 1314 prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align, 1315 highmem ? "(high)" : "(low)"); 1316 if (ram_top == 0) 1317 prom_panic("alloc_down() called with mem not initialized\n"); 1318 1319 if (highmem) { 1320 /* Carve out storage for the TCE table. */ 1321 addr = _ALIGN_DOWN(alloc_top_high - size, align); 1322 if (addr <= alloc_bottom) 1323 return 0; 1324 /* Will we bump into the RMO ? If yes, check out that we 1325 * didn't overlap existing allocations there, if we did, 1326 * we are dead, we must be the first in town ! 1327 */ 1328 if (addr < rmo_top) { 1329 /* Good, we are first */ 1330 if (alloc_top == rmo_top) 1331 alloc_top = rmo_top = addr; 1332 else 1333 return 0; 1334 } 1335 alloc_top_high = addr; 1336 goto bail; 1337 } 1338 1339 base = _ALIGN_DOWN(alloc_top - size, align); 1340 for (; base > alloc_bottom; 1341 base = _ALIGN_DOWN(base - 0x100000, align)) { 1342 prom_debug(" trying: 0x%lx\n\r", base); 1343 addr = (unsigned long)prom_claim(base, size, 0); 1344 if (addr != PROM_ERROR && addr != 0) 1345 break; 1346 addr = 0; 1347 } 1348 if (addr == 0) 1349 return 0; 1350 alloc_top = addr; 1351 1352 bail: 1353 prom_debug(" -> %lx\n", addr); 1354 prom_debug(" alloc_bottom : %lx\n", alloc_bottom); 1355 prom_debug(" alloc_top : %lx\n", alloc_top); 1356 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); 1357 prom_debug(" rmo_top : %lx\n", rmo_top); 1358 prom_debug(" ram_top : %lx\n", ram_top); 1359 1360 return addr; 1361 } 1362 1363 /* 1364 * Parse a "reg" cell 1365 */ 1366 static unsigned long __init prom_next_cell(int s, cell_t **cellp) 1367 { 1368 cell_t *p = *cellp; 1369 unsigned long r = 0; 1370 1371 /* Ignore more than 2 cells */ 1372 while (s > sizeof(unsigned long) / 4) { 1373 p++; 1374 s--; 1375 } 1376 r = be32_to_cpu(*p++); 1377 #ifdef CONFIG_PPC64 1378 if (s > 1) { 1379 r <<= 32; 1380 r |= be32_to_cpu(*(p++)); 1381 } 1382 #endif 1383 *cellp = p; 1384 return r; 1385 } 1386 1387 /* 1388 * Very dumb function for adding to the memory reserve list, but 1389 * we don't need anything smarter at this point 1390 * 1391 * XXX Eventually check for collisions. They should NEVER happen. 1392 * If problems seem to show up, it would be a good start to track 1393 * them down. 1394 */ 1395 static void __init reserve_mem(u64 base, u64 size) 1396 { 1397 u64 top = base + size; 1398 unsigned long cnt = mem_reserve_cnt; 1399 1400 if (size == 0) 1401 return; 1402 1403 /* We need to always keep one empty entry so that we 1404 * have our terminator with "size" set to 0 since we are 1405 * dumb and just copy this entire array to the boot params 1406 */ 1407 base = _ALIGN_DOWN(base, PAGE_SIZE); 1408 top = _ALIGN_UP(top, PAGE_SIZE); 1409 size = top - base; 1410 1411 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1)) 1412 prom_panic("Memory reserve map exhausted !\n"); 1413 mem_reserve_map[cnt].base = cpu_to_be64(base); 1414 mem_reserve_map[cnt].size = cpu_to_be64(size); 1415 mem_reserve_cnt = cnt + 1; 1416 } 1417 1418 /* 1419 * Initialize memory allocation mechanism, parse "memory" nodes and 1420 * obtain that way the top of memory and RMO to setup out local allocator 1421 */ 1422 static void __init prom_init_mem(void) 1423 { 1424 phandle node; 1425 char *path, type[64]; 1426 unsigned int plen; 1427 cell_t *p, *endp; 1428 __be32 val; 1429 u32 rac, rsc; 1430 1431 /* 1432 * We iterate the memory nodes to find 1433 * 1) top of RMO (first node) 1434 * 2) top of memory 1435 */ 1436 val = cpu_to_be32(2); 1437 prom_getprop(prom.root, "#address-cells", &val, sizeof(val)); 1438 rac = be32_to_cpu(val); 1439 val = cpu_to_be32(1); 1440 prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc)); 1441 rsc = be32_to_cpu(val); 1442 prom_debug("root_addr_cells: %x\n", rac); 1443 prom_debug("root_size_cells: %x\n", rsc); 1444 1445 prom_debug("scanning memory:\n"); 1446 path = prom_scratch; 1447 1448 for (node = 0; prom_next_node(&node); ) { 1449 type[0] = 0; 1450 prom_getprop(node, "device_type", type, sizeof(type)); 1451 1452 if (type[0] == 0) { 1453 /* 1454 * CHRP Longtrail machines have no device_type 1455 * on the memory node, so check the name instead... 1456 */ 1457 prom_getprop(node, "name", type, sizeof(type)); 1458 } 1459 if (strcmp(type, "memory")) 1460 continue; 1461 1462 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf)); 1463 if (plen > sizeof(regbuf)) { 1464 prom_printf("memory node too large for buffer !\n"); 1465 plen = sizeof(regbuf); 1466 } 1467 p = regbuf; 1468 endp = p + (plen / sizeof(cell_t)); 1469 1470 #ifdef DEBUG_PROM 1471 memset(path, 0, PROM_SCRATCH_SIZE); 1472 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1); 1473 prom_debug(" node %s :\n", path); 1474 #endif /* DEBUG_PROM */ 1475 1476 while ((endp - p) >= (rac + rsc)) { 1477 unsigned long base, size; 1478 1479 base = prom_next_cell(rac, &p); 1480 size = prom_next_cell(rsc, &p); 1481 1482 if (size == 0) 1483 continue; 1484 prom_debug(" %lx %lx\n", base, size); 1485 if (base == 0 && (of_platform & PLATFORM_LPAR)) 1486 rmo_top = size; 1487 if ((base + size) > ram_top) 1488 ram_top = base + size; 1489 } 1490 } 1491 1492 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000); 1493 1494 /* 1495 * If prom_memory_limit is set we reduce the upper limits *except* for 1496 * alloc_top_high. This must be the real top of RAM so we can put 1497 * TCE's up there. 1498 */ 1499 1500 alloc_top_high = ram_top; 1501 1502 if (prom_memory_limit) { 1503 if (prom_memory_limit <= alloc_bottom) { 1504 prom_printf("Ignoring mem=%lx <= alloc_bottom.\n", 1505 prom_memory_limit); 1506 prom_memory_limit = 0; 1507 } else if (prom_memory_limit >= ram_top) { 1508 prom_printf("Ignoring mem=%lx >= ram_top.\n", 1509 prom_memory_limit); 1510 prom_memory_limit = 0; 1511 } else { 1512 ram_top = prom_memory_limit; 1513 rmo_top = min(rmo_top, prom_memory_limit); 1514 } 1515 } 1516 1517 /* 1518 * Setup our top alloc point, that is top of RMO or top of 1519 * segment 0 when running non-LPAR. 1520 * Some RS64 machines have buggy firmware where claims up at 1521 * 1GB fail. Cap at 768MB as a workaround. 1522 * Since 768MB is plenty of room, and we need to cap to something 1523 * reasonable on 32-bit, cap at 768MB on all machines. 1524 */ 1525 if (!rmo_top) 1526 rmo_top = ram_top; 1527 rmo_top = min(0x30000000ul, rmo_top); 1528 alloc_top = rmo_top; 1529 alloc_top_high = ram_top; 1530 1531 /* 1532 * Check if we have an initrd after the kernel but still inside 1533 * the RMO. If we do move our bottom point to after it. 1534 */ 1535 if (prom_initrd_start && 1536 prom_initrd_start < rmo_top && 1537 prom_initrd_end > alloc_bottom) 1538 alloc_bottom = PAGE_ALIGN(prom_initrd_end); 1539 1540 prom_printf("memory layout at init:\n"); 1541 prom_printf(" memory_limit : %lx (16 MB aligned)\n", 1542 prom_memory_limit); 1543 prom_printf(" alloc_bottom : %lx\n", alloc_bottom); 1544 prom_printf(" alloc_top : %lx\n", alloc_top); 1545 prom_printf(" alloc_top_hi : %lx\n", alloc_top_high); 1546 prom_printf(" rmo_top : %lx\n", rmo_top); 1547 prom_printf(" ram_top : %lx\n", ram_top); 1548 } 1549 1550 static void __init prom_close_stdin(void) 1551 { 1552 __be32 val; 1553 ihandle stdin; 1554 1555 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) { 1556 stdin = be32_to_cpu(val); 1557 call_prom("close", 1, 0, stdin); 1558 } 1559 } 1560 1561 #ifdef CONFIG_PPC_POWERNV 1562 1563 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL 1564 static u64 __initdata prom_opal_base; 1565 static u64 __initdata prom_opal_entry; 1566 #endif 1567 1568 /* 1569 * Allocate room for and instantiate OPAL 1570 */ 1571 static void __init prom_instantiate_opal(void) 1572 { 1573 phandle opal_node; 1574 ihandle opal_inst; 1575 u64 base, entry; 1576 u64 size = 0, align = 0x10000; 1577 __be64 val64; 1578 u32 rets[2]; 1579 1580 prom_debug("prom_instantiate_opal: start...\n"); 1581 1582 opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal")); 1583 prom_debug("opal_node: %x\n", opal_node); 1584 if (!PHANDLE_VALID(opal_node)) 1585 return; 1586 1587 val64 = 0; 1588 prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64)); 1589 size = be64_to_cpu(val64); 1590 if (size == 0) 1591 return; 1592 val64 = 0; 1593 prom_getprop(opal_node, "opal-runtime-alignment", &val64,sizeof(val64)); 1594 align = be64_to_cpu(val64); 1595 1596 base = alloc_down(size, align, 0); 1597 if (base == 0) { 1598 prom_printf("OPAL allocation failed !\n"); 1599 return; 1600 } 1601 1602 opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal")); 1603 if (!IHANDLE_VALID(opal_inst)) { 1604 prom_printf("opening opal package failed (%x)\n", opal_inst); 1605 return; 1606 } 1607 1608 prom_printf("instantiating opal at 0x%llx...", base); 1609 1610 if (call_prom_ret("call-method", 4, 3, rets, 1611 ADDR("load-opal-runtime"), 1612 opal_inst, 1613 base >> 32, base & 0xffffffff) != 0 1614 || (rets[0] == 0 && rets[1] == 0)) { 1615 prom_printf(" failed\n"); 1616 return; 1617 } 1618 entry = (((u64)rets[0]) << 32) | rets[1]; 1619 1620 prom_printf(" done\n"); 1621 1622 reserve_mem(base, size); 1623 1624 prom_debug("opal base = 0x%llx\n", base); 1625 prom_debug("opal align = 0x%llx\n", align); 1626 prom_debug("opal entry = 0x%llx\n", entry); 1627 prom_debug("opal size = 0x%llx\n", size); 1628 1629 prom_setprop(opal_node, "/ibm,opal", "opal-base-address", 1630 &base, sizeof(base)); 1631 prom_setprop(opal_node, "/ibm,opal", "opal-entry-address", 1632 &entry, sizeof(entry)); 1633 1634 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL 1635 prom_opal_base = base; 1636 prom_opal_entry = entry; 1637 #endif 1638 prom_debug("prom_instantiate_opal: end...\n"); 1639 } 1640 1641 #endif /* CONFIG_PPC_POWERNV */ 1642 1643 /* 1644 * Allocate room for and instantiate RTAS 1645 */ 1646 static void __init prom_instantiate_rtas(void) 1647 { 1648 phandle rtas_node; 1649 ihandle rtas_inst; 1650 u32 base, entry = 0; 1651 __be32 val; 1652 u32 size = 0; 1653 1654 prom_debug("prom_instantiate_rtas: start...\n"); 1655 1656 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas")); 1657 prom_debug("rtas_node: %x\n", rtas_node); 1658 if (!PHANDLE_VALID(rtas_node)) 1659 return; 1660 1661 val = 0; 1662 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size)); 1663 size = be32_to_cpu(val); 1664 if (size == 0) 1665 return; 1666 1667 base = alloc_down(size, PAGE_SIZE, 0); 1668 if (base == 0) 1669 prom_panic("Could not allocate memory for RTAS\n"); 1670 1671 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas")); 1672 if (!IHANDLE_VALID(rtas_inst)) { 1673 prom_printf("opening rtas package failed (%x)\n", rtas_inst); 1674 return; 1675 } 1676 1677 prom_printf("instantiating rtas at 0x%x...", base); 1678 1679 if (call_prom_ret("call-method", 3, 2, &entry, 1680 ADDR("instantiate-rtas"), 1681 rtas_inst, base) != 0 1682 || entry == 0) { 1683 prom_printf(" failed\n"); 1684 return; 1685 } 1686 prom_printf(" done\n"); 1687 1688 reserve_mem(base, size); 1689 1690 val = cpu_to_be32(base); 1691 prom_setprop(rtas_node, "/rtas", "linux,rtas-base", 1692 &val, sizeof(val)); 1693 val = cpu_to_be32(entry); 1694 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry", 1695 &val, sizeof(val)); 1696 1697 /* Check if it supports "query-cpu-stopped-state" */ 1698 if (prom_getprop(rtas_node, "query-cpu-stopped-state", 1699 &val, sizeof(val)) != PROM_ERROR) 1700 rtas_has_query_cpu_stopped = true; 1701 1702 prom_debug("rtas base = 0x%x\n", base); 1703 prom_debug("rtas entry = 0x%x\n", entry); 1704 prom_debug("rtas size = 0x%x\n", size); 1705 1706 prom_debug("prom_instantiate_rtas: end...\n"); 1707 } 1708 1709 #ifdef CONFIG_PPC64 1710 /* 1711 * Allocate room for and instantiate Stored Measurement Log (SML) 1712 */ 1713 static void __init prom_instantiate_sml(void) 1714 { 1715 phandle ibmvtpm_node; 1716 ihandle ibmvtpm_inst; 1717 u32 entry = 0, size = 0, succ = 0; 1718 u64 base; 1719 __be32 val; 1720 1721 prom_debug("prom_instantiate_sml: start...\n"); 1722 1723 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm")); 1724 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node); 1725 if (!PHANDLE_VALID(ibmvtpm_node)) 1726 return; 1727 1728 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm")); 1729 if (!IHANDLE_VALID(ibmvtpm_inst)) { 1730 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst); 1731 return; 1732 } 1733 1734 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported", 1735 &val, sizeof(val)) != PROM_ERROR) { 1736 if (call_prom_ret("call-method", 2, 2, &succ, 1737 ADDR("reformat-sml-to-efi-alignment"), 1738 ibmvtpm_inst) != 0 || succ == 0) { 1739 prom_printf("Reformat SML to EFI alignment failed\n"); 1740 return; 1741 } 1742 1743 if (call_prom_ret("call-method", 2, 2, &size, 1744 ADDR("sml-get-allocated-size"), 1745 ibmvtpm_inst) != 0 || size == 0) { 1746 prom_printf("SML get allocated size failed\n"); 1747 return; 1748 } 1749 } else { 1750 if (call_prom_ret("call-method", 2, 2, &size, 1751 ADDR("sml-get-handover-size"), 1752 ibmvtpm_inst) != 0 || size == 0) { 1753 prom_printf("SML get handover size failed\n"); 1754 return; 1755 } 1756 } 1757 1758 base = alloc_down(size, PAGE_SIZE, 0); 1759 if (base == 0) 1760 prom_panic("Could not allocate memory for sml\n"); 1761 1762 prom_printf("instantiating sml at 0x%llx...", base); 1763 1764 memset((void *)base, 0, size); 1765 1766 if (call_prom_ret("call-method", 4, 2, &entry, 1767 ADDR("sml-handover"), 1768 ibmvtpm_inst, size, base) != 0 || entry == 0) { 1769 prom_printf("SML handover failed\n"); 1770 return; 1771 } 1772 prom_printf(" done\n"); 1773 1774 reserve_mem(base, size); 1775 1776 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base", 1777 &base, sizeof(base)); 1778 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size", 1779 &size, sizeof(size)); 1780 1781 prom_debug("sml base = 0x%llx\n", base); 1782 prom_debug("sml size = 0x%x\n", size); 1783 1784 prom_debug("prom_instantiate_sml: end...\n"); 1785 } 1786 1787 /* 1788 * Allocate room for and initialize TCE tables 1789 */ 1790 #ifdef __BIG_ENDIAN__ 1791 static void __init prom_initialize_tce_table(void) 1792 { 1793 phandle node; 1794 ihandle phb_node; 1795 char compatible[64], type[64], model[64]; 1796 char *path = prom_scratch; 1797 u64 base, align; 1798 u32 minalign, minsize; 1799 u64 tce_entry, *tce_entryp; 1800 u64 local_alloc_top, local_alloc_bottom; 1801 u64 i; 1802 1803 if (prom_iommu_off) 1804 return; 1805 1806 prom_debug("starting prom_initialize_tce_table\n"); 1807 1808 /* Cache current top of allocs so we reserve a single block */ 1809 local_alloc_top = alloc_top_high; 1810 local_alloc_bottom = local_alloc_top; 1811 1812 /* Search all nodes looking for PHBs. */ 1813 for (node = 0; prom_next_node(&node); ) { 1814 compatible[0] = 0; 1815 type[0] = 0; 1816 model[0] = 0; 1817 prom_getprop(node, "compatible", 1818 compatible, sizeof(compatible)); 1819 prom_getprop(node, "device_type", type, sizeof(type)); 1820 prom_getprop(node, "model", model, sizeof(model)); 1821 1822 if ((type[0] == 0) || (strstr(type, "pci") == NULL)) 1823 continue; 1824 1825 /* Keep the old logic intact to avoid regression. */ 1826 if (compatible[0] != 0) { 1827 if ((strstr(compatible, "python") == NULL) && 1828 (strstr(compatible, "Speedwagon") == NULL) && 1829 (strstr(compatible, "Winnipeg") == NULL)) 1830 continue; 1831 } else if (model[0] != 0) { 1832 if ((strstr(model, "ython") == NULL) && 1833 (strstr(model, "peedwagon") == NULL) && 1834 (strstr(model, "innipeg") == NULL)) 1835 continue; 1836 } 1837 1838 if (prom_getprop(node, "tce-table-minalign", &minalign, 1839 sizeof(minalign)) == PROM_ERROR) 1840 minalign = 0; 1841 if (prom_getprop(node, "tce-table-minsize", &minsize, 1842 sizeof(minsize)) == PROM_ERROR) 1843 minsize = 4UL << 20; 1844 1845 /* 1846 * Even though we read what OF wants, we just set the table 1847 * size to 4 MB. This is enough to map 2GB of PCI DMA space. 1848 * By doing this, we avoid the pitfalls of trying to DMA to 1849 * MMIO space and the DMA alias hole. 1850 */ 1851 minsize = 4UL << 20; 1852 1853 /* Align to the greater of the align or size */ 1854 align = max(minalign, minsize); 1855 base = alloc_down(minsize, align, 1); 1856 if (base == 0) 1857 prom_panic("ERROR, cannot find space for TCE table.\n"); 1858 if (base < local_alloc_bottom) 1859 local_alloc_bottom = base; 1860 1861 /* It seems OF doesn't null-terminate the path :-( */ 1862 memset(path, 0, PROM_SCRATCH_SIZE); 1863 /* Call OF to setup the TCE hardware */ 1864 if (call_prom("package-to-path", 3, 1, node, 1865 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) { 1866 prom_printf("package-to-path failed\n"); 1867 } 1868 1869 /* Save away the TCE table attributes for later use. */ 1870 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base)); 1871 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize)); 1872 1873 prom_debug("TCE table: %s\n", path); 1874 prom_debug("\tnode = 0x%x\n", node); 1875 prom_debug("\tbase = 0x%llx\n", base); 1876 prom_debug("\tsize = 0x%x\n", minsize); 1877 1878 /* Initialize the table to have a one-to-one mapping 1879 * over the allocated size. 1880 */ 1881 tce_entryp = (u64 *)base; 1882 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) { 1883 tce_entry = (i << PAGE_SHIFT); 1884 tce_entry |= 0x3; 1885 *tce_entryp = tce_entry; 1886 } 1887 1888 prom_printf("opening PHB %s", path); 1889 phb_node = call_prom("open", 1, 1, path); 1890 if (phb_node == 0) 1891 prom_printf("... failed\n"); 1892 else 1893 prom_printf("... done\n"); 1894 1895 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"), 1896 phb_node, -1, minsize, 1897 (u32) base, (u32) (base >> 32)); 1898 call_prom("close", 1, 0, phb_node); 1899 } 1900 1901 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom); 1902 1903 /* These are only really needed if there is a memory limit in 1904 * effect, but we don't know so export them always. */ 1905 prom_tce_alloc_start = local_alloc_bottom; 1906 prom_tce_alloc_end = local_alloc_top; 1907 1908 /* Flag the first invalid entry */ 1909 prom_debug("ending prom_initialize_tce_table\n"); 1910 } 1911 #endif /* __BIG_ENDIAN__ */ 1912 #endif /* CONFIG_PPC64 */ 1913 1914 /* 1915 * With CHRP SMP we need to use the OF to start the other processors. 1916 * We can't wait until smp_boot_cpus (the OF is trashed by then) 1917 * so we have to put the processors into a holding pattern controlled 1918 * by the kernel (not OF) before we destroy the OF. 1919 * 1920 * This uses a chunk of low memory, puts some holding pattern 1921 * code there and sends the other processors off to there until 1922 * smp_boot_cpus tells them to do something. The holding pattern 1923 * checks that address until its cpu # is there, when it is that 1924 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care 1925 * of setting those values. 1926 * 1927 * We also use physical address 0x4 here to tell when a cpu 1928 * is in its holding pattern code. 1929 * 1930 * -- Cort 1931 */ 1932 /* 1933 * We want to reference the copy of __secondary_hold_* in the 1934 * 0 - 0x100 address range 1935 */ 1936 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff) 1937 1938 static void __init prom_hold_cpus(void) 1939 { 1940 unsigned long i; 1941 phandle node; 1942 char type[64]; 1943 unsigned long *spinloop 1944 = (void *) LOW_ADDR(__secondary_hold_spinloop); 1945 unsigned long *acknowledge 1946 = (void *) LOW_ADDR(__secondary_hold_acknowledge); 1947 unsigned long secondary_hold = LOW_ADDR(__secondary_hold); 1948 1949 /* 1950 * On pseries, if RTAS supports "query-cpu-stopped-state", 1951 * we skip this stage, the CPUs will be started by the 1952 * kernel using RTAS. 1953 */ 1954 if ((of_platform == PLATFORM_PSERIES || 1955 of_platform == PLATFORM_PSERIES_LPAR) && 1956 rtas_has_query_cpu_stopped) { 1957 prom_printf("prom_hold_cpus: skipped\n"); 1958 return; 1959 } 1960 1961 prom_debug("prom_hold_cpus: start...\n"); 1962 prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop); 1963 prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop); 1964 prom_debug(" 1) acknowledge = 0x%lx\n", 1965 (unsigned long)acknowledge); 1966 prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge); 1967 prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold); 1968 1969 /* Set the common spinloop variable, so all of the secondary cpus 1970 * will block when they are awakened from their OF spinloop. 1971 * This must occur for both SMP and non SMP kernels, since OF will 1972 * be trashed when we move the kernel. 1973 */ 1974 *spinloop = 0; 1975 1976 /* look for cpus */ 1977 for (node = 0; prom_next_node(&node); ) { 1978 unsigned int cpu_no; 1979 __be32 reg; 1980 1981 type[0] = 0; 1982 prom_getprop(node, "device_type", type, sizeof(type)); 1983 if (strcmp(type, "cpu") != 0) 1984 continue; 1985 1986 /* Skip non-configured cpus. */ 1987 if (prom_getprop(node, "status", type, sizeof(type)) > 0) 1988 if (strcmp(type, "okay") != 0) 1989 continue; 1990 1991 reg = cpu_to_be32(-1); /* make sparse happy */ 1992 prom_getprop(node, "reg", ®, sizeof(reg)); 1993 cpu_no = be32_to_cpu(reg); 1994 1995 prom_debug("cpu hw idx = %u\n", cpu_no); 1996 1997 /* Init the acknowledge var which will be reset by 1998 * the secondary cpu when it awakens from its OF 1999 * spinloop. 2000 */ 2001 *acknowledge = (unsigned long)-1; 2002 2003 if (cpu_no != prom.cpu) { 2004 /* Primary Thread of non-boot cpu or any thread */ 2005 prom_printf("starting cpu hw idx %u... ", cpu_no); 2006 call_prom("start-cpu", 3, 0, node, 2007 secondary_hold, cpu_no); 2008 2009 for (i = 0; (i < 100000000) && 2010 (*acknowledge == ((unsigned long)-1)); i++ ) 2011 mb(); 2012 2013 if (*acknowledge == cpu_no) 2014 prom_printf("done\n"); 2015 else 2016 prom_printf("failed: %lx\n", *acknowledge); 2017 } 2018 #ifdef CONFIG_SMP 2019 else 2020 prom_printf("boot cpu hw idx %u\n", cpu_no); 2021 #endif /* CONFIG_SMP */ 2022 } 2023 2024 prom_debug("prom_hold_cpus: end...\n"); 2025 } 2026 2027 2028 static void __init prom_init_client_services(unsigned long pp) 2029 { 2030 /* Get a handle to the prom entry point before anything else */ 2031 prom_entry = pp; 2032 2033 /* get a handle for the stdout device */ 2034 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen")); 2035 if (!PHANDLE_VALID(prom.chosen)) 2036 prom_panic("cannot find chosen"); /* msg won't be printed :( */ 2037 2038 /* get device tree root */ 2039 prom.root = call_prom("finddevice", 1, 1, ADDR("/")); 2040 if (!PHANDLE_VALID(prom.root)) 2041 prom_panic("cannot find device tree root"); /* msg won't be printed :( */ 2042 2043 prom.mmumap = 0; 2044 } 2045 2046 #ifdef CONFIG_PPC32 2047 /* 2048 * For really old powermacs, we need to map things we claim. 2049 * For that, we need the ihandle of the mmu. 2050 * Also, on the longtrail, we need to work around other bugs. 2051 */ 2052 static void __init prom_find_mmu(void) 2053 { 2054 phandle oprom; 2055 char version[64]; 2056 2057 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom")); 2058 if (!PHANDLE_VALID(oprom)) 2059 return; 2060 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0) 2061 return; 2062 version[sizeof(version) - 1] = 0; 2063 /* XXX might need to add other versions here */ 2064 if (strcmp(version, "Open Firmware, 1.0.5") == 0) 2065 of_workarounds = OF_WA_CLAIM; 2066 else if (strncmp(version, "FirmWorks,3.", 12) == 0) { 2067 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL; 2068 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim"); 2069 } else 2070 return; 2071 prom.memory = call_prom("open", 1, 1, ADDR("/memory")); 2072 prom_getprop(prom.chosen, "mmu", &prom.mmumap, 2073 sizeof(prom.mmumap)); 2074 prom.mmumap = be32_to_cpu(prom.mmumap); 2075 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap)) 2076 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */ 2077 } 2078 #else 2079 #define prom_find_mmu() 2080 #endif 2081 2082 static void __init prom_init_stdout(void) 2083 { 2084 char *path = of_stdout_device; 2085 char type[16]; 2086 phandle stdout_node; 2087 __be32 val; 2088 2089 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0) 2090 prom_panic("cannot find stdout"); 2091 2092 prom.stdout = be32_to_cpu(val); 2093 2094 /* Get the full OF pathname of the stdout device */ 2095 memset(path, 0, 256); 2096 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255); 2097 prom_printf("OF stdout device is: %s\n", of_stdout_device); 2098 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path", 2099 path, strlen(path) + 1); 2100 2101 /* instance-to-package fails on PA-Semi */ 2102 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); 2103 if (stdout_node != PROM_ERROR) { 2104 val = cpu_to_be32(stdout_node); 2105 prom_setprop(prom.chosen, "/chosen", "linux,stdout-package", 2106 &val, sizeof(val)); 2107 2108 /* If it's a display, note it */ 2109 memset(type, 0, sizeof(type)); 2110 prom_getprop(stdout_node, "device_type", type, sizeof(type)); 2111 if (strcmp(type, "display") == 0) 2112 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0); 2113 } 2114 } 2115 2116 static int __init prom_find_machine_type(void) 2117 { 2118 char compat[256]; 2119 int len, i = 0; 2120 #ifdef CONFIG_PPC64 2121 phandle rtas; 2122 int x; 2123 #endif 2124 2125 /* Look for a PowerMac or a Cell */ 2126 len = prom_getprop(prom.root, "compatible", 2127 compat, sizeof(compat)-1); 2128 if (len > 0) { 2129 compat[len] = 0; 2130 while (i < len) { 2131 char *p = &compat[i]; 2132 int sl = strlen(p); 2133 if (sl == 0) 2134 break; 2135 if (strstr(p, "Power Macintosh") || 2136 strstr(p, "MacRISC")) 2137 return PLATFORM_POWERMAC; 2138 #ifdef CONFIG_PPC64 2139 /* We must make sure we don't detect the IBM Cell 2140 * blades as pSeries due to some firmware issues, 2141 * so we do it here. 2142 */ 2143 if (strstr(p, "IBM,CBEA") || 2144 strstr(p, "IBM,CPBW-1.0")) 2145 return PLATFORM_GENERIC; 2146 #endif /* CONFIG_PPC64 */ 2147 i += sl + 1; 2148 } 2149 } 2150 #ifdef CONFIG_PPC64 2151 /* Try to detect OPAL */ 2152 if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal")))) 2153 return PLATFORM_OPAL; 2154 2155 /* Try to figure out if it's an IBM pSeries or any other 2156 * PAPR compliant platform. We assume it is if : 2157 * - /device_type is "chrp" (please, do NOT use that for future 2158 * non-IBM designs ! 2159 * - it has /rtas 2160 */ 2161 len = prom_getprop(prom.root, "device_type", 2162 compat, sizeof(compat)-1); 2163 if (len <= 0) 2164 return PLATFORM_GENERIC; 2165 if (strcmp(compat, "chrp")) 2166 return PLATFORM_GENERIC; 2167 2168 /* Default to pSeries. We need to know if we are running LPAR */ 2169 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas")); 2170 if (!PHANDLE_VALID(rtas)) 2171 return PLATFORM_GENERIC; 2172 x = prom_getproplen(rtas, "ibm,hypertas-functions"); 2173 if (x != PROM_ERROR) { 2174 prom_debug("Hypertas detected, assuming LPAR !\n"); 2175 return PLATFORM_PSERIES_LPAR; 2176 } 2177 return PLATFORM_PSERIES; 2178 #else 2179 return PLATFORM_GENERIC; 2180 #endif 2181 } 2182 2183 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b) 2184 { 2185 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r); 2186 } 2187 2188 /* 2189 * If we have a display that we don't know how to drive, 2190 * we will want to try to execute OF's open method for it 2191 * later. However, OF will probably fall over if we do that 2192 * we've taken over the MMU. 2193 * So we check whether we will need to open the display, 2194 * and if so, open it now. 2195 */ 2196 static void __init prom_check_displays(void) 2197 { 2198 char type[16], *path; 2199 phandle node; 2200 ihandle ih; 2201 int i; 2202 2203 static unsigned char default_colors[] = { 2204 0x00, 0x00, 0x00, 2205 0x00, 0x00, 0xaa, 2206 0x00, 0xaa, 0x00, 2207 0x00, 0xaa, 0xaa, 2208 0xaa, 0x00, 0x00, 2209 0xaa, 0x00, 0xaa, 2210 0xaa, 0xaa, 0x00, 2211 0xaa, 0xaa, 0xaa, 2212 0x55, 0x55, 0x55, 2213 0x55, 0x55, 0xff, 2214 0x55, 0xff, 0x55, 2215 0x55, 0xff, 0xff, 2216 0xff, 0x55, 0x55, 2217 0xff, 0x55, 0xff, 2218 0xff, 0xff, 0x55, 2219 0xff, 0xff, 0xff 2220 }; 2221 const unsigned char *clut; 2222 2223 prom_debug("Looking for displays\n"); 2224 for (node = 0; prom_next_node(&node); ) { 2225 memset(type, 0, sizeof(type)); 2226 prom_getprop(node, "device_type", type, sizeof(type)); 2227 if (strcmp(type, "display") != 0) 2228 continue; 2229 2230 /* It seems OF doesn't null-terminate the path :-( */ 2231 path = prom_scratch; 2232 memset(path, 0, PROM_SCRATCH_SIZE); 2233 2234 /* 2235 * leave some room at the end of the path for appending extra 2236 * arguments 2237 */ 2238 if (call_prom("package-to-path", 3, 1, node, path, 2239 PROM_SCRATCH_SIZE-10) == PROM_ERROR) 2240 continue; 2241 prom_printf("found display : %s, opening... ", path); 2242 2243 ih = call_prom("open", 1, 1, path); 2244 if (ih == 0) { 2245 prom_printf("failed\n"); 2246 continue; 2247 } 2248 2249 /* Success */ 2250 prom_printf("done\n"); 2251 prom_setprop(node, path, "linux,opened", NULL, 0); 2252 2253 /* Setup a usable color table when the appropriate 2254 * method is available. Should update this to set-colors */ 2255 clut = default_colors; 2256 for (i = 0; i < 16; i++, clut += 3) 2257 if (prom_set_color(ih, i, clut[0], clut[1], 2258 clut[2]) != 0) 2259 break; 2260 2261 #ifdef CONFIG_LOGO_LINUX_CLUT224 2262 clut = PTRRELOC(logo_linux_clut224.clut); 2263 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3) 2264 if (prom_set_color(ih, i + 32, clut[0], clut[1], 2265 clut[2]) != 0) 2266 break; 2267 #endif /* CONFIG_LOGO_LINUX_CLUT224 */ 2268 2269 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX 2270 if (prom_getprop(node, "linux,boot-display", NULL, 0) != 2271 PROM_ERROR) { 2272 u32 width, height, pitch, addr; 2273 2274 prom_printf("Setting btext !\n"); 2275 prom_getprop(node, "width", &width, 4); 2276 prom_getprop(node, "height", &height, 4); 2277 prom_getprop(node, "linebytes", &pitch, 4); 2278 prom_getprop(node, "address", &addr, 4); 2279 prom_printf("W=%d H=%d LB=%d addr=0x%x\n", 2280 width, height, pitch, addr); 2281 btext_setup_display(width, height, 8, pitch, addr); 2282 } 2283 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */ 2284 } 2285 } 2286 2287 2288 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */ 2289 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end, 2290 unsigned long needed, unsigned long align) 2291 { 2292 void *ret; 2293 2294 *mem_start = _ALIGN(*mem_start, align); 2295 while ((*mem_start + needed) > *mem_end) { 2296 unsigned long room, chunk; 2297 2298 prom_debug("Chunk exhausted, claiming more at %lx...\n", 2299 alloc_bottom); 2300 room = alloc_top - alloc_bottom; 2301 if (room > DEVTREE_CHUNK_SIZE) 2302 room = DEVTREE_CHUNK_SIZE; 2303 if (room < PAGE_SIZE) 2304 prom_panic("No memory for flatten_device_tree " 2305 "(no room)\n"); 2306 chunk = alloc_up(room, 0); 2307 if (chunk == 0) 2308 prom_panic("No memory for flatten_device_tree " 2309 "(claim failed)\n"); 2310 *mem_end = chunk + room; 2311 } 2312 2313 ret = (void *)*mem_start; 2314 *mem_start += needed; 2315 2316 return ret; 2317 } 2318 2319 #define dt_push_token(token, mem_start, mem_end) do { \ 2320 void *room = make_room(mem_start, mem_end, 4, 4); \ 2321 *(__be32 *)room = cpu_to_be32(token); \ 2322 } while(0) 2323 2324 static unsigned long __init dt_find_string(char *str) 2325 { 2326 char *s, *os; 2327 2328 s = os = (char *)dt_string_start; 2329 s += 4; 2330 while (s < (char *)dt_string_end) { 2331 if (strcmp(s, str) == 0) 2332 return s - os; 2333 s += strlen(s) + 1; 2334 } 2335 return 0; 2336 } 2337 2338 /* 2339 * The Open Firmware 1275 specification states properties must be 31 bytes or 2340 * less, however not all firmwares obey this. Make it 64 bytes to be safe. 2341 */ 2342 #define MAX_PROPERTY_NAME 64 2343 2344 static void __init scan_dt_build_strings(phandle node, 2345 unsigned long *mem_start, 2346 unsigned long *mem_end) 2347 { 2348 char *prev_name, *namep, *sstart; 2349 unsigned long soff; 2350 phandle child; 2351 2352 sstart = (char *)dt_string_start; 2353 2354 /* get and store all property names */ 2355 prev_name = ""; 2356 for (;;) { 2357 /* 64 is max len of name including nul. */ 2358 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1); 2359 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) { 2360 /* No more nodes: unwind alloc */ 2361 *mem_start = (unsigned long)namep; 2362 break; 2363 } 2364 2365 /* skip "name" */ 2366 if (strcmp(namep, "name") == 0) { 2367 *mem_start = (unsigned long)namep; 2368 prev_name = "name"; 2369 continue; 2370 } 2371 /* get/create string entry */ 2372 soff = dt_find_string(namep); 2373 if (soff != 0) { 2374 *mem_start = (unsigned long)namep; 2375 namep = sstart + soff; 2376 } else { 2377 /* Trim off some if we can */ 2378 *mem_start = (unsigned long)namep + strlen(namep) + 1; 2379 dt_string_end = *mem_start; 2380 } 2381 prev_name = namep; 2382 } 2383 2384 /* do all our children */ 2385 child = call_prom("child", 1, 1, node); 2386 while (child != 0) { 2387 scan_dt_build_strings(child, mem_start, mem_end); 2388 child = call_prom("peer", 1, 1, child); 2389 } 2390 } 2391 2392 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, 2393 unsigned long *mem_end) 2394 { 2395 phandle child; 2396 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path; 2397 unsigned long soff; 2398 unsigned char *valp; 2399 static char pname[MAX_PROPERTY_NAME]; 2400 int l, room, has_phandle = 0; 2401 2402 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end); 2403 2404 /* get the node's full name */ 2405 namep = (char *)*mem_start; 2406 room = *mem_end - *mem_start; 2407 if (room > 255) 2408 room = 255; 2409 l = call_prom("package-to-path", 3, 1, node, namep, room); 2410 if (l >= 0) { 2411 /* Didn't fit? Get more room. */ 2412 if (l >= room) { 2413 if (l >= *mem_end - *mem_start) 2414 namep = make_room(mem_start, mem_end, l+1, 1); 2415 call_prom("package-to-path", 3, 1, node, namep, l); 2416 } 2417 namep[l] = '\0'; 2418 2419 /* Fixup an Apple bug where they have bogus \0 chars in the 2420 * middle of the path in some properties, and extract 2421 * the unit name (everything after the last '/'). 2422 */ 2423 for (lp = p = namep, ep = namep + l; p < ep; p++) { 2424 if (*p == '/') 2425 lp = namep; 2426 else if (*p != 0) 2427 *lp++ = *p; 2428 } 2429 *lp = 0; 2430 *mem_start = _ALIGN((unsigned long)lp + 1, 4); 2431 } 2432 2433 /* get it again for debugging */ 2434 path = prom_scratch; 2435 memset(path, 0, PROM_SCRATCH_SIZE); 2436 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1); 2437 2438 /* get and store all properties */ 2439 prev_name = ""; 2440 sstart = (char *)dt_string_start; 2441 for (;;) { 2442 if (call_prom("nextprop", 3, 1, node, prev_name, 2443 pname) != 1) 2444 break; 2445 2446 /* skip "name" */ 2447 if (strcmp(pname, "name") == 0) { 2448 prev_name = "name"; 2449 continue; 2450 } 2451 2452 /* find string offset */ 2453 soff = dt_find_string(pname); 2454 if (soff == 0) { 2455 prom_printf("WARNING: Can't find string index for" 2456 " <%s>, node %s\n", pname, path); 2457 break; 2458 } 2459 prev_name = sstart + soff; 2460 2461 /* get length */ 2462 l = call_prom("getproplen", 2, 1, node, pname); 2463 2464 /* sanity checks */ 2465 if (l == PROM_ERROR) 2466 continue; 2467 2468 /* push property head */ 2469 dt_push_token(OF_DT_PROP, mem_start, mem_end); 2470 dt_push_token(l, mem_start, mem_end); 2471 dt_push_token(soff, mem_start, mem_end); 2472 2473 /* push property content */ 2474 valp = make_room(mem_start, mem_end, l, 4); 2475 call_prom("getprop", 4, 1, node, pname, valp, l); 2476 *mem_start = _ALIGN(*mem_start, 4); 2477 2478 if (!strcmp(pname, "phandle")) 2479 has_phandle = 1; 2480 } 2481 2482 /* Add a "linux,phandle" property if no "phandle" property already 2483 * existed (can happen with OPAL) 2484 */ 2485 if (!has_phandle) { 2486 soff = dt_find_string("linux,phandle"); 2487 if (soff == 0) 2488 prom_printf("WARNING: Can't find string index for" 2489 " <linux-phandle> node %s\n", path); 2490 else { 2491 dt_push_token(OF_DT_PROP, mem_start, mem_end); 2492 dt_push_token(4, mem_start, mem_end); 2493 dt_push_token(soff, mem_start, mem_end); 2494 valp = make_room(mem_start, mem_end, 4, 4); 2495 *(__be32 *)valp = cpu_to_be32(node); 2496 } 2497 } 2498 2499 /* do all our children */ 2500 child = call_prom("child", 1, 1, node); 2501 while (child != 0) { 2502 scan_dt_build_struct(child, mem_start, mem_end); 2503 child = call_prom("peer", 1, 1, child); 2504 } 2505 2506 dt_push_token(OF_DT_END_NODE, mem_start, mem_end); 2507 } 2508 2509 static void __init flatten_device_tree(void) 2510 { 2511 phandle root; 2512 unsigned long mem_start, mem_end, room; 2513 struct boot_param_header *hdr; 2514 char *namep; 2515 u64 *rsvmap; 2516 2517 /* 2518 * Check how much room we have between alloc top & bottom (+/- a 2519 * few pages), crop to 1MB, as this is our "chunk" size 2520 */ 2521 room = alloc_top - alloc_bottom - 0x4000; 2522 if (room > DEVTREE_CHUNK_SIZE) 2523 room = DEVTREE_CHUNK_SIZE; 2524 prom_debug("starting device tree allocs at %lx\n", alloc_bottom); 2525 2526 /* Now try to claim that */ 2527 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE); 2528 if (mem_start == 0) 2529 prom_panic("Can't allocate initial device-tree chunk\n"); 2530 mem_end = mem_start + room; 2531 2532 /* Get root of tree */ 2533 root = call_prom("peer", 1, 1, (phandle)0); 2534 if (root == (phandle)0) 2535 prom_panic ("couldn't get device tree root\n"); 2536 2537 /* Build header and make room for mem rsv map */ 2538 mem_start = _ALIGN(mem_start, 4); 2539 hdr = make_room(&mem_start, &mem_end, 2540 sizeof(struct boot_param_header), 4); 2541 dt_header_start = (unsigned long)hdr; 2542 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8); 2543 2544 /* Start of strings */ 2545 mem_start = PAGE_ALIGN(mem_start); 2546 dt_string_start = mem_start; 2547 mem_start += 4; /* hole */ 2548 2549 /* Add "linux,phandle" in there, we'll need it */ 2550 namep = make_room(&mem_start, &mem_end, 16, 1); 2551 strcpy(namep, "linux,phandle"); 2552 mem_start = (unsigned long)namep + strlen(namep) + 1; 2553 2554 /* Build string array */ 2555 prom_printf("Building dt strings...\n"); 2556 scan_dt_build_strings(root, &mem_start, &mem_end); 2557 dt_string_end = mem_start; 2558 2559 /* Build structure */ 2560 mem_start = PAGE_ALIGN(mem_start); 2561 dt_struct_start = mem_start; 2562 prom_printf("Building dt structure...\n"); 2563 scan_dt_build_struct(root, &mem_start, &mem_end); 2564 dt_push_token(OF_DT_END, &mem_start, &mem_end); 2565 dt_struct_end = PAGE_ALIGN(mem_start); 2566 2567 /* Finish header */ 2568 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu); 2569 hdr->magic = cpu_to_be32(OF_DT_HEADER); 2570 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start); 2571 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start); 2572 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start); 2573 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start); 2574 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start); 2575 hdr->version = cpu_to_be32(OF_DT_VERSION); 2576 /* Version 16 is not backward compatible */ 2577 hdr->last_comp_version = cpu_to_be32(0x10); 2578 2579 /* Copy the reserve map in */ 2580 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map)); 2581 2582 #ifdef DEBUG_PROM 2583 { 2584 int i; 2585 prom_printf("reserved memory map:\n"); 2586 for (i = 0; i < mem_reserve_cnt; i++) 2587 prom_printf(" %llx - %llx\n", 2588 be64_to_cpu(mem_reserve_map[i].base), 2589 be64_to_cpu(mem_reserve_map[i].size)); 2590 } 2591 #endif 2592 /* Bump mem_reserve_cnt to cause further reservations to fail 2593 * since it's too late. 2594 */ 2595 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE; 2596 2597 prom_printf("Device tree strings 0x%lx -> 0x%lx\n", 2598 dt_string_start, dt_string_end); 2599 prom_printf("Device tree struct 0x%lx -> 0x%lx\n", 2600 dt_struct_start, dt_struct_end); 2601 } 2602 2603 #ifdef CONFIG_PPC_MAPLE 2604 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property. 2605 * The values are bad, and it doesn't even have the right number of cells. */ 2606 static void __init fixup_device_tree_maple(void) 2607 { 2608 phandle isa; 2609 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */ 2610 u32 isa_ranges[6]; 2611 char *name; 2612 2613 name = "/ht@0/isa@4"; 2614 isa = call_prom("finddevice", 1, 1, ADDR(name)); 2615 if (!PHANDLE_VALID(isa)) { 2616 name = "/ht@0/isa@6"; 2617 isa = call_prom("finddevice", 1, 1, ADDR(name)); 2618 rloc = 0x01003000; /* IO space; PCI device = 6 */ 2619 } 2620 if (!PHANDLE_VALID(isa)) 2621 return; 2622 2623 if (prom_getproplen(isa, "ranges") != 12) 2624 return; 2625 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges)) 2626 == PROM_ERROR) 2627 return; 2628 2629 if (isa_ranges[0] != 0x1 || 2630 isa_ranges[1] != 0xf4000000 || 2631 isa_ranges[2] != 0x00010000) 2632 return; 2633 2634 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n"); 2635 2636 isa_ranges[0] = 0x1; 2637 isa_ranges[1] = 0x0; 2638 isa_ranges[2] = rloc; 2639 isa_ranges[3] = 0x0; 2640 isa_ranges[4] = 0x0; 2641 isa_ranges[5] = 0x00010000; 2642 prom_setprop(isa, name, "ranges", 2643 isa_ranges, sizeof(isa_ranges)); 2644 } 2645 2646 #define CPC925_MC_START 0xf8000000 2647 #define CPC925_MC_LENGTH 0x1000000 2648 /* The values for memory-controller don't have right number of cells */ 2649 static void __init fixup_device_tree_maple_memory_controller(void) 2650 { 2651 phandle mc; 2652 u32 mc_reg[4]; 2653 char *name = "/hostbridge@f8000000"; 2654 u32 ac, sc; 2655 2656 mc = call_prom("finddevice", 1, 1, ADDR(name)); 2657 if (!PHANDLE_VALID(mc)) 2658 return; 2659 2660 if (prom_getproplen(mc, "reg") != 8) 2661 return; 2662 2663 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac)); 2664 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc)); 2665 if ((ac != 2) || (sc != 2)) 2666 return; 2667 2668 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR) 2669 return; 2670 2671 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH) 2672 return; 2673 2674 prom_printf("Fixing up bogus hostbridge on Maple...\n"); 2675 2676 mc_reg[0] = 0x0; 2677 mc_reg[1] = CPC925_MC_START; 2678 mc_reg[2] = 0x0; 2679 mc_reg[3] = CPC925_MC_LENGTH; 2680 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg)); 2681 } 2682 #else 2683 #define fixup_device_tree_maple() 2684 #define fixup_device_tree_maple_memory_controller() 2685 #endif 2686 2687 #ifdef CONFIG_PPC_CHRP 2688 /* 2689 * Pegasos and BriQ lacks the "ranges" property in the isa node 2690 * Pegasos needs decimal IRQ 14/15, not hexadecimal 2691 * Pegasos has the IDE configured in legacy mode, but advertised as native 2692 */ 2693 static void __init fixup_device_tree_chrp(void) 2694 { 2695 phandle ph; 2696 u32 prop[6]; 2697 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */ 2698 char *name; 2699 int rc; 2700 2701 name = "/pci@80000000/isa@c"; 2702 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2703 if (!PHANDLE_VALID(ph)) { 2704 name = "/pci@ff500000/isa@6"; 2705 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2706 rloc = 0x01003000; /* IO space; PCI device = 6 */ 2707 } 2708 if (PHANDLE_VALID(ph)) { 2709 rc = prom_getproplen(ph, "ranges"); 2710 if (rc == 0 || rc == PROM_ERROR) { 2711 prom_printf("Fixing up missing ISA range on Pegasos...\n"); 2712 2713 prop[0] = 0x1; 2714 prop[1] = 0x0; 2715 prop[2] = rloc; 2716 prop[3] = 0x0; 2717 prop[4] = 0x0; 2718 prop[5] = 0x00010000; 2719 prom_setprop(ph, name, "ranges", prop, sizeof(prop)); 2720 } 2721 } 2722 2723 name = "/pci@80000000/ide@C,1"; 2724 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2725 if (PHANDLE_VALID(ph)) { 2726 prom_printf("Fixing up IDE interrupt on Pegasos...\n"); 2727 prop[0] = 14; 2728 prop[1] = 0x0; 2729 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32)); 2730 prom_printf("Fixing up IDE class-code on Pegasos...\n"); 2731 rc = prom_getprop(ph, "class-code", prop, sizeof(u32)); 2732 if (rc == sizeof(u32)) { 2733 prop[0] &= ~0x5; 2734 prom_setprop(ph, name, "class-code", prop, sizeof(u32)); 2735 } 2736 } 2737 } 2738 #else 2739 #define fixup_device_tree_chrp() 2740 #endif 2741 2742 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC) 2743 static void __init fixup_device_tree_pmac(void) 2744 { 2745 phandle u3, i2c, mpic; 2746 u32 u3_rev; 2747 u32 interrupts[2]; 2748 u32 parent; 2749 2750 /* Some G5s have a missing interrupt definition, fix it up here */ 2751 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000")); 2752 if (!PHANDLE_VALID(u3)) 2753 return; 2754 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000")); 2755 if (!PHANDLE_VALID(i2c)) 2756 return; 2757 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000")); 2758 if (!PHANDLE_VALID(mpic)) 2759 return; 2760 2761 /* check if proper rev of u3 */ 2762 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev)) 2763 == PROM_ERROR) 2764 return; 2765 if (u3_rev < 0x35 || u3_rev > 0x39) 2766 return; 2767 /* does it need fixup ? */ 2768 if (prom_getproplen(i2c, "interrupts") > 0) 2769 return; 2770 2771 prom_printf("fixing up bogus interrupts for u3 i2c...\n"); 2772 2773 /* interrupt on this revision of u3 is number 0 and level */ 2774 interrupts[0] = 0; 2775 interrupts[1] = 1; 2776 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts", 2777 &interrupts, sizeof(interrupts)); 2778 parent = (u32)mpic; 2779 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent", 2780 &parent, sizeof(parent)); 2781 } 2782 #else 2783 #define fixup_device_tree_pmac() 2784 #endif 2785 2786 #ifdef CONFIG_PPC_EFIKA 2787 /* 2788 * The MPC5200 FEC driver requires an phy-handle property to tell it how 2789 * to talk to the phy. If the phy-handle property is missing, then this 2790 * function is called to add the appropriate nodes and link it to the 2791 * ethernet node. 2792 */ 2793 static void __init fixup_device_tree_efika_add_phy(void) 2794 { 2795 u32 node; 2796 char prop[64]; 2797 int rv; 2798 2799 /* Check if /builtin/ethernet exists - bail if it doesn't */ 2800 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet")); 2801 if (!PHANDLE_VALID(node)) 2802 return; 2803 2804 /* Check if the phy-handle property exists - bail if it does */ 2805 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop)); 2806 if (!rv) 2807 return; 2808 2809 /* 2810 * At this point the ethernet device doesn't have a phy described. 2811 * Now we need to add the missing phy node and linkage 2812 */ 2813 2814 /* Check for an MDIO bus node - if missing then create one */ 2815 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio")); 2816 if (!PHANDLE_VALID(node)) { 2817 prom_printf("Adding Ethernet MDIO node\n"); 2818 call_prom("interpret", 1, 1, 2819 " s\" /builtin\" find-device" 2820 " new-device" 2821 " 1 encode-int s\" #address-cells\" property" 2822 " 0 encode-int s\" #size-cells\" property" 2823 " s\" mdio\" device-name" 2824 " s\" fsl,mpc5200b-mdio\" encode-string" 2825 " s\" compatible\" property" 2826 " 0xf0003000 0x400 reg" 2827 " 0x2 encode-int" 2828 " 0x5 encode-int encode+" 2829 " 0x3 encode-int encode+" 2830 " s\" interrupts\" property" 2831 " finish-device"); 2832 }; 2833 2834 /* Check for a PHY device node - if missing then create one and 2835 * give it's phandle to the ethernet node */ 2836 node = call_prom("finddevice", 1, 1, 2837 ADDR("/builtin/mdio/ethernet-phy")); 2838 if (!PHANDLE_VALID(node)) { 2839 prom_printf("Adding Ethernet PHY node\n"); 2840 call_prom("interpret", 1, 1, 2841 " s\" /builtin/mdio\" find-device" 2842 " new-device" 2843 " s\" ethernet-phy\" device-name" 2844 " 0x10 encode-int s\" reg\" property" 2845 " my-self" 2846 " ihandle>phandle" 2847 " finish-device" 2848 " s\" /builtin/ethernet\" find-device" 2849 " encode-int" 2850 " s\" phy-handle\" property" 2851 " device-end"); 2852 } 2853 } 2854 2855 static void __init fixup_device_tree_efika(void) 2856 { 2857 int sound_irq[3] = { 2, 2, 0 }; 2858 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0, 2859 3,4,0, 3,5,0, 3,6,0, 3,7,0, 2860 3,8,0, 3,9,0, 3,10,0, 3,11,0, 2861 3,12,0, 3,13,0, 3,14,0, 3,15,0 }; 2862 u32 node; 2863 char prop[64]; 2864 int rv, len; 2865 2866 /* Check if we're really running on a EFIKA */ 2867 node = call_prom("finddevice", 1, 1, ADDR("/")); 2868 if (!PHANDLE_VALID(node)) 2869 return; 2870 2871 rv = prom_getprop(node, "model", prop, sizeof(prop)); 2872 if (rv == PROM_ERROR) 2873 return; 2874 if (strcmp(prop, "EFIKA5K2")) 2875 return; 2876 2877 prom_printf("Applying EFIKA device tree fixups\n"); 2878 2879 /* Claiming to be 'chrp' is death */ 2880 node = call_prom("finddevice", 1, 1, ADDR("/")); 2881 rv = prom_getprop(node, "device_type", prop, sizeof(prop)); 2882 if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0)) 2883 prom_setprop(node, "/", "device_type", "efika", sizeof("efika")); 2884 2885 /* CODEGEN,description is exposed in /proc/cpuinfo so 2886 fix that too */ 2887 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop)); 2888 if (rv != PROM_ERROR && (strstr(prop, "CHRP"))) 2889 prom_setprop(node, "/", "CODEGEN,description", 2890 "Efika 5200B PowerPC System", 2891 sizeof("Efika 5200B PowerPC System")); 2892 2893 /* Fixup bestcomm interrupts property */ 2894 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm")); 2895 if (PHANDLE_VALID(node)) { 2896 len = prom_getproplen(node, "interrupts"); 2897 if (len == 12) { 2898 prom_printf("Fixing bestcomm interrupts property\n"); 2899 prom_setprop(node, "/builtin/bestcom", "interrupts", 2900 bcomm_irq, sizeof(bcomm_irq)); 2901 } 2902 } 2903 2904 /* Fixup sound interrupts property */ 2905 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound")); 2906 if (PHANDLE_VALID(node)) { 2907 rv = prom_getprop(node, "interrupts", prop, sizeof(prop)); 2908 if (rv == PROM_ERROR) { 2909 prom_printf("Adding sound interrupts property\n"); 2910 prom_setprop(node, "/builtin/sound", "interrupts", 2911 sound_irq, sizeof(sound_irq)); 2912 } 2913 } 2914 2915 /* Make sure ethernet phy-handle property exists */ 2916 fixup_device_tree_efika_add_phy(); 2917 } 2918 #else 2919 #define fixup_device_tree_efika() 2920 #endif 2921 2922 #ifdef CONFIG_PPC_PASEMI_NEMO 2923 /* 2924 * CFE supplied on Nemo is broken in several ways, biggest 2925 * problem is that it reassigns ISA interrupts to unused mpic ints. 2926 * Add an interrupt-controller property for the io-bridge to use 2927 * and correct the ints so we can attach them to an irq_domain 2928 */ 2929 static void __init fixup_device_tree_pasemi(void) 2930 { 2931 u32 interrupts[2], parent, rval, val = 0; 2932 char *name, *pci_name; 2933 phandle iob, node; 2934 2935 /* Find the root pci node */ 2936 name = "/pxp@0,e0000000"; 2937 iob = call_prom("finddevice", 1, 1, ADDR(name)); 2938 if (!PHANDLE_VALID(iob)) 2939 return; 2940 2941 /* check if interrupt-controller node set yet */ 2942 if (prom_getproplen(iob, "interrupt-controller") !=PROM_ERROR) 2943 return; 2944 2945 prom_printf("adding interrupt-controller property for SB600...\n"); 2946 2947 prom_setprop(iob, name, "interrupt-controller", &val, 0); 2948 2949 pci_name = "/pxp@0,e0000000/pci@11"; 2950 node = call_prom("finddevice", 1, 1, ADDR(pci_name)); 2951 parent = ADDR(iob); 2952 2953 for( ; prom_next_node(&node); ) { 2954 /* scan each node for one with an interrupt */ 2955 if (!PHANDLE_VALID(node)) 2956 continue; 2957 2958 rval = prom_getproplen(node, "interrupts"); 2959 if (rval == 0 || rval == PROM_ERROR) 2960 continue; 2961 2962 prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts)); 2963 if ((interrupts[0] < 212) || (interrupts[0] > 222)) 2964 continue; 2965 2966 /* found a node, update both interrupts and interrupt-parent */ 2967 if ((interrupts[0] >= 212) && (interrupts[0] <= 215)) 2968 interrupts[0] -= 203; 2969 if ((interrupts[0] >= 216) && (interrupts[0] <= 220)) 2970 interrupts[0] -= 213; 2971 if (interrupts[0] == 221) 2972 interrupts[0] = 14; 2973 if (interrupts[0] == 222) 2974 interrupts[0] = 8; 2975 2976 prom_setprop(node, pci_name, "interrupts", interrupts, 2977 sizeof(interrupts)); 2978 prom_setprop(node, pci_name, "interrupt-parent", &parent, 2979 sizeof(parent)); 2980 } 2981 2982 /* 2983 * The io-bridge has device_type set to 'io-bridge' change it to 'isa' 2984 * so that generic isa-bridge code can add the SB600 and its on-board 2985 * peripherals. 2986 */ 2987 name = "/pxp@0,e0000000/io-bridge@0"; 2988 iob = call_prom("finddevice", 1, 1, ADDR(name)); 2989 if (!PHANDLE_VALID(iob)) 2990 return; 2991 2992 /* device_type is already set, just change it. */ 2993 2994 prom_printf("Changing device_type of SB600 node...\n"); 2995 2996 prom_setprop(iob, name, "device_type", "isa", sizeof("isa")); 2997 } 2998 #else /* !CONFIG_PPC_PASEMI_NEMO */ 2999 static inline void fixup_device_tree_pasemi(void) { } 3000 #endif 3001 3002 static void __init fixup_device_tree(void) 3003 { 3004 fixup_device_tree_maple(); 3005 fixup_device_tree_maple_memory_controller(); 3006 fixup_device_tree_chrp(); 3007 fixup_device_tree_pmac(); 3008 fixup_device_tree_efika(); 3009 fixup_device_tree_pasemi(); 3010 } 3011 3012 static void __init prom_find_boot_cpu(void) 3013 { 3014 __be32 rval; 3015 ihandle prom_cpu; 3016 phandle cpu_pkg; 3017 3018 rval = 0; 3019 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0) 3020 return; 3021 prom_cpu = be32_to_cpu(rval); 3022 3023 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu); 3024 3025 if (!PHANDLE_VALID(cpu_pkg)) 3026 return; 3027 3028 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval)); 3029 prom.cpu = be32_to_cpu(rval); 3030 3031 prom_debug("Booting CPU hw index = %d\n", prom.cpu); 3032 } 3033 3034 static void __init prom_check_initrd(unsigned long r3, unsigned long r4) 3035 { 3036 #ifdef CONFIG_BLK_DEV_INITRD 3037 if (r3 && r4 && r4 != 0xdeadbeef) { 3038 __be64 val; 3039 3040 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3; 3041 prom_initrd_end = prom_initrd_start + r4; 3042 3043 val = cpu_to_be64(prom_initrd_start); 3044 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start", 3045 &val, sizeof(val)); 3046 val = cpu_to_be64(prom_initrd_end); 3047 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end", 3048 &val, sizeof(val)); 3049 3050 reserve_mem(prom_initrd_start, 3051 prom_initrd_end - prom_initrd_start); 3052 3053 prom_debug("initrd_start=0x%lx\n", prom_initrd_start); 3054 prom_debug("initrd_end=0x%lx\n", prom_initrd_end); 3055 } 3056 #endif /* CONFIG_BLK_DEV_INITRD */ 3057 } 3058 3059 #ifdef CONFIG_PPC64 3060 #ifdef CONFIG_RELOCATABLE 3061 static void reloc_toc(void) 3062 { 3063 } 3064 3065 static void unreloc_toc(void) 3066 { 3067 } 3068 #else 3069 static void __reloc_toc(unsigned long offset, unsigned long nr_entries) 3070 { 3071 unsigned long i; 3072 unsigned long *toc_entry; 3073 3074 /* Get the start of the TOC by using r2 directly. */ 3075 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry)); 3076 3077 for (i = 0; i < nr_entries; i++) { 3078 *toc_entry = *toc_entry + offset; 3079 toc_entry++; 3080 } 3081 } 3082 3083 static void reloc_toc(void) 3084 { 3085 unsigned long offset = reloc_offset(); 3086 unsigned long nr_entries = 3087 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); 3088 3089 __reloc_toc(offset, nr_entries); 3090 3091 mb(); 3092 } 3093 3094 static void unreloc_toc(void) 3095 { 3096 unsigned long offset = reloc_offset(); 3097 unsigned long nr_entries = 3098 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); 3099 3100 mb(); 3101 3102 __reloc_toc(-offset, nr_entries); 3103 } 3104 #endif 3105 #endif 3106 3107 /* 3108 * We enter here early on, when the Open Firmware prom is still 3109 * handling exceptions and the MMU hash table for us. 3110 */ 3111 3112 unsigned long __init prom_init(unsigned long r3, unsigned long r4, 3113 unsigned long pp, 3114 unsigned long r6, unsigned long r7, 3115 unsigned long kbase) 3116 { 3117 unsigned long hdr; 3118 3119 #ifdef CONFIG_PPC32 3120 unsigned long offset = reloc_offset(); 3121 reloc_got2(offset); 3122 #else 3123 reloc_toc(); 3124 #endif 3125 3126 /* 3127 * First zero the BSS 3128 */ 3129 memset(&__bss_start, 0, __bss_stop - __bss_start); 3130 3131 /* 3132 * Init interface to Open Firmware, get some node references, 3133 * like /chosen 3134 */ 3135 prom_init_client_services(pp); 3136 3137 /* 3138 * See if this OF is old enough that we need to do explicit maps 3139 * and other workarounds 3140 */ 3141 prom_find_mmu(); 3142 3143 /* 3144 * Init prom stdout device 3145 */ 3146 prom_init_stdout(); 3147 3148 prom_printf("Preparing to boot %s", linux_banner); 3149 3150 /* 3151 * Get default machine type. At this point, we do not differentiate 3152 * between pSeries SMP and pSeries LPAR 3153 */ 3154 of_platform = prom_find_machine_type(); 3155 prom_printf("Detected machine type: %x\n", of_platform); 3156 3157 #ifndef CONFIG_NONSTATIC_KERNEL 3158 /* Bail if this is a kdump kernel. */ 3159 if (PHYSICAL_START > 0) 3160 prom_panic("Error: You can't boot a kdump kernel from OF!\n"); 3161 #endif 3162 3163 /* 3164 * Check for an initrd 3165 */ 3166 prom_check_initrd(r3, r4); 3167 3168 /* 3169 * Do early parsing of command line 3170 */ 3171 early_cmdline_parse(); 3172 3173 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) 3174 /* 3175 * On pSeries, inform the firmware about our capabilities 3176 */ 3177 if (of_platform == PLATFORM_PSERIES || 3178 of_platform == PLATFORM_PSERIES_LPAR) 3179 prom_send_capabilities(); 3180 #endif 3181 3182 /* 3183 * Copy the CPU hold code 3184 */ 3185 if (of_platform != PLATFORM_POWERMAC) 3186 copy_and_flush(0, kbase, 0x100, 0); 3187 3188 /* 3189 * Initialize memory management within prom_init 3190 */ 3191 prom_init_mem(); 3192 3193 /* 3194 * Determine which cpu is actually running right _now_ 3195 */ 3196 prom_find_boot_cpu(); 3197 3198 /* 3199 * Initialize display devices 3200 */ 3201 prom_check_displays(); 3202 3203 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__) 3204 /* 3205 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else 3206 * that uses the allocator, we need to make sure we get the top of memory 3207 * available for us here... 3208 */ 3209 if (of_platform == PLATFORM_PSERIES) 3210 prom_initialize_tce_table(); 3211 #endif 3212 3213 /* 3214 * On non-powermacs, try to instantiate RTAS. PowerMacs don't 3215 * have a usable RTAS implementation. 3216 */ 3217 if (of_platform != PLATFORM_POWERMAC && 3218 of_platform != PLATFORM_OPAL) 3219 prom_instantiate_rtas(); 3220 3221 #ifdef CONFIG_PPC_POWERNV 3222 if (of_platform == PLATFORM_OPAL) 3223 prom_instantiate_opal(); 3224 #endif /* CONFIG_PPC_POWERNV */ 3225 3226 #ifdef CONFIG_PPC64 3227 /* instantiate sml */ 3228 prom_instantiate_sml(); 3229 #endif 3230 3231 /* 3232 * On non-powermacs, put all CPUs in spin-loops. 3233 * 3234 * PowerMacs use a different mechanism to spin CPUs 3235 * 3236 * (This must be done after instanciating RTAS) 3237 */ 3238 if (of_platform != PLATFORM_POWERMAC && 3239 of_platform != PLATFORM_OPAL) 3240 prom_hold_cpus(); 3241 3242 /* 3243 * Fill in some infos for use by the kernel later on 3244 */ 3245 if (prom_memory_limit) { 3246 __be64 val = cpu_to_be64(prom_memory_limit); 3247 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit", 3248 &val, sizeof(val)); 3249 } 3250 #ifdef CONFIG_PPC64 3251 if (prom_iommu_off) 3252 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off", 3253 NULL, 0); 3254 3255 if (prom_iommu_force_on) 3256 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on", 3257 NULL, 0); 3258 3259 if (prom_tce_alloc_start) { 3260 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start", 3261 &prom_tce_alloc_start, 3262 sizeof(prom_tce_alloc_start)); 3263 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end", 3264 &prom_tce_alloc_end, 3265 sizeof(prom_tce_alloc_end)); 3266 } 3267 #endif 3268 3269 /* 3270 * Fixup any known bugs in the device-tree 3271 */ 3272 fixup_device_tree(); 3273 3274 /* 3275 * Now finally create the flattened device-tree 3276 */ 3277 prom_printf("copying OF device tree...\n"); 3278 flatten_device_tree(); 3279 3280 /* 3281 * in case stdin is USB and still active on IBM machines... 3282 * Unfortunately quiesce crashes on some powermacs if we have 3283 * closed stdin already (in particular the powerbook 101). It 3284 * appears that the OPAL version of OFW doesn't like it either. 3285 */ 3286 if (of_platform != PLATFORM_POWERMAC && 3287 of_platform != PLATFORM_OPAL) 3288 prom_close_stdin(); 3289 3290 /* 3291 * Call OF "quiesce" method to shut down pending DMA's from 3292 * devices etc... 3293 */ 3294 prom_printf("Quiescing Open Firmware ...\n"); 3295 call_prom("quiesce", 0, 0); 3296 3297 /* 3298 * And finally, call the kernel passing it the flattened device 3299 * tree and NULL as r5, thus triggering the new entry point which 3300 * is common to us and kexec 3301 */ 3302 hdr = dt_header_start; 3303 3304 /* Don't print anything after quiesce under OPAL, it crashes OFW */ 3305 if (of_platform != PLATFORM_OPAL) { 3306 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase); 3307 prom_debug("->dt_header_start=0x%lx\n", hdr); 3308 } 3309 3310 #ifdef CONFIG_PPC32 3311 reloc_got2(-offset); 3312 #else 3313 unreloc_toc(); 3314 #endif 3315 3316 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL 3317 /* OPAL early debug gets the OPAL base & entry in r8 and r9 */ 3318 __start(hdr, kbase, 0, 0, 0, 3319 prom_opal_base, prom_opal_entry); 3320 #else 3321 __start(hdr, kbase, 0, 0, 0, 0, 0); 3322 #endif 3323 3324 return 0; 3325 } 3326