1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Procedures for interfacing to Open Firmware. 4 * 5 * Paul Mackerras August 1996. 6 * Copyright (C) 1996-2005 Paul Mackerras. 7 * 8 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. 9 * {engebret|bergner}@us.ibm.com 10 */ 11 12 #undef DEBUG_PROM 13 14 /* we cannot use FORTIFY as it brings in new symbols */ 15 #define __NO_FORTIFY 16 17 #include <stdarg.h> 18 #include <linux/kernel.h> 19 #include <linux/string.h> 20 #include <linux/init.h> 21 #include <linux/threads.h> 22 #include <linux/spinlock.h> 23 #include <linux/types.h> 24 #include <linux/pci.h> 25 #include <linux/proc_fs.h> 26 #include <linux/delay.h> 27 #include <linux/initrd.h> 28 #include <linux/bitops.h> 29 #include <asm/prom.h> 30 #include <asm/rtas.h> 31 #include <asm/page.h> 32 #include <asm/processor.h> 33 #include <asm/irq.h> 34 #include <asm/io.h> 35 #include <asm/smp.h> 36 #include <asm/mmu.h> 37 #include <asm/pgtable.h> 38 #include <asm/iommu.h> 39 #include <asm/btext.h> 40 #include <asm/sections.h> 41 #include <asm/machdep.h> 42 #include <asm/asm-prototypes.h> 43 44 #include <linux/linux_logo.h> 45 46 /* All of prom_init bss lives here */ 47 #define __prombss __section(.bss.prominit) 48 49 /* 50 * Eventually bump that one up 51 */ 52 #define DEVTREE_CHUNK_SIZE 0x100000 53 54 /* 55 * This is the size of the local memory reserve map that gets copied 56 * into the boot params passed to the kernel. That size is totally 57 * flexible as the kernel just reads the list until it encounters an 58 * entry with size 0, so it can be changed without breaking binary 59 * compatibility 60 */ 61 #define MEM_RESERVE_MAP_SIZE 8 62 63 /* 64 * prom_init() is called very early on, before the kernel text 65 * and data have been mapped to KERNELBASE. At this point the code 66 * is running at whatever address it has been loaded at. 67 * On ppc32 we compile with -mrelocatable, which means that references 68 * to extern and static variables get relocated automatically. 69 * ppc64 objects are always relocatable, we just need to relocate the 70 * TOC. 71 * 72 * Because OF may have mapped I/O devices into the area starting at 73 * KERNELBASE, particularly on CHRP machines, we can't safely call 74 * OF once the kernel has been mapped to KERNELBASE. Therefore all 75 * OF calls must be done within prom_init(). 76 * 77 * ADDR is used in calls to call_prom. The 4th and following 78 * arguments to call_prom should be 32-bit values. 79 * On ppc64, 64 bit values are truncated to 32 bits (and 80 * fortunately don't get interpreted as two arguments). 81 */ 82 #define ADDR(x) (u32)(unsigned long)(x) 83 84 #ifdef CONFIG_PPC64 85 #define OF_WORKAROUNDS 0 86 #else 87 #define OF_WORKAROUNDS of_workarounds 88 static int of_workarounds __prombss; 89 #endif 90 91 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */ 92 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */ 93 94 #define PROM_BUG() do { \ 95 prom_printf("kernel BUG at %s line 0x%x!\n", \ 96 __FILE__, __LINE__); \ 97 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \ 98 } while (0) 99 100 #ifdef DEBUG_PROM 101 #define prom_debug(x...) prom_printf(x) 102 #else 103 #define prom_debug(x...) 
do { } while (0) 104 #endif 105 106 107 typedef u32 prom_arg_t; 108 109 struct prom_args { 110 __be32 service; 111 __be32 nargs; 112 __be32 nret; 113 __be32 args[10]; 114 }; 115 116 struct prom_t { 117 ihandle root; 118 phandle chosen; 119 int cpu; 120 ihandle stdout; 121 ihandle mmumap; 122 ihandle memory; 123 }; 124 125 struct mem_map_entry { 126 __be64 base; 127 __be64 size; 128 }; 129 130 typedef __be32 cell_t; 131 132 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5, 133 unsigned long r6, unsigned long r7, unsigned long r8, 134 unsigned long r9); 135 136 #ifdef CONFIG_PPC64 137 extern int enter_prom(struct prom_args *args, unsigned long entry); 138 #else 139 static inline int enter_prom(struct prom_args *args, unsigned long entry) 140 { 141 return ((int (*)(struct prom_args *))entry)(args); 142 } 143 #endif 144 145 extern void copy_and_flush(unsigned long dest, unsigned long src, 146 unsigned long size, unsigned long offset); 147 148 /* prom structure */ 149 static struct prom_t __prombss prom; 150 151 static unsigned long __prombss prom_entry; 152 153 static char __prombss of_stdout_device[256]; 154 static char __prombss prom_scratch[256]; 155 156 static unsigned long __prombss dt_header_start; 157 static unsigned long __prombss dt_struct_start, dt_struct_end; 158 static unsigned long __prombss dt_string_start, dt_string_end; 159 160 static unsigned long __prombss prom_initrd_start, prom_initrd_end; 161 162 #ifdef CONFIG_PPC64 163 static int __prombss prom_iommu_force_on; 164 static int __prombss prom_iommu_off; 165 static unsigned long __prombss prom_tce_alloc_start; 166 static unsigned long __prombss prom_tce_alloc_end; 167 #endif 168 169 #ifdef CONFIG_PPC_PSERIES 170 static bool __prombss prom_radix_disable; 171 static bool __prombss prom_xive_disable; 172 #endif 173 174 struct platform_support { 175 bool hash_mmu; 176 bool radix_mmu; 177 bool radix_gtse; 178 bool xive; 179 }; 180 181 /* Platforms codes are now obsolete in the kernel. Now only used within this 182 * file and ultimately gone too. Feel free to change them if you need, they 183 * are not shared with anything outside of this file anymore 184 */ 185 #define PLATFORM_PSERIES 0x0100 186 #define PLATFORM_PSERIES_LPAR 0x0101 187 #define PLATFORM_LPAR 0x0001 188 #define PLATFORM_POWERMAC 0x0400 189 #define PLATFORM_GENERIC 0x0500 190 191 static int __prombss of_platform; 192 193 static char __prombss prom_cmd_line[COMMAND_LINE_SIZE]; 194 195 static unsigned long __prombss prom_memory_limit; 196 197 static unsigned long __prombss alloc_top; 198 static unsigned long __prombss alloc_top_high; 199 static unsigned long __prombss alloc_bottom; 200 static unsigned long __prombss rmo_top; 201 static unsigned long __prombss ram_top; 202 203 static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE]; 204 static int __prombss mem_reserve_cnt; 205 206 static cell_t __prombss regbuf[1024]; 207 208 static bool __prombss rtas_has_query_cpu_stopped; 209 210 211 /* 212 * Error results ... some OF calls will return "-1" on error, some 213 * will return 0, some will return either. 
To simplify, here are 214 * macros to use with any ihandle or phandle return value to check if 215 * it is valid 216 */ 217 218 #define PROM_ERROR (-1u) 219 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR) 220 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR) 221 222 /* Copied from lib/string.c and lib/kstrtox.c */ 223 224 static int __init prom_strcmp(const char *cs, const char *ct) 225 { 226 unsigned char c1, c2; 227 228 while (1) { 229 c1 = *cs++; 230 c2 = *ct++; 231 if (c1 != c2) 232 return c1 < c2 ? -1 : 1; 233 if (!c1) 234 break; 235 } 236 return 0; 237 } 238 239 static char __init *prom_strcpy(char *dest, const char *src) 240 { 241 char *tmp = dest; 242 243 while ((*dest++ = *src++) != '\0') 244 /* nothing */; 245 return tmp; 246 } 247 248 static int __init prom_strncmp(const char *cs, const char *ct, size_t count) 249 { 250 unsigned char c1, c2; 251 252 while (count) { 253 c1 = *cs++; 254 c2 = *ct++; 255 if (c1 != c2) 256 return c1 < c2 ? -1 : 1; 257 if (!c1) 258 break; 259 count--; 260 } 261 return 0; 262 } 263 264 static size_t __init prom_strlen(const char *s) 265 { 266 const char *sc; 267 268 for (sc = s; *sc != '\0'; ++sc) 269 /* nothing */; 270 return sc - s; 271 } 272 273 static int __init prom_memcmp(const void *cs, const void *ct, size_t count) 274 { 275 const unsigned char *su1, *su2; 276 int res = 0; 277 278 for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) 279 if ((res = *su1 - *su2) != 0) 280 break; 281 return res; 282 } 283 284 static char __init *prom_strstr(const char *s1, const char *s2) 285 { 286 size_t l1, l2; 287 288 l2 = prom_strlen(s2); 289 if (!l2) 290 return (char *)s1; 291 l1 = prom_strlen(s1); 292 while (l1 >= l2) { 293 l1--; 294 if (!prom_memcmp(s1, s2, l2)) 295 return (char *)s1; 296 s1++; 297 } 298 return NULL; 299 } 300 301 static size_t __init prom_strlcpy(char *dest, const char *src, size_t size) 302 { 303 size_t ret = prom_strlen(src); 304 305 if (size) { 306 size_t len = (ret >= size) ? size - 1 : ret; 307 memcpy(dest, src, len); 308 dest[len] = '\0'; 309 } 310 return ret; 311 } 312 313 #ifdef CONFIG_PPC_PSERIES 314 static int __init prom_strtobool(const char *s, bool *res) 315 { 316 if (!s) 317 return -EINVAL; 318 319 switch (s[0]) { 320 case 'y': 321 case 'Y': 322 case '1': 323 *res = true; 324 return 0; 325 case 'n': 326 case 'N': 327 case '0': 328 *res = false; 329 return 0; 330 case 'o': 331 case 'O': 332 switch (s[1]) { 333 case 'n': 334 case 'N': 335 *res = true; 336 return 0; 337 case 'f': 338 case 'F': 339 *res = false; 340 return 0; 341 default: 342 break; 343 } 344 default: 345 break; 346 } 347 348 return -EINVAL; 349 } 350 #endif 351 352 /* This is the one and *ONLY* place where we actually call open 353 * firmware. 354 */ 355 356 static int __init call_prom(const char *service, int nargs, int nret, ...) 357 { 358 int i; 359 struct prom_args args; 360 va_list list; 361 362 args.service = cpu_to_be32(ADDR(service)); 363 args.nargs = cpu_to_be32(nargs); 364 args.nret = cpu_to_be32(nret); 365 366 va_start(list, nret); 367 for (i = 0; i < nargs; i++) 368 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t)); 369 va_end(list); 370 371 for (i = 0; i < nret; i++) 372 args.args[nargs+i] = 0; 373 374 if (enter_prom(&args, prom_entry) < 0) 375 return PROM_ERROR; 376 377 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0; 378 } 379 380 static int __init call_prom_ret(const char *service, int nargs, int nret, 381 prom_arg_t *rets, ...) 
382 { 383 int i; 384 struct prom_args args; 385 va_list list; 386 387 args.service = cpu_to_be32(ADDR(service)); 388 args.nargs = cpu_to_be32(nargs); 389 args.nret = cpu_to_be32(nret); 390 391 va_start(list, rets); 392 for (i = 0; i < nargs; i++) 393 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t)); 394 va_end(list); 395 396 for (i = 0; i < nret; i++) 397 args.args[nargs+i] = 0; 398 399 if (enter_prom(&args, prom_entry) < 0) 400 return PROM_ERROR; 401 402 if (rets != NULL) 403 for (i = 1; i < nret; ++i) 404 rets[i-1] = be32_to_cpu(args.args[nargs+i]); 405 406 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0; 407 } 408 409 410 static void __init prom_print(const char *msg) 411 { 412 const char *p, *q; 413 414 if (prom.stdout == 0) 415 return; 416 417 for (p = msg; *p != 0; p = q) { 418 for (q = p; *q != 0 && *q != '\n'; ++q) 419 ; 420 if (q > p) 421 call_prom("write", 3, 1, prom.stdout, p, q - p); 422 if (*q == 0) 423 break; 424 ++q; 425 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2); 426 } 427 } 428 429 430 /* 431 * Both prom_print_hex & prom_print_dec takes an unsigned long as input so that 432 * we do not need __udivdi3 or __umoddi3 on 32bits. 433 */ 434 static void __init prom_print_hex(unsigned long val) 435 { 436 int i, nibbles = sizeof(val)*2; 437 char buf[sizeof(val)*2+1]; 438 439 for (i = nibbles-1; i >= 0; i--) { 440 buf[i] = (val & 0xf) + '0'; 441 if (buf[i] > '9') 442 buf[i] += ('a'-'0'-10); 443 val >>= 4; 444 } 445 buf[nibbles] = '\0'; 446 call_prom("write", 3, 1, prom.stdout, buf, nibbles); 447 } 448 449 /* max number of decimal digits in an unsigned long */ 450 #define UL_DIGITS 21 451 static void __init prom_print_dec(unsigned long val) 452 { 453 int i, size; 454 char buf[UL_DIGITS+1]; 455 456 for (i = UL_DIGITS-1; i >= 0; i--) { 457 buf[i] = (val % 10) + '0'; 458 val = val/10; 459 if (val == 0) 460 break; 461 } 462 /* shift stuff down */ 463 size = UL_DIGITS - i; 464 call_prom("write", 3, 1, prom.stdout, buf+i, size); 465 } 466 467 __printf(1, 2) 468 static void __init prom_printf(const char *format, ...) 
469 { 470 const char *p, *q, *s; 471 va_list args; 472 unsigned long v; 473 long vs; 474 int n = 0; 475 476 va_start(args, format); 477 for (p = format; *p != 0; p = q) { 478 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q) 479 ; 480 if (q > p) 481 call_prom("write", 3, 1, prom.stdout, p, q - p); 482 if (*q == 0) 483 break; 484 if (*q == '\n') { 485 ++q; 486 call_prom("write", 3, 1, prom.stdout, 487 ADDR("\r\n"), 2); 488 continue; 489 } 490 ++q; 491 if (*q == 0) 492 break; 493 while (*q == 'l') { 494 ++q; 495 ++n; 496 } 497 switch (*q) { 498 case 's': 499 ++q; 500 s = va_arg(args, const char *); 501 prom_print(s); 502 break; 503 case 'x': 504 ++q; 505 switch (n) { 506 case 0: 507 v = va_arg(args, unsigned int); 508 break; 509 case 1: 510 v = va_arg(args, unsigned long); 511 break; 512 case 2: 513 default: 514 v = va_arg(args, unsigned long long); 515 break; 516 } 517 prom_print_hex(v); 518 break; 519 case 'u': 520 ++q; 521 switch (n) { 522 case 0: 523 v = va_arg(args, unsigned int); 524 break; 525 case 1: 526 v = va_arg(args, unsigned long); 527 break; 528 case 2: 529 default: 530 v = va_arg(args, unsigned long long); 531 break; 532 } 533 prom_print_dec(v); 534 break; 535 case 'd': 536 ++q; 537 switch (n) { 538 case 0: 539 vs = va_arg(args, int); 540 break; 541 case 1: 542 vs = va_arg(args, long); 543 break; 544 case 2: 545 default: 546 vs = va_arg(args, long long); 547 break; 548 } 549 if (vs < 0) { 550 prom_print("-"); 551 vs = -vs; 552 } 553 prom_print_dec(vs); 554 break; 555 } 556 } 557 va_end(args); 558 } 559 560 561 static unsigned int __init prom_claim(unsigned long virt, unsigned long size, 562 unsigned long align) 563 { 564 565 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) { 566 /* 567 * Old OF requires we claim physical and virtual separately 568 * and then map explicitly (assuming virtual mode) 569 */ 570 int ret; 571 prom_arg_t result; 572 573 ret = call_prom_ret("call-method", 5, 2, &result, 574 ADDR("claim"), prom.memory, 575 align, size, virt); 576 if (ret != 0 || result == -1) 577 return -1; 578 ret = call_prom_ret("call-method", 5, 2, &result, 579 ADDR("claim"), prom.mmumap, 580 align, size, virt); 581 if (ret != 0) { 582 call_prom("call-method", 4, 1, ADDR("release"), 583 prom.memory, size, virt); 584 return -1; 585 } 586 /* the 0x12 is M (coherence) + PP == read/write */ 587 call_prom("call-method", 6, 1, 588 ADDR("map"), prom.mmumap, 0x12, size, virt, virt); 589 return virt; 590 } 591 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size, 592 (prom_arg_t)align); 593 } 594 595 static void __init __attribute__((noreturn)) prom_panic(const char *reason) 596 { 597 prom_print(reason); 598 /* Do not call exit because it clears the screen on pmac 599 * it also causes some sort of double-fault on early pmacs */ 600 if (of_platform == PLATFORM_POWERMAC) 601 asm("trap\n"); 602 603 /* ToDo: should put up an SRC here on pSeries */ 604 call_prom("exit", 0, 0); 605 606 for (;;) /* should never get here */ 607 ; 608 } 609 610 611 static int __init prom_next_node(phandle *nodep) 612 { 613 phandle node; 614 615 if ((node = *nodep) != 0 616 && (*nodep = call_prom("child", 1, 1, node)) != 0) 617 return 1; 618 if ((*nodep = call_prom("peer", 1, 1, node)) != 0) 619 return 1; 620 for (;;) { 621 if ((node = call_prom("parent", 1, 1, node)) == 0) 622 return 0; 623 if ((*nodep = call_prom("peer", 1, 1, node)) != 0) 624 return 1; 625 } 626 } 627 628 static inline int __init prom_getprop(phandle node, const char *pname, 629 void *value, size_t valuelen) 630 { 631 return 
call_prom("getprop", 4, 1, node, ADDR(pname), 632 (u32)(unsigned long) value, (u32) valuelen); 633 } 634 635 static inline int __init prom_getproplen(phandle node, const char *pname) 636 { 637 return call_prom("getproplen", 2, 1, node, ADDR(pname)); 638 } 639 640 static void add_string(char **str, const char *q) 641 { 642 char *p = *str; 643 644 while (*q) 645 *p++ = *q++; 646 *p++ = ' '; 647 *str = p; 648 } 649 650 static char *tohex(unsigned int x) 651 { 652 static const char digits[] __initconst = "0123456789abcdef"; 653 static char result[9] __prombss; 654 int i; 655 656 result[8] = 0; 657 i = 8; 658 do { 659 --i; 660 result[i] = digits[x & 0xf]; 661 x >>= 4; 662 } while (x != 0 && i > 0); 663 return &result[i]; 664 } 665 666 static int __init prom_setprop(phandle node, const char *nodename, 667 const char *pname, void *value, size_t valuelen) 668 { 669 char cmd[256], *p; 670 671 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL)) 672 return call_prom("setprop", 4, 1, node, ADDR(pname), 673 (u32)(unsigned long) value, (u32) valuelen); 674 675 /* gah... setprop doesn't work on longtrail, have to use interpret */ 676 p = cmd; 677 add_string(&p, "dev"); 678 add_string(&p, nodename); 679 add_string(&p, tohex((u32)(unsigned long) value)); 680 add_string(&p, tohex(valuelen)); 681 add_string(&p, tohex(ADDR(pname))); 682 add_string(&p, tohex(prom_strlen(pname))); 683 add_string(&p, "property"); 684 *p = 0; 685 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd); 686 } 687 688 /* We can't use the standard versions because of relocation headaches. */ 689 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \ 690 || ('a' <= (c) && (c) <= 'f') \ 691 || ('A' <= (c) && (c) <= 'F')) 692 693 #define isdigit(c) ('0' <= (c) && (c) <= '9') 694 #define islower(c) ('a' <= (c) && (c) <= 'z') 695 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c)) 696 697 static unsigned long prom_strtoul(const char *cp, const char **endp) 698 { 699 unsigned long result = 0, base = 10, value; 700 701 if (*cp == '0') { 702 base = 8; 703 cp++; 704 if (toupper(*cp) == 'X') { 705 cp++; 706 base = 16; 707 } 708 } 709 710 while (isxdigit(*cp) && 711 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) { 712 result = result * base + value; 713 cp++; 714 } 715 716 if (endp) 717 *endp = cp; 718 719 return result; 720 } 721 722 static unsigned long prom_memparse(const char *ptr, const char **retptr) 723 { 724 unsigned long ret = prom_strtoul(ptr, retptr); 725 int shift = 0; 726 727 /* 728 * We can't use a switch here because GCC *may* generate a 729 * jump table which won't work, because we're not running at 730 * the address we're linked at. 
731 */ 732 if ('G' == **retptr || 'g' == **retptr) 733 shift = 30; 734 735 if ('M' == **retptr || 'm' == **retptr) 736 shift = 20; 737 738 if ('K' == **retptr || 'k' == **retptr) 739 shift = 10; 740 741 if (shift) { 742 ret <<= shift; 743 (*retptr)++; 744 } 745 746 return ret; 747 } 748 749 /* 750 * Early parsing of the command line passed to the kernel, used for 751 * "mem=x" and the options that affect the iommu 752 */ 753 static void __init early_cmdline_parse(void) 754 { 755 const char *opt; 756 757 char *p; 758 int l = 0; 759 760 prom_cmd_line[0] = 0; 761 p = prom_cmd_line; 762 if ((long)prom.chosen > 0) 763 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1); 764 if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && (l <= 0 || p[0] == '\0')) /* dbl check */ 765 prom_strlcpy(prom_cmd_line, CONFIG_CMDLINE, sizeof(prom_cmd_line)); 766 prom_printf("command line: %s\n", prom_cmd_line); 767 768 #ifdef CONFIG_PPC64 769 opt = prom_strstr(prom_cmd_line, "iommu="); 770 if (opt) { 771 prom_printf("iommu opt is: %s\n", opt); 772 opt += 6; 773 while (*opt && *opt == ' ') 774 opt++; 775 if (!prom_strncmp(opt, "off", 3)) 776 prom_iommu_off = 1; 777 else if (!prom_strncmp(opt, "force", 5)) 778 prom_iommu_force_on = 1; 779 } 780 #endif 781 opt = prom_strstr(prom_cmd_line, "mem="); 782 if (opt) { 783 opt += 4; 784 prom_memory_limit = prom_memparse(opt, (const char **)&opt); 785 #ifdef CONFIG_PPC64 786 /* Align to 16 MB == size of ppc64 large page */ 787 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000); 788 #endif 789 } 790 791 #ifdef CONFIG_PPC_PSERIES 792 prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT); 793 opt = prom_strstr(prom_cmd_line, "disable_radix"); 794 if (opt) { 795 opt += 13; 796 if (*opt && *opt == '=') { 797 bool val; 798 799 if (prom_strtobool(++opt, &val)) 800 prom_radix_disable = false; 801 else 802 prom_radix_disable = val; 803 } else 804 prom_radix_disable = true; 805 } 806 if (prom_radix_disable) 807 prom_debug("Radix disabled from cmdline\n"); 808 809 opt = prom_strstr(prom_cmd_line, "xive=off"); 810 if (opt) { 811 prom_xive_disable = true; 812 prom_debug("XIVE disabled from cmdline\n"); 813 } 814 #endif /* CONFIG_PPC_PSERIES */ 815 } 816 817 #ifdef CONFIG_PPC_PSERIES 818 /* 819 * The architecture vector has an array of PVR mask/value pairs, 820 * followed by # option vectors - 1, followed by the option vectors. 821 * 822 * See prom.h for the definition of the bits specified in the 823 * architecture vector. 824 */ 825 826 /* Firmware expects the value to be n - 1, where n is the # of vectors */ 827 #define NUM_VECTORS(n) ((n) - 1) 828 829 /* 830 * Firmware expects 1 + n - 2, where n is the length of the option vector in 831 * bytes. The 1 accounts for the length byte itself, the - 2 .. ? 
832 */ 833 #define VECTOR_LENGTH(n) (1 + (n) - 2) 834 835 struct option_vector1 { 836 u8 byte1; 837 u8 arch_versions; 838 u8 arch_versions3; 839 } __packed; 840 841 struct option_vector2 { 842 u8 byte1; 843 __be16 reserved; 844 __be32 real_base; 845 __be32 real_size; 846 __be32 virt_base; 847 __be32 virt_size; 848 __be32 load_base; 849 __be32 min_rma; 850 __be32 min_load; 851 u8 min_rma_percent; 852 u8 max_pft_size; 853 } __packed; 854 855 struct option_vector3 { 856 u8 byte1; 857 u8 byte2; 858 } __packed; 859 860 struct option_vector4 { 861 u8 byte1; 862 u8 min_vp_cap; 863 } __packed; 864 865 struct option_vector5 { 866 u8 byte1; 867 u8 byte2; 868 u8 byte3; 869 u8 cmo; 870 u8 associativity; 871 u8 bin_opts; 872 u8 micro_checkpoint; 873 u8 reserved0; 874 __be32 max_cpus; 875 __be16 papr_level; 876 __be16 reserved1; 877 u8 platform_facilities; 878 u8 reserved2; 879 __be16 reserved3; 880 u8 subprocessors; 881 u8 byte22; 882 u8 intarch; 883 u8 mmu; 884 u8 hash_ext; 885 u8 radix_ext; 886 } __packed; 887 888 struct option_vector6 { 889 u8 reserved; 890 u8 secondary_pteg; 891 u8 os_name; 892 } __packed; 893 894 struct ibm_arch_vec { 895 struct { u32 mask, val; } pvrs[12]; 896 897 u8 num_vectors; 898 899 u8 vec1_len; 900 struct option_vector1 vec1; 901 902 u8 vec2_len; 903 struct option_vector2 vec2; 904 905 u8 vec3_len; 906 struct option_vector3 vec3; 907 908 u8 vec4_len; 909 struct option_vector4 vec4; 910 911 u8 vec5_len; 912 struct option_vector5 vec5; 913 914 u8 vec6_len; 915 struct option_vector6 vec6; 916 } __packed; 917 918 static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = { 919 .pvrs = { 920 { 921 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */ 922 .val = cpu_to_be32(0x003a0000), 923 }, 924 { 925 .mask = cpu_to_be32(0xffff0000), /* POWER6 */ 926 .val = cpu_to_be32(0x003e0000), 927 }, 928 { 929 .mask = cpu_to_be32(0xffff0000), /* POWER7 */ 930 .val = cpu_to_be32(0x003f0000), 931 }, 932 { 933 .mask = cpu_to_be32(0xffff0000), /* POWER8E */ 934 .val = cpu_to_be32(0x004b0000), 935 }, 936 { 937 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */ 938 .val = cpu_to_be32(0x004c0000), 939 }, 940 { 941 .mask = cpu_to_be32(0xffff0000), /* POWER8 */ 942 .val = cpu_to_be32(0x004d0000), 943 }, 944 { 945 .mask = cpu_to_be32(0xffff0000), /* POWER9 */ 946 .val = cpu_to_be32(0x004e0000), 947 }, 948 { 949 .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */ 950 .val = cpu_to_be32(0x0f000005), 951 }, 952 { 953 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */ 954 .val = cpu_to_be32(0x0f000004), 955 }, 956 { 957 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */ 958 .val = cpu_to_be32(0x0f000003), 959 }, 960 { 961 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */ 962 .val = cpu_to_be32(0x0f000002), 963 }, 964 { 965 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */ 966 .val = cpu_to_be32(0x0f000001), 967 }, 968 }, 969 970 .num_vectors = NUM_VECTORS(6), 971 972 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)), 973 .vec1 = { 974 .byte1 = 0, 975 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 | 976 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07, 977 .arch_versions3 = OV1_PPC_3_00, 978 }, 979 980 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)), 981 /* option vector 2: Open Firmware options supported */ 982 .vec2 = { 983 .byte1 = OV2_REAL_MODE, 984 .reserved = 0, 985 .real_base = cpu_to_be32(0xffffffff), 986 .real_size = cpu_to_be32(0xffffffff), 987 .virt_base = 
cpu_to_be32(0xffffffff), 988 .virt_size = cpu_to_be32(0xffffffff), 989 .load_base = cpu_to_be32(0xffffffff), 990 .min_rma = cpu_to_be32(512), /* 512MB min RMA */ 991 .min_load = cpu_to_be32(0xffffffff), /* full client load */ 992 .min_rma_percent = 0, /* min RMA percentage of total RAM */ 993 .max_pft_size = 48, /* max log_2(hash table size) */ 994 }, 995 996 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)), 997 /* option vector 3: processor options supported */ 998 .vec3 = { 999 .byte1 = 0, /* don't ignore, don't halt */ 1000 .byte2 = OV3_FP | OV3_VMX | OV3_DFP, 1001 }, 1002 1003 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)), 1004 /* option vector 4: IBM PAPR implementation */ 1005 .vec4 = { 1006 .byte1 = 0, /* don't halt */ 1007 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */ 1008 }, 1009 1010 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)), 1011 /* option vector 5: PAPR/OF options */ 1012 .vec5 = { 1013 .byte1 = 0, /* don't ignore, don't halt */ 1014 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) | 1015 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) | 1016 #ifdef CONFIG_PCI_MSI 1017 /* PCIe/MSI support. Without MSI full PCIe is not supported */ 1018 OV5_FEAT(OV5_MSI), 1019 #else 1020 0, 1021 #endif 1022 .byte3 = 0, 1023 .cmo = 1024 #ifdef CONFIG_PPC_SMLPAR 1025 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO), 1026 #else 1027 0, 1028 #endif 1029 .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN), 1030 .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT), 1031 .micro_checkpoint = 0, 1032 .reserved0 = 0, 1033 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */ 1034 .papr_level = 0, 1035 .reserved1 = 0, 1036 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842), 1037 .reserved2 = 0, 1038 .reserved3 = 0, 1039 .subprocessors = 1, 1040 .byte22 = OV5_FEAT(OV5_DRMEM_V2), 1041 .intarch = 0, 1042 .mmu = 0, 1043 .hash_ext = 0, 1044 .radix_ext = 0, 1045 }, 1046 1047 /* option vector 6: IBM PAPR hints */ 1048 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)), 1049 .vec6 = { 1050 .reserved = 0, 1051 .secondary_pteg = 0, 1052 .os_name = OV6_LINUX, 1053 }, 1054 }; 1055 1056 static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned; 1057 1058 /* Old method - ELF header with PT_NOTE sections only works on BE */ 1059 #ifdef __BIG_ENDIAN__ 1060 static const struct fake_elf { 1061 Elf32_Ehdr elfhdr; 1062 Elf32_Phdr phdr[2]; 1063 struct chrpnote { 1064 u32 namesz; 1065 u32 descsz; 1066 u32 type; 1067 char name[8]; /* "PowerPC" */ 1068 struct chrpdesc { 1069 u32 real_mode; 1070 u32 real_base; 1071 u32 real_size; 1072 u32 virt_base; 1073 u32 virt_size; 1074 u32 load_base; 1075 } chrpdesc; 1076 } chrpnote; 1077 struct rpanote { 1078 u32 namesz; 1079 u32 descsz; 1080 u32 type; 1081 char name[24]; /* "IBM,RPA-Client-Config" */ 1082 struct rpadesc { 1083 u32 lpar_affinity; 1084 u32 min_rmo_size; 1085 u32 min_rmo_percent; 1086 u32 max_pft_size; 1087 u32 splpar; 1088 u32 min_load; 1089 u32 new_mem_def; 1090 u32 ignore_me; 1091 } rpadesc; 1092 } rpanote; 1093 } fake_elf __initconst = { 1094 .elfhdr = { 1095 .e_ident = { 0x7f, 'E', 'L', 'F', 1096 ELFCLASS32, ELFDATA2MSB, EV_CURRENT }, 1097 .e_type = ET_EXEC, /* yeah right */ 1098 .e_machine = EM_PPC, 1099 .e_version = EV_CURRENT, 1100 .e_phoff = offsetof(struct fake_elf, phdr), 1101 .e_phentsize = sizeof(Elf32_Phdr), 1102 .e_phnum = 2 1103 }, 1104 .phdr = { 1105 [0] = { 
1106 .p_type = PT_NOTE, 1107 .p_offset = offsetof(struct fake_elf, chrpnote), 1108 .p_filesz = sizeof(struct chrpnote) 1109 }, [1] = { 1110 .p_type = PT_NOTE, 1111 .p_offset = offsetof(struct fake_elf, rpanote), 1112 .p_filesz = sizeof(struct rpanote) 1113 } 1114 }, 1115 .chrpnote = { 1116 .namesz = sizeof("PowerPC"), 1117 .descsz = sizeof(struct chrpdesc), 1118 .type = 0x1275, 1119 .name = "PowerPC", 1120 .chrpdesc = { 1121 .real_mode = ~0U, /* ~0 means "don't care" */ 1122 .real_base = ~0U, 1123 .real_size = ~0U, 1124 .virt_base = ~0U, 1125 .virt_size = ~0U, 1126 .load_base = ~0U 1127 }, 1128 }, 1129 .rpanote = { 1130 .namesz = sizeof("IBM,RPA-Client-Config"), 1131 .descsz = sizeof(struct rpadesc), 1132 .type = 0x12759999, 1133 .name = "IBM,RPA-Client-Config", 1134 .rpadesc = { 1135 .lpar_affinity = 0, 1136 .min_rmo_size = 64, /* in megabytes */ 1137 .min_rmo_percent = 0, 1138 .max_pft_size = 48, /* 2^48 bytes max PFT size */ 1139 .splpar = 1, 1140 .min_load = ~0U, 1141 .new_mem_def = 0 1142 } 1143 } 1144 }; 1145 #endif /* __BIG_ENDIAN__ */ 1146 1147 static int __init prom_count_smt_threads(void) 1148 { 1149 phandle node; 1150 char type[64]; 1151 unsigned int plen; 1152 1153 /* Pick up th first CPU node we can find */ 1154 for (node = 0; prom_next_node(&node); ) { 1155 type[0] = 0; 1156 prom_getprop(node, "device_type", type, sizeof(type)); 1157 1158 if (prom_strcmp(type, "cpu")) 1159 continue; 1160 /* 1161 * There is an entry for each smt thread, each entry being 1162 * 4 bytes long. All cpus should have the same number of 1163 * smt threads, so return after finding the first. 1164 */ 1165 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s"); 1166 if (plen == PROM_ERROR) 1167 break; 1168 plen >>= 2; 1169 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen); 1170 1171 /* Sanity check */ 1172 if (plen < 1 || plen > 64) { 1173 prom_printf("Threads per core %lu out of bounds, assuming 1\n", 1174 (unsigned long)plen); 1175 return 1; 1176 } 1177 return plen; 1178 } 1179 prom_debug("No threads found, assuming 1 per core\n"); 1180 1181 return 1; 1182 1183 } 1184 1185 static void __init prom_parse_mmu_model(u8 val, 1186 struct platform_support *support) 1187 { 1188 switch (val) { 1189 case OV5_FEAT(OV5_MMU_DYNAMIC): 1190 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */ 1191 prom_debug("MMU - either supported\n"); 1192 support->radix_mmu = !prom_radix_disable; 1193 support->hash_mmu = true; 1194 break; 1195 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */ 1196 prom_debug("MMU - radix only\n"); 1197 if (prom_radix_disable) { 1198 /* 1199 * If we __have__ to do radix, we're better off ignoring 1200 * the command line rather than not booting. 
1201 */ 1202 prom_printf("WARNING: Ignoring cmdline option disable_radix\n"); 1203 } 1204 support->radix_mmu = true; 1205 break; 1206 case OV5_FEAT(OV5_MMU_HASH): 1207 prom_debug("MMU - hash only\n"); 1208 support->hash_mmu = true; 1209 break; 1210 default: 1211 prom_debug("Unknown mmu support option: 0x%x\n", val); 1212 break; 1213 } 1214 } 1215 1216 static void __init prom_parse_xive_model(u8 val, 1217 struct platform_support *support) 1218 { 1219 switch (val) { 1220 case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */ 1221 prom_debug("XIVE - either mode supported\n"); 1222 support->xive = !prom_xive_disable; 1223 break; 1224 case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */ 1225 prom_debug("XIVE - exploitation mode supported\n"); 1226 if (prom_xive_disable) { 1227 /* 1228 * If we __have__ to do XIVE, we're better off ignoring 1229 * the command line rather than not booting. 1230 */ 1231 prom_printf("WARNING: Ignoring cmdline option xive=off\n"); 1232 } 1233 support->xive = true; 1234 break; 1235 case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */ 1236 prom_debug("XIVE - legacy mode supported\n"); 1237 break; 1238 default: 1239 prom_debug("Unknown xive support option: 0x%x\n", val); 1240 break; 1241 } 1242 } 1243 1244 static void __init prom_parse_platform_support(u8 index, u8 val, 1245 struct platform_support *support) 1246 { 1247 switch (index) { 1248 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */ 1249 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support); 1250 break; 1251 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */ 1252 if (val & OV5_FEAT(OV5_RADIX_GTSE)) { 1253 prom_debug("Radix - GTSE supported\n"); 1254 support->radix_gtse = true; 1255 } 1256 break; 1257 case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */ 1258 prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT), 1259 support); 1260 break; 1261 } 1262 } 1263 1264 static void __init prom_check_platform_support(void) 1265 { 1266 struct platform_support supported = { 1267 .hash_mmu = false, 1268 .radix_mmu = false, 1269 .radix_gtse = false, 1270 .xive = false 1271 }; 1272 int prop_len = prom_getproplen(prom.chosen, 1273 "ibm,arch-vec-5-platform-support"); 1274 1275 /* 1276 * First copy the architecture vec template 1277 * 1278 * use memcpy() instead of *vec = *vec_template so that GCC replaces it 1279 * by __memcpy() when KASAN is active 1280 */ 1281 memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template, 1282 sizeof(ibm_architecture_vec)); 1283 1284 if (prop_len > 1) { 1285 int i; 1286 u8 vec[8]; 1287 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n", 1288 prop_len); 1289 if (prop_len > sizeof(vec)) 1290 prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n", 1291 prop_len); 1292 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", 1293 &vec, sizeof(vec)); 1294 for (i = 0; i < sizeof(vec); i += 2) { 1295 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2 1296 , vec[i] 1297 , vec[i + 1]); 1298 prom_parse_platform_support(vec[i], vec[i + 1], 1299 &supported); 1300 } 1301 } 1302 1303 if (supported.radix_mmu && supported.radix_gtse && 1304 IS_ENABLED(CONFIG_PPC_RADIX_MMU)) { 1305 /* Radix preferred - but we require GTSE for now */ 1306 prom_debug("Asking for radix with GTSE\n"); 1307 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX); 1308 ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE); 1309 } else if (supported.hash_mmu) { 1310 /* Default to hash mmu (if we can) */ 1311 prom_debug("Asking for hash\n"); 1312 
ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH); 1313 } else { 1314 /* We're probably on a legacy hypervisor */ 1315 prom_debug("Assuming legacy hash support\n"); 1316 } 1317 1318 if (supported.xive) { 1319 prom_debug("Asking for XIVE\n"); 1320 ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT); 1321 } 1322 } 1323 1324 static void __init prom_send_capabilities(void) 1325 { 1326 ihandle root; 1327 prom_arg_t ret; 1328 u32 cores; 1329 1330 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */ 1331 prom_check_platform_support(); 1332 1333 root = call_prom("open", 1, 1, ADDR("/")); 1334 if (root != 0) { 1335 /* We need to tell the FW about the number of cores we support. 1336 * 1337 * To do that, we count the number of threads on the first core 1338 * (we assume this is the same for all cores) and use it to 1339 * divide NR_CPUS. 1340 */ 1341 1342 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()); 1343 prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n", 1344 cores, NR_CPUS); 1345 1346 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores); 1347 1348 /* try calling the ibm,client-architecture-support method */ 1349 prom_printf("Calling ibm,client-architecture-support..."); 1350 if (call_prom_ret("call-method", 3, 2, &ret, 1351 ADDR("ibm,client-architecture-support"), 1352 root, 1353 ADDR(&ibm_architecture_vec)) == 0) { 1354 /* the call exists... */ 1355 if (ret) 1356 prom_printf("\nWARNING: ibm,client-architecture" 1357 "-support call FAILED!\n"); 1358 call_prom("close", 1, 0, root); 1359 prom_printf(" done\n"); 1360 return; 1361 } 1362 call_prom("close", 1, 0, root); 1363 prom_printf(" not implemented\n"); 1364 } 1365 1366 #ifdef __BIG_ENDIAN__ 1367 { 1368 ihandle elfloader; 1369 1370 /* no ibm,client-architecture-support call, try the old way */ 1371 elfloader = call_prom("open", 1, 1, 1372 ADDR("/packages/elf-loader")); 1373 if (elfloader == 0) { 1374 prom_printf("couldn't open /packages/elf-loader\n"); 1375 return; 1376 } 1377 call_prom("call-method", 3, 1, ADDR("process-elf-header"), 1378 elfloader, ADDR(&fake_elf)); 1379 call_prom("close", 1, 0, elfloader); 1380 } 1381 #endif /* __BIG_ENDIAN__ */ 1382 } 1383 #endif /* CONFIG_PPC_PSERIES */ 1384 1385 /* 1386 * Memory allocation strategy... our layout is normally: 1387 * 1388 * at 14Mb or more we have vmlinux, then a gap and initrd. In some 1389 * rare cases, initrd might end up being before the kernel though. 1390 * We assume this won't override the final kernel at 0, we have no 1391 * provision to handle that in this version, but it should hopefully 1392 * never happen. 1393 * 1394 * alloc_top is set to the top of RMO, eventually shrink down if the 1395 * TCEs overlap 1396 * 1397 * alloc_bottom is set to the top of kernel/initrd 1398 * 1399 * from there, allocations are done this way : rtas is allocated 1400 * topmost, and the device-tree is allocated from the bottom. We try 1401 * to grow the device-tree allocation as we progress. If we can't, 1402 * then we fail, we don't currently have a facility to restart 1403 * elsewhere, but that shouldn't be necessary. 1404 * 1405 * Note that calls to reserve_mem have to be done explicitly, memory 1406 * allocated with either alloc_up or alloc_down isn't automatically 1407 * reserved. 
1408 */ 1409 1410 1411 /* 1412 * Allocates memory in the RMO upward from the kernel/initrd 1413 * 1414 * When align is 0, this is a special case, it means to allocate in place 1415 * at the current location of alloc_bottom or fail (that is basically 1416 * extending the previous allocation). Used for the device-tree flattening 1417 */ 1418 static unsigned long __init alloc_up(unsigned long size, unsigned long align) 1419 { 1420 unsigned long base = alloc_bottom; 1421 unsigned long addr = 0; 1422 1423 if (align) 1424 base = _ALIGN_UP(base, align); 1425 prom_debug("%s(%lx, %lx)\n", __func__, size, align); 1426 if (ram_top == 0) 1427 prom_panic("alloc_up() called with mem not initialized\n"); 1428 1429 if (align) 1430 base = _ALIGN_UP(alloc_bottom, align); 1431 else 1432 base = alloc_bottom; 1433 1434 for(; (base + size) <= alloc_top; 1435 base = _ALIGN_UP(base + 0x100000, align)) { 1436 prom_debug(" trying: 0x%lx\n\r", base); 1437 addr = (unsigned long)prom_claim(base, size, 0); 1438 if (addr != PROM_ERROR && addr != 0) 1439 break; 1440 addr = 0; 1441 if (align == 0) 1442 break; 1443 } 1444 if (addr == 0) 1445 return 0; 1446 alloc_bottom = addr + size; 1447 1448 prom_debug(" -> %lx\n", addr); 1449 prom_debug(" alloc_bottom : %lx\n", alloc_bottom); 1450 prom_debug(" alloc_top : %lx\n", alloc_top); 1451 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); 1452 prom_debug(" rmo_top : %lx\n", rmo_top); 1453 prom_debug(" ram_top : %lx\n", ram_top); 1454 1455 return addr; 1456 } 1457 1458 /* 1459 * Allocates memory downward, either from top of RMO, or if highmem 1460 * is set, from the top of RAM. Note that this one doesn't handle 1461 * failures. It does claim memory if highmem is not set. 1462 */ 1463 static unsigned long __init alloc_down(unsigned long size, unsigned long align, 1464 int highmem) 1465 { 1466 unsigned long base, addr = 0; 1467 1468 prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align, 1469 highmem ? "(high)" : "(low)"); 1470 if (ram_top == 0) 1471 prom_panic("alloc_down() called with mem not initialized\n"); 1472 1473 if (highmem) { 1474 /* Carve out storage for the TCE table. */ 1475 addr = _ALIGN_DOWN(alloc_top_high - size, align); 1476 if (addr <= alloc_bottom) 1477 return 0; 1478 /* Will we bump into the RMO ? If yes, check out that we 1479 * didn't overlap existing allocations there, if we did, 1480 * we are dead, we must be the first in town ! 
1481 */ 1482 if (addr < rmo_top) { 1483 /* Good, we are first */ 1484 if (alloc_top == rmo_top) 1485 alloc_top = rmo_top = addr; 1486 else 1487 return 0; 1488 } 1489 alloc_top_high = addr; 1490 goto bail; 1491 } 1492 1493 base = _ALIGN_DOWN(alloc_top - size, align); 1494 for (; base > alloc_bottom; 1495 base = _ALIGN_DOWN(base - 0x100000, align)) { 1496 prom_debug(" trying: 0x%lx\n\r", base); 1497 addr = (unsigned long)prom_claim(base, size, 0); 1498 if (addr != PROM_ERROR && addr != 0) 1499 break; 1500 addr = 0; 1501 } 1502 if (addr == 0) 1503 return 0; 1504 alloc_top = addr; 1505 1506 bail: 1507 prom_debug(" -> %lx\n", addr); 1508 prom_debug(" alloc_bottom : %lx\n", alloc_bottom); 1509 prom_debug(" alloc_top : %lx\n", alloc_top); 1510 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); 1511 prom_debug(" rmo_top : %lx\n", rmo_top); 1512 prom_debug(" ram_top : %lx\n", ram_top); 1513 1514 return addr; 1515 } 1516 1517 /* 1518 * Parse a "reg" cell 1519 */ 1520 static unsigned long __init prom_next_cell(int s, cell_t **cellp) 1521 { 1522 cell_t *p = *cellp; 1523 unsigned long r = 0; 1524 1525 /* Ignore more than 2 cells */ 1526 while (s > sizeof(unsigned long) / 4) { 1527 p++; 1528 s--; 1529 } 1530 r = be32_to_cpu(*p++); 1531 #ifdef CONFIG_PPC64 1532 if (s > 1) { 1533 r <<= 32; 1534 r |= be32_to_cpu(*(p++)); 1535 } 1536 #endif 1537 *cellp = p; 1538 return r; 1539 } 1540 1541 /* 1542 * Very dumb function for adding to the memory reserve list, but 1543 * we don't need anything smarter at this point 1544 * 1545 * XXX Eventually check for collisions. They should NEVER happen. 1546 * If problems seem to show up, it would be a good start to track 1547 * them down. 1548 */ 1549 static void __init reserve_mem(u64 base, u64 size) 1550 { 1551 u64 top = base + size; 1552 unsigned long cnt = mem_reserve_cnt; 1553 1554 if (size == 0) 1555 return; 1556 1557 /* We need to always keep one empty entry so that we 1558 * have our terminator with "size" set to 0 since we are 1559 * dumb and just copy this entire array to the boot params 1560 */ 1561 base = _ALIGN_DOWN(base, PAGE_SIZE); 1562 top = _ALIGN_UP(top, PAGE_SIZE); 1563 size = top - base; 1564 1565 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1)) 1566 prom_panic("Memory reserve map exhausted !\n"); 1567 mem_reserve_map[cnt].base = cpu_to_be64(base); 1568 mem_reserve_map[cnt].size = cpu_to_be64(size); 1569 mem_reserve_cnt = cnt + 1; 1570 } 1571 1572 /* 1573 * Initialize memory allocation mechanism, parse "memory" nodes and 1574 * obtain that way the top of memory and RMO to setup out local allocator 1575 */ 1576 static void __init prom_init_mem(void) 1577 { 1578 phandle node; 1579 char type[64]; 1580 unsigned int plen; 1581 cell_t *p, *endp; 1582 __be32 val; 1583 u32 rac, rsc; 1584 1585 /* 1586 * We iterate the memory nodes to find 1587 * 1) top of RMO (first node) 1588 * 2) top of memory 1589 */ 1590 val = cpu_to_be32(2); 1591 prom_getprop(prom.root, "#address-cells", &val, sizeof(val)); 1592 rac = be32_to_cpu(val); 1593 val = cpu_to_be32(1); 1594 prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc)); 1595 rsc = be32_to_cpu(val); 1596 prom_debug("root_addr_cells: %x\n", rac); 1597 prom_debug("root_size_cells: %x\n", rsc); 1598 1599 prom_debug("scanning memory:\n"); 1600 1601 for (node = 0; prom_next_node(&node); ) { 1602 type[0] = 0; 1603 prom_getprop(node, "device_type", type, sizeof(type)); 1604 1605 if (type[0] == 0) { 1606 /* 1607 * CHRP Longtrail machines have no device_type 1608 * on the memory node, so check the name instead... 
1609 */ 1610 prom_getprop(node, "name", type, sizeof(type)); 1611 } 1612 if (prom_strcmp(type, "memory")) 1613 continue; 1614 1615 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf)); 1616 if (plen > sizeof(regbuf)) { 1617 prom_printf("memory node too large for buffer !\n"); 1618 plen = sizeof(regbuf); 1619 } 1620 p = regbuf; 1621 endp = p + (plen / sizeof(cell_t)); 1622 1623 #ifdef DEBUG_PROM 1624 memset(prom_scratch, 0, sizeof(prom_scratch)); 1625 call_prom("package-to-path", 3, 1, node, prom_scratch, 1626 sizeof(prom_scratch) - 1); 1627 prom_debug(" node %s :\n", prom_scratch); 1628 #endif /* DEBUG_PROM */ 1629 1630 while ((endp - p) >= (rac + rsc)) { 1631 unsigned long base, size; 1632 1633 base = prom_next_cell(rac, &p); 1634 size = prom_next_cell(rsc, &p); 1635 1636 if (size == 0) 1637 continue; 1638 prom_debug(" %lx %lx\n", base, size); 1639 if (base == 0 && (of_platform & PLATFORM_LPAR)) 1640 rmo_top = size; 1641 if ((base + size) > ram_top) 1642 ram_top = base + size; 1643 } 1644 } 1645 1646 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000); 1647 1648 /* 1649 * If prom_memory_limit is set we reduce the upper limits *except* for 1650 * alloc_top_high. This must be the real top of RAM so we can put 1651 * TCE's up there. 1652 */ 1653 1654 alloc_top_high = ram_top; 1655 1656 if (prom_memory_limit) { 1657 if (prom_memory_limit <= alloc_bottom) { 1658 prom_printf("Ignoring mem=%lx <= alloc_bottom.\n", 1659 prom_memory_limit); 1660 prom_memory_limit = 0; 1661 } else if (prom_memory_limit >= ram_top) { 1662 prom_printf("Ignoring mem=%lx >= ram_top.\n", 1663 prom_memory_limit); 1664 prom_memory_limit = 0; 1665 } else { 1666 ram_top = prom_memory_limit; 1667 rmo_top = min(rmo_top, prom_memory_limit); 1668 } 1669 } 1670 1671 /* 1672 * Setup our top alloc point, that is top of RMO or top of 1673 * segment 0 when running non-LPAR. 1674 * Some RS64 machines have buggy firmware where claims up at 1675 * 1GB fail. Cap at 768MB as a workaround. 1676 * Since 768MB is plenty of room, and we need to cap to something 1677 * reasonable on 32-bit, cap at 768MB on all machines. 1678 */ 1679 if (!rmo_top) 1680 rmo_top = ram_top; 1681 rmo_top = min(0x30000000ul, rmo_top); 1682 alloc_top = rmo_top; 1683 alloc_top_high = ram_top; 1684 1685 /* 1686 * Check if we have an initrd after the kernel but still inside 1687 * the RMO. If we do move our bottom point to after it. 
1688 */ 1689 if (prom_initrd_start && 1690 prom_initrd_start < rmo_top && 1691 prom_initrd_end > alloc_bottom) 1692 alloc_bottom = PAGE_ALIGN(prom_initrd_end); 1693 1694 prom_printf("memory layout at init:\n"); 1695 prom_printf(" memory_limit : %lx (16 MB aligned)\n", 1696 prom_memory_limit); 1697 prom_printf(" alloc_bottom : %lx\n", alloc_bottom); 1698 prom_printf(" alloc_top : %lx\n", alloc_top); 1699 prom_printf(" alloc_top_hi : %lx\n", alloc_top_high); 1700 prom_printf(" rmo_top : %lx\n", rmo_top); 1701 prom_printf(" ram_top : %lx\n", ram_top); 1702 } 1703 1704 static void __init prom_close_stdin(void) 1705 { 1706 __be32 val; 1707 ihandle stdin; 1708 1709 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) { 1710 stdin = be32_to_cpu(val); 1711 call_prom("close", 1, 0, stdin); 1712 } 1713 } 1714 1715 /* 1716 * Allocate room for and instantiate RTAS 1717 */ 1718 static void __init prom_instantiate_rtas(void) 1719 { 1720 phandle rtas_node; 1721 ihandle rtas_inst; 1722 u32 base, entry = 0; 1723 __be32 val; 1724 u32 size = 0; 1725 1726 prom_debug("prom_instantiate_rtas: start...\n"); 1727 1728 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas")); 1729 prom_debug("rtas_node: %x\n", rtas_node); 1730 if (!PHANDLE_VALID(rtas_node)) 1731 return; 1732 1733 val = 0; 1734 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size)); 1735 size = be32_to_cpu(val); 1736 if (size == 0) 1737 return; 1738 1739 base = alloc_down(size, PAGE_SIZE, 0); 1740 if (base == 0) 1741 prom_panic("Could not allocate memory for RTAS\n"); 1742 1743 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas")); 1744 if (!IHANDLE_VALID(rtas_inst)) { 1745 prom_printf("opening rtas package failed (%x)\n", rtas_inst); 1746 return; 1747 } 1748 1749 prom_printf("instantiating rtas at 0x%x...", base); 1750 1751 if (call_prom_ret("call-method", 3, 2, &entry, 1752 ADDR("instantiate-rtas"), 1753 rtas_inst, base) != 0 1754 || entry == 0) { 1755 prom_printf(" failed\n"); 1756 return; 1757 } 1758 prom_printf(" done\n"); 1759 1760 reserve_mem(base, size); 1761 1762 val = cpu_to_be32(base); 1763 prom_setprop(rtas_node, "/rtas", "linux,rtas-base", 1764 &val, sizeof(val)); 1765 val = cpu_to_be32(entry); 1766 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry", 1767 &val, sizeof(val)); 1768 1769 /* Check if it supports "query-cpu-stopped-state" */ 1770 if (prom_getprop(rtas_node, "query-cpu-stopped-state", 1771 &val, sizeof(val)) != PROM_ERROR) 1772 rtas_has_query_cpu_stopped = true; 1773 1774 prom_debug("rtas base = 0x%x\n", base); 1775 prom_debug("rtas entry = 0x%x\n", entry); 1776 prom_debug("rtas size = 0x%x\n", size); 1777 1778 prom_debug("prom_instantiate_rtas: end...\n"); 1779 } 1780 1781 #ifdef CONFIG_PPC64 1782 /* 1783 * Allocate room for and instantiate Stored Measurement Log (SML) 1784 */ 1785 static void __init prom_instantiate_sml(void) 1786 { 1787 phandle ibmvtpm_node; 1788 ihandle ibmvtpm_inst; 1789 u32 entry = 0, size = 0, succ = 0; 1790 u64 base; 1791 __be32 val; 1792 1793 prom_debug("prom_instantiate_sml: start...\n"); 1794 1795 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm")); 1796 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node); 1797 if (!PHANDLE_VALID(ibmvtpm_node)) 1798 return; 1799 1800 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm")); 1801 if (!IHANDLE_VALID(ibmvtpm_inst)) { 1802 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst); 1803 return; 1804 } 1805 1806 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported", 1807 &val, sizeof(val)) != PROM_ERROR) { 1808 
if (call_prom_ret("call-method", 2, 2, &succ, 1809 ADDR("reformat-sml-to-efi-alignment"), 1810 ibmvtpm_inst) != 0 || succ == 0) { 1811 prom_printf("Reformat SML to EFI alignment failed\n"); 1812 return; 1813 } 1814 1815 if (call_prom_ret("call-method", 2, 2, &size, 1816 ADDR("sml-get-allocated-size"), 1817 ibmvtpm_inst) != 0 || size == 0) { 1818 prom_printf("SML get allocated size failed\n"); 1819 return; 1820 } 1821 } else { 1822 if (call_prom_ret("call-method", 2, 2, &size, 1823 ADDR("sml-get-handover-size"), 1824 ibmvtpm_inst) != 0 || size == 0) { 1825 prom_printf("SML get handover size failed\n"); 1826 return; 1827 } 1828 } 1829 1830 base = alloc_down(size, PAGE_SIZE, 0); 1831 if (base == 0) 1832 prom_panic("Could not allocate memory for sml\n"); 1833 1834 prom_printf("instantiating sml at 0x%llx...", base); 1835 1836 memset((void *)base, 0, size); 1837 1838 if (call_prom_ret("call-method", 4, 2, &entry, 1839 ADDR("sml-handover"), 1840 ibmvtpm_inst, size, base) != 0 || entry == 0) { 1841 prom_printf("SML handover failed\n"); 1842 return; 1843 } 1844 prom_printf(" done\n"); 1845 1846 reserve_mem(base, size); 1847 1848 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base", 1849 &base, sizeof(base)); 1850 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size", 1851 &size, sizeof(size)); 1852 1853 prom_debug("sml base = 0x%llx\n", base); 1854 prom_debug("sml size = 0x%x\n", size); 1855 1856 prom_debug("prom_instantiate_sml: end...\n"); 1857 } 1858 1859 /* 1860 * Allocate room for and initialize TCE tables 1861 */ 1862 #ifdef __BIG_ENDIAN__ 1863 static void __init prom_initialize_tce_table(void) 1864 { 1865 phandle node; 1866 ihandle phb_node; 1867 char compatible[64], type[64], model[64]; 1868 char *path = prom_scratch; 1869 u64 base, align; 1870 u32 minalign, minsize; 1871 u64 tce_entry, *tce_entryp; 1872 u64 local_alloc_top, local_alloc_bottom; 1873 u64 i; 1874 1875 if (prom_iommu_off) 1876 return; 1877 1878 prom_debug("starting prom_initialize_tce_table\n"); 1879 1880 /* Cache current top of allocs so we reserve a single block */ 1881 local_alloc_top = alloc_top_high; 1882 local_alloc_bottom = local_alloc_top; 1883 1884 /* Search all nodes looking for PHBs. */ 1885 for (node = 0; prom_next_node(&node); ) { 1886 compatible[0] = 0; 1887 type[0] = 0; 1888 model[0] = 0; 1889 prom_getprop(node, "compatible", 1890 compatible, sizeof(compatible)); 1891 prom_getprop(node, "device_type", type, sizeof(type)); 1892 prom_getprop(node, "model", model, sizeof(model)); 1893 1894 if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL)) 1895 continue; 1896 1897 /* Keep the old logic intact to avoid regression. */ 1898 if (compatible[0] != 0) { 1899 if ((prom_strstr(compatible, "python") == NULL) && 1900 (prom_strstr(compatible, "Speedwagon") == NULL) && 1901 (prom_strstr(compatible, "Winnipeg") == NULL)) 1902 continue; 1903 } else if (model[0] != 0) { 1904 if ((prom_strstr(model, "ython") == NULL) && 1905 (prom_strstr(model, "peedwagon") == NULL) && 1906 (prom_strstr(model, "innipeg") == NULL)) 1907 continue; 1908 } 1909 1910 if (prom_getprop(node, "tce-table-minalign", &minalign, 1911 sizeof(minalign)) == PROM_ERROR) 1912 minalign = 0; 1913 if (prom_getprop(node, "tce-table-minsize", &minsize, 1914 sizeof(minsize)) == PROM_ERROR) 1915 minsize = 4UL << 20; 1916 1917 /* 1918 * Even though we read what OF wants, we just set the table 1919 * size to 4 MB. This is enough to map 2GB of PCI DMA space. 
1920 * By doing this, we avoid the pitfalls of trying to DMA to 1921 * MMIO space and the DMA alias hole. 1922 */ 1923 minsize = 4UL << 20; 1924 1925 /* Align to the greater of the align or size */ 1926 align = max(minalign, minsize); 1927 base = alloc_down(minsize, align, 1); 1928 if (base == 0) 1929 prom_panic("ERROR, cannot find space for TCE table.\n"); 1930 if (base < local_alloc_bottom) 1931 local_alloc_bottom = base; 1932 1933 /* It seems OF doesn't null-terminate the path :-( */ 1934 memset(path, 0, sizeof(prom_scratch)); 1935 /* Call OF to setup the TCE hardware */ 1936 if (call_prom("package-to-path", 3, 1, node, 1937 path, sizeof(prom_scratch) - 1) == PROM_ERROR) { 1938 prom_printf("package-to-path failed\n"); 1939 } 1940 1941 /* Save away the TCE table attributes for later use. */ 1942 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base)); 1943 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize)); 1944 1945 prom_debug("TCE table: %s\n", path); 1946 prom_debug("\tnode = 0x%x\n", node); 1947 prom_debug("\tbase = 0x%llx\n", base); 1948 prom_debug("\tsize = 0x%x\n", minsize); 1949 1950 /* Initialize the table to have a one-to-one mapping 1951 * over the allocated size. 1952 */ 1953 tce_entryp = (u64 *)base; 1954 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) { 1955 tce_entry = (i << PAGE_SHIFT); 1956 tce_entry |= 0x3; 1957 *tce_entryp = tce_entry; 1958 } 1959 1960 prom_printf("opening PHB %s", path); 1961 phb_node = call_prom("open", 1, 1, path); 1962 if (phb_node == 0) 1963 prom_printf("... failed\n"); 1964 else 1965 prom_printf("... done\n"); 1966 1967 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"), 1968 phb_node, -1, minsize, 1969 (u32) base, (u32) (base >> 32)); 1970 call_prom("close", 1, 0, phb_node); 1971 } 1972 1973 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom); 1974 1975 /* These are only really needed if there is a memory limit in 1976 * effect, but we don't know so export them always. */ 1977 prom_tce_alloc_start = local_alloc_bottom; 1978 prom_tce_alloc_end = local_alloc_top; 1979 1980 /* Flag the first invalid entry */ 1981 prom_debug("ending prom_initialize_tce_table\n"); 1982 } 1983 #endif /* __BIG_ENDIAN__ */ 1984 #endif /* CONFIG_PPC64 */ 1985 1986 /* 1987 * With CHRP SMP we need to use the OF to start the other processors. 1988 * We can't wait until smp_boot_cpus (the OF is trashed by then) 1989 * so we have to put the processors into a holding pattern controlled 1990 * by the kernel (not OF) before we destroy the OF. 1991 * 1992 * This uses a chunk of low memory, puts some holding pattern 1993 * code there and sends the other processors off to there until 1994 * smp_boot_cpus tells them to do something. The holding pattern 1995 * checks that address until its cpu # is there, when it is that 1996 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care 1997 * of setting those values. 1998 * 1999 * We also use physical address 0x4 here to tell when a cpu 2000 * is in its holding pattern code. 
2001 * 2002 * -- Cort 2003 */ 2004 /* 2005 * We want to reference the copy of __secondary_hold_* in the 2006 * 0 - 0x100 address range 2007 */ 2008 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff) 2009 2010 static void __init prom_hold_cpus(void) 2011 { 2012 unsigned long i; 2013 phandle node; 2014 char type[64]; 2015 unsigned long *spinloop 2016 = (void *) LOW_ADDR(__secondary_hold_spinloop); 2017 unsigned long *acknowledge 2018 = (void *) LOW_ADDR(__secondary_hold_acknowledge); 2019 unsigned long secondary_hold = LOW_ADDR(__secondary_hold); 2020 2021 /* 2022 * On pseries, if RTAS supports "query-cpu-stopped-state", 2023 * we skip this stage, the CPUs will be started by the 2024 * kernel using RTAS. 2025 */ 2026 if ((of_platform == PLATFORM_PSERIES || 2027 of_platform == PLATFORM_PSERIES_LPAR) && 2028 rtas_has_query_cpu_stopped) { 2029 prom_printf("prom_hold_cpus: skipped\n"); 2030 return; 2031 } 2032 2033 prom_debug("prom_hold_cpus: start...\n"); 2034 prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop); 2035 prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop); 2036 prom_debug(" 1) acknowledge = 0x%lx\n", 2037 (unsigned long)acknowledge); 2038 prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge); 2039 prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold); 2040 2041 /* Set the common spinloop variable, so all of the secondary cpus 2042 * will block when they are awakened from their OF spinloop. 2043 * This must occur for both SMP and non SMP kernels, since OF will 2044 * be trashed when we move the kernel. 2045 */ 2046 *spinloop = 0; 2047 2048 /* look for cpus */ 2049 for (node = 0; prom_next_node(&node); ) { 2050 unsigned int cpu_no; 2051 __be32 reg; 2052 2053 type[0] = 0; 2054 prom_getprop(node, "device_type", type, sizeof(type)); 2055 if (prom_strcmp(type, "cpu") != 0) 2056 continue; 2057 2058 /* Skip non-configured cpus. */ 2059 if (prom_getprop(node, "status", type, sizeof(type)) > 0) 2060 if (prom_strcmp(type, "okay") != 0) 2061 continue; 2062 2063 reg = cpu_to_be32(-1); /* make sparse happy */ 2064 prom_getprop(node, "reg", ®, sizeof(reg)); 2065 cpu_no = be32_to_cpu(reg); 2066 2067 prom_debug("cpu hw idx = %u\n", cpu_no); 2068 2069 /* Init the acknowledge var which will be reset by 2070 * the secondary cpu when it awakens from its OF 2071 * spinloop. 2072 */ 2073 *acknowledge = (unsigned long)-1; 2074 2075 if (cpu_no != prom.cpu) { 2076 /* Primary Thread of non-boot cpu or any thread */ 2077 prom_printf("starting cpu hw idx %u... 
", cpu_no); 2078 call_prom("start-cpu", 3, 0, node, 2079 secondary_hold, cpu_no); 2080 2081 for (i = 0; (i < 100000000) && 2082 (*acknowledge == ((unsigned long)-1)); i++ ) 2083 mb(); 2084 2085 if (*acknowledge == cpu_no) 2086 prom_printf("done\n"); 2087 else 2088 prom_printf("failed: %lx\n", *acknowledge); 2089 } 2090 #ifdef CONFIG_SMP 2091 else 2092 prom_printf("boot cpu hw idx %u\n", cpu_no); 2093 #endif /* CONFIG_SMP */ 2094 } 2095 2096 prom_debug("prom_hold_cpus: end...\n"); 2097 } 2098 2099 2100 static void __init prom_init_client_services(unsigned long pp) 2101 { 2102 /* Get a handle to the prom entry point before anything else */ 2103 prom_entry = pp; 2104 2105 /* get a handle for the stdout device */ 2106 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen")); 2107 if (!PHANDLE_VALID(prom.chosen)) 2108 prom_panic("cannot find chosen"); /* msg won't be printed :( */ 2109 2110 /* get device tree root */ 2111 prom.root = call_prom("finddevice", 1, 1, ADDR("/")); 2112 if (!PHANDLE_VALID(prom.root)) 2113 prom_panic("cannot find device tree root"); /* msg won't be printed :( */ 2114 2115 prom.mmumap = 0; 2116 } 2117 2118 #ifdef CONFIG_PPC32 2119 /* 2120 * For really old powermacs, we need to map things we claim. 2121 * For that, we need the ihandle of the mmu. 2122 * Also, on the longtrail, we need to work around other bugs. 2123 */ 2124 static void __init prom_find_mmu(void) 2125 { 2126 phandle oprom; 2127 char version[64]; 2128 2129 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom")); 2130 if (!PHANDLE_VALID(oprom)) 2131 return; 2132 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0) 2133 return; 2134 version[sizeof(version) - 1] = 0; 2135 /* XXX might need to add other versions here */ 2136 if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0) 2137 of_workarounds = OF_WA_CLAIM; 2138 else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) { 2139 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL; 2140 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim"); 2141 } else 2142 return; 2143 prom.memory = call_prom("open", 1, 1, ADDR("/memory")); 2144 prom_getprop(prom.chosen, "mmu", &prom.mmumap, 2145 sizeof(prom.mmumap)); 2146 prom.mmumap = be32_to_cpu(prom.mmumap); 2147 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap)) 2148 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */ 2149 } 2150 #else 2151 #define prom_find_mmu() 2152 #endif 2153 2154 static void __init prom_init_stdout(void) 2155 { 2156 char *path = of_stdout_device; 2157 char type[16]; 2158 phandle stdout_node; 2159 __be32 val; 2160 2161 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0) 2162 prom_panic("cannot find stdout"); 2163 2164 prom.stdout = be32_to_cpu(val); 2165 2166 /* Get the full OF pathname of the stdout device */ 2167 memset(path, 0, 256); 2168 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255); 2169 prom_printf("OF stdout device is: %s\n", of_stdout_device); 2170 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path", 2171 path, prom_strlen(path) + 1); 2172 2173 /* instance-to-package fails on PA-Semi */ 2174 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); 2175 if (stdout_node != PROM_ERROR) { 2176 val = cpu_to_be32(stdout_node); 2177 2178 /* If it's a display, note it */ 2179 memset(type, 0, sizeof(type)); 2180 prom_getprop(stdout_node, "device_type", type, sizeof(type)); 2181 if (prom_strcmp(type, "display") == 0) 2182 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0); 2183 } 2184 } 2185 2186 static int __init 
prom_find_machine_type(void)
2187 {
2188 char compat[256];
2189 int len, i = 0;
2190 #ifdef CONFIG_PPC64
2191 phandle rtas;
2192 int x;
2193 #endif
2194
2195 /* Look for a PowerMac or a Cell */
2196 len = prom_getprop(prom.root, "compatible",
2197 compat, sizeof(compat)-1);
2198 if (len > 0) {
2199 compat[len] = 0;
2200 while (i < len) {
2201 char *p = &compat[i];
2202 int sl = prom_strlen(p);
2203 if (sl == 0)
2204 break;
2205 if (prom_strstr(p, "Power Macintosh") ||
2206 prom_strstr(p, "MacRISC"))
2207 return PLATFORM_POWERMAC;
2208 #ifdef CONFIG_PPC64
2209 /* We must make sure we don't detect the IBM Cell
2210 * blades as pSeries due to some firmware issues,
2211 * so we do it here.
2212 */
2213 if (prom_strstr(p, "IBM,CBEA") ||
2214 prom_strstr(p, "IBM,CPBW-1.0"))
2215 return PLATFORM_GENERIC;
2216 #endif /* CONFIG_PPC64 */
2217 i += sl + 1;
2218 }
2219 }
2220 #ifdef CONFIG_PPC64
2221 /* Try to figure out if it's an IBM pSeries or any other
2222 * PAPR compliant platform. We assume it is if :
2223 * - /device_type is "chrp" (please, do NOT use that for future
2224 * non-IBM designs !)
2225 * - it has /rtas
2226 */
2227 len = prom_getprop(prom.root, "device_type",
2228 compat, sizeof(compat)-1);
2229 if (len <= 0)
2230 return PLATFORM_GENERIC;
2231 if (prom_strcmp(compat, "chrp"))
2232 return PLATFORM_GENERIC;
2233
2234 /* Default to pSeries. We need to know if we are running LPAR */
2235 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2236 if (!PHANDLE_VALID(rtas))
2237 return PLATFORM_GENERIC;
2238 x = prom_getproplen(rtas, "ibm,hypertas-functions");
2239 if (x != PROM_ERROR) {
2240 prom_debug("Hypertas detected, assuming LPAR !\n");
2241 return PLATFORM_PSERIES_LPAR;
2242 }
2243 return PLATFORM_PSERIES;
2244 #else
2245 return PLATFORM_GENERIC;
2246 #endif
2247 }
2248
2249 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2250 {
2251 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2252 }
2253
2254 /*
2255 * If we have a display that we don't know how to drive,
2256 * we will want to try to execute OF's open method for it
2257 * later. However, OF will probably fall over if we do that
2258 * after we've taken over the MMU.
2259 * So we check whether we will need to open the display,
2260 * and if so, open it now.
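 * Displays we manage to open are tagged with a "linux,opened" property
 * below; the firmware console itself was already tagged
 * "linux,boot-display" in prom_init_stdout() when it is a display, and
 * the early btext setup keys off that property.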
2261 */ 2262 static void __init prom_check_displays(void) 2263 { 2264 char type[16], *path; 2265 phandle node; 2266 ihandle ih; 2267 int i; 2268 2269 static const unsigned char default_colors[] __initconst = { 2270 0x00, 0x00, 0x00, 2271 0x00, 0x00, 0xaa, 2272 0x00, 0xaa, 0x00, 2273 0x00, 0xaa, 0xaa, 2274 0xaa, 0x00, 0x00, 2275 0xaa, 0x00, 0xaa, 2276 0xaa, 0xaa, 0x00, 2277 0xaa, 0xaa, 0xaa, 2278 0x55, 0x55, 0x55, 2279 0x55, 0x55, 0xff, 2280 0x55, 0xff, 0x55, 2281 0x55, 0xff, 0xff, 2282 0xff, 0x55, 0x55, 2283 0xff, 0x55, 0xff, 2284 0xff, 0xff, 0x55, 2285 0xff, 0xff, 0xff 2286 }; 2287 const unsigned char *clut; 2288 2289 prom_debug("Looking for displays\n"); 2290 for (node = 0; prom_next_node(&node); ) { 2291 memset(type, 0, sizeof(type)); 2292 prom_getprop(node, "device_type", type, sizeof(type)); 2293 if (prom_strcmp(type, "display") != 0) 2294 continue; 2295 2296 /* It seems OF doesn't null-terminate the path :-( */ 2297 path = prom_scratch; 2298 memset(path, 0, sizeof(prom_scratch)); 2299 2300 /* 2301 * leave some room at the end of the path for appending extra 2302 * arguments 2303 */ 2304 if (call_prom("package-to-path", 3, 1, node, path, 2305 sizeof(prom_scratch) - 10) == PROM_ERROR) 2306 continue; 2307 prom_printf("found display : %s, opening... ", path); 2308 2309 ih = call_prom("open", 1, 1, path); 2310 if (ih == 0) { 2311 prom_printf("failed\n"); 2312 continue; 2313 } 2314 2315 /* Success */ 2316 prom_printf("done\n"); 2317 prom_setprop(node, path, "linux,opened", NULL, 0); 2318 2319 /* Setup a usable color table when the appropriate 2320 * method is available. Should update this to set-colors */ 2321 clut = default_colors; 2322 for (i = 0; i < 16; i++, clut += 3) 2323 if (prom_set_color(ih, i, clut[0], clut[1], 2324 clut[2]) != 0) 2325 break; 2326 2327 #ifdef CONFIG_LOGO_LINUX_CLUT224 2328 clut = PTRRELOC(logo_linux_clut224.clut); 2329 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3) 2330 if (prom_set_color(ih, i + 32, clut[0], clut[1], 2331 clut[2]) != 0) 2332 break; 2333 #endif /* CONFIG_LOGO_LINUX_CLUT224 */ 2334 2335 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX 2336 if (prom_getprop(node, "linux,boot-display", NULL, 0) != 2337 PROM_ERROR) { 2338 u32 width, height, pitch, addr; 2339 2340 prom_printf("Setting btext !\n"); 2341 prom_getprop(node, "width", &width, 4); 2342 prom_getprop(node, "height", &height, 4); 2343 prom_getprop(node, "linebytes", &pitch, 4); 2344 prom_getprop(node, "address", &addr, 4); 2345 prom_printf("W=%d H=%d LB=%d addr=0x%x\n", 2346 width, height, pitch, addr); 2347 btext_setup_display(width, height, 8, pitch, addr); 2348 btext_prepare_BAT(); 2349 } 2350 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */ 2351 } 2352 } 2353 2354 2355 /* Return (relocated) pointer to this much memory: moves initrd if reqd. 
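 * The flattened tree is assembled in memory claimed from OF in
 * DEVTREE_CHUNK_SIZE (1MB) pieces: when the current chunk is exhausted,
 * another chunk is claimed with alloc_up() and *mem_end is bumped.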
*/ 2356 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end, 2357 unsigned long needed, unsigned long align) 2358 { 2359 void *ret; 2360 2361 *mem_start = _ALIGN(*mem_start, align); 2362 while ((*mem_start + needed) > *mem_end) { 2363 unsigned long room, chunk; 2364 2365 prom_debug("Chunk exhausted, claiming more at %lx...\n", 2366 alloc_bottom); 2367 room = alloc_top - alloc_bottom; 2368 if (room > DEVTREE_CHUNK_SIZE) 2369 room = DEVTREE_CHUNK_SIZE; 2370 if (room < PAGE_SIZE) 2371 prom_panic("No memory for flatten_device_tree " 2372 "(no room)\n"); 2373 chunk = alloc_up(room, 0); 2374 if (chunk == 0) 2375 prom_panic("No memory for flatten_device_tree " 2376 "(claim failed)\n"); 2377 *mem_end = chunk + room; 2378 } 2379 2380 ret = (void *)*mem_start; 2381 *mem_start += needed; 2382 2383 return ret; 2384 } 2385 2386 #define dt_push_token(token, mem_start, mem_end) do { \ 2387 void *room = make_room(mem_start, mem_end, 4, 4); \ 2388 *(__be32 *)room = cpu_to_be32(token); \ 2389 } while(0) 2390 2391 static unsigned long __init dt_find_string(char *str) 2392 { 2393 char *s, *os; 2394 2395 s = os = (char *)dt_string_start; 2396 s += 4; 2397 while (s < (char *)dt_string_end) { 2398 if (prom_strcmp(s, str) == 0) 2399 return s - os; 2400 s += prom_strlen(s) + 1; 2401 } 2402 return 0; 2403 } 2404 2405 /* 2406 * The Open Firmware 1275 specification states properties must be 31 bytes or 2407 * less, however not all firmwares obey this. Make it 64 bytes to be safe. 2408 */ 2409 #define MAX_PROPERTY_NAME 64 2410 2411 static void __init scan_dt_build_strings(phandle node, 2412 unsigned long *mem_start, 2413 unsigned long *mem_end) 2414 { 2415 char *prev_name, *namep, *sstart; 2416 unsigned long soff; 2417 phandle child; 2418 2419 sstart = (char *)dt_string_start; 2420 2421 /* get and store all property names */ 2422 prev_name = ""; 2423 for (;;) { 2424 /* 64 is max len of name including nul. 
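 * A MAX_PROPERTY_NAME slot is reserved before calling "nextprop" so the
 * firmware has room to write the name; if the name is "name", is already
 * in the string table, or there are no more properties, *mem_start is
 * wound back so the slot gets reused.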
*/ 2425 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1); 2426 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) { 2427 /* No more nodes: unwind alloc */ 2428 *mem_start = (unsigned long)namep; 2429 break; 2430 } 2431 2432 /* skip "name" */ 2433 if (prom_strcmp(namep, "name") == 0) { 2434 *mem_start = (unsigned long)namep; 2435 prev_name = "name"; 2436 continue; 2437 } 2438 /* get/create string entry */ 2439 soff = dt_find_string(namep); 2440 if (soff != 0) { 2441 *mem_start = (unsigned long)namep; 2442 namep = sstart + soff; 2443 } else { 2444 /* Trim off some if we can */ 2445 *mem_start = (unsigned long)namep + prom_strlen(namep) + 1; 2446 dt_string_end = *mem_start; 2447 } 2448 prev_name = namep; 2449 } 2450 2451 /* do all our children */ 2452 child = call_prom("child", 1, 1, node); 2453 while (child != 0) { 2454 scan_dt_build_strings(child, mem_start, mem_end); 2455 child = call_prom("peer", 1, 1, child); 2456 } 2457 } 2458 2459 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, 2460 unsigned long *mem_end) 2461 { 2462 phandle child; 2463 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path; 2464 unsigned long soff; 2465 unsigned char *valp; 2466 static char pname[MAX_PROPERTY_NAME] __prombss; 2467 int l, room, has_phandle = 0; 2468 2469 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end); 2470 2471 /* get the node's full name */ 2472 namep = (char *)*mem_start; 2473 room = *mem_end - *mem_start; 2474 if (room > 255) 2475 room = 255; 2476 l = call_prom("package-to-path", 3, 1, node, namep, room); 2477 if (l >= 0) { 2478 /* Didn't fit? Get more room. */ 2479 if (l >= room) { 2480 if (l >= *mem_end - *mem_start) 2481 namep = make_room(mem_start, mem_end, l+1, 1); 2482 call_prom("package-to-path", 3, 1, node, namep, l); 2483 } 2484 namep[l] = '\0'; 2485 2486 /* Fixup an Apple bug where they have bogus \0 chars in the 2487 * middle of the path in some properties, and extract 2488 * the unit name (everything after the last '/'). 
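 * e.g. a node whose full path is "/pci@f2000000/mac-io@17" ends up
 * recorded in the structure block as just "mac-io@17".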
2489 */ 2490 for (lp = p = namep, ep = namep + l; p < ep; p++) { 2491 if (*p == '/') 2492 lp = namep; 2493 else if (*p != 0) 2494 *lp++ = *p; 2495 } 2496 *lp = 0; 2497 *mem_start = _ALIGN((unsigned long)lp + 1, 4); 2498 } 2499 2500 /* get it again for debugging */ 2501 path = prom_scratch; 2502 memset(path, 0, sizeof(prom_scratch)); 2503 call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1); 2504 2505 /* get and store all properties */ 2506 prev_name = ""; 2507 sstart = (char *)dt_string_start; 2508 for (;;) { 2509 if (call_prom("nextprop", 3, 1, node, prev_name, 2510 pname) != 1) 2511 break; 2512 2513 /* skip "name" */ 2514 if (prom_strcmp(pname, "name") == 0) { 2515 prev_name = "name"; 2516 continue; 2517 } 2518 2519 /* find string offset */ 2520 soff = dt_find_string(pname); 2521 if (soff == 0) { 2522 prom_printf("WARNING: Can't find string index for" 2523 " <%s>, node %s\n", pname, path); 2524 break; 2525 } 2526 prev_name = sstart + soff; 2527 2528 /* get length */ 2529 l = call_prom("getproplen", 2, 1, node, pname); 2530 2531 /* sanity checks */ 2532 if (l == PROM_ERROR) 2533 continue; 2534 2535 /* push property head */ 2536 dt_push_token(OF_DT_PROP, mem_start, mem_end); 2537 dt_push_token(l, mem_start, mem_end); 2538 dt_push_token(soff, mem_start, mem_end); 2539 2540 /* push property content */ 2541 valp = make_room(mem_start, mem_end, l, 4); 2542 call_prom("getprop", 4, 1, node, pname, valp, l); 2543 *mem_start = _ALIGN(*mem_start, 4); 2544 2545 if (!prom_strcmp(pname, "phandle")) 2546 has_phandle = 1; 2547 } 2548 2549 /* Add a "phandle" property if none already exist */ 2550 if (!has_phandle) { 2551 soff = dt_find_string("phandle"); 2552 if (soff == 0) 2553 prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path); 2554 else { 2555 dt_push_token(OF_DT_PROP, mem_start, mem_end); 2556 dt_push_token(4, mem_start, mem_end); 2557 dt_push_token(soff, mem_start, mem_end); 2558 valp = make_room(mem_start, mem_end, 4, 4); 2559 *(__be32 *)valp = cpu_to_be32(node); 2560 } 2561 } 2562 2563 /* do all our children */ 2564 child = call_prom("child", 1, 1, node); 2565 while (child != 0) { 2566 scan_dt_build_struct(child, mem_start, mem_end); 2567 child = call_prom("peer", 1, 1, child); 2568 } 2569 2570 dt_push_token(OF_DT_END_NODE, mem_start, mem_end); 2571 } 2572 2573 static void __init flatten_device_tree(void) 2574 { 2575 phandle root; 2576 unsigned long mem_start, mem_end, room; 2577 struct boot_param_header *hdr; 2578 char *namep; 2579 u64 *rsvmap; 2580 2581 /* 2582 * Check how much room we have between alloc top & bottom (+/- a 2583 * few pages), crop to 1MB, as this is our "chunk" size 2584 */ 2585 room = alloc_top - alloc_bottom - 0x4000; 2586 if (room > DEVTREE_CHUNK_SIZE) 2587 room = DEVTREE_CHUNK_SIZE; 2588 prom_debug("starting device tree allocs at %lx\n", alloc_bottom); 2589 2590 /* Now try to claim that */ 2591 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE); 2592 if (mem_start == 0) 2593 prom_panic("Can't allocate initial device-tree chunk\n"); 2594 mem_end = mem_start + room; 2595 2596 /* Get root of tree */ 2597 root = call_prom("peer", 1, 1, (phandle)0); 2598 if (root == (phandle)0) 2599 prom_panic ("couldn't get device tree root\n"); 2600 2601 /* Build header and make room for mem rsv map */ 2602 mem_start = _ALIGN(mem_start, 4); 2603 hdr = make_room(&mem_start, &mem_end, 2604 sizeof(struct boot_param_header), 4); 2605 dt_header_start = (unsigned long)hdr; 2606 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8); 2607 
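/*
 * The blob assembled here ends up laid out roughly as below; the offsets
 * are all relative to dt_header_start and are filled into the header once
 * the string and structure blocks are complete:
 *
 *	boot_param_header
 *	memory reserve map	(mem_reserve_map, copied in at the end)
 *	strings block		(dt_string_start .. dt_string_end)
 *	structure block		(dt_struct_start .. dt_struct_end)
 */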
2608 /* Start of strings */ 2609 mem_start = PAGE_ALIGN(mem_start); 2610 dt_string_start = mem_start; 2611 mem_start += 4; /* hole */ 2612 2613 /* Add "phandle" in there, we'll need it */ 2614 namep = make_room(&mem_start, &mem_end, 16, 1); 2615 prom_strcpy(namep, "phandle"); 2616 mem_start = (unsigned long)namep + prom_strlen(namep) + 1; 2617 2618 /* Build string array */ 2619 prom_printf("Building dt strings...\n"); 2620 scan_dt_build_strings(root, &mem_start, &mem_end); 2621 dt_string_end = mem_start; 2622 2623 /* Build structure */ 2624 mem_start = PAGE_ALIGN(mem_start); 2625 dt_struct_start = mem_start; 2626 prom_printf("Building dt structure...\n"); 2627 scan_dt_build_struct(root, &mem_start, &mem_end); 2628 dt_push_token(OF_DT_END, &mem_start, &mem_end); 2629 dt_struct_end = PAGE_ALIGN(mem_start); 2630 2631 /* Finish header */ 2632 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu); 2633 hdr->magic = cpu_to_be32(OF_DT_HEADER); 2634 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start); 2635 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start); 2636 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start); 2637 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start); 2638 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start); 2639 hdr->version = cpu_to_be32(OF_DT_VERSION); 2640 /* Version 16 is not backward compatible */ 2641 hdr->last_comp_version = cpu_to_be32(0x10); 2642 2643 /* Copy the reserve map in */ 2644 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map)); 2645 2646 #ifdef DEBUG_PROM 2647 { 2648 int i; 2649 prom_printf("reserved memory map:\n"); 2650 for (i = 0; i < mem_reserve_cnt; i++) 2651 prom_printf(" %llx - %llx\n", 2652 be64_to_cpu(mem_reserve_map[i].base), 2653 be64_to_cpu(mem_reserve_map[i].size)); 2654 } 2655 #endif 2656 /* Bump mem_reserve_cnt to cause further reservations to fail 2657 * since it's too late. 2658 */ 2659 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE; 2660 2661 prom_printf("Device tree strings 0x%lx -> 0x%lx\n", 2662 dt_string_start, dt_string_end); 2663 prom_printf("Device tree struct 0x%lx -> 0x%lx\n", 2664 dt_struct_start, dt_struct_end); 2665 } 2666 2667 #ifdef CONFIG_PPC_MAPLE 2668 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property. 2669 * The values are bad, and it doesn't even have the right number of cells. 
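 * The 12-byte property found there is rewritten below as a proper
 * 6-cell ISA range, <1 0 rloc 0 0 0x00010000>, i.e. 64k of I/O space
 * routed through the host bridge's PCI I/O space.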
*/ 2670 static void __init fixup_device_tree_maple(void) 2671 { 2672 phandle isa; 2673 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */ 2674 u32 isa_ranges[6]; 2675 char *name; 2676 2677 name = "/ht@0/isa@4"; 2678 isa = call_prom("finddevice", 1, 1, ADDR(name)); 2679 if (!PHANDLE_VALID(isa)) { 2680 name = "/ht@0/isa@6"; 2681 isa = call_prom("finddevice", 1, 1, ADDR(name)); 2682 rloc = 0x01003000; /* IO space; PCI device = 6 */ 2683 } 2684 if (!PHANDLE_VALID(isa)) 2685 return; 2686 2687 if (prom_getproplen(isa, "ranges") != 12) 2688 return; 2689 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges)) 2690 == PROM_ERROR) 2691 return; 2692 2693 if (isa_ranges[0] != 0x1 || 2694 isa_ranges[1] != 0xf4000000 || 2695 isa_ranges[2] != 0x00010000) 2696 return; 2697 2698 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n"); 2699 2700 isa_ranges[0] = 0x1; 2701 isa_ranges[1] = 0x0; 2702 isa_ranges[2] = rloc; 2703 isa_ranges[3] = 0x0; 2704 isa_ranges[4] = 0x0; 2705 isa_ranges[5] = 0x00010000; 2706 prom_setprop(isa, name, "ranges", 2707 isa_ranges, sizeof(isa_ranges)); 2708 } 2709 2710 #define CPC925_MC_START 0xf8000000 2711 #define CPC925_MC_LENGTH 0x1000000 2712 /* The values for memory-controller don't have right number of cells */ 2713 static void __init fixup_device_tree_maple_memory_controller(void) 2714 { 2715 phandle mc; 2716 u32 mc_reg[4]; 2717 char *name = "/hostbridge@f8000000"; 2718 u32 ac, sc; 2719 2720 mc = call_prom("finddevice", 1, 1, ADDR(name)); 2721 if (!PHANDLE_VALID(mc)) 2722 return; 2723 2724 if (prom_getproplen(mc, "reg") != 8) 2725 return; 2726 2727 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac)); 2728 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc)); 2729 if ((ac != 2) || (sc != 2)) 2730 return; 2731 2732 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR) 2733 return; 2734 2735 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH) 2736 return; 2737 2738 prom_printf("Fixing up bogus hostbridge on Maple...\n"); 2739 2740 mc_reg[0] = 0x0; 2741 mc_reg[1] = CPC925_MC_START; 2742 mc_reg[2] = 0x0; 2743 mc_reg[3] = CPC925_MC_LENGTH; 2744 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg)); 2745 } 2746 #else 2747 #define fixup_device_tree_maple() 2748 #define fixup_device_tree_maple_memory_controller() 2749 #endif 2750 2751 #ifdef CONFIG_PPC_CHRP 2752 /* 2753 * Pegasos and BriQ lacks the "ranges" property in the isa node 2754 * Pegasos needs decimal IRQ 14/15, not hexadecimal 2755 * Pegasos has the IDE configured in legacy mode, but advertised as native 2756 */ 2757 static void __init fixup_device_tree_chrp(void) 2758 { 2759 phandle ph; 2760 u32 prop[6]; 2761 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */ 2762 char *name; 2763 int rc; 2764 2765 name = "/pci@80000000/isa@c"; 2766 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2767 if (!PHANDLE_VALID(ph)) { 2768 name = "/pci@ff500000/isa@6"; 2769 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2770 rloc = 0x01003000; /* IO space; PCI device = 6 */ 2771 } 2772 if (PHANDLE_VALID(ph)) { 2773 rc = prom_getproplen(ph, "ranges"); 2774 if (rc == 0 || rc == PROM_ERROR) { 2775 prom_printf("Fixing up missing ISA range on Pegasos...\n"); 2776 2777 prop[0] = 0x1; 2778 prop[1] = 0x0; 2779 prop[2] = rloc; 2780 prop[3] = 0x0; 2781 prop[4] = 0x0; 2782 prop[5] = 0x00010000; 2783 prom_setprop(ph, name, "ranges", prop, sizeof(prop)); 2784 } 2785 } 2786 2787 name = "/pci@80000000/ide@C,1"; 2788 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2789 if (PHANDLE_VALID(ph)) { 2790 
prom_printf("Fixing up IDE interrupt on Pegasos...\n"); 2791 prop[0] = 14; 2792 prop[1] = 0x0; 2793 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32)); 2794 prom_printf("Fixing up IDE class-code on Pegasos...\n"); 2795 rc = prom_getprop(ph, "class-code", prop, sizeof(u32)); 2796 if (rc == sizeof(u32)) { 2797 prop[0] &= ~0x5; 2798 prom_setprop(ph, name, "class-code", prop, sizeof(u32)); 2799 } 2800 } 2801 } 2802 #else 2803 #define fixup_device_tree_chrp() 2804 #endif 2805 2806 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC) 2807 static void __init fixup_device_tree_pmac(void) 2808 { 2809 phandle u3, i2c, mpic; 2810 u32 u3_rev; 2811 u32 interrupts[2]; 2812 u32 parent; 2813 2814 /* Some G5s have a missing interrupt definition, fix it up here */ 2815 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000")); 2816 if (!PHANDLE_VALID(u3)) 2817 return; 2818 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000")); 2819 if (!PHANDLE_VALID(i2c)) 2820 return; 2821 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000")); 2822 if (!PHANDLE_VALID(mpic)) 2823 return; 2824 2825 /* check if proper rev of u3 */ 2826 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev)) 2827 == PROM_ERROR) 2828 return; 2829 if (u3_rev < 0x35 || u3_rev > 0x39) 2830 return; 2831 /* does it need fixup ? */ 2832 if (prom_getproplen(i2c, "interrupts") > 0) 2833 return; 2834 2835 prom_printf("fixing up bogus interrupts for u3 i2c...\n"); 2836 2837 /* interrupt on this revision of u3 is number 0 and level */ 2838 interrupts[0] = 0; 2839 interrupts[1] = 1; 2840 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts", 2841 &interrupts, sizeof(interrupts)); 2842 parent = (u32)mpic; 2843 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent", 2844 &parent, sizeof(parent)); 2845 } 2846 #else 2847 #define fixup_device_tree_pmac() 2848 #endif 2849 2850 #ifdef CONFIG_PPC_EFIKA 2851 /* 2852 * The MPC5200 FEC driver requires an phy-handle property to tell it how 2853 * to talk to the phy. If the phy-handle property is missing, then this 2854 * function is called to add the appropriate nodes and link it to the 2855 * ethernet node. 2856 */ 2857 static void __init fixup_device_tree_efika_add_phy(void) 2858 { 2859 u32 node; 2860 char prop[64]; 2861 int rv; 2862 2863 /* Check if /builtin/ethernet exists - bail if it doesn't */ 2864 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet")); 2865 if (!PHANDLE_VALID(node)) 2866 return; 2867 2868 /* Check if the phy-handle property exists - bail if it does */ 2869 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop)); 2870 if (!rv) 2871 return; 2872 2873 /* 2874 * At this point the ethernet device doesn't have a phy described. 
2875 * Now we need to add the missing phy node and linkage 2876 */ 2877 2878 /* Check for an MDIO bus node - if missing then create one */ 2879 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio")); 2880 if (!PHANDLE_VALID(node)) { 2881 prom_printf("Adding Ethernet MDIO node\n"); 2882 call_prom("interpret", 1, 1, 2883 " s\" /builtin\" find-device" 2884 " new-device" 2885 " 1 encode-int s\" #address-cells\" property" 2886 " 0 encode-int s\" #size-cells\" property" 2887 " s\" mdio\" device-name" 2888 " s\" fsl,mpc5200b-mdio\" encode-string" 2889 " s\" compatible\" property" 2890 " 0xf0003000 0x400 reg" 2891 " 0x2 encode-int" 2892 " 0x5 encode-int encode+" 2893 " 0x3 encode-int encode+" 2894 " s\" interrupts\" property" 2895 " finish-device"); 2896 }; 2897 2898 /* Check for a PHY device node - if missing then create one and 2899 * give it's phandle to the ethernet node */ 2900 node = call_prom("finddevice", 1, 1, 2901 ADDR("/builtin/mdio/ethernet-phy")); 2902 if (!PHANDLE_VALID(node)) { 2903 prom_printf("Adding Ethernet PHY node\n"); 2904 call_prom("interpret", 1, 1, 2905 " s\" /builtin/mdio\" find-device" 2906 " new-device" 2907 " s\" ethernet-phy\" device-name" 2908 " 0x10 encode-int s\" reg\" property" 2909 " my-self" 2910 " ihandle>phandle" 2911 " finish-device" 2912 " s\" /builtin/ethernet\" find-device" 2913 " encode-int" 2914 " s\" phy-handle\" property" 2915 " device-end"); 2916 } 2917 } 2918 2919 static void __init fixup_device_tree_efika(void) 2920 { 2921 int sound_irq[3] = { 2, 2, 0 }; 2922 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0, 2923 3,4,0, 3,5,0, 3,6,0, 3,7,0, 2924 3,8,0, 3,9,0, 3,10,0, 3,11,0, 2925 3,12,0, 3,13,0, 3,14,0, 3,15,0 }; 2926 u32 node; 2927 char prop[64]; 2928 int rv, len; 2929 2930 /* Check if we're really running on a EFIKA */ 2931 node = call_prom("finddevice", 1, 1, ADDR("/")); 2932 if (!PHANDLE_VALID(node)) 2933 return; 2934 2935 rv = prom_getprop(node, "model", prop, sizeof(prop)); 2936 if (rv == PROM_ERROR) 2937 return; 2938 if (prom_strcmp(prop, "EFIKA5K2")) 2939 return; 2940 2941 prom_printf("Applying EFIKA device tree fixups\n"); 2942 2943 /* Claiming to be 'chrp' is death */ 2944 node = call_prom("finddevice", 1, 1, ADDR("/")); 2945 rv = prom_getprop(node, "device_type", prop, sizeof(prop)); 2946 if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0)) 2947 prom_setprop(node, "/", "device_type", "efika", sizeof("efika")); 2948 2949 /* CODEGEN,description is exposed in /proc/cpuinfo so 2950 fix that too */ 2951 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop)); 2952 if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP"))) 2953 prom_setprop(node, "/", "CODEGEN,description", 2954 "Efika 5200B PowerPC System", 2955 sizeof("Efika 5200B PowerPC System")); 2956 2957 /* Fixup bestcomm interrupts property */ 2958 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm")); 2959 if (PHANDLE_VALID(node)) { 2960 len = prom_getproplen(node, "interrupts"); 2961 if (len == 12) { 2962 prom_printf("Fixing bestcomm interrupts property\n"); 2963 prom_setprop(node, "/builtin/bestcom", "interrupts", 2964 bcomm_irq, sizeof(bcomm_irq)); 2965 } 2966 } 2967 2968 /* Fixup sound interrupts property */ 2969 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound")); 2970 if (PHANDLE_VALID(node)) { 2971 rv = prom_getprop(node, "interrupts", prop, sizeof(prop)); 2972 if (rv == PROM_ERROR) { 2973 prom_printf("Adding sound interrupts property\n"); 2974 prom_setprop(node, "/builtin/sound", "interrupts", 2975 sound_irq, sizeof(sound_irq)); 2976 } 2977 } 
2978 2979 /* Make sure ethernet phy-handle property exists */ 2980 fixup_device_tree_efika_add_phy(); 2981 } 2982 #else 2983 #define fixup_device_tree_efika() 2984 #endif 2985 2986 #ifdef CONFIG_PPC_PASEMI_NEMO 2987 /* 2988 * CFE supplied on Nemo is broken in several ways, biggest 2989 * problem is that it reassigns ISA interrupts to unused mpic ints. 2990 * Add an interrupt-controller property for the io-bridge to use 2991 * and correct the ints so we can attach them to an irq_domain 2992 */ 2993 static void __init fixup_device_tree_pasemi(void) 2994 { 2995 u32 interrupts[2], parent, rval, val = 0; 2996 char *name, *pci_name; 2997 phandle iob, node; 2998 2999 /* Find the root pci node */ 3000 name = "/pxp@0,e0000000"; 3001 iob = call_prom("finddevice", 1, 1, ADDR(name)); 3002 if (!PHANDLE_VALID(iob)) 3003 return; 3004 3005 /* check if interrupt-controller node set yet */ 3006 if (prom_getproplen(iob, "interrupt-controller") !=PROM_ERROR) 3007 return; 3008 3009 prom_printf("adding interrupt-controller property for SB600...\n"); 3010 3011 prom_setprop(iob, name, "interrupt-controller", &val, 0); 3012 3013 pci_name = "/pxp@0,e0000000/pci@11"; 3014 node = call_prom("finddevice", 1, 1, ADDR(pci_name)); 3015 parent = ADDR(iob); 3016 3017 for( ; prom_next_node(&node); ) { 3018 /* scan each node for one with an interrupt */ 3019 if (!PHANDLE_VALID(node)) 3020 continue; 3021 3022 rval = prom_getproplen(node, "interrupts"); 3023 if (rval == 0 || rval == PROM_ERROR) 3024 continue; 3025 3026 prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts)); 3027 if ((interrupts[0] < 212) || (interrupts[0] > 222)) 3028 continue; 3029 3030 /* found a node, update both interrupts and interrupt-parent */ 3031 if ((interrupts[0] >= 212) && (interrupts[0] <= 215)) 3032 interrupts[0] -= 203; 3033 if ((interrupts[0] >= 216) && (interrupts[0] <= 220)) 3034 interrupts[0] -= 213; 3035 if (interrupts[0] == 221) 3036 interrupts[0] = 14; 3037 if (interrupts[0] == 222) 3038 interrupts[0] = 8; 3039 3040 prom_setprop(node, pci_name, "interrupts", interrupts, 3041 sizeof(interrupts)); 3042 prom_setprop(node, pci_name, "interrupt-parent", &parent, 3043 sizeof(parent)); 3044 } 3045 3046 /* 3047 * The io-bridge has device_type set to 'io-bridge' change it to 'isa' 3048 * so that generic isa-bridge code can add the SB600 and its on-board 3049 * peripherals. 3050 */ 3051 name = "/pxp@0,e0000000/io-bridge@0"; 3052 iob = call_prom("finddevice", 1, 1, ADDR(name)); 3053 if (!PHANDLE_VALID(iob)) 3054 return; 3055 3056 /* device_type is already set, just change it. 
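 * prom_setprop() ends up using the firmware "setprop" service here,
 * which simply overwrites the existing value.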
*/ 3057 3058 prom_printf("Changing device_type of SB600 node...\n"); 3059 3060 prom_setprop(iob, name, "device_type", "isa", sizeof("isa")); 3061 } 3062 #else /* !CONFIG_PPC_PASEMI_NEMO */ 3063 static inline void fixup_device_tree_pasemi(void) { } 3064 #endif 3065 3066 static void __init fixup_device_tree(void) 3067 { 3068 fixup_device_tree_maple(); 3069 fixup_device_tree_maple_memory_controller(); 3070 fixup_device_tree_chrp(); 3071 fixup_device_tree_pmac(); 3072 fixup_device_tree_efika(); 3073 fixup_device_tree_pasemi(); 3074 } 3075 3076 static void __init prom_find_boot_cpu(void) 3077 { 3078 __be32 rval; 3079 ihandle prom_cpu; 3080 phandle cpu_pkg; 3081 3082 rval = 0; 3083 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0) 3084 return; 3085 prom_cpu = be32_to_cpu(rval); 3086 3087 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu); 3088 3089 if (!PHANDLE_VALID(cpu_pkg)) 3090 return; 3091 3092 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval)); 3093 prom.cpu = be32_to_cpu(rval); 3094 3095 prom_debug("Booting CPU hw index = %d\n", prom.cpu); 3096 } 3097 3098 static void __init prom_check_initrd(unsigned long r3, unsigned long r4) 3099 { 3100 #ifdef CONFIG_BLK_DEV_INITRD 3101 if (r3 && r4 && r4 != 0xdeadbeef) { 3102 __be64 val; 3103 3104 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3; 3105 prom_initrd_end = prom_initrd_start + r4; 3106 3107 val = cpu_to_be64(prom_initrd_start); 3108 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start", 3109 &val, sizeof(val)); 3110 val = cpu_to_be64(prom_initrd_end); 3111 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end", 3112 &val, sizeof(val)); 3113 3114 reserve_mem(prom_initrd_start, 3115 prom_initrd_end - prom_initrd_start); 3116 3117 prom_debug("initrd_start=0x%lx\n", prom_initrd_start); 3118 prom_debug("initrd_end=0x%lx\n", prom_initrd_end); 3119 } 3120 #endif /* CONFIG_BLK_DEV_INITRD */ 3121 } 3122 3123 #ifdef CONFIG_PPC64 3124 #ifdef CONFIG_RELOCATABLE 3125 static void reloc_toc(void) 3126 { 3127 } 3128 3129 static void unreloc_toc(void) 3130 { 3131 } 3132 #else 3133 static void __reloc_toc(unsigned long offset, unsigned long nr_entries) 3134 { 3135 unsigned long i; 3136 unsigned long *toc_entry; 3137 3138 /* Get the start of the TOC by using r2 directly. */ 3139 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry)); 3140 3141 for (i = 0; i < nr_entries; i++) { 3142 *toc_entry = *toc_entry + offset; 3143 toc_entry++; 3144 } 3145 } 3146 3147 static void reloc_toc(void) 3148 { 3149 unsigned long offset = reloc_offset(); 3150 unsigned long nr_entries = 3151 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); 3152 3153 __reloc_toc(offset, nr_entries); 3154 3155 mb(); 3156 } 3157 3158 static void unreloc_toc(void) 3159 { 3160 unsigned long offset = reloc_offset(); 3161 unsigned long nr_entries = 3162 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); 3163 3164 mb(); 3165 3166 __reloc_toc(-offset, nr_entries); 3167 } 3168 #endif 3169 #endif 3170 3171 /* 3172 * We enter here early on, when the Open Firmware prom is still 3173 * handling exceptions and the MMU hash table for us. 
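 *
 * Broadly, prom_init() below:
 *	- relocates itself (GOT on ppc32, TOC on ppc64) and clears the BSS,
 *	- gets handles for /chosen, the root node and stdout,
 *	- works out the platform type, checks for an initrd and parses
 *	  the command line,
 *	- sets up its allocator, instantiates RTAS (and, on pSeries,
 *	  the TCE tables), and parks the secondary CPUs in the hold loop,
 *	- applies the device-tree fixups, flattens the device tree,
 *	  quiesces OF and finally jumps to __start().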
3174 */ 3175 3176 unsigned long __init prom_init(unsigned long r3, unsigned long r4, 3177 unsigned long pp, 3178 unsigned long r6, unsigned long r7, 3179 unsigned long kbase) 3180 { 3181 unsigned long hdr; 3182 3183 #ifdef CONFIG_PPC32 3184 unsigned long offset = reloc_offset(); 3185 reloc_got2(offset); 3186 #else 3187 reloc_toc(); 3188 #endif 3189 3190 /* 3191 * First zero the BSS 3192 */ 3193 memset(&__bss_start, 0, __bss_stop - __bss_start); 3194 3195 /* 3196 * Init interface to Open Firmware, get some node references, 3197 * like /chosen 3198 */ 3199 prom_init_client_services(pp); 3200 3201 /* 3202 * See if this OF is old enough that we need to do explicit maps 3203 * and other workarounds 3204 */ 3205 prom_find_mmu(); 3206 3207 /* 3208 * Init prom stdout device 3209 */ 3210 prom_init_stdout(); 3211 3212 prom_printf("Preparing to boot %s", linux_banner); 3213 3214 /* 3215 * Get default machine type. At this point, we do not differentiate 3216 * between pSeries SMP and pSeries LPAR 3217 */ 3218 of_platform = prom_find_machine_type(); 3219 prom_printf("Detected machine type: %x\n", of_platform); 3220 3221 #ifndef CONFIG_NONSTATIC_KERNEL 3222 /* Bail if this is a kdump kernel. */ 3223 if (PHYSICAL_START > 0) 3224 prom_panic("Error: You can't boot a kdump kernel from OF!\n"); 3225 #endif 3226 3227 /* 3228 * Check for an initrd 3229 */ 3230 prom_check_initrd(r3, r4); 3231 3232 /* 3233 * Do early parsing of command line 3234 */ 3235 early_cmdline_parse(); 3236 3237 #ifdef CONFIG_PPC_PSERIES 3238 /* 3239 * On pSeries, inform the firmware about our capabilities 3240 */ 3241 if (of_platform == PLATFORM_PSERIES || 3242 of_platform == PLATFORM_PSERIES_LPAR) 3243 prom_send_capabilities(); 3244 #endif 3245 3246 /* 3247 * Copy the CPU hold code 3248 */ 3249 if (of_platform != PLATFORM_POWERMAC) 3250 copy_and_flush(0, kbase, 0x100, 0); 3251 3252 /* 3253 * Initialize memory management within prom_init 3254 */ 3255 prom_init_mem(); 3256 3257 /* 3258 * Determine which cpu is actually running right _now_ 3259 */ 3260 prom_find_boot_cpu(); 3261 3262 /* 3263 * Initialize display devices 3264 */ 3265 prom_check_displays(); 3266 3267 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__) 3268 /* 3269 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else 3270 * that uses the allocator, we need to make sure we get the top of memory 3271 * available for us here... 3272 */ 3273 if (of_platform == PLATFORM_PSERIES) 3274 prom_initialize_tce_table(); 3275 #endif 3276 3277 /* 3278 * On non-powermacs, try to instantiate RTAS. PowerMacs don't 3279 * have a usable RTAS implementation. 3280 */ 3281 if (of_platform != PLATFORM_POWERMAC) 3282 prom_instantiate_rtas(); 3283 3284 #ifdef CONFIG_PPC64 3285 /* instantiate sml */ 3286 prom_instantiate_sml(); 3287 #endif 3288 3289 /* 3290 * On non-powermacs, put all CPUs in spin-loops. 
3291 * 3292 * PowerMacs use a different mechanism to spin CPUs 3293 * 3294 * (This must be done after instanciating RTAS) 3295 */ 3296 if (of_platform != PLATFORM_POWERMAC) 3297 prom_hold_cpus(); 3298 3299 /* 3300 * Fill in some infos for use by the kernel later on 3301 */ 3302 if (prom_memory_limit) { 3303 __be64 val = cpu_to_be64(prom_memory_limit); 3304 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit", 3305 &val, sizeof(val)); 3306 } 3307 #ifdef CONFIG_PPC64 3308 if (prom_iommu_off) 3309 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off", 3310 NULL, 0); 3311 3312 if (prom_iommu_force_on) 3313 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on", 3314 NULL, 0); 3315 3316 if (prom_tce_alloc_start) { 3317 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start", 3318 &prom_tce_alloc_start, 3319 sizeof(prom_tce_alloc_start)); 3320 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end", 3321 &prom_tce_alloc_end, 3322 sizeof(prom_tce_alloc_end)); 3323 } 3324 #endif 3325 3326 /* 3327 * Fixup any known bugs in the device-tree 3328 */ 3329 fixup_device_tree(); 3330 3331 /* 3332 * Now finally create the flattened device-tree 3333 */ 3334 prom_printf("copying OF device tree...\n"); 3335 flatten_device_tree(); 3336 3337 /* 3338 * in case stdin is USB and still active on IBM machines... 3339 * Unfortunately quiesce crashes on some powermacs if we have 3340 * closed stdin already (in particular the powerbook 101). 3341 */ 3342 if (of_platform != PLATFORM_POWERMAC) 3343 prom_close_stdin(); 3344 3345 /* 3346 * Call OF "quiesce" method to shut down pending DMA's from 3347 * devices etc... 3348 */ 3349 prom_printf("Quiescing Open Firmware ...\n"); 3350 call_prom("quiesce", 0, 0); 3351 3352 /* 3353 * And finally, call the kernel passing it the flattened device 3354 * tree and NULL as r5, thus triggering the new entry point which 3355 * is common to us and kexec 3356 */ 3357 hdr = dt_header_start; 3358 3359 /* Don't print anything after quiesce under OPAL, it crashes OFW */ 3360 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase); 3361 prom_debug("->dt_header_start=0x%lx\n", hdr); 3362 3363 #ifdef CONFIG_PPC32 3364 reloc_got2(-offset); 3365 #else 3366 unreloc_toc(); 3367 #endif 3368 3369 __start(hdr, kbase, 0, 0, 0, 0, 0); 3370 3371 return 0; 3372 } 3373