1 /* 2 * Procedures for interfacing to Open Firmware. 3 * 4 * Paul Mackerras August 1996. 5 * Copyright (C) 1996-2005 Paul Mackerras. 6 * 7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. 8 * {engebret|bergner}@us.ibm.com 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public License 12 * as published by the Free Software Foundation; either version 13 * 2 of the License, or (at your option) any later version. 14 */ 15 16 #undef DEBUG_PROM 17 18 /* we cannot use FORTIFY as it brings in new symbols */ 19 #define __NO_FORTIFY 20 21 #include <stdarg.h> 22 #include <linux/kernel.h> 23 #include <linux/string.h> 24 #include <linux/init.h> 25 #include <linux/threads.h> 26 #include <linux/spinlock.h> 27 #include <linux/types.h> 28 #include <linux/pci.h> 29 #include <linux/proc_fs.h> 30 #include <linux/delay.h> 31 #include <linux/initrd.h> 32 #include <linux/bitops.h> 33 #include <asm/prom.h> 34 #include <asm/rtas.h> 35 #include <asm/page.h> 36 #include <asm/processor.h> 37 #include <asm/irq.h> 38 #include <asm/io.h> 39 #include <asm/smp.h> 40 #include <asm/mmu.h> 41 #include <asm/pgtable.h> 42 #include <asm/iommu.h> 43 #include <asm/btext.h> 44 #include <asm/sections.h> 45 #include <asm/machdep.h> 46 #include <asm/asm-prototypes.h> 47 48 #include <linux/linux_logo.h> 49 50 /* All of prom_init bss lives here */ 51 #define __prombss __section(.bss.prominit) 52 53 /* 54 * Eventually bump that one up 55 */ 56 #define DEVTREE_CHUNK_SIZE 0x100000 57 58 /* 59 * This is the size of the local memory reserve map that gets copied 60 * into the boot params passed to the kernel. 
That size is totally 61 * flexible as the kernel just reads the list until it encounters an 62 * entry with size 0, so it can be changed without breaking binary 63 * compatibility 64 */ 65 #define MEM_RESERVE_MAP_SIZE 8 66 67 /* 68 * prom_init() is called very early on, before the kernel text 69 * and data have been mapped to KERNELBASE. At this point the code 70 * is running at whatever address it has been loaded at. 71 * On ppc32 we compile with -mrelocatable, which means that references 72 * to extern and static variables get relocated automatically. 73 * ppc64 objects are always relocatable, we just need to relocate the 74 * TOC. 75 * 76 * Because OF may have mapped I/O devices into the area starting at 77 * KERNELBASE, particularly on CHRP machines, we can't safely call 78 * OF once the kernel has been mapped to KERNELBASE. Therefore all 79 * OF calls must be done within prom_init(). 80 * 81 * ADDR is used in calls to call_prom. The 4th and following 82 * arguments to call_prom should be 32-bit values. 83 * On ppc64, 64 bit values are truncated to 32 bits (and 84 * fortunately don't get interpreted as two arguments). 85 */ 86 #define ADDR(x) (u32)(unsigned long)(x) 87 88 #ifdef CONFIG_PPC64 89 #define OF_WORKAROUNDS 0 90 #else 91 #define OF_WORKAROUNDS of_workarounds 92 static int of_workarounds __prombss; 93 #endif 94 95 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */ 96 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */ 97 98 #define PROM_BUG() do { \ 99 prom_printf("kernel BUG at %s line 0x%x!\n", \ 100 __FILE__, __LINE__); \ 101 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \ 102 } while (0) 103 104 #ifdef DEBUG_PROM 105 #define prom_debug(x...) prom_printf(x) 106 #else 107 #define prom_debug(x...) 
do { } while (0) 108 #endif 109 110 111 typedef u32 prom_arg_t; 112 113 struct prom_args { 114 __be32 service; 115 __be32 nargs; 116 __be32 nret; 117 __be32 args[10]; 118 }; 119 120 struct prom_t { 121 ihandle root; 122 phandle chosen; 123 int cpu; 124 ihandle stdout; 125 ihandle mmumap; 126 ihandle memory; 127 }; 128 129 struct mem_map_entry { 130 __be64 base; 131 __be64 size; 132 }; 133 134 typedef __be32 cell_t; 135 136 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5, 137 unsigned long r6, unsigned long r7, unsigned long r8, 138 unsigned long r9); 139 140 #ifdef CONFIG_PPC64 141 extern int enter_prom(struct prom_args *args, unsigned long entry); 142 #else 143 static inline int enter_prom(struct prom_args *args, unsigned long entry) 144 { 145 return ((int (*)(struct prom_args *))entry)(args); 146 } 147 #endif 148 149 extern void copy_and_flush(unsigned long dest, unsigned long src, 150 unsigned long size, unsigned long offset); 151 152 /* prom structure */ 153 static struct prom_t __prombss prom; 154 155 static unsigned long __prombss prom_entry; 156 157 static char __prombss of_stdout_device[256]; 158 static char __prombss prom_scratch[256]; 159 160 static unsigned long __prombss dt_header_start; 161 static unsigned long __prombss dt_struct_start, dt_struct_end; 162 static unsigned long __prombss dt_string_start, dt_string_end; 163 164 static unsigned long __prombss prom_initrd_start, prom_initrd_end; 165 166 #ifdef CONFIG_PPC64 167 static int __prombss prom_iommu_force_on; 168 static int __prombss prom_iommu_off; 169 static unsigned long __prombss prom_tce_alloc_start; 170 static unsigned long __prombss prom_tce_alloc_end; 171 #endif 172 173 #ifdef CONFIG_PPC_PSERIES 174 static bool __prombss prom_radix_disable; 175 static bool __prombss prom_xive_disable; 176 #endif 177 178 struct platform_support { 179 bool hash_mmu; 180 bool radix_mmu; 181 bool radix_gtse; 182 bool xive; 183 }; 184 185 /* Platforms codes are now obsolete in the 
kernel. Now only used within this 186 * file and ultimately gone too. Feel free to change them if you need, they 187 * are not shared with anything outside of this file anymore 188 */ 189 #define PLATFORM_PSERIES 0x0100 190 #define PLATFORM_PSERIES_LPAR 0x0101 191 #define PLATFORM_LPAR 0x0001 192 #define PLATFORM_POWERMAC 0x0400 193 #define PLATFORM_GENERIC 0x0500 194 195 static int __prombss of_platform; 196 197 static char __prombss prom_cmd_line[COMMAND_LINE_SIZE]; 198 199 static unsigned long __prombss prom_memory_limit; 200 201 static unsigned long __prombss alloc_top; 202 static unsigned long __prombss alloc_top_high; 203 static unsigned long __prombss alloc_bottom; 204 static unsigned long __prombss rmo_top; 205 static unsigned long __prombss ram_top; 206 207 static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE]; 208 static int __prombss mem_reserve_cnt; 209 210 static cell_t __prombss regbuf[1024]; 211 212 static bool __prombss rtas_has_query_cpu_stopped; 213 214 215 /* 216 * Error results ... some OF calls will return "-1" on error, some 217 * will return 0, some will return either. To simplify, here are 218 * macros to use with any ihandle or phandle return value to check if 219 * it is valid 220 */ 221 222 #define PROM_ERROR (-1u) 223 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR) 224 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR) 225 226 /* Copied from lib/string.c and lib/kstrtox.c */ 227 228 static int __init prom_strcmp(const char *cs, const char *ct) 229 { 230 unsigned char c1, c2; 231 232 while (1) { 233 c1 = *cs++; 234 c2 = *ct++; 235 if (c1 != c2) 236 return c1 < c2 ? 
-1 : 1; 237 if (!c1) 238 break; 239 } 240 return 0; 241 } 242 243 static char __init *prom_strcpy(char *dest, const char *src) 244 { 245 char *tmp = dest; 246 247 while ((*dest++ = *src++) != '\0') 248 /* nothing */; 249 return tmp; 250 } 251 252 static int __init prom_strncmp(const char *cs, const char *ct, size_t count) 253 { 254 unsigned char c1, c2; 255 256 while (count) { 257 c1 = *cs++; 258 c2 = *ct++; 259 if (c1 != c2) 260 return c1 < c2 ? -1 : 1; 261 if (!c1) 262 break; 263 count--; 264 } 265 return 0; 266 } 267 268 static size_t __init prom_strlen(const char *s) 269 { 270 const char *sc; 271 272 for (sc = s; *sc != '\0'; ++sc) 273 /* nothing */; 274 return sc - s; 275 } 276 277 static int __init prom_memcmp(const void *cs, const void *ct, size_t count) 278 { 279 const unsigned char *su1, *su2; 280 int res = 0; 281 282 for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) 283 if ((res = *su1 - *su2) != 0) 284 break; 285 return res; 286 } 287 288 static char __init *prom_strstr(const char *s1, const char *s2) 289 { 290 size_t l1, l2; 291 292 l2 = prom_strlen(s2); 293 if (!l2) 294 return (char *)s1; 295 l1 = prom_strlen(s1); 296 while (l1 >= l2) { 297 l1--; 298 if (!prom_memcmp(s1, s2, l2)) 299 return (char *)s1; 300 s1++; 301 } 302 return NULL; 303 } 304 305 static size_t __init prom_strlcpy(char *dest, const char *src, size_t size) 306 { 307 size_t ret = prom_strlen(src); 308 309 if (size) { 310 size_t len = (ret >= size) ? 
size - 1 : ret; 311 memcpy(dest, src, len); 312 dest[len] = '\0'; 313 } 314 return ret; 315 } 316 317 #ifdef CONFIG_PPC_PSERIES 318 static int __init prom_strtobool(const char *s, bool *res) 319 { 320 if (!s) 321 return -EINVAL; 322 323 switch (s[0]) { 324 case 'y': 325 case 'Y': 326 case '1': 327 *res = true; 328 return 0; 329 case 'n': 330 case 'N': 331 case '0': 332 *res = false; 333 return 0; 334 case 'o': 335 case 'O': 336 switch (s[1]) { 337 case 'n': 338 case 'N': 339 *res = true; 340 return 0; 341 case 'f': 342 case 'F': 343 *res = false; 344 return 0; 345 default: 346 break; 347 } 348 default: 349 break; 350 } 351 352 return -EINVAL; 353 } 354 #endif 355 356 /* This is the one and *ONLY* place where we actually call open 357 * firmware. 358 */ 359 360 static int __init call_prom(const char *service, int nargs, int nret, ...) 361 { 362 int i; 363 struct prom_args args; 364 va_list list; 365 366 args.service = cpu_to_be32(ADDR(service)); 367 args.nargs = cpu_to_be32(nargs); 368 args.nret = cpu_to_be32(nret); 369 370 va_start(list, nret); 371 for (i = 0; i < nargs; i++) 372 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t)); 373 va_end(list); 374 375 for (i = 0; i < nret; i++) 376 args.args[nargs+i] = 0; 377 378 if (enter_prom(&args, prom_entry) < 0) 379 return PROM_ERROR; 380 381 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0; 382 } 383 384 static int __init call_prom_ret(const char *service, int nargs, int nret, 385 prom_arg_t *rets, ...) 
386 { 387 int i; 388 struct prom_args args; 389 va_list list; 390 391 args.service = cpu_to_be32(ADDR(service)); 392 args.nargs = cpu_to_be32(nargs); 393 args.nret = cpu_to_be32(nret); 394 395 va_start(list, rets); 396 for (i = 0; i < nargs; i++) 397 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t)); 398 va_end(list); 399 400 for (i = 0; i < nret; i++) 401 args.args[nargs+i] = 0; 402 403 if (enter_prom(&args, prom_entry) < 0) 404 return PROM_ERROR; 405 406 if (rets != NULL) 407 for (i = 1; i < nret; ++i) 408 rets[i-1] = be32_to_cpu(args.args[nargs+i]); 409 410 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0; 411 } 412 413 414 static void __init prom_print(const char *msg) 415 { 416 const char *p, *q; 417 418 if (prom.stdout == 0) 419 return; 420 421 for (p = msg; *p != 0; p = q) { 422 for (q = p; *q != 0 && *q != '\n'; ++q) 423 ; 424 if (q > p) 425 call_prom("write", 3, 1, prom.stdout, p, q - p); 426 if (*q == 0) 427 break; 428 ++q; 429 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2); 430 } 431 } 432 433 434 /* 435 * Both prom_print_hex & prom_print_dec takes an unsigned long as input so that 436 * we do not need __udivdi3 or __umoddi3 on 32bits. 
437 */ 438 static void __init prom_print_hex(unsigned long val) 439 { 440 int i, nibbles = sizeof(val)*2; 441 char buf[sizeof(val)*2+1]; 442 443 for (i = nibbles-1; i >= 0; i--) { 444 buf[i] = (val & 0xf) + '0'; 445 if (buf[i] > '9') 446 buf[i] += ('a'-'0'-10); 447 val >>= 4; 448 } 449 buf[nibbles] = '\0'; 450 call_prom("write", 3, 1, prom.stdout, buf, nibbles); 451 } 452 453 /* max number of decimal digits in an unsigned long */ 454 #define UL_DIGITS 21 455 static void __init prom_print_dec(unsigned long val) 456 { 457 int i, size; 458 char buf[UL_DIGITS+1]; 459 460 for (i = UL_DIGITS-1; i >= 0; i--) { 461 buf[i] = (val % 10) + '0'; 462 val = val/10; 463 if (val == 0) 464 break; 465 } 466 /* shift stuff down */ 467 size = UL_DIGITS - i; 468 call_prom("write", 3, 1, prom.stdout, buf+i, size); 469 } 470 471 __printf(1, 2) 472 static void __init prom_printf(const char *format, ...) 473 { 474 const char *p, *q, *s; 475 va_list args; 476 unsigned long v; 477 long vs; 478 int n = 0; 479 480 va_start(args, format); 481 for (p = format; *p != 0; p = q) { 482 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q) 483 ; 484 if (q > p) 485 call_prom("write", 3, 1, prom.stdout, p, q - p); 486 if (*q == 0) 487 break; 488 if (*q == '\n') { 489 ++q; 490 call_prom("write", 3, 1, prom.stdout, 491 ADDR("\r\n"), 2); 492 continue; 493 } 494 ++q; 495 if (*q == 0) 496 break; 497 while (*q == 'l') { 498 ++q; 499 ++n; 500 } 501 switch (*q) { 502 case 's': 503 ++q; 504 s = va_arg(args, const char *); 505 prom_print(s); 506 break; 507 case 'x': 508 ++q; 509 switch (n) { 510 case 0: 511 v = va_arg(args, unsigned int); 512 break; 513 case 1: 514 v = va_arg(args, unsigned long); 515 break; 516 case 2: 517 default: 518 v = va_arg(args, unsigned long long); 519 break; 520 } 521 prom_print_hex(v); 522 break; 523 case 'u': 524 ++q; 525 switch (n) { 526 case 0: 527 v = va_arg(args, unsigned int); 528 break; 529 case 1: 530 v = va_arg(args, unsigned long); 531 break; 532 case 2: 533 default: 534 v = 
va_arg(args, unsigned long long); 535 break; 536 } 537 prom_print_dec(v); 538 break; 539 case 'd': 540 ++q; 541 switch (n) { 542 case 0: 543 vs = va_arg(args, int); 544 break; 545 case 1: 546 vs = va_arg(args, long); 547 break; 548 case 2: 549 default: 550 vs = va_arg(args, long long); 551 break; 552 } 553 if (vs < 0) { 554 prom_print("-"); 555 vs = -vs; 556 } 557 prom_print_dec(vs); 558 break; 559 } 560 } 561 va_end(args); 562 } 563 564 565 static unsigned int __init prom_claim(unsigned long virt, unsigned long size, 566 unsigned long align) 567 { 568 569 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) { 570 /* 571 * Old OF requires we claim physical and virtual separately 572 * and then map explicitly (assuming virtual mode) 573 */ 574 int ret; 575 prom_arg_t result; 576 577 ret = call_prom_ret("call-method", 5, 2, &result, 578 ADDR("claim"), prom.memory, 579 align, size, virt); 580 if (ret != 0 || result == -1) 581 return -1; 582 ret = call_prom_ret("call-method", 5, 2, &result, 583 ADDR("claim"), prom.mmumap, 584 align, size, virt); 585 if (ret != 0) { 586 call_prom("call-method", 4, 1, ADDR("release"), 587 prom.memory, size, virt); 588 return -1; 589 } 590 /* the 0x12 is M (coherence) + PP == read/write */ 591 call_prom("call-method", 6, 1, 592 ADDR("map"), prom.mmumap, 0x12, size, virt, virt); 593 return virt; 594 } 595 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size, 596 (prom_arg_t)align); 597 } 598 599 static void __init __attribute__((noreturn)) prom_panic(const char *reason) 600 { 601 prom_print(reason); 602 /* Do not call exit because it clears the screen on pmac 603 * it also causes some sort of double-fault on early pmacs */ 604 if (of_platform == PLATFORM_POWERMAC) 605 asm("trap\n"); 606 607 /* ToDo: should put up an SRC here on pSeries */ 608 call_prom("exit", 0, 0); 609 610 for (;;) /* should never get here */ 611 ; 612 } 613 614 615 static int __init prom_next_node(phandle *nodep) 616 { 617 phandle node; 618 619 if ((node = 
*nodep) != 0 620 && (*nodep = call_prom("child", 1, 1, node)) != 0) 621 return 1; 622 if ((*nodep = call_prom("peer", 1, 1, node)) != 0) 623 return 1; 624 for (;;) { 625 if ((node = call_prom("parent", 1, 1, node)) == 0) 626 return 0; 627 if ((*nodep = call_prom("peer", 1, 1, node)) != 0) 628 return 1; 629 } 630 } 631 632 static inline int __init prom_getprop(phandle node, const char *pname, 633 void *value, size_t valuelen) 634 { 635 return call_prom("getprop", 4, 1, node, ADDR(pname), 636 (u32)(unsigned long) value, (u32) valuelen); 637 } 638 639 static inline int __init prom_getproplen(phandle node, const char *pname) 640 { 641 return call_prom("getproplen", 2, 1, node, ADDR(pname)); 642 } 643 644 static void add_string(char **str, const char *q) 645 { 646 char *p = *str; 647 648 while (*q) 649 *p++ = *q++; 650 *p++ = ' '; 651 *str = p; 652 } 653 654 static char *tohex(unsigned int x) 655 { 656 static const char digits[] __initconst = "0123456789abcdef"; 657 static char result[9] __prombss; 658 int i; 659 660 result[8] = 0; 661 i = 8; 662 do { 663 --i; 664 result[i] = digits[x & 0xf]; 665 x >>= 4; 666 } while (x != 0 && i > 0); 667 return &result[i]; 668 } 669 670 static int __init prom_setprop(phandle node, const char *nodename, 671 const char *pname, void *value, size_t valuelen) 672 { 673 char cmd[256], *p; 674 675 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL)) 676 return call_prom("setprop", 4, 1, node, ADDR(pname), 677 (u32)(unsigned long) value, (u32) valuelen); 678 679 /* gah... 
setprop doesn't work on longtrail, have to use interpret */ 680 p = cmd; 681 add_string(&p, "dev"); 682 add_string(&p, nodename); 683 add_string(&p, tohex((u32)(unsigned long) value)); 684 add_string(&p, tohex(valuelen)); 685 add_string(&p, tohex(ADDR(pname))); 686 add_string(&p, tohex(prom_strlen(pname))); 687 add_string(&p, "property"); 688 *p = 0; 689 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd); 690 } 691 692 /* We can't use the standard versions because of relocation headaches. */ 693 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \ 694 || ('a' <= (c) && (c) <= 'f') \ 695 || ('A' <= (c) && (c) <= 'F')) 696 697 #define isdigit(c) ('0' <= (c) && (c) <= '9') 698 #define islower(c) ('a' <= (c) && (c) <= 'z') 699 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c)) 700 701 static unsigned long prom_strtoul(const char *cp, const char **endp) 702 { 703 unsigned long result = 0, base = 10, value; 704 705 if (*cp == '0') { 706 base = 8; 707 cp++; 708 if (toupper(*cp) == 'X') { 709 cp++; 710 base = 16; 711 } 712 } 713 714 while (isxdigit(*cp) && 715 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) { 716 result = result * base + value; 717 cp++; 718 } 719 720 if (endp) 721 *endp = cp; 722 723 return result; 724 } 725 726 static unsigned long prom_memparse(const char *ptr, const char **retptr) 727 { 728 unsigned long ret = prom_strtoul(ptr, retptr); 729 int shift = 0; 730 731 /* 732 * We can't use a switch here because GCC *may* generate a 733 * jump table which won't work, because we're not running at 734 * the address we're linked at. 
735 */ 736 if ('G' == **retptr || 'g' == **retptr) 737 shift = 30; 738 739 if ('M' == **retptr || 'm' == **retptr) 740 shift = 20; 741 742 if ('K' == **retptr || 'k' == **retptr) 743 shift = 10; 744 745 if (shift) { 746 ret <<= shift; 747 (*retptr)++; 748 } 749 750 return ret; 751 } 752 753 /* 754 * Early parsing of the command line passed to the kernel, used for 755 * "mem=x" and the options that affect the iommu 756 */ 757 static void __init early_cmdline_parse(void) 758 { 759 const char *opt; 760 761 char *p; 762 int l = 0; 763 764 prom_cmd_line[0] = 0; 765 p = prom_cmd_line; 766 if ((long)prom.chosen > 0) 767 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1); 768 if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && (l <= 0 || p[0] == '\0')) /* dbl check */ 769 prom_strlcpy(prom_cmd_line, CONFIG_CMDLINE, sizeof(prom_cmd_line)); 770 prom_printf("command line: %s\n", prom_cmd_line); 771 772 #ifdef CONFIG_PPC64 773 opt = prom_strstr(prom_cmd_line, "iommu="); 774 if (opt) { 775 prom_printf("iommu opt is: %s\n", opt); 776 opt += 6; 777 while (*opt && *opt == ' ') 778 opt++; 779 if (!prom_strncmp(opt, "off", 3)) 780 prom_iommu_off = 1; 781 else if (!prom_strncmp(opt, "force", 5)) 782 prom_iommu_force_on = 1; 783 } 784 #endif 785 opt = prom_strstr(prom_cmd_line, "mem="); 786 if (opt) { 787 opt += 4; 788 prom_memory_limit = prom_memparse(opt, (const char **)&opt); 789 #ifdef CONFIG_PPC64 790 /* Align to 16 MB == size of ppc64 large page */ 791 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000); 792 #endif 793 } 794 795 #ifdef CONFIG_PPC_PSERIES 796 prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT); 797 opt = prom_strstr(prom_cmd_line, "disable_radix"); 798 if (opt) { 799 opt += 13; 800 if (*opt && *opt == '=') { 801 bool val; 802 803 if (prom_strtobool(++opt, &val)) 804 prom_radix_disable = false; 805 else 806 prom_radix_disable = val; 807 } else 808 prom_radix_disable = true; 809 } 810 if (prom_radix_disable) 811 prom_debug("Radix disabled from 
cmdline\n"); 812 813 opt = prom_strstr(prom_cmd_line, "xive=off"); 814 if (opt) { 815 prom_xive_disable = true; 816 prom_debug("XIVE disabled from cmdline\n"); 817 } 818 #endif /* CONFIG_PPC_PSERIES */ 819 } 820 821 #ifdef CONFIG_PPC_PSERIES 822 /* 823 * The architecture vector has an array of PVR mask/value pairs, 824 * followed by # option vectors - 1, followed by the option vectors. 825 * 826 * See prom.h for the definition of the bits specified in the 827 * architecture vector. 828 */ 829 830 /* Firmware expects the value to be n - 1, where n is the # of vectors */ 831 #define NUM_VECTORS(n) ((n) - 1) 832 833 /* 834 * Firmware expects 1 + n - 2, where n is the length of the option vector in 835 * bytes. The 1 accounts for the length byte itself, the - 2 .. ? 836 */ 837 #define VECTOR_LENGTH(n) (1 + (n) - 2) 838 839 struct option_vector1 { 840 u8 byte1; 841 u8 arch_versions; 842 u8 arch_versions3; 843 } __packed; 844 845 struct option_vector2 { 846 u8 byte1; 847 __be16 reserved; 848 __be32 real_base; 849 __be32 real_size; 850 __be32 virt_base; 851 __be32 virt_size; 852 __be32 load_base; 853 __be32 min_rma; 854 __be32 min_load; 855 u8 min_rma_percent; 856 u8 max_pft_size; 857 } __packed; 858 859 struct option_vector3 { 860 u8 byte1; 861 u8 byte2; 862 } __packed; 863 864 struct option_vector4 { 865 u8 byte1; 866 u8 min_vp_cap; 867 } __packed; 868 869 struct option_vector5 { 870 u8 byte1; 871 u8 byte2; 872 u8 byte3; 873 u8 cmo; 874 u8 associativity; 875 u8 bin_opts; 876 u8 micro_checkpoint; 877 u8 reserved0; 878 __be32 max_cpus; 879 __be16 papr_level; 880 __be16 reserved1; 881 u8 platform_facilities; 882 u8 reserved2; 883 __be16 reserved3; 884 u8 subprocessors; 885 u8 byte22; 886 u8 intarch; 887 u8 mmu; 888 u8 hash_ext; 889 u8 radix_ext; 890 } __packed; 891 892 struct option_vector6 { 893 u8 reserved; 894 u8 secondary_pteg; 895 u8 os_name; 896 } __packed; 897 898 struct ibm_arch_vec { 899 struct { u32 mask, val; } pvrs[12]; 900 901 u8 num_vectors; 902 903 u8 
vec1_len; 904 struct option_vector1 vec1; 905 906 u8 vec2_len; 907 struct option_vector2 vec2; 908 909 u8 vec3_len; 910 struct option_vector3 vec3; 911 912 u8 vec4_len; 913 struct option_vector4 vec4; 914 915 u8 vec5_len; 916 struct option_vector5 vec5; 917 918 u8 vec6_len; 919 struct option_vector6 vec6; 920 } __packed; 921 922 static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = { 923 .pvrs = { 924 { 925 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */ 926 .val = cpu_to_be32(0x003a0000), 927 }, 928 { 929 .mask = cpu_to_be32(0xffff0000), /* POWER6 */ 930 .val = cpu_to_be32(0x003e0000), 931 }, 932 { 933 .mask = cpu_to_be32(0xffff0000), /* POWER7 */ 934 .val = cpu_to_be32(0x003f0000), 935 }, 936 { 937 .mask = cpu_to_be32(0xffff0000), /* POWER8E */ 938 .val = cpu_to_be32(0x004b0000), 939 }, 940 { 941 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */ 942 .val = cpu_to_be32(0x004c0000), 943 }, 944 { 945 .mask = cpu_to_be32(0xffff0000), /* POWER8 */ 946 .val = cpu_to_be32(0x004d0000), 947 }, 948 { 949 .mask = cpu_to_be32(0xffff0000), /* POWER9 */ 950 .val = cpu_to_be32(0x004e0000), 951 }, 952 { 953 .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */ 954 .val = cpu_to_be32(0x0f000005), 955 }, 956 { 957 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */ 958 .val = cpu_to_be32(0x0f000004), 959 }, 960 { 961 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */ 962 .val = cpu_to_be32(0x0f000003), 963 }, 964 { 965 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */ 966 .val = cpu_to_be32(0x0f000002), 967 }, 968 { 969 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */ 970 .val = cpu_to_be32(0x0f000001), 971 }, 972 }, 973 974 .num_vectors = NUM_VECTORS(6), 975 976 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)), 977 .vec1 = { 978 .byte1 = 0, 979 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 | 980 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07, 981 
.arch_versions3 = OV1_PPC_3_00, 982 }, 983 984 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)), 985 /* option vector 2: Open Firmware options supported */ 986 .vec2 = { 987 .byte1 = OV2_REAL_MODE, 988 .reserved = 0, 989 .real_base = cpu_to_be32(0xffffffff), 990 .real_size = cpu_to_be32(0xffffffff), 991 .virt_base = cpu_to_be32(0xffffffff), 992 .virt_size = cpu_to_be32(0xffffffff), 993 .load_base = cpu_to_be32(0xffffffff), 994 .min_rma = cpu_to_be32(512), /* 512MB min RMA */ 995 .min_load = cpu_to_be32(0xffffffff), /* full client load */ 996 .min_rma_percent = 0, /* min RMA percentage of total RAM */ 997 .max_pft_size = 48, /* max log_2(hash table size) */ 998 }, 999 1000 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)), 1001 /* option vector 3: processor options supported */ 1002 .vec3 = { 1003 .byte1 = 0, /* don't ignore, don't halt */ 1004 .byte2 = OV3_FP | OV3_VMX | OV3_DFP, 1005 }, 1006 1007 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)), 1008 /* option vector 4: IBM PAPR implementation */ 1009 .vec4 = { 1010 .byte1 = 0, /* don't halt */ 1011 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */ 1012 }, 1013 1014 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)), 1015 /* option vector 5: PAPR/OF options */ 1016 .vec5 = { 1017 .byte1 = 0, /* don't ignore, don't halt */ 1018 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) | 1019 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) | 1020 #ifdef CONFIG_PCI_MSI 1021 /* PCIe/MSI support. 
Without MSI full PCIe is not supported */ 1022 OV5_FEAT(OV5_MSI), 1023 #else 1024 0, 1025 #endif 1026 .byte3 = 0, 1027 .cmo = 1028 #ifdef CONFIG_PPC_SMLPAR 1029 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO), 1030 #else 1031 0, 1032 #endif 1033 .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN), 1034 .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT), 1035 .micro_checkpoint = 0, 1036 .reserved0 = 0, 1037 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */ 1038 .papr_level = 0, 1039 .reserved1 = 0, 1040 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842), 1041 .reserved2 = 0, 1042 .reserved3 = 0, 1043 .subprocessors = 1, 1044 .byte22 = OV5_FEAT(OV5_DRMEM_V2), 1045 .intarch = 0, 1046 .mmu = 0, 1047 .hash_ext = 0, 1048 .radix_ext = 0, 1049 }, 1050 1051 /* option vector 6: IBM PAPR hints */ 1052 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)), 1053 .vec6 = { 1054 .reserved = 0, 1055 .secondary_pteg = 0, 1056 .os_name = OV6_LINUX, 1057 }, 1058 }; 1059 1060 static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned; 1061 1062 /* Old method - ELF header with PT_NOTE sections only works on BE */ 1063 #ifdef __BIG_ENDIAN__ 1064 static const struct fake_elf { 1065 Elf32_Ehdr elfhdr; 1066 Elf32_Phdr phdr[2]; 1067 struct chrpnote { 1068 u32 namesz; 1069 u32 descsz; 1070 u32 type; 1071 char name[8]; /* "PowerPC" */ 1072 struct chrpdesc { 1073 u32 real_mode; 1074 u32 real_base; 1075 u32 real_size; 1076 u32 virt_base; 1077 u32 virt_size; 1078 u32 load_base; 1079 } chrpdesc; 1080 } chrpnote; 1081 struct rpanote { 1082 u32 namesz; 1083 u32 descsz; 1084 u32 type; 1085 char name[24]; /* "IBM,RPA-Client-Config" */ 1086 struct rpadesc { 1087 u32 lpar_affinity; 1088 u32 min_rmo_size; 1089 u32 min_rmo_percent; 1090 u32 max_pft_size; 1091 u32 splpar; 1092 u32 min_load; 1093 u32 new_mem_def; 1094 u32 ignore_me; 1095 } rpadesc; 1096 } rpanote; 1097 } fake_elf __initconst = { 1098 
.elfhdr = { 1099 .e_ident = { 0x7f, 'E', 'L', 'F', 1100 ELFCLASS32, ELFDATA2MSB, EV_CURRENT }, 1101 .e_type = ET_EXEC, /* yeah right */ 1102 .e_machine = EM_PPC, 1103 .e_version = EV_CURRENT, 1104 .e_phoff = offsetof(struct fake_elf, phdr), 1105 .e_phentsize = sizeof(Elf32_Phdr), 1106 .e_phnum = 2 1107 }, 1108 .phdr = { 1109 [0] = { 1110 .p_type = PT_NOTE, 1111 .p_offset = offsetof(struct fake_elf, chrpnote), 1112 .p_filesz = sizeof(struct chrpnote) 1113 }, [1] = { 1114 .p_type = PT_NOTE, 1115 .p_offset = offsetof(struct fake_elf, rpanote), 1116 .p_filesz = sizeof(struct rpanote) 1117 } 1118 }, 1119 .chrpnote = { 1120 .namesz = sizeof("PowerPC"), 1121 .descsz = sizeof(struct chrpdesc), 1122 .type = 0x1275, 1123 .name = "PowerPC", 1124 .chrpdesc = { 1125 .real_mode = ~0U, /* ~0 means "don't care" */ 1126 .real_base = ~0U, 1127 .real_size = ~0U, 1128 .virt_base = ~0U, 1129 .virt_size = ~0U, 1130 .load_base = ~0U 1131 }, 1132 }, 1133 .rpanote = { 1134 .namesz = sizeof("IBM,RPA-Client-Config"), 1135 .descsz = sizeof(struct rpadesc), 1136 .type = 0x12759999, 1137 .name = "IBM,RPA-Client-Config", 1138 .rpadesc = { 1139 .lpar_affinity = 0, 1140 .min_rmo_size = 64, /* in megabytes */ 1141 .min_rmo_percent = 0, 1142 .max_pft_size = 48, /* 2^48 bytes max PFT size */ 1143 .splpar = 1, 1144 .min_load = ~0U, 1145 .new_mem_def = 0 1146 } 1147 } 1148 }; 1149 #endif /* __BIG_ENDIAN__ */ 1150 1151 static int __init prom_count_smt_threads(void) 1152 { 1153 phandle node; 1154 char type[64]; 1155 unsigned int plen; 1156 1157 /* Pick up th first CPU node we can find */ 1158 for (node = 0; prom_next_node(&node); ) { 1159 type[0] = 0; 1160 prom_getprop(node, "device_type", type, sizeof(type)); 1161 1162 if (prom_strcmp(type, "cpu")) 1163 continue; 1164 /* 1165 * There is an entry for each smt thread, each entry being 1166 * 4 bytes long. All cpus should have the same number of 1167 * smt threads, so return after finding the first. 
1168 */ 1169 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s"); 1170 if (plen == PROM_ERROR) 1171 break; 1172 plen >>= 2; 1173 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen); 1174 1175 /* Sanity check */ 1176 if (plen < 1 || plen > 64) { 1177 prom_printf("Threads per core %lu out of bounds, assuming 1\n", 1178 (unsigned long)plen); 1179 return 1; 1180 } 1181 return plen; 1182 } 1183 prom_debug("No threads found, assuming 1 per core\n"); 1184 1185 return 1; 1186 1187 } 1188 1189 static void __init prom_parse_mmu_model(u8 val, 1190 struct platform_support *support) 1191 { 1192 switch (val) { 1193 case OV5_FEAT(OV5_MMU_DYNAMIC): 1194 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */ 1195 prom_debug("MMU - either supported\n"); 1196 support->radix_mmu = !prom_radix_disable; 1197 support->hash_mmu = true; 1198 break; 1199 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */ 1200 prom_debug("MMU - radix only\n"); 1201 if (prom_radix_disable) { 1202 /* 1203 * If we __have__ to do radix, we're better off ignoring 1204 * the command line rather than not booting. 
1205 */ 1206 prom_printf("WARNING: Ignoring cmdline option disable_radix\n"); 1207 } 1208 support->radix_mmu = true; 1209 break; 1210 case OV5_FEAT(OV5_MMU_HASH): 1211 prom_debug("MMU - hash only\n"); 1212 support->hash_mmu = true; 1213 break; 1214 default: 1215 prom_debug("Unknown mmu support option: 0x%x\n", val); 1216 break; 1217 } 1218 } 1219 1220 static void __init prom_parse_xive_model(u8 val, 1221 struct platform_support *support) 1222 { 1223 switch (val) { 1224 case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */ 1225 prom_debug("XIVE - either mode supported\n"); 1226 support->xive = !prom_xive_disable; 1227 break; 1228 case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */ 1229 prom_debug("XIVE - exploitation mode supported\n"); 1230 if (prom_xive_disable) { 1231 /* 1232 * If we __have__ to do XIVE, we're better off ignoring 1233 * the command line rather than not booting. 1234 */ 1235 prom_printf("WARNING: Ignoring cmdline option xive=off\n"); 1236 } 1237 support->xive = true; 1238 break; 1239 case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */ 1240 prom_debug("XIVE - legacy mode supported\n"); 1241 break; 1242 default: 1243 prom_debug("Unknown xive support option: 0x%x\n", val); 1244 break; 1245 } 1246 } 1247 1248 static void __init prom_parse_platform_support(u8 index, u8 val, 1249 struct platform_support *support) 1250 { 1251 switch (index) { 1252 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */ 1253 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support); 1254 break; 1255 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */ 1256 if (val & OV5_FEAT(OV5_RADIX_GTSE)) { 1257 prom_debug("Radix - GTSE supported\n"); 1258 support->radix_gtse = true; 1259 } 1260 break; 1261 case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */ 1262 prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT), 1263 support); 1264 break; 1265 } 1266 } 1267 1268 static void __init prom_check_platform_support(void) 1269 { 1270 struct platform_support supported = { 
1271 .hash_mmu = false, 1272 .radix_mmu = false, 1273 .radix_gtse = false, 1274 .xive = false 1275 }; 1276 int prop_len = prom_getproplen(prom.chosen, 1277 "ibm,arch-vec-5-platform-support"); 1278 1279 /* 1280 * First copy the architecture vec template 1281 * 1282 * use memcpy() instead of *vec = *vec_template so that GCC replaces it 1283 * by __memcpy() when KASAN is active 1284 */ 1285 memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template, 1286 sizeof(ibm_architecture_vec)); 1287 1288 if (prop_len > 1) { 1289 int i; 1290 u8 vec[8]; 1291 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n", 1292 prop_len); 1293 if (prop_len > sizeof(vec)) 1294 prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n", 1295 prop_len); 1296 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", 1297 &vec, sizeof(vec)); 1298 for (i = 0; i < sizeof(vec); i += 2) { 1299 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2 1300 , vec[i] 1301 , vec[i + 1]); 1302 prom_parse_platform_support(vec[i], vec[i + 1], 1303 &supported); 1304 } 1305 } 1306 1307 if (supported.radix_mmu && supported.radix_gtse && 1308 IS_ENABLED(CONFIG_PPC_RADIX_MMU)) { 1309 /* Radix preferred - but we require GTSE for now */ 1310 prom_debug("Asking for radix with GTSE\n"); 1311 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX); 1312 ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE); 1313 } else if (supported.hash_mmu) { 1314 /* Default to hash mmu (if we can) */ 1315 prom_debug("Asking for hash\n"); 1316 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH); 1317 } else { 1318 /* We're probably on a legacy hypervisor */ 1319 prom_debug("Assuming legacy hash support\n"); 1320 } 1321 1322 if (supported.xive) { 1323 prom_debug("Asking for XIVE\n"); 1324 ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT); 1325 } 1326 } 1327 1328 static void __init prom_send_capabilities(void) 1329 { 1330 ihandle root; 1331 prom_arg_t ret; 1332 u32 cores; 
1333 1334 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */ 1335 prom_check_platform_support(); 1336 1337 root = call_prom("open", 1, 1, ADDR("/")); 1338 if (root != 0) { 1339 /* We need to tell the FW about the number of cores we support. 1340 * 1341 * To do that, we count the number of threads on the first core 1342 * (we assume this is the same for all cores) and use it to 1343 * divide NR_CPUS. 1344 */ 1345 1346 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()); 1347 prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n", 1348 cores, NR_CPUS); 1349 1350 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores); 1351 1352 /* try calling the ibm,client-architecture-support method */ 1353 prom_printf("Calling ibm,client-architecture-support..."); 1354 if (call_prom_ret("call-method", 3, 2, &ret, 1355 ADDR("ibm,client-architecture-support"), 1356 root, 1357 ADDR(&ibm_architecture_vec)) == 0) { 1358 /* the call exists... */ 1359 if (ret) 1360 prom_printf("\nWARNING: ibm,client-architecture" 1361 "-support call FAILED!\n"); 1362 call_prom("close", 1, 0, root); 1363 prom_printf(" done\n"); 1364 return; 1365 } 1366 call_prom("close", 1, 0, root); 1367 prom_printf(" not implemented\n"); 1368 } 1369 1370 #ifdef __BIG_ENDIAN__ 1371 { 1372 ihandle elfloader; 1373 1374 /* no ibm,client-architecture-support call, try the old way */ 1375 elfloader = call_prom("open", 1, 1, 1376 ADDR("/packages/elf-loader")); 1377 if (elfloader == 0) { 1378 prom_printf("couldn't open /packages/elf-loader\n"); 1379 return; 1380 } 1381 call_prom("call-method", 3, 1, ADDR("process-elf-header"), 1382 elfloader, ADDR(&fake_elf)); 1383 call_prom("close", 1, 0, elfloader); 1384 } 1385 #endif /* __BIG_ENDIAN__ */ 1386 } 1387 #endif /* CONFIG_PPC_PSERIES */ 1388 1389 /* 1390 * Memory allocation strategy... our layout is normally: 1391 * 1392 * at 14Mb or more we have vmlinux, then a gap and initrd. 
In some 1393 * rare cases, initrd might end up being before the kernel though. 1394 * We assume this won't override the final kernel at 0, we have no 1395 * provision to handle that in this version, but it should hopefully 1396 * never happen. 1397 * 1398 * alloc_top is set to the top of RMO, eventually shrink down if the 1399 * TCEs overlap 1400 * 1401 * alloc_bottom is set to the top of kernel/initrd 1402 * 1403 * from there, allocations are done this way : rtas is allocated 1404 * topmost, and the device-tree is allocated from the bottom. We try 1405 * to grow the device-tree allocation as we progress. If we can't, 1406 * then we fail, we don't currently have a facility to restart 1407 * elsewhere, but that shouldn't be necessary. 1408 * 1409 * Note that calls to reserve_mem have to be done explicitly, memory 1410 * allocated with either alloc_up or alloc_down isn't automatically 1411 * reserved. 1412 */ 1413 1414 1415 /* 1416 * Allocates memory in the RMO upward from the kernel/initrd 1417 * 1418 * When align is 0, this is a special case, it means to allocate in place 1419 * at the current location of alloc_bottom or fail (that is basically 1420 * extending the previous allocation). 
Used for the device-tree flattening 1421 */ 1422 static unsigned long __init alloc_up(unsigned long size, unsigned long align) 1423 { 1424 unsigned long base = alloc_bottom; 1425 unsigned long addr = 0; 1426 1427 if (align) 1428 base = _ALIGN_UP(base, align); 1429 prom_debug("%s(%lx, %lx)\n", __func__, size, align); 1430 if (ram_top == 0) 1431 prom_panic("alloc_up() called with mem not initialized\n"); 1432 1433 if (align) 1434 base = _ALIGN_UP(alloc_bottom, align); 1435 else 1436 base = alloc_bottom; 1437 1438 for(; (base + size) <= alloc_top; 1439 base = _ALIGN_UP(base + 0x100000, align)) { 1440 prom_debug(" trying: 0x%lx\n\r", base); 1441 addr = (unsigned long)prom_claim(base, size, 0); 1442 if (addr != PROM_ERROR && addr != 0) 1443 break; 1444 addr = 0; 1445 if (align == 0) 1446 break; 1447 } 1448 if (addr == 0) 1449 return 0; 1450 alloc_bottom = addr + size; 1451 1452 prom_debug(" -> %lx\n", addr); 1453 prom_debug(" alloc_bottom : %lx\n", alloc_bottom); 1454 prom_debug(" alloc_top : %lx\n", alloc_top); 1455 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); 1456 prom_debug(" rmo_top : %lx\n", rmo_top); 1457 prom_debug(" ram_top : %lx\n", ram_top); 1458 1459 return addr; 1460 } 1461 1462 /* 1463 * Allocates memory downward, either from top of RMO, or if highmem 1464 * is set, from the top of RAM. Note that this one doesn't handle 1465 * failures. It does claim memory if highmem is not set. 1466 */ 1467 static unsigned long __init alloc_down(unsigned long size, unsigned long align, 1468 int highmem) 1469 { 1470 unsigned long base, addr = 0; 1471 1472 prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align, 1473 highmem ? "(high)" : "(low)"); 1474 if (ram_top == 0) 1475 prom_panic("alloc_down() called with mem not initialized\n"); 1476 1477 if (highmem) { 1478 /* Carve out storage for the TCE table. */ 1479 addr = _ALIGN_DOWN(alloc_top_high - size, align); 1480 if (addr <= alloc_bottom) 1481 return 0; 1482 /* Will we bump into the RMO ? 
If yes, check out that we 1483 * didn't overlap existing allocations there, if we did, 1484 * we are dead, we must be the first in town ! 1485 */ 1486 if (addr < rmo_top) { 1487 /* Good, we are first */ 1488 if (alloc_top == rmo_top) 1489 alloc_top = rmo_top = addr; 1490 else 1491 return 0; 1492 } 1493 alloc_top_high = addr; 1494 goto bail; 1495 } 1496 1497 base = _ALIGN_DOWN(alloc_top - size, align); 1498 for (; base > alloc_bottom; 1499 base = _ALIGN_DOWN(base - 0x100000, align)) { 1500 prom_debug(" trying: 0x%lx\n\r", base); 1501 addr = (unsigned long)prom_claim(base, size, 0); 1502 if (addr != PROM_ERROR && addr != 0) 1503 break; 1504 addr = 0; 1505 } 1506 if (addr == 0) 1507 return 0; 1508 alloc_top = addr; 1509 1510 bail: 1511 prom_debug(" -> %lx\n", addr); 1512 prom_debug(" alloc_bottom : %lx\n", alloc_bottom); 1513 prom_debug(" alloc_top : %lx\n", alloc_top); 1514 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); 1515 prom_debug(" rmo_top : %lx\n", rmo_top); 1516 prom_debug(" ram_top : %lx\n", ram_top); 1517 1518 return addr; 1519 } 1520 1521 /* 1522 * Parse a "reg" cell 1523 */ 1524 static unsigned long __init prom_next_cell(int s, cell_t **cellp) 1525 { 1526 cell_t *p = *cellp; 1527 unsigned long r = 0; 1528 1529 /* Ignore more than 2 cells */ 1530 while (s > sizeof(unsigned long) / 4) { 1531 p++; 1532 s--; 1533 } 1534 r = be32_to_cpu(*p++); 1535 #ifdef CONFIG_PPC64 1536 if (s > 1) { 1537 r <<= 32; 1538 r |= be32_to_cpu(*(p++)); 1539 } 1540 #endif 1541 *cellp = p; 1542 return r; 1543 } 1544 1545 /* 1546 * Very dumb function for adding to the memory reserve list, but 1547 * we don't need anything smarter at this point 1548 * 1549 * XXX Eventually check for collisions. They should NEVER happen. 1550 * If problems seem to show up, it would be a good start to track 1551 * them down. 
1552 */ 1553 static void __init reserve_mem(u64 base, u64 size) 1554 { 1555 u64 top = base + size; 1556 unsigned long cnt = mem_reserve_cnt; 1557 1558 if (size == 0) 1559 return; 1560 1561 /* We need to always keep one empty entry so that we 1562 * have our terminator with "size" set to 0 since we are 1563 * dumb and just copy this entire array to the boot params 1564 */ 1565 base = _ALIGN_DOWN(base, PAGE_SIZE); 1566 top = _ALIGN_UP(top, PAGE_SIZE); 1567 size = top - base; 1568 1569 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1)) 1570 prom_panic("Memory reserve map exhausted !\n"); 1571 mem_reserve_map[cnt].base = cpu_to_be64(base); 1572 mem_reserve_map[cnt].size = cpu_to_be64(size); 1573 mem_reserve_cnt = cnt + 1; 1574 } 1575 1576 /* 1577 * Initialize memory allocation mechanism, parse "memory" nodes and 1578 * obtain that way the top of memory and RMO to setup out local allocator 1579 */ 1580 static void __init prom_init_mem(void) 1581 { 1582 phandle node; 1583 char type[64]; 1584 unsigned int plen; 1585 cell_t *p, *endp; 1586 __be32 val; 1587 u32 rac, rsc; 1588 1589 /* 1590 * We iterate the memory nodes to find 1591 * 1) top of RMO (first node) 1592 * 2) top of memory 1593 */ 1594 val = cpu_to_be32(2); 1595 prom_getprop(prom.root, "#address-cells", &val, sizeof(val)); 1596 rac = be32_to_cpu(val); 1597 val = cpu_to_be32(1); 1598 prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc)); 1599 rsc = be32_to_cpu(val); 1600 prom_debug("root_addr_cells: %x\n", rac); 1601 prom_debug("root_size_cells: %x\n", rsc); 1602 1603 prom_debug("scanning memory:\n"); 1604 1605 for (node = 0; prom_next_node(&node); ) { 1606 type[0] = 0; 1607 prom_getprop(node, "device_type", type, sizeof(type)); 1608 1609 if (type[0] == 0) { 1610 /* 1611 * CHRP Longtrail machines have no device_type 1612 * on the memory node, so check the name instead... 
1613 */ 1614 prom_getprop(node, "name", type, sizeof(type)); 1615 } 1616 if (prom_strcmp(type, "memory")) 1617 continue; 1618 1619 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf)); 1620 if (plen > sizeof(regbuf)) { 1621 prom_printf("memory node too large for buffer !\n"); 1622 plen = sizeof(regbuf); 1623 } 1624 p = regbuf; 1625 endp = p + (plen / sizeof(cell_t)); 1626 1627 #ifdef DEBUG_PROM 1628 memset(prom_scratch, 0, sizeof(prom_scratch)); 1629 call_prom("package-to-path", 3, 1, node, prom_scratch, 1630 sizeof(prom_scratch) - 1); 1631 prom_debug(" node %s :\n", prom_scratch); 1632 #endif /* DEBUG_PROM */ 1633 1634 while ((endp - p) >= (rac + rsc)) { 1635 unsigned long base, size; 1636 1637 base = prom_next_cell(rac, &p); 1638 size = prom_next_cell(rsc, &p); 1639 1640 if (size == 0) 1641 continue; 1642 prom_debug(" %lx %lx\n", base, size); 1643 if (base == 0 && (of_platform & PLATFORM_LPAR)) 1644 rmo_top = size; 1645 if ((base + size) > ram_top) 1646 ram_top = base + size; 1647 } 1648 } 1649 1650 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000); 1651 1652 /* 1653 * If prom_memory_limit is set we reduce the upper limits *except* for 1654 * alloc_top_high. This must be the real top of RAM so we can put 1655 * TCE's up there. 1656 */ 1657 1658 alloc_top_high = ram_top; 1659 1660 if (prom_memory_limit) { 1661 if (prom_memory_limit <= alloc_bottom) { 1662 prom_printf("Ignoring mem=%lx <= alloc_bottom.\n", 1663 prom_memory_limit); 1664 prom_memory_limit = 0; 1665 } else if (prom_memory_limit >= ram_top) { 1666 prom_printf("Ignoring mem=%lx >= ram_top.\n", 1667 prom_memory_limit); 1668 prom_memory_limit = 0; 1669 } else { 1670 ram_top = prom_memory_limit; 1671 rmo_top = min(rmo_top, prom_memory_limit); 1672 } 1673 } 1674 1675 /* 1676 * Setup our top alloc point, that is top of RMO or top of 1677 * segment 0 when running non-LPAR. 1678 * Some RS64 machines have buggy firmware where claims up at 1679 * 1GB fail. Cap at 768MB as a workaround. 
1680 * Since 768MB is plenty of room, and we need to cap to something 1681 * reasonable on 32-bit, cap at 768MB on all machines. 1682 */ 1683 if (!rmo_top) 1684 rmo_top = ram_top; 1685 rmo_top = min(0x30000000ul, rmo_top); 1686 alloc_top = rmo_top; 1687 alloc_top_high = ram_top; 1688 1689 /* 1690 * Check if we have an initrd after the kernel but still inside 1691 * the RMO. If we do move our bottom point to after it. 1692 */ 1693 if (prom_initrd_start && 1694 prom_initrd_start < rmo_top && 1695 prom_initrd_end > alloc_bottom) 1696 alloc_bottom = PAGE_ALIGN(prom_initrd_end); 1697 1698 prom_printf("memory layout at init:\n"); 1699 prom_printf(" memory_limit : %lx (16 MB aligned)\n", 1700 prom_memory_limit); 1701 prom_printf(" alloc_bottom : %lx\n", alloc_bottom); 1702 prom_printf(" alloc_top : %lx\n", alloc_top); 1703 prom_printf(" alloc_top_hi : %lx\n", alloc_top_high); 1704 prom_printf(" rmo_top : %lx\n", rmo_top); 1705 prom_printf(" ram_top : %lx\n", ram_top); 1706 } 1707 1708 static void __init prom_close_stdin(void) 1709 { 1710 __be32 val; 1711 ihandle stdin; 1712 1713 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) { 1714 stdin = be32_to_cpu(val); 1715 call_prom("close", 1, 0, stdin); 1716 } 1717 } 1718 1719 /* 1720 * Allocate room for and instantiate RTAS 1721 */ 1722 static void __init prom_instantiate_rtas(void) 1723 { 1724 phandle rtas_node; 1725 ihandle rtas_inst; 1726 u32 base, entry = 0; 1727 __be32 val; 1728 u32 size = 0; 1729 1730 prom_debug("prom_instantiate_rtas: start...\n"); 1731 1732 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas")); 1733 prom_debug("rtas_node: %x\n", rtas_node); 1734 if (!PHANDLE_VALID(rtas_node)) 1735 return; 1736 1737 val = 0; 1738 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size)); 1739 size = be32_to_cpu(val); 1740 if (size == 0) 1741 return; 1742 1743 base = alloc_down(size, PAGE_SIZE, 0); 1744 if (base == 0) 1745 prom_panic("Could not allocate memory for RTAS\n"); 1746 1747 rtas_inst = 
		call_prom("open", 1, 1, ADDR("/rtas"));
	if (!IHANDLE_VALID(rtas_inst)) {
		prom_printf("opening rtas package failed (%x)\n", rtas_inst);
		return;
	}

	prom_printf("instantiating rtas at 0x%x...", base);

	/* "instantiate-rtas" sets RTAS up at base and returns its entry;
	 * entry == 0 means the firmware refused. */
	if (call_prom_ret("call-method", 3, 2, &entry,
			  ADDR("instantiate-rtas"),
			  rtas_inst, base) != 0
	    || entry == 0) {
		prom_printf(" failed\n");
		return;
	}
	prom_printf(" done\n");

	/* Keep the kernel from ever allocating over RTAS */
	reserve_mem(base, size);

	/* Export base/entry to the kernel via the device tree */
	val = cpu_to_be32(base);
	prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
		     &val, sizeof(val));
	val = cpu_to_be32(entry);
	prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
		     &val, sizeof(val));

	/* Check if it supports "query-cpu-stopped-state" */
	if (prom_getprop(rtas_node, "query-cpu-stopped-state",
			 &val, sizeof(val)) != PROM_ERROR)
		rtas_has_query_cpu_stopped = true;

	prom_debug("rtas base = 0x%x\n", base);
	prom_debug("rtas entry = 0x%x\n", entry);
	prom_debug("rtas size = 0x%x\n", size);

	prom_debug("prom_instantiate_rtas: end...\n");
}

#ifdef CONFIG_PPC64
/*
 * Allocate room for and instantiate Stored Measurement Log (SML)
 */
static void __init prom_instantiate_sml(void)
{
	phandle ibmvtpm_node;
	ihandle ibmvtpm_inst;
	u32 entry = 0, size = 0, succ = 0;
	u64 base;
	__be32 val;

	prom_debug("prom_instantiate_sml: start...\n");

	/* Nothing to do without a /vdevice/vtpm node */
	ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
	prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
	if (!PHANDLE_VALID(ibmvtpm_node))
		return;

	ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
	if (!IHANDLE_VALID(ibmvtpm_inst)) {
		prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
		return;
	}

	/* Prefer the EFI-aligned log format when the firmware supports
	 * reformatting; otherwise fall back to the plain handover size. */
	if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
			 &val, sizeof(val)) != PROM_ERROR) {
		if (call_prom_ret("call-method", 2, 2, &succ,
				  ADDR("reformat-sml-to-efi-alignment"),
				  ibmvtpm_inst) != 0 || succ == 0) {
			prom_printf("Reformat SML to EFI alignment failed\n");
			return;
		}

		if (call_prom_ret("call-method", 2, 2, &size,
				  ADDR("sml-get-allocated-size"),
				  ibmvtpm_inst) != 0 || size == 0) {
			prom_printf("SML get allocated size failed\n");
			return;
		}
	} else {
		if (call_prom_ret("call-method", 2, 2, &size,
				  ADDR("sml-get-handover-size"),
				  ibmvtpm_inst) != 0 || size == 0) {
			prom_printf("SML get handover size failed\n");
			return;
		}
	}

	base = alloc_down(size, PAGE_SIZE, 0);
	if (base == 0)
		prom_panic("Could not allocate memory for sml\n");

	prom_printf("instantiating sml at 0x%llx...", base);

	memset((void *)base, 0, size);

	/* Hand the log over into the buffer at base; entry == 0 means
	 * the firmware call failed. */
	if (call_prom_ret("call-method", 4, 2, &entry,
			  ADDR("sml-handover"),
			  ibmvtpm_inst, size, base) != 0 || entry == 0) {
		prom_printf("SML handover failed\n");
		return;
	}
	prom_printf(" done\n");

	/* Keep the kernel from allocating over the log */
	reserve_mem(base, size);

	prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
		     &base, sizeof(base));
	prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
		     &size, sizeof(size));

	prom_debug("sml base = 0x%llx\n", base);
	prom_debug("sml size = 0x%x\n", size);

	prom_debug("prom_instantiate_sml: end...\n");
}

/*
 * Allocate room for and initialize TCE tables
 */
#ifdef __BIG_ENDIAN__
static void __init prom_initialize_tce_table(void)
{
	phandle node;
	ihandle phb_node;
	char compatible[64], type[64], model[64];
	char *path = prom_scratch;
	u64 base, align;
	u32 minalign, minsize;
	u64 tce_entry, *tce_entryp;
	u64 local_alloc_top, local_alloc_bottom;
	u64 i;

	if (prom_iommu_off)
		return;

	prom_debug("starting prom_initialize_tce_table\n");

	/* Cache current top of allocs so we reserve a single block */
	local_alloc_top = alloc_top_high;
	local_alloc_bottom = local_alloc_top;

	/* Search all nodes looking for PHBs. */
	for (node = 0; prom_next_node(&node); ) {
		compatible[0] = 0;
		type[0] = 0;
		model[0] = 0;
		prom_getprop(node, "compatible",
			     compatible, sizeof(compatible));
		prom_getprop(node, "device_type", type, sizeof(type));
		prom_getprop(node, "model", model, sizeof(model));

		if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL))
			continue;

		/* Keep the old logic intact to avoid regression. */
		if (compatible[0] != 0) {
			if ((prom_strstr(compatible, "python") == NULL) &&
			    (prom_strstr(compatible, "Speedwagon") == NULL) &&
			    (prom_strstr(compatible, "Winnipeg") == NULL))
				continue;
		} else if (model[0] != 0) {
			if ((prom_strstr(model, "ython") == NULL) &&
			    (prom_strstr(model, "peedwagon") == NULL) &&
			    (prom_strstr(model, "innipeg") == NULL))
				continue;
		}

		if (prom_getprop(node, "tce-table-minalign", &minalign,
				 sizeof(minalign)) == PROM_ERROR)
			minalign = 0;
		if (prom_getprop(node, "tce-table-minsize", &minsize,
				 sizeof(minsize)) == PROM_ERROR)
			minsize = 4UL << 20;

		/*
		 * Even though we read what OF wants, we just set the table
		 * size to 4 MB.  This is enough to map 2GB of PCI DMA space.
		 * By doing this, we avoid the pitfalls of trying to DMA to
		 * MMIO space and the DMA alias hole.
		 */
		minsize = 4UL << 20;

		/* Align to the greater of the align or size */
		align = max(minalign, minsize);
		base = alloc_down(minsize, align, 1);
		if (base == 0)
			prom_panic("ERROR, cannot find space for TCE table.\n");
		if (base < local_alloc_bottom)
			local_alloc_bottom = base;

		/* It seems OF doesn't null-terminate the path :-( */
		memset(path, 0, sizeof(prom_scratch));
		/* Call OF to setup the TCE hardware */
		if (call_prom("package-to-path", 3, 1, node,
			      path, sizeof(prom_scratch) - 1) == PROM_ERROR) {
			prom_printf("package-to-path failed\n");
		}

		/* Save away the TCE table attributes for later use. */
		prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
		prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));

		prom_debug("TCE table: %s\n", path);
		prom_debug("\tnode = 0x%x\n", node);
		prom_debug("\tbase = 0x%llx\n", base);
		prom_debug("\tsize = 0x%x\n", minsize);

		/* Initialize the table to have a one-to-one mapping
		 * over the allocated size.
		 */
		tce_entryp = (u64 *)base;
		for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
			tce_entry = (i << PAGE_SHIFT);
			tce_entry |= 0x3;
			*tce_entryp = tce_entry;
		}

		prom_printf("opening PHB %s", path);
		phb_node = call_prom("open", 1, 1, path);
		if (phb_node == 0)
			prom_printf("... failed\n");
		else
			prom_printf("... done\n");

		/*
		 * NOTE(review): this call-method is made even when the
		 * "open" above failed (phb_node == 0) — confirm the
		 * firmware tolerates a zero ihandle here.
		 */
		call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
			  phb_node, -1, minsize,
			  (u32) base, (u32) (base >> 32));
		call_prom("close", 1, 0, phb_node);
	}

	/* One reservation covering every table allocated above */
	reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);

	/* These are only really needed if there is a memory limit in
	 * effect, but we don't know so export them always.
	 */
	prom_tce_alloc_start = local_alloc_bottom;
	prom_tce_alloc_end = local_alloc_top;

	/* Flag the first invalid entry */
	prom_debug("ending prom_initialize_tce_table\n");
}
#endif /* __BIG_ENDIAN__ */
#endif /* CONFIG_PPC64 */

/*
 * With CHRP SMP we need to use the OF to start the other processors.
 * We can't wait until smp_boot_cpus (the OF is trashed by then)
 * so we have to put the processors into a holding pattern controlled
 * by the kernel (not OF) before we destroy the OF.
 *
 * This uses a chunk of low memory, puts some holding pattern
 * code there and sends the other processors off to there until
 * smp_boot_cpus tells them to do something.  The holding pattern
 * checks that address until its cpu # is there, when it is that
 * cpu jumps to __secondary_start().  smp_boot_cpus() takes care
 * of setting those values.
 *
 * We also use physical address 0x4 here to tell when a cpu
 * is in its holding pattern code.
 *
 * -- Cort
 */
/*
 * We want to reference the copy of __secondary_hold_* in the
 * 0 - 0x100 address range
 */
#define LOW_ADDR(x)	(((unsigned long) &(x)) & 0xff)

static void __init prom_hold_cpus(void)
{
	unsigned long i;
	phandle node;
	char type[64];
	unsigned long *spinloop
		= (void *) LOW_ADDR(__secondary_hold_spinloop);
	unsigned long *acknowledge
		= (void *) LOW_ADDR(__secondary_hold_acknowledge);
	unsigned long secondary_hold = LOW_ADDR(__secondary_hold);

	/*
	 * On pseries, if RTAS supports "query-cpu-stopped-state",
	 * we skip this stage, the CPUs will be started by the
	 * kernel using RTAS.
	 */
	if ((of_platform == PLATFORM_PSERIES ||
	     of_platform == PLATFORM_PSERIES_LPAR) &&
	    rtas_has_query_cpu_stopped) {
		prom_printf("prom_hold_cpus: skipped\n");
		return;
	}

	prom_debug("prom_hold_cpus: start...\n");
	prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop);
	prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop);
	prom_debug(" 1) acknowledge = 0x%lx\n",
		   (unsigned long)acknowledge);
	prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge);
	prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold);

	/* Set the common spinloop variable, so all of the secondary cpus
	 * will block when they are awakened from their OF spinloop.
	 * This must occur for both SMP and non SMP kernels, since OF will
	 * be trashed when we move the kernel.
	 */
	*spinloop = 0;

	/* look for cpus */
	for (node = 0; prom_next_node(&node); ) {
		unsigned int cpu_no;
		__be32 reg;

		type[0] = 0;
		prom_getprop(node, "device_type", type, sizeof(type));
		if (prom_strcmp(type, "cpu") != 0)
			continue;

		/* Skip non-configured cpus. */
		if (prom_getprop(node, "status", type, sizeof(type)) > 0)
			if (prom_strcmp(type, "okay") != 0)
				continue;

		reg = cpu_to_be32(-1); /* make sparse happy */
		prom_getprop(node, "reg", &reg, sizeof(reg));
		cpu_no = be32_to_cpu(reg);

		prom_debug("cpu hw idx = %u\n", cpu_no);

		/* Init the acknowledge var which will be reset by
		 * the secondary cpu when it awakens from its OF
		 * spinloop.
		 */
		*acknowledge = (unsigned long)-1;

		if (cpu_no != prom.cpu) {
			/* Primary Thread of non-boot cpu or any thread */
			prom_printf("starting cpu hw idx %u... ", cpu_no);
			call_prom("start-cpu", 3, 0, node,
				  secondary_hold, cpu_no);

			/* Bounded busy-wait for the secondary to write its
			 * cpu number into *acknowledge. */
			for (i = 0; (i < 100000000) &&
			     (*acknowledge == ((unsigned long)-1)); i++ )
				mb();

			if (*acknowledge == cpu_no)
				prom_printf("done\n");
			else
				prom_printf("failed: %lx\n", *acknowledge);
		}
#ifdef CONFIG_SMP
		else
			prom_printf("boot cpu hw idx %u\n", cpu_no);
#endif /* CONFIG_SMP */
	}

	prom_debug("prom_hold_cpus: end...\n");
}


/*
 * Record the OF client-interface entry point and look up the /chosen
 * and root nodes that nearly every later prom call relies on.
 */
static void __init prom_init_client_services(unsigned long pp)
{
	/* Get a handle to the prom entry point before anything else */
	prom_entry = pp;

	/* get a handle for the stdout device */
	prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
	if (!PHANDLE_VALID(prom.chosen))
		prom_panic("cannot find chosen"); /* msg won't be printed :( */

	/* get device tree root */
	prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
	if (!PHANDLE_VALID(prom.root))
		prom_panic("cannot find device tree root"); /* msg won't be printed :( */

	prom.mmumap = 0;
}

#ifdef CONFIG_PPC32
/*
 * For really old powermacs, we need to map things we claim.
 * For that, we need the ihandle of the mmu.
 * Also, on the longtrail, we need to work around other bugs.
 */
static void __init prom_find_mmu(void)
{
	phandle oprom;
	char version[64];

	oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
	if (!PHANDLE_VALID(oprom))
		return;
	if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
		return;
	/* getprop may fill the whole buffer; force NUL termination */
	version[sizeof(version) - 1] = 0;
	/* XXX might need to add other versions here */
	if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0)
		of_workarounds = OF_WA_CLAIM;
	else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) {
		of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
		call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
	} else
		return;
	prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
	prom_getprop(prom.chosen, "mmu", &prom.mmumap,
		     sizeof(prom.mmumap));
	prom.mmumap = be32_to_cpu(prom.mmumap);
	/* Without valid memory and mmu handles we can't do split claims */
	if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
		of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
}
#else
#define prom_find_mmu()
#endif

/*
 * Find the firmware stdout device, remember its ihandle (prom.stdout)
 * and full path (of_stdout_device), and advertise the path to the
 * kernel via linux,stdout-path.  If stdout is a display, tag it with
 * linux,boot-display.
 */
static void __init prom_init_stdout(void)
{
	char *path = of_stdout_device;
	char type[16];
	phandle stdout_node;
	__be32 val;

	if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
		prom_panic("cannot find stdout");

	prom.stdout = be32_to_cpu(val);

	/* Get the full OF pathname of the stdout device */
	/* NOTE(review): assumes of_stdout_device is at least 256 bytes —
	 * confirm its declaration (outside this view). */
	memset(path, 0, 256);
	call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
	prom_printf("OF stdout device is: %s\n", of_stdout_device);
	prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
		     path, prom_strlen(path) + 1);

	/* instance-to-package fails on PA-Semi */
	stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
	if (stdout_node != PROM_ERROR) {
		val = cpu_to_be32(stdout_node);

		/* If it's a display, note it */
		memset(type, 0, sizeof(type));
		prom_getprop(stdout_node, "device_type", type, sizeof(type));
		if (prom_strcmp(type, "display") == 0)
			prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
	}
}

/*
 * Classify the machine we are booting on (PowerMac, pSeries, pSeries
 * LPAR or generic) from the root node's compatible and device_type
 * properties.
 */
static int __init prom_find_machine_type(void)
{
	char compat[256];
	int len, i = 0;
#ifdef CONFIG_PPC64
	phandle rtas;
	int x;
#endif

	/* Look for a PowerMac or a Cell */
	len = prom_getprop(prom.root, "compatible",
			   compat, sizeof(compat)-1);
	if (len > 0) {
		compat[len] = 0;
		/* compat holds a list of NUL-separated strings; walk them */
		while (i < len) {
			char *p = &compat[i];
			int sl = prom_strlen(p);
			if (sl == 0)
				break;
			if (prom_strstr(p, "Power Macintosh") ||
			    prom_strstr(p, "MacRISC"))
				return PLATFORM_POWERMAC;
#ifdef CONFIG_PPC64
			/* We must make sure we don't detect the IBM Cell
			 * blades as pSeries due to some firmware issues,
			 * so we do it here.
			 */
			if (prom_strstr(p, "IBM,CBEA") ||
			    prom_strstr(p, "IBM,CPBW-1.0"))
				return PLATFORM_GENERIC;
#endif /* CONFIG_PPC64 */
			i += sl + 1;
		}
	}
#ifdef CONFIG_PPC64
	/* Try to figure out if it's an IBM pSeries or any other
	 * PAPR compliant platform. We assume it is if :
	 *  - /device_type is "chrp" (please, do NOT use that for future
	 *    non-IBM designs !
	 *  - it has /rtas
	 */
	len = prom_getprop(prom.root, "device_type",
			   compat, sizeof(compat)-1);
	if (len <= 0)
		return PLATFORM_GENERIC;
	if (prom_strcmp(compat, "chrp"))
		return PLATFORM_GENERIC;

	/* Default to pSeries.
	   We need to know if we are running LPAR */
	rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
	if (!PHANDLE_VALID(rtas))
		return PLATFORM_GENERIC;
	/* Presence of ibm,hypertas-functions on /rtas implies an LPAR */
	x = prom_getproplen(rtas, "ibm,hypertas-functions");
	if (x != PROM_ERROR) {
		prom_debug("Hypertas detected, assuming LPAR !\n");
		return PLATFORM_PSERIES_LPAR;
	}
	return PLATFORM_PSERIES;
#else
	return PLATFORM_GENERIC;
#endif
}

/* Set palette entry i of display ih via the OF "color!" method. */
static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
{
	return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
}

/*
 * If we have a display that we don't know how to drive,
 * we will want to try to execute OF's open method for it
 * later.  However, OF will probably fall over if we do that
 * we've taken over the MMU.
 * So we check whether we will need to open the display,
 * and if so, open it now.
 */
static void __init prom_check_displays(void)
{
	char type[16], *path;
	phandle node;
	ihandle ih;
	int i;

	/* 16-entry VGA-style default palette, 3 bytes (R,G,B) each */
	static const unsigned char default_colors[] __initconst = {
		0x00, 0x00, 0x00,
		0x00, 0x00, 0xaa,
		0x00, 0xaa, 0x00,
		0x00, 0xaa, 0xaa,
		0xaa, 0x00, 0x00,
		0xaa, 0x00, 0xaa,
		0xaa, 0xaa, 0x00,
		0xaa, 0xaa, 0xaa,
		0x55, 0x55, 0x55,
		0x55, 0x55, 0xff,
		0x55, 0xff, 0x55,
		0x55, 0xff, 0xff,
		0xff, 0x55, 0x55,
		0xff, 0x55, 0xff,
		0xff, 0xff, 0x55,
		0xff, 0xff, 0xff
	};
	const unsigned char *clut;

	prom_debug("Looking for displays\n");
	for (node = 0; prom_next_node(&node); ) {
		memset(type, 0, sizeof(type));
		prom_getprop(node, "device_type", type, sizeof(type));
		if (prom_strcmp(type, "display") != 0)
			continue;

		/* It seems OF doesn't null-terminate the path :-( */
		path = prom_scratch;
		memset(path, 0, sizeof(prom_scratch));

		/*
		 * leave some room at the end of the path for appending extra
		 * arguments
		 */
		if (call_prom("package-to-path", 3, 1, node, path,
			      sizeof(prom_scratch) - 10) == PROM_ERROR)
			continue;
		prom_printf("found display : %s, opening... ", path);

		ih = call_prom("open", 1, 1, path);
		if (ih == 0) {
			prom_printf("failed\n");
			continue;
		}

		/* Success */
		prom_printf("done\n");
		prom_setprop(node, path, "linux,opened", NULL, 0);

		/* Setup a usable color table when the appropriate
		 * method is available. Should update this to set-colors */
		clut = default_colors;
		for (i = 0; i < 16; i++, clut += 3)
			if (prom_set_color(ih, i, clut[0], clut[1],
					   clut[2]) != 0)
				break;

#ifdef CONFIG_LOGO_LINUX_CLUT224
		/* Load the boot-logo palette starting at entry 32 */
		clut = PTRRELOC(logo_linux_clut224.clut);
		for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
			if (prom_set_color(ih, i + 32, clut[0], clut[1],
					   clut[2]) != 0)
				break;
#endif /* CONFIG_LOGO_LINUX_CLUT224 */

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
		if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
		    PROM_ERROR) {
			u32 width, height, pitch, addr;

			prom_printf("Setting btext !\n");
			prom_getprop(node, "width", &width, 4);
			prom_getprop(node, "height", &height, 4);
			prom_getprop(node, "linebytes", &pitch, 4);
			prom_getprop(node, "address", &addr, 4);
			prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
				    width, height, pitch, addr);
			btext_setup_display(width, height, 8, pitch, addr);
			btext_prepare_BAT();
		}
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
	}
}


/* Return (relocated) pointer to this much memory: moves initrd if reqd.
*/ 2360 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end, 2361 unsigned long needed, unsigned long align) 2362 { 2363 void *ret; 2364 2365 *mem_start = _ALIGN(*mem_start, align); 2366 while ((*mem_start + needed) > *mem_end) { 2367 unsigned long room, chunk; 2368 2369 prom_debug("Chunk exhausted, claiming more at %lx...\n", 2370 alloc_bottom); 2371 room = alloc_top - alloc_bottom; 2372 if (room > DEVTREE_CHUNK_SIZE) 2373 room = DEVTREE_CHUNK_SIZE; 2374 if (room < PAGE_SIZE) 2375 prom_panic("No memory for flatten_device_tree " 2376 "(no room)\n"); 2377 chunk = alloc_up(room, 0); 2378 if (chunk == 0) 2379 prom_panic("No memory for flatten_device_tree " 2380 "(claim failed)\n"); 2381 *mem_end = chunk + room; 2382 } 2383 2384 ret = (void *)*mem_start; 2385 *mem_start += needed; 2386 2387 return ret; 2388 } 2389 2390 #define dt_push_token(token, mem_start, mem_end) do { \ 2391 void *room = make_room(mem_start, mem_end, 4, 4); \ 2392 *(__be32 *)room = cpu_to_be32(token); \ 2393 } while(0) 2394 2395 static unsigned long __init dt_find_string(char *str) 2396 { 2397 char *s, *os; 2398 2399 s = os = (char *)dt_string_start; 2400 s += 4; 2401 while (s < (char *)dt_string_end) { 2402 if (prom_strcmp(s, str) == 0) 2403 return s - os; 2404 s += prom_strlen(s) + 1; 2405 } 2406 return 0; 2407 } 2408 2409 /* 2410 * The Open Firmware 1275 specification states properties must be 31 bytes or 2411 * less, however not all firmwares obey this. Make it 64 bytes to be safe. 2412 */ 2413 #define MAX_PROPERTY_NAME 64 2414 2415 static void __init scan_dt_build_strings(phandle node, 2416 unsigned long *mem_start, 2417 unsigned long *mem_end) 2418 { 2419 char *prev_name, *namep, *sstart; 2420 unsigned long soff; 2421 phandle child; 2422 2423 sstart = (char *)dt_string_start; 2424 2425 /* get and store all property names */ 2426 prev_name = ""; 2427 for (;;) { 2428 /* 64 is max len of name including nul. 
*/ 2429 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1); 2430 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) { 2431 /* No more nodes: unwind alloc */ 2432 *mem_start = (unsigned long)namep; 2433 break; 2434 } 2435 2436 /* skip "name" */ 2437 if (prom_strcmp(namep, "name") == 0) { 2438 *mem_start = (unsigned long)namep; 2439 prev_name = "name"; 2440 continue; 2441 } 2442 /* get/create string entry */ 2443 soff = dt_find_string(namep); 2444 if (soff != 0) { 2445 *mem_start = (unsigned long)namep; 2446 namep = sstart + soff; 2447 } else { 2448 /* Trim off some if we can */ 2449 *mem_start = (unsigned long)namep + prom_strlen(namep) + 1; 2450 dt_string_end = *mem_start; 2451 } 2452 prev_name = namep; 2453 } 2454 2455 /* do all our children */ 2456 child = call_prom("child", 1, 1, node); 2457 while (child != 0) { 2458 scan_dt_build_strings(child, mem_start, mem_end); 2459 child = call_prom("peer", 1, 1, child); 2460 } 2461 } 2462 2463 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, 2464 unsigned long *mem_end) 2465 { 2466 phandle child; 2467 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path; 2468 unsigned long soff; 2469 unsigned char *valp; 2470 static char pname[MAX_PROPERTY_NAME] __prombss; 2471 int l, room, has_phandle = 0; 2472 2473 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end); 2474 2475 /* get the node's full name */ 2476 namep = (char *)*mem_start; 2477 room = *mem_end - *mem_start; 2478 if (room > 255) 2479 room = 255; 2480 l = call_prom("package-to-path", 3, 1, node, namep, room); 2481 if (l >= 0) { 2482 /* Didn't fit? Get more room. 
*/ 2483 if (l >= room) { 2484 if (l >= *mem_end - *mem_start) 2485 namep = make_room(mem_start, mem_end, l+1, 1); 2486 call_prom("package-to-path", 3, 1, node, namep, l); 2487 } 2488 namep[l] = '\0'; 2489 2490 /* Fixup an Apple bug where they have bogus \0 chars in the 2491 * middle of the path in some properties, and extract 2492 * the unit name (everything after the last '/'). 2493 */ 2494 for (lp = p = namep, ep = namep + l; p < ep; p++) { 2495 if (*p == '/') 2496 lp = namep; 2497 else if (*p != 0) 2498 *lp++ = *p; 2499 } 2500 *lp = 0; 2501 *mem_start = _ALIGN((unsigned long)lp + 1, 4); 2502 } 2503 2504 /* get it again for debugging */ 2505 path = prom_scratch; 2506 memset(path, 0, sizeof(prom_scratch)); 2507 call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1); 2508 2509 /* get and store all properties */ 2510 prev_name = ""; 2511 sstart = (char *)dt_string_start; 2512 for (;;) { 2513 if (call_prom("nextprop", 3, 1, node, prev_name, 2514 pname) != 1) 2515 break; 2516 2517 /* skip "name" */ 2518 if (prom_strcmp(pname, "name") == 0) { 2519 prev_name = "name"; 2520 continue; 2521 } 2522 2523 /* find string offset */ 2524 soff = dt_find_string(pname); 2525 if (soff == 0) { 2526 prom_printf("WARNING: Can't find string index for" 2527 " <%s>, node %s\n", pname, path); 2528 break; 2529 } 2530 prev_name = sstart + soff; 2531 2532 /* get length */ 2533 l = call_prom("getproplen", 2, 1, node, pname); 2534 2535 /* sanity checks */ 2536 if (l == PROM_ERROR) 2537 continue; 2538 2539 /* push property head */ 2540 dt_push_token(OF_DT_PROP, mem_start, mem_end); 2541 dt_push_token(l, mem_start, mem_end); 2542 dt_push_token(soff, mem_start, mem_end); 2543 2544 /* push property content */ 2545 valp = make_room(mem_start, mem_end, l, 4); 2546 call_prom("getprop", 4, 1, node, pname, valp, l); 2547 *mem_start = _ALIGN(*mem_start, 4); 2548 2549 if (!prom_strcmp(pname, "phandle")) 2550 has_phandle = 1; 2551 } 2552 2553 /* Add a "phandle" property if none already 
exist */ 2554 if (!has_phandle) { 2555 soff = dt_find_string("phandle"); 2556 if (soff == 0) 2557 prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path); 2558 else { 2559 dt_push_token(OF_DT_PROP, mem_start, mem_end); 2560 dt_push_token(4, mem_start, mem_end); 2561 dt_push_token(soff, mem_start, mem_end); 2562 valp = make_room(mem_start, mem_end, 4, 4); 2563 *(__be32 *)valp = cpu_to_be32(node); 2564 } 2565 } 2566 2567 /* do all our children */ 2568 child = call_prom("child", 1, 1, node); 2569 while (child != 0) { 2570 scan_dt_build_struct(child, mem_start, mem_end); 2571 child = call_prom("peer", 1, 1, child); 2572 } 2573 2574 dt_push_token(OF_DT_END_NODE, mem_start, mem_end); 2575 } 2576 2577 static void __init flatten_device_tree(void) 2578 { 2579 phandle root; 2580 unsigned long mem_start, mem_end, room; 2581 struct boot_param_header *hdr; 2582 char *namep; 2583 u64 *rsvmap; 2584 2585 /* 2586 * Check how much room we have between alloc top & bottom (+/- a 2587 * few pages), crop to 1MB, as this is our "chunk" size 2588 */ 2589 room = alloc_top - alloc_bottom - 0x4000; 2590 if (room > DEVTREE_CHUNK_SIZE) 2591 room = DEVTREE_CHUNK_SIZE; 2592 prom_debug("starting device tree allocs at %lx\n", alloc_bottom); 2593 2594 /* Now try to claim that */ 2595 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE); 2596 if (mem_start == 0) 2597 prom_panic("Can't allocate initial device-tree chunk\n"); 2598 mem_end = mem_start + room; 2599 2600 /* Get root of tree */ 2601 root = call_prom("peer", 1, 1, (phandle)0); 2602 if (root == (phandle)0) 2603 prom_panic ("couldn't get device tree root\n"); 2604 2605 /* Build header and make room for mem rsv map */ 2606 mem_start = _ALIGN(mem_start, 4); 2607 hdr = make_room(&mem_start, &mem_end, 2608 sizeof(struct boot_param_header), 4); 2609 dt_header_start = (unsigned long)hdr; 2610 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8); 2611 2612 /* Start of strings */ 2613 mem_start = 
PAGE_ALIGN(mem_start); 2614 dt_string_start = mem_start; 2615 mem_start += 4; /* hole */ 2616 2617 /* Add "phandle" in there, we'll need it */ 2618 namep = make_room(&mem_start, &mem_end, 16, 1); 2619 prom_strcpy(namep, "phandle"); 2620 mem_start = (unsigned long)namep + prom_strlen(namep) + 1; 2621 2622 /* Build string array */ 2623 prom_printf("Building dt strings...\n"); 2624 scan_dt_build_strings(root, &mem_start, &mem_end); 2625 dt_string_end = mem_start; 2626 2627 /* Build structure */ 2628 mem_start = PAGE_ALIGN(mem_start); 2629 dt_struct_start = mem_start; 2630 prom_printf("Building dt structure...\n"); 2631 scan_dt_build_struct(root, &mem_start, &mem_end); 2632 dt_push_token(OF_DT_END, &mem_start, &mem_end); 2633 dt_struct_end = PAGE_ALIGN(mem_start); 2634 2635 /* Finish header */ 2636 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu); 2637 hdr->magic = cpu_to_be32(OF_DT_HEADER); 2638 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start); 2639 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start); 2640 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start); 2641 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start); 2642 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start); 2643 hdr->version = cpu_to_be32(OF_DT_VERSION); 2644 /* Version 16 is not backward compatible */ 2645 hdr->last_comp_version = cpu_to_be32(0x10); 2646 2647 /* Copy the reserve map in */ 2648 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map)); 2649 2650 #ifdef DEBUG_PROM 2651 { 2652 int i; 2653 prom_printf("reserved memory map:\n"); 2654 for (i = 0; i < mem_reserve_cnt; i++) 2655 prom_printf(" %llx - %llx\n", 2656 be64_to_cpu(mem_reserve_map[i].base), 2657 be64_to_cpu(mem_reserve_map[i].size)); 2658 } 2659 #endif 2660 /* Bump mem_reserve_cnt to cause further reservations to fail 2661 * since it's too late. 
2662 */ 2663 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE; 2664 2665 prom_printf("Device tree strings 0x%lx -> 0x%lx\n", 2666 dt_string_start, dt_string_end); 2667 prom_printf("Device tree struct 0x%lx -> 0x%lx\n", 2668 dt_struct_start, dt_struct_end); 2669 } 2670 2671 #ifdef CONFIG_PPC_MAPLE 2672 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property. 2673 * The values are bad, and it doesn't even have the right number of cells. */ 2674 static void __init fixup_device_tree_maple(void) 2675 { 2676 phandle isa; 2677 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */ 2678 u32 isa_ranges[6]; 2679 char *name; 2680 2681 name = "/ht@0/isa@4"; 2682 isa = call_prom("finddevice", 1, 1, ADDR(name)); 2683 if (!PHANDLE_VALID(isa)) { 2684 name = "/ht@0/isa@6"; 2685 isa = call_prom("finddevice", 1, 1, ADDR(name)); 2686 rloc = 0x01003000; /* IO space; PCI device = 6 */ 2687 } 2688 if (!PHANDLE_VALID(isa)) 2689 return; 2690 2691 if (prom_getproplen(isa, "ranges") != 12) 2692 return; 2693 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges)) 2694 == PROM_ERROR) 2695 return; 2696 2697 if (isa_ranges[0] != 0x1 || 2698 isa_ranges[1] != 0xf4000000 || 2699 isa_ranges[2] != 0x00010000) 2700 return; 2701 2702 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n"); 2703 2704 isa_ranges[0] = 0x1; 2705 isa_ranges[1] = 0x0; 2706 isa_ranges[2] = rloc; 2707 isa_ranges[3] = 0x0; 2708 isa_ranges[4] = 0x0; 2709 isa_ranges[5] = 0x00010000; 2710 prom_setprop(isa, name, "ranges", 2711 isa_ranges, sizeof(isa_ranges)); 2712 } 2713 2714 #define CPC925_MC_START 0xf8000000 2715 #define CPC925_MC_LENGTH 0x1000000 2716 /* The values for memory-controller don't have right number of cells */ 2717 static void __init fixup_device_tree_maple_memory_controller(void) 2718 { 2719 phandle mc; 2720 u32 mc_reg[4]; 2721 char *name = "/hostbridge@f8000000"; 2722 u32 ac, sc; 2723 2724 mc = call_prom("finddevice", 1, 1, ADDR(name)); 2725 if (!PHANDLE_VALID(mc)) 2726 return; 2727 2728 
if (prom_getproplen(mc, "reg") != 8) 2729 return; 2730 2731 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac)); 2732 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc)); 2733 if ((ac != 2) || (sc != 2)) 2734 return; 2735 2736 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR) 2737 return; 2738 2739 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH) 2740 return; 2741 2742 prom_printf("Fixing up bogus hostbridge on Maple...\n"); 2743 2744 mc_reg[0] = 0x0; 2745 mc_reg[1] = CPC925_MC_START; 2746 mc_reg[2] = 0x0; 2747 mc_reg[3] = CPC925_MC_LENGTH; 2748 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg)); 2749 } 2750 #else 2751 #define fixup_device_tree_maple() 2752 #define fixup_device_tree_maple_memory_controller() 2753 #endif 2754 2755 #ifdef CONFIG_PPC_CHRP 2756 /* 2757 * Pegasos and BriQ lacks the "ranges" property in the isa node 2758 * Pegasos needs decimal IRQ 14/15, not hexadecimal 2759 * Pegasos has the IDE configured in legacy mode, but advertised as native 2760 */ 2761 static void __init fixup_device_tree_chrp(void) 2762 { 2763 phandle ph; 2764 u32 prop[6]; 2765 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */ 2766 char *name; 2767 int rc; 2768 2769 name = "/pci@80000000/isa@c"; 2770 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2771 if (!PHANDLE_VALID(ph)) { 2772 name = "/pci@ff500000/isa@6"; 2773 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2774 rloc = 0x01003000; /* IO space; PCI device = 6 */ 2775 } 2776 if (PHANDLE_VALID(ph)) { 2777 rc = prom_getproplen(ph, "ranges"); 2778 if (rc == 0 || rc == PROM_ERROR) { 2779 prom_printf("Fixing up missing ISA range on Pegasos...\n"); 2780 2781 prop[0] = 0x1; 2782 prop[1] = 0x0; 2783 prop[2] = rloc; 2784 prop[3] = 0x0; 2785 prop[4] = 0x0; 2786 prop[5] = 0x00010000; 2787 prom_setprop(ph, name, "ranges", prop, sizeof(prop)); 2788 } 2789 } 2790 2791 name = "/pci@80000000/ide@C,1"; 2792 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2793 if (PHANDLE_VALID(ph)) { 
2794 prom_printf("Fixing up IDE interrupt on Pegasos...\n"); 2795 prop[0] = 14; 2796 prop[1] = 0x0; 2797 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32)); 2798 prom_printf("Fixing up IDE class-code on Pegasos...\n"); 2799 rc = prom_getprop(ph, "class-code", prop, sizeof(u32)); 2800 if (rc == sizeof(u32)) { 2801 prop[0] &= ~0x5; 2802 prom_setprop(ph, name, "class-code", prop, sizeof(u32)); 2803 } 2804 } 2805 } 2806 #else 2807 #define fixup_device_tree_chrp() 2808 #endif 2809 2810 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC) 2811 static void __init fixup_device_tree_pmac(void) 2812 { 2813 phandle u3, i2c, mpic; 2814 u32 u3_rev; 2815 u32 interrupts[2]; 2816 u32 parent; 2817 2818 /* Some G5s have a missing interrupt definition, fix it up here */ 2819 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000")); 2820 if (!PHANDLE_VALID(u3)) 2821 return; 2822 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000")); 2823 if (!PHANDLE_VALID(i2c)) 2824 return; 2825 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000")); 2826 if (!PHANDLE_VALID(mpic)) 2827 return; 2828 2829 /* check if proper rev of u3 */ 2830 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev)) 2831 == PROM_ERROR) 2832 return; 2833 if (u3_rev < 0x35 || u3_rev > 0x39) 2834 return; 2835 /* does it need fixup ? 
*/ 2836 if (prom_getproplen(i2c, "interrupts") > 0) 2837 return; 2838 2839 prom_printf("fixing up bogus interrupts for u3 i2c...\n"); 2840 2841 /* interrupt on this revision of u3 is number 0 and level */ 2842 interrupts[0] = 0; 2843 interrupts[1] = 1; 2844 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts", 2845 &interrupts, sizeof(interrupts)); 2846 parent = (u32)mpic; 2847 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent", 2848 &parent, sizeof(parent)); 2849 } 2850 #else 2851 #define fixup_device_tree_pmac() 2852 #endif 2853 2854 #ifdef CONFIG_PPC_EFIKA 2855 /* 2856 * The MPC5200 FEC driver requires an phy-handle property to tell it how 2857 * to talk to the phy. If the phy-handle property is missing, then this 2858 * function is called to add the appropriate nodes and link it to the 2859 * ethernet node. 2860 */ 2861 static void __init fixup_device_tree_efika_add_phy(void) 2862 { 2863 u32 node; 2864 char prop[64]; 2865 int rv; 2866 2867 /* Check if /builtin/ethernet exists - bail if it doesn't */ 2868 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet")); 2869 if (!PHANDLE_VALID(node)) 2870 return; 2871 2872 /* Check if the phy-handle property exists - bail if it does */ 2873 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop)); 2874 if (!rv) 2875 return; 2876 2877 /* 2878 * At this point the ethernet device doesn't have a phy described. 
2879 * Now we need to add the missing phy node and linkage 2880 */ 2881 2882 /* Check for an MDIO bus node - if missing then create one */ 2883 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio")); 2884 if (!PHANDLE_VALID(node)) { 2885 prom_printf("Adding Ethernet MDIO node\n"); 2886 call_prom("interpret", 1, 1, 2887 " s\" /builtin\" find-device" 2888 " new-device" 2889 " 1 encode-int s\" #address-cells\" property" 2890 " 0 encode-int s\" #size-cells\" property" 2891 " s\" mdio\" device-name" 2892 " s\" fsl,mpc5200b-mdio\" encode-string" 2893 " s\" compatible\" property" 2894 " 0xf0003000 0x400 reg" 2895 " 0x2 encode-int" 2896 " 0x5 encode-int encode+" 2897 " 0x3 encode-int encode+" 2898 " s\" interrupts\" property" 2899 " finish-device"); 2900 }; 2901 2902 /* Check for a PHY device node - if missing then create one and 2903 * give it's phandle to the ethernet node */ 2904 node = call_prom("finddevice", 1, 1, 2905 ADDR("/builtin/mdio/ethernet-phy")); 2906 if (!PHANDLE_VALID(node)) { 2907 prom_printf("Adding Ethernet PHY node\n"); 2908 call_prom("interpret", 1, 1, 2909 " s\" /builtin/mdio\" find-device" 2910 " new-device" 2911 " s\" ethernet-phy\" device-name" 2912 " 0x10 encode-int s\" reg\" property" 2913 " my-self" 2914 " ihandle>phandle" 2915 " finish-device" 2916 " s\" /builtin/ethernet\" find-device" 2917 " encode-int" 2918 " s\" phy-handle\" property" 2919 " device-end"); 2920 } 2921 } 2922 2923 static void __init fixup_device_tree_efika(void) 2924 { 2925 int sound_irq[3] = { 2, 2, 0 }; 2926 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0, 2927 3,4,0, 3,5,0, 3,6,0, 3,7,0, 2928 3,8,0, 3,9,0, 3,10,0, 3,11,0, 2929 3,12,0, 3,13,0, 3,14,0, 3,15,0 }; 2930 u32 node; 2931 char prop[64]; 2932 int rv, len; 2933 2934 /* Check if we're really running on a EFIKA */ 2935 node = call_prom("finddevice", 1, 1, ADDR("/")); 2936 if (!PHANDLE_VALID(node)) 2937 return; 2938 2939 rv = prom_getprop(node, "model", prop, sizeof(prop)); 2940 if (rv == PROM_ERROR) 2941 
return; 2942 if (prom_strcmp(prop, "EFIKA5K2")) 2943 return; 2944 2945 prom_printf("Applying EFIKA device tree fixups\n"); 2946 2947 /* Claiming to be 'chrp' is death */ 2948 node = call_prom("finddevice", 1, 1, ADDR("/")); 2949 rv = prom_getprop(node, "device_type", prop, sizeof(prop)); 2950 if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0)) 2951 prom_setprop(node, "/", "device_type", "efika", sizeof("efika")); 2952 2953 /* CODEGEN,description is exposed in /proc/cpuinfo so 2954 fix that too */ 2955 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop)); 2956 if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP"))) 2957 prom_setprop(node, "/", "CODEGEN,description", 2958 "Efika 5200B PowerPC System", 2959 sizeof("Efika 5200B PowerPC System")); 2960 2961 /* Fixup bestcomm interrupts property */ 2962 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm")); 2963 if (PHANDLE_VALID(node)) { 2964 len = prom_getproplen(node, "interrupts"); 2965 if (len == 12) { 2966 prom_printf("Fixing bestcomm interrupts property\n"); 2967 prom_setprop(node, "/builtin/bestcom", "interrupts", 2968 bcomm_irq, sizeof(bcomm_irq)); 2969 } 2970 } 2971 2972 /* Fixup sound interrupts property */ 2973 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound")); 2974 if (PHANDLE_VALID(node)) { 2975 rv = prom_getprop(node, "interrupts", prop, sizeof(prop)); 2976 if (rv == PROM_ERROR) { 2977 prom_printf("Adding sound interrupts property\n"); 2978 prom_setprop(node, "/builtin/sound", "interrupts", 2979 sound_irq, sizeof(sound_irq)); 2980 } 2981 } 2982 2983 /* Make sure ethernet phy-handle property exists */ 2984 fixup_device_tree_efika_add_phy(); 2985 } 2986 #else 2987 #define fixup_device_tree_efika() 2988 #endif 2989 2990 #ifdef CONFIG_PPC_PASEMI_NEMO 2991 /* 2992 * CFE supplied on Nemo is broken in several ways, biggest 2993 * problem is that it reassigns ISA interrupts to unused mpic ints. 
#ifdef CONFIG_PPC_PASEMI_NEMO
/*
 * CFE supplied on Nemo is broken in several ways, biggest
 * problem is that it reassigns ISA interrupts to unused mpic ints.
 * Add an interrupt-controller property for the io-bridge to use
 * and correct the ints so we can attach them to an irq_domain
 */
static void __init fixup_device_tree_pasemi(void)
{
	u32 interrupts[2], parent, rval, val = 0;
	char *name, *pci_name;
	phandle iob, node;

	/* Find the root pci node */
	name = "/pxp@0,e0000000";
	iob = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(iob))
		return;

	/* check if interrupt-controller node set yet */
	if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
		return;

	prom_printf("adding interrupt-controller property for SB600...\n");

	prom_setprop(iob, name, "interrupt-controller", &val, 0);

	pci_name = "/pxp@0,e0000000/pci@11";
	node = call_prom("finddevice", 1, 1, ADDR(pci_name));
	parent = ADDR(iob);

	for( ; prom_next_node(&node); ) {
		/* scan each node for one with an interrupt */
		if (!PHANDLE_VALID(node))
			continue;

		rval = prom_getproplen(node, "interrupts");
		if (rval == 0 || rval == PROM_ERROR)
			continue;

		prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
		/* CFE-reassigned ints live in the 212..222 range */
		if ((interrupts[0] < 212) || (interrupts[0] > 222))
			continue;

		/* found a node, update both interrupts and interrupt-parent */
		if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
			interrupts[0] -= 203;
		if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
			interrupts[0] -= 213;
		if (interrupts[0] == 221)
			interrupts[0] = 14;
		if (interrupts[0] == 222)
			interrupts[0] = 8;

		prom_setprop(node, pci_name, "interrupts", interrupts,
			     sizeof(interrupts));
		prom_setprop(node, pci_name, "interrupt-parent", &parent,
			     sizeof(parent));
	}

	/*
	 * The io-bridge has device_type set to 'io-bridge' change it to 'isa'
	 * so that generic isa-bridge code can add the SB600 and its on-board
	 * peripherals.
	 */
	name = "/pxp@0,e0000000/io-bridge@0";
	iob = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(iob))
		return;

	/* device_type is already set, just change it. */

	prom_printf("Changing device_type of SB600 node...\n");

	prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
}
#else	/* !CONFIG_PPC_PASEMI_NEMO */
static inline void fixup_device_tree_pasemi(void) { }
#endif

/* Apply all platform-specific device-tree workarounds (each is a no-op
 * unless its platform support is configured in). */
static void __init fixup_device_tree(void)
{
	fixup_device_tree_maple();
	fixup_device_tree_maple_memory_controller();
	fixup_device_tree_chrp();
	fixup_device_tree_pmac();
	fixup_device_tree_efika();
	fixup_device_tree_pasemi();
}

/*
 * Determine the hardware index of the CPU we are booting on, from
 * /chosen's "cpu" instance handle, and record it in prom.cpu for the
 * FDT header's boot_cpuid_phys field.
 */
static void __init prom_find_boot_cpu(void)
{
	__be32 rval;
	ihandle prom_cpu;
	phandle cpu_pkg;

	rval = 0;
	if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
		return;
	prom_cpu = be32_to_cpu(rval);

	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);

	if (!PHANDLE_VALID(cpu_pkg))
		return;

	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
	prom.cpu = be32_to_cpu(rval);

	prom_debug("Booting CPU hw index = %d\n", prom.cpu);
}

/*
 * If the boot loader handed us an initrd (start in r3, size in r4),
 * record it in /chosen and reserve its memory.  r4 == 0xdeadbeef is
 * treated as "no initrd" (historical yaboot marker).
 */
static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (r3 && r4 && r4 != 0xdeadbeef) {
		__be64 val;

		prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
		prom_initrd_end = prom_initrd_start + r4;

		val = cpu_to_be64(prom_initrd_start);
		prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
			     &val, sizeof(val));
		val = cpu_to_be64(prom_initrd_end);
		prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
			     &val, sizeof(val));

		reserve_mem(prom_initrd_start,
			    prom_initrd_end - prom_initrd_start);

		prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
		prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
	}
#endif /* CONFIG_BLK_DEV_INITRD */
}

#ifdef CONFIG_PPC64
#ifdef CONFIG_RELOCATABLE
/* A relocatable kernel has its TOC relocated for us - nothing to do */
static void reloc_toc(void)
{
}

static void unreloc_toc(void)
{
}
#else
/* Add @offset to every entry of the TOC (nr_entries of them) */
static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
{
	unsigned long i;
	unsigned long *toc_entry;

	/* Get the start of the TOC by using r2 directly. */
	asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));

	for (i = 0; i < nr_entries; i++) {
		*toc_entry = *toc_entry + offset;
		toc_entry++;
	}
}

/* Relocate the prom_init TOC entries to the running address */
static void reloc_toc(void)
{
	unsigned long offset = reloc_offset();
	unsigned long nr_entries =
		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);

	__reloc_toc(offset, nr_entries);

	mb();
}

/* Undo reloc_toc() before jumping to the kernel proper */
static void unreloc_toc(void)
{
	unsigned long offset = reloc_offset();
	unsigned long nr_entries =
		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);

	mb();

	__reloc_toc(-offset, nr_entries);
}
#endif
#endif
/*
 * We enter here early on, when the Open Firmware prom is still
 * handling exceptions and the MMU hash table for us.
 */

unsigned long __init prom_init(unsigned long r3, unsigned long r4,
			       unsigned long pp,
			       unsigned long r6, unsigned long r7,
			       unsigned long kbase)
{
	unsigned long hdr;

#ifdef CONFIG_PPC32
	unsigned long offset = reloc_offset();
	reloc_got2(offset);
#else
	reloc_toc();
#endif

	/*
	 * First zero the BSS
	 */
	memset(&__bss_start, 0, __bss_stop - __bss_start);

	/*
	 * Init interface to Open Firmware, get some node references,
	 * like /chosen
	 */
	prom_init_client_services(pp);

	/*
	 * See if this OF is old enough that we need to do explicit maps
	 * and other workarounds
	 */
	prom_find_mmu();

	/*
	 * Init prom stdout device
	 */
	prom_init_stdout();

	prom_printf("Preparing to boot %s", linux_banner);

	/*
	 * Get default machine type. At this point, we do not differentiate
	 * between pSeries SMP and pSeries LPAR
	 */
	of_platform = prom_find_machine_type();
	prom_printf("Detected machine type: %x\n", of_platform);

#ifndef CONFIG_NONSTATIC_KERNEL
	/* Bail if this is a kdump kernel. */
	if (PHYSICAL_START > 0)
		prom_panic("Error: You can't boot a kdump kernel from OF!\n");
#endif

	/*
	 * Check for an initrd
	 */
	prom_check_initrd(r3, r4);

	/*
	 * Do early parsing of command line
	 */
	early_cmdline_parse();

#ifdef CONFIG_PPC_PSERIES
	/*
	 * On pSeries, inform the firmware about our capabilities
	 */
	if (of_platform == PLATFORM_PSERIES ||
	    of_platform == PLATFORM_PSERIES_LPAR)
		prom_send_capabilities();
#endif

	/*
	 * Copy the CPU hold code
	 */
	if (of_platform != PLATFORM_POWERMAC)
		copy_and_flush(0, kbase, 0x100, 0);

	/*
	 * Initialize memory management within prom_init
	 */
	prom_init_mem();

	/*
	 * Determine which cpu is actually running right _now_
	 */
	prom_find_boot_cpu();

	/*
	 * Initialize display devices
	 */
	prom_check_displays();

#if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
	/*
	 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
	 * that uses the allocator, we need to make sure we get the top of memory
	 * available for us here...
	 */
	if (of_platform == PLATFORM_PSERIES)
		prom_initialize_tce_table();
#endif

	/*
	 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
	 * have a usable RTAS implementation.
	 */
	if (of_platform != PLATFORM_POWERMAC)
		prom_instantiate_rtas();

#ifdef CONFIG_PPC64
	/* instantiate sml */
	prom_instantiate_sml();
#endif

	/*
	 * On non-powermacs, put all CPUs in spin-loops.
	 *
	 * PowerMacs use a different mechanism to spin CPUs
	 *
	 * (This must be done after instanciating RTAS)
	 */
	if (of_platform != PLATFORM_POWERMAC)
		prom_hold_cpus();

	/*
	 * Fill in some infos for use by the kernel later on
	 */
	if (prom_memory_limit) {
		__be64 val = cpu_to_be64(prom_memory_limit);
		prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
			     &val, sizeof(val));
	}
#ifdef CONFIG_PPC64
	if (prom_iommu_off)
		prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
			     NULL, 0);

	if (prom_iommu_force_on)
		prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
			     NULL, 0);

	if (prom_tce_alloc_start) {
		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
			     &prom_tce_alloc_start,
			     sizeof(prom_tce_alloc_start));
		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
			     &prom_tce_alloc_end,
			     sizeof(prom_tce_alloc_end));
	}
#endif

	/*
	 * Fixup any known bugs in the device-tree
	 */
	fixup_device_tree();

	/*
	 * Now finally create the flattened device-tree
	 */
	prom_printf("copying OF device tree...\n");
	flatten_device_tree();

	/*
	 * in case stdin is USB and still active on IBM machines...
	 * Unfortunately quiesce crashes on some powermacs if we have
	 * closed stdin already (in particular the powerbook 101).
	 */
	if (of_platform != PLATFORM_POWERMAC)
		prom_close_stdin();

	/*
	 * Call OF "quiesce" method to shut down pending DMA's from
	 * devices etc...
	 */
	prom_printf("Quiescing Open Firmware ...\n");
	call_prom("quiesce", 0, 0);

	/*
	 * And finally, call the kernel passing it the flattened device
	 * tree and NULL as r5, thus triggering the new entry point which
	 * is common to us and kexec
	 */
	hdr = dt_header_start;

	/* Don't print anything after quiesce under OPAL, it crashes OFW */
	prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
	prom_debug("->dt_header_start=0x%lx\n", hdr);

#ifdef CONFIG_PPC32
	reloc_got2(-offset);
#else
	unreloc_toc();
#endif

	/* Never returns: the kernel takes over from here */
	__start(hdr, kbase, 0, 0, 0, 0, 0);

	return 0;
}