1 /* 2 * Procedures for interfacing to Open Firmware. 3 * 4 * Paul Mackerras August 1996. 5 * Copyright (C) 1996-2005 Paul Mackerras. 6 * 7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. 8 * {engebret|bergner}@us.ibm.com 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public License 12 * as published by the Free Software Foundation; either version 13 * 2 of the License, or (at your option) any later version. 14 */ 15 16 #undef DEBUG_PROM 17 18 #include <stdarg.h> 19 #include <linux/kernel.h> 20 #include <linux/string.h> 21 #include <linux/init.h> 22 #include <linux/threads.h> 23 #include <linux/spinlock.h> 24 #include <linux/types.h> 25 #include <linux/pci.h> 26 #include <linux/proc_fs.h> 27 #include <linux/stringify.h> 28 #include <linux/delay.h> 29 #include <linux/initrd.h> 30 #include <linux/bitops.h> 31 #include <asm/prom.h> 32 #include <asm/rtas.h> 33 #include <asm/page.h> 34 #include <asm/processor.h> 35 #include <asm/irq.h> 36 #include <asm/io.h> 37 #include <asm/smp.h> 38 #include <asm/mmu.h> 39 #include <asm/pgtable.h> 40 #include <asm/iommu.h> 41 #include <asm/btext.h> 42 #include <asm/sections.h> 43 #include <asm/machdep.h> 44 #include <asm/opal.h> 45 46 #include <linux/linux_logo.h> 47 48 /* 49 * Eventually bump that one up 50 */ 51 #define DEVTREE_CHUNK_SIZE 0x100000 52 53 /* 54 * This is the size of the local memory reserve map that gets copied 55 * into the boot params passed to the kernel. That size is totally 56 * flexible as the kernel just reads the list until it encounters an 57 * entry with size 0, so it can be changed without breaking binary 58 * compatibility 59 */ 60 #define MEM_RESERVE_MAP_SIZE 8 61 62 /* 63 * prom_init() is called very early on, before the kernel text 64 * and data have been mapped to KERNELBASE. At this point the code 65 * is running at whatever address it has been loaded at. 66 * On ppc32 we compile with -mrelocatable, which means that references 67 * to extern and static variables get relocated automatically. 68 * ppc64 objects are always relocatable, we just need to relocate the 69 * TOC. 70 * 71 * Because OF may have mapped I/O devices into the area starting at 72 * KERNELBASE, particularly on CHRP machines, we can't safely call 73 * OF once the kernel has been mapped to KERNELBASE. Therefore all 74 * OF calls must be done within prom_init(). 75 * 76 * ADDR is used in calls to call_prom. The 4th and following 77 * arguments to call_prom should be 32-bit values. 78 * On ppc64, 64 bit values are truncated to 32 bits (and 79 * fortunately don't get interpreted as two arguments). 80 */ 81 #define ADDR(x) (u32)(unsigned long)(x) 82 83 #ifdef CONFIG_PPC64 84 #define OF_WORKAROUNDS 0 85 #else 86 #define OF_WORKAROUNDS of_workarounds 87 int of_workarounds; 88 #endif 89 90 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */ 91 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */ 92 93 #define PROM_BUG() do { \ 94 prom_printf("kernel BUG at %s line 0x%x!\n", \ 95 __FILE__, __LINE__); \ 96 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \ 97 } while (0) 98 99 #ifdef DEBUG_PROM 100 #define prom_debug(x...) prom_printf(x) 101 #else 102 #define prom_debug(x...) 
103 #endif 104 105 106 typedef u32 prom_arg_t; 107 108 struct prom_args { 109 __be32 service; 110 __be32 nargs; 111 __be32 nret; 112 __be32 args[10]; 113 }; 114 115 struct prom_t { 116 ihandle root; 117 phandle chosen; 118 int cpu; 119 ihandle stdout; 120 ihandle mmumap; 121 ihandle memory; 122 }; 123 124 struct mem_map_entry { 125 __be64 base; 126 __be64 size; 127 }; 128 129 typedef __be32 cell_t; 130 131 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5, 132 unsigned long r6, unsigned long r7, unsigned long r8, 133 unsigned long r9); 134 135 #ifdef CONFIG_PPC64 136 extern int enter_prom(struct prom_args *args, unsigned long entry); 137 #else 138 static inline int enter_prom(struct prom_args *args, unsigned long entry) 139 { 140 return ((int (*)(struct prom_args *))entry)(args); 141 } 142 #endif 143 144 extern void copy_and_flush(unsigned long dest, unsigned long src, 145 unsigned long size, unsigned long offset); 146 147 /* prom structure */ 148 static struct prom_t __initdata prom; 149 150 static unsigned long prom_entry __initdata; 151 152 #define PROM_SCRATCH_SIZE 256 153 154 static char __initdata of_stdout_device[256]; 155 static char __initdata prom_scratch[PROM_SCRATCH_SIZE]; 156 157 static unsigned long __initdata dt_header_start; 158 static unsigned long __initdata dt_struct_start, dt_struct_end; 159 static unsigned long __initdata dt_string_start, dt_string_end; 160 161 static unsigned long __initdata prom_initrd_start, prom_initrd_end; 162 163 #ifdef CONFIG_PPC64 164 static int __initdata prom_iommu_force_on; 165 static int __initdata prom_iommu_off; 166 static unsigned long __initdata prom_tce_alloc_start; 167 static unsigned long __initdata prom_tce_alloc_end; 168 #endif 169 170 /* Platforms codes are now obsolete in the kernel. Now only used within this 171 * file and ultimately gone too. Feel free to change them if you need, they 172 * are not shared with anything outside of this file anymore 173 */ 174 #define PLATFORM_PSERIES 0x0100 175 #define PLATFORM_PSERIES_LPAR 0x0101 176 #define PLATFORM_LPAR 0x0001 177 #define PLATFORM_POWERMAC 0x0400 178 #define PLATFORM_GENERIC 0x0500 179 #define PLATFORM_OPAL 0x0600 180 181 static int __initdata of_platform; 182 183 static char __initdata prom_cmd_line[COMMAND_LINE_SIZE]; 184 185 static unsigned long __initdata prom_memory_limit; 186 187 static unsigned long __initdata alloc_top; 188 static unsigned long __initdata alloc_top_high; 189 static unsigned long __initdata alloc_bottom; 190 static unsigned long __initdata rmo_top; 191 static unsigned long __initdata ram_top; 192 193 static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE]; 194 static int __initdata mem_reserve_cnt; 195 196 static cell_t __initdata regbuf[1024]; 197 198 static bool rtas_has_query_cpu_stopped; 199 200 201 /* 202 * Error results ... some OF calls will return "-1" on error, some 203 * will return 0, some will return either. To simplify, here are 204 * macros to use with any ihandle or phandle return value to check if 205 * it is valid 206 */ 207 208 #define PROM_ERROR (-1u) 209 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR) 210 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR) 211 212 213 /* This is the one and *ONLY* place where we actually call open 214 * firmware. 215 */ 216 217 static int __init call_prom(const char *service, int nargs, int nret, ...) 
218 { 219 int i; 220 struct prom_args args; 221 va_list list; 222 223 args.service = cpu_to_be32(ADDR(service)); 224 args.nargs = cpu_to_be32(nargs); 225 args.nret = cpu_to_be32(nret); 226 227 va_start(list, nret); 228 for (i = 0; i < nargs; i++) 229 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t)); 230 va_end(list); 231 232 for (i = 0; i < nret; i++) 233 args.args[nargs+i] = 0; 234 235 if (enter_prom(&args, prom_entry) < 0) 236 return PROM_ERROR; 237 238 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0; 239 } 240 241 static int __init call_prom_ret(const char *service, int nargs, int nret, 242 prom_arg_t *rets, ...) 243 { 244 int i; 245 struct prom_args args; 246 va_list list; 247 248 args.service = cpu_to_be32(ADDR(service)); 249 args.nargs = cpu_to_be32(nargs); 250 args.nret = cpu_to_be32(nret); 251 252 va_start(list, rets); 253 for (i = 0; i < nargs; i++) 254 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t)); 255 va_end(list); 256 257 for (i = 0; i < nret; i++) 258 args.args[nargs+i] = 0; 259 260 if (enter_prom(&args, prom_entry) < 0) 261 return PROM_ERROR; 262 263 if (rets != NULL) 264 for (i = 1; i < nret; ++i) 265 rets[i-1] = be32_to_cpu(args.args[nargs+i]); 266 267 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0; 268 } 269 270 271 static void __init prom_print(const char *msg) 272 { 273 const char *p, *q; 274 275 if (prom.stdout == 0) 276 return; 277 278 for (p = msg; *p != 0; p = q) { 279 for (q = p; *q != 0 && *q != '\n'; ++q) 280 ; 281 if (q > p) 282 call_prom("write", 3, 1, prom.stdout, p, q - p); 283 if (*q == 0) 284 break; 285 ++q; 286 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2); 287 } 288 } 289 290 291 static void __init prom_print_hex(unsigned long val) 292 { 293 int i, nibbles = sizeof(val)*2; 294 char buf[sizeof(val)*2+1]; 295 296 for (i = nibbles-1; i >= 0; i--) { 297 buf[i] = (val & 0xf) + '0'; 298 if (buf[i] > '9') 299 buf[i] += ('a'-'0'-10); 300 val >>= 4; 301 } 302 buf[nibbles] = '\0'; 303 call_prom("write", 3, 1, prom.stdout, buf, nibbles); 304 } 305 306 /* max number of decimal digits in an unsigned long */ 307 #define UL_DIGITS 21 308 static void __init prom_print_dec(unsigned long val) 309 { 310 int i, size; 311 char buf[UL_DIGITS+1]; 312 313 for (i = UL_DIGITS-1; i >= 0; i--) { 314 buf[i] = (val % 10) + '0'; 315 val = val/10; 316 if (val == 0) 317 break; 318 } 319 /* shift stuff down */ 320 size = UL_DIGITS - i; 321 call_prom("write", 3, 1, prom.stdout, buf+i, size); 322 } 323 324 static void __init prom_printf(const char *format, ...) 
325 { 326 const char *p, *q, *s; 327 va_list args; 328 unsigned long v; 329 long vs; 330 331 va_start(args, format); 332 for (p = format; *p != 0; p = q) { 333 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q) 334 ; 335 if (q > p) 336 call_prom("write", 3, 1, prom.stdout, p, q - p); 337 if (*q == 0) 338 break; 339 if (*q == '\n') { 340 ++q; 341 call_prom("write", 3, 1, prom.stdout, 342 ADDR("\r\n"), 2); 343 continue; 344 } 345 ++q; 346 if (*q == 0) 347 break; 348 switch (*q) { 349 case 's': 350 ++q; 351 s = va_arg(args, const char *); 352 prom_print(s); 353 break; 354 case 'x': 355 ++q; 356 v = va_arg(args, unsigned long); 357 prom_print_hex(v); 358 break; 359 case 'd': 360 ++q; 361 vs = va_arg(args, int); 362 if (vs < 0) { 363 prom_print("-"); 364 vs = -vs; 365 } 366 prom_print_dec(vs); 367 break; 368 case 'l': 369 ++q; 370 if (*q == 0) 371 break; 372 else if (*q == 'x') { 373 ++q; 374 v = va_arg(args, unsigned long); 375 prom_print_hex(v); 376 } else if (*q == 'u') { /* '%lu' */ 377 ++q; 378 v = va_arg(args, unsigned long); 379 prom_print_dec(v); 380 } else if (*q == 'd') { /* %ld */ 381 ++q; 382 vs = va_arg(args, long); 383 if (vs < 0) { 384 prom_print("-"); 385 vs = -vs; 386 } 387 prom_print_dec(vs); 388 } 389 break; 390 } 391 } 392 va_end(args); 393 } 394 395 396 static unsigned int __init prom_claim(unsigned long virt, unsigned long size, 397 unsigned long align) 398 { 399 400 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) { 401 /* 402 * Old OF requires we claim physical and virtual separately 403 * and then map explicitly (assuming virtual mode) 404 */ 405 int ret; 406 prom_arg_t result; 407 408 ret = call_prom_ret("call-method", 5, 2, &result, 409 ADDR("claim"), prom.memory, 410 align, size, virt); 411 if (ret != 0 || result == -1) 412 return -1; 413 ret = call_prom_ret("call-method", 5, 2, &result, 414 ADDR("claim"), prom.mmumap, 415 align, size, virt); 416 if (ret != 0) { 417 call_prom("call-method", 4, 1, ADDR("release"), 418 prom.memory, size, virt); 419 return -1; 420 } 421 /* the 0x12 is M (coherence) + PP == read/write */ 422 call_prom("call-method", 6, 1, 423 ADDR("map"), prom.mmumap, 0x12, size, virt, virt); 424 return virt; 425 } 426 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size, 427 (prom_arg_t)align); 428 } 429 430 static void __init __attribute__((noreturn)) prom_panic(const char *reason) 431 { 432 prom_print(reason); 433 /* Do not call exit because it clears the screen on pmac 434 * it also causes some sort of double-fault on early pmacs */ 435 if (of_platform == PLATFORM_POWERMAC) 436 asm("trap\n"); 437 438 /* ToDo: should put up an SRC here on pSeries */ 439 call_prom("exit", 0, 0); 440 441 for (;;) /* should never get here */ 442 ; 443 } 444 445 446 static int __init prom_next_node(phandle *nodep) 447 { 448 phandle node; 449 450 if ((node = *nodep) != 0 451 && (*nodep = call_prom("child", 1, 1, node)) != 0) 452 return 1; 453 if ((*nodep = call_prom("peer", 1, 1, node)) != 0) 454 return 1; 455 for (;;) { 456 if ((node = call_prom("parent", 1, 1, node)) == 0) 457 return 0; 458 if ((*nodep = call_prom("peer", 1, 1, node)) != 0) 459 return 1; 460 } 461 } 462 463 static int inline prom_getprop(phandle node, const char *pname, 464 void *value, size_t valuelen) 465 { 466 return call_prom("getprop", 4, 1, node, ADDR(pname), 467 (u32)(unsigned long) value, (u32) valuelen); 468 } 469 470 static int inline prom_getproplen(phandle node, const char *pname) 471 { 472 return call_prom("getproplen", 2, 1, node, ADDR(pname)); 473 } 474 475 static void 
add_string(char **str, const char *q) 476 { 477 char *p = *str; 478 479 while (*q) 480 *p++ = *q++; 481 *p++ = ' '; 482 *str = p; 483 } 484 485 static char *tohex(unsigned int x) 486 { 487 static char digits[] = "0123456789abcdef"; 488 static char result[9]; 489 int i; 490 491 result[8] = 0; 492 i = 8; 493 do { 494 --i; 495 result[i] = digits[x & 0xf]; 496 x >>= 4; 497 } while (x != 0 && i > 0); 498 return &result[i]; 499 } 500 501 static int __init prom_setprop(phandle node, const char *nodename, 502 const char *pname, void *value, size_t valuelen) 503 { 504 char cmd[256], *p; 505 506 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL)) 507 return call_prom("setprop", 4, 1, node, ADDR(pname), 508 (u32)(unsigned long) value, (u32) valuelen); 509 510 /* gah... setprop doesn't work on longtrail, have to use interpret */ 511 p = cmd; 512 add_string(&p, "dev"); 513 add_string(&p, nodename); 514 add_string(&p, tohex((u32)(unsigned long) value)); 515 add_string(&p, tohex(valuelen)); 516 add_string(&p, tohex(ADDR(pname))); 517 add_string(&p, tohex(strlen(pname))); 518 add_string(&p, "property"); 519 *p = 0; 520 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd); 521 } 522 523 /* We can't use the standard versions because of relocation headaches. */ 524 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \ 525 || ('a' <= (c) && (c) <= 'f') \ 526 || ('A' <= (c) && (c) <= 'F')) 527 528 #define isdigit(c) ('0' <= (c) && (c) <= '9') 529 #define islower(c) ('a' <= (c) && (c) <= 'z') 530 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c)) 531 532 static unsigned long prom_strtoul(const char *cp, const char **endp) 533 { 534 unsigned long result = 0, base = 10, value; 535 536 if (*cp == '0') { 537 base = 8; 538 cp++; 539 if (toupper(*cp) == 'X') { 540 cp++; 541 base = 16; 542 } 543 } 544 545 while (isxdigit(*cp) && 546 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) { 547 result = result * base + value; 548 cp++; 549 } 550 551 if (endp) 552 *endp = cp; 553 554 return result; 555 } 556 557 static unsigned long prom_memparse(const char *ptr, const char **retptr) 558 { 559 unsigned long ret = prom_strtoul(ptr, retptr); 560 int shift = 0; 561 562 /* 563 * We can't use a switch here because GCC *may* generate a 564 * jump table which won't work, because we're not running at 565 * the address we're linked at. 
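	 * (A switch could be compiled into an indirect jump through a table
	 * of absolute addresses kept in .rodata; those addresses are only
	 * valid at the link address, so before relocation such a jump would
	 * land in the wrong place. Sequential if-tests avoid that.)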
566 */ 567 if ('G' == **retptr || 'g' == **retptr) 568 shift = 30; 569 570 if ('M' == **retptr || 'm' == **retptr) 571 shift = 20; 572 573 if ('K' == **retptr || 'k' == **retptr) 574 shift = 10; 575 576 if (shift) { 577 ret <<= shift; 578 (*retptr)++; 579 } 580 581 return ret; 582 } 583 584 /* 585 * Early parsing of the command line passed to the kernel, used for 586 * "mem=x" and the options that affect the iommu 587 */ 588 static void __init early_cmdline_parse(void) 589 { 590 const char *opt; 591 592 char *p; 593 int l = 0; 594 595 prom_cmd_line[0] = 0; 596 p = prom_cmd_line; 597 if ((long)prom.chosen > 0) 598 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1); 599 #ifdef CONFIG_CMDLINE 600 if (l <= 0 || p[0] == '\0') /* dbl check */ 601 strlcpy(prom_cmd_line, 602 CONFIG_CMDLINE, sizeof(prom_cmd_line)); 603 #endif /* CONFIG_CMDLINE */ 604 prom_printf("command line: %s\n", prom_cmd_line); 605 606 #ifdef CONFIG_PPC64 607 opt = strstr(prom_cmd_line, "iommu="); 608 if (opt) { 609 prom_printf("iommu opt is: %s\n", opt); 610 opt += 6; 611 while (*opt && *opt == ' ') 612 opt++; 613 if (!strncmp(opt, "off", 3)) 614 prom_iommu_off = 1; 615 else if (!strncmp(opt, "force", 5)) 616 prom_iommu_force_on = 1; 617 } 618 #endif 619 opt = strstr(prom_cmd_line, "mem="); 620 if (opt) { 621 opt += 4; 622 prom_memory_limit = prom_memparse(opt, (const char **)&opt); 623 #ifdef CONFIG_PPC64 624 /* Align to 16 MB == size of ppc64 large page */ 625 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000); 626 #endif 627 } 628 } 629 630 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) 631 /* 632 * The architecture vector has an array of PVR mask/value pairs, 633 * followed by # option vectors - 1, followed by the option vectors. 634 * 635 * See prom.h for the definition of the bits specified in the 636 * architecture vector. 637 * 638 * Because the description vector contains a mix of byte and word 639 * values, we declare it as an unsigned char array, and use this 640 * macro to put word values in. 641 */ 642 #define W(x) ((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \ 643 ((x) >> 8) & 0xff, (x) & 0xff 644 645 /* Firmware expects the value to be n - 1, where n is the # of vectors */ 646 #define NUM_VECTORS(n) ((n) - 1) 647 648 /* 649 * Firmware expects 1 + n - 2, where n is the length of the option vector in 650 * bytes. The 1 accounts for the length byte itself, the - 2 .. ? 
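 * (Presumably the firmware-visible length byte is the total size of the
 * option vector, length byte included, minus 2, while n here counts only
 * the bytes that follow the length byte: 1 + n - 2 == (n + 1) - 2.
 * E.g. option vector 1 below carries two data bytes and is declared
 * VECTOR_LENGTH(2), so the stored length byte is 1.)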
 */
#define VECTOR_LENGTH(n)	(1 + (n) - 2)

unsigned char ibm_architecture_vec[] = {
	W(0xfffe0000), W(0x003a0000),	/* POWER5/POWER5+ */
	W(0xffff0000), W(0x003e0000),	/* POWER6 */
	W(0xffff0000), W(0x003f0000),	/* POWER7 */
	W(0xffff0000), W(0x004b0000),	/* POWER8E */
	W(0xffff0000), W(0x004c0000),	/* POWER8NVL */
	W(0xffff0000), W(0x004d0000),	/* POWER8 */
	W(0xffffffff), W(0x0f000004),	/* all 2.07-compliant */
	W(0xffffffff), W(0x0f000003),	/* all 2.06-compliant */
	W(0xffffffff), W(0x0f000002),	/* all 2.05-compliant */
	W(0xfffffffe), W(0x0f000001),	/* all 2.04-compliant and earlier */
	NUM_VECTORS(6),			/* 6 option vectors */

	/* option vector 1: processor architectures supported */
	VECTOR_LENGTH(2),		/* length */
	0,				/* don't ignore, don't halt */
	OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
	OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,

	/* option vector 2: Open Firmware options supported */
	VECTOR_LENGTH(33),		/* length */
	OV2_REAL_MODE,
	0, 0,
	W(0xffffffff),			/* real_base */
	W(0xffffffff),			/* real_size */
	W(0xffffffff),			/* virt_base */
	W(0xffffffff),			/* virt_size */
	W(0xffffffff),			/* load_base */
	W(256),				/* 256MB min RMA */
	W(0xffffffff),			/* full client load */
	0,				/* min RMA percentage of total RAM */
	48,				/* max log_2(hash table size) */

	/* option vector 3: processor options supported */
	VECTOR_LENGTH(2),		/* length */
	0,				/* don't ignore, don't halt */
	OV3_FP | OV3_VMX | OV3_DFP,

	/* option vector 4: IBM PAPR implementation */
	VECTOR_LENGTH(2),		/* length */
	0,				/* don't halt */
	OV4_MIN_ENT_CAP,		/* minimum VP entitled capacity */

	/* option vector 5: PAPR/OF options */
	VECTOR_LENGTH(21),		/* length */
	0,				/* don't ignore, don't halt */
	OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
	OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
#ifdef CONFIG_PCI_MSI
	/* PCIe/MSI support. Without MSI full PCIe is not supported */
	OV5_FEAT(OV5_MSI),
#else
	0,
#endif
	0,
#ifdef CONFIG_PPC_SMLPAR
	OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
#else
	0,
#endif
	OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
	0,
	0,
	0,
	/* WARNING: The offset of the "number of cores" field below
	 * must be kept in sync with the macro below. Update the
	 * definition if the structure layout changes.
	 */
#define IBM_ARCH_VEC_NRCORES_OFFSET	133
	W(NR_CPUS),			/* number of cores supported */
	0,
	0,
	0,
	0,
	OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
	OV5_FEAT(OV5_PFO_HW_842),	/* Byte 17 */
	0,				/* Byte 18 */
	0,				/* Byte 19 */
	0,				/* Byte 20 */
	OV5_FEAT(OV5_SUB_PROCESSORS),	/* Byte 21 */

	/* option vector 6: IBM PAPR hints */
	VECTOR_LENGTH(3),		/* length */
	0,
	0,
	OV6_LINUX,
};

/* Old method - ELF header with PT_NOTE sections only works on BE */
#ifdef __BIG_ENDIAN__
static struct fake_elf {
	Elf32_Ehdr	elfhdr;
	Elf32_Phdr	phdr[2];
	struct chrpnote {
		u32	namesz;
		u32	descsz;
		u32	type;
		char	name[8];	/* "PowerPC" */
		struct chrpdesc {
			u32	real_mode;
			u32	real_base;
			u32	real_size;
			u32	virt_base;
			u32	virt_size;
			u32	load_base;
		} chrpdesc;
	} chrpnote;
	struct rpanote {
		u32	namesz;
		u32	descsz;
		u32	type;
		char	name[24];	/* "IBM,RPA-Client-Config" */
		struct rpadesc {
			u32	lpar_affinity;
			u32	min_rmo_size;
			u32	min_rmo_percent;
			u32	max_pft_size;
			u32	splpar;
			u32	min_load;
			u32	new_mem_def;
			u32	ignore_me;
		} rpadesc;
	} rpanote;
} fake_elf = {
	.elfhdr = {
		.e_ident = { 0x7f, 'E', 'L', 'F',
			     ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
		.e_type = ET_EXEC,	/* yeah right */
		.e_machine = EM_PPC,
		.e_version = EV_CURRENT,
		.e_phoff = offsetof(struct fake_elf, phdr),
		.e_phentsize = sizeof(Elf32_Phdr),
		.e_phnum = 2
	},
	.phdr = {
		[0] = {
			.p_type = PT_NOTE,
			.p_offset = offsetof(struct fake_elf, chrpnote),
			.p_filesz = sizeof(struct chrpnote)
		}, [1] = {
			.p_type = PT_NOTE,
			.p_offset = offsetof(struct fake_elf, rpanote),
			.p_filesz = sizeof(struct rpanote)
		}
	},
	.chrpnote = {
		.namesz = sizeof("PowerPC"),
		.descsz = sizeof(struct chrpdesc),
		.type = 0x1275,
		.name = "PowerPC",
		.chrpdesc = {
			.real_mode = ~0U,	/* ~0 means "don't care" */
			.real_base = ~0U,
			.real_size = ~0U,
			.virt_base = ~0U,
			.virt_size = ~0U,
			.load_base = ~0U
		},
	},
	.rpanote = {
		.namesz = sizeof("IBM,RPA-Client-Config"),
		.descsz = sizeof(struct rpadesc),
		.type = 0x12759999,
		.name = "IBM,RPA-Client-Config",
		.rpadesc = {
			.lpar_affinity = 0,
			.min_rmo_size = 64,	/* in megabytes */
			.min_rmo_percent = 0,
			.max_pft_size = 48,	/* 2^48 bytes max PFT size */
			.splpar = 1,
			.min_load = ~0U,
			.new_mem_def = 0
		}
	}
};
#endif /* __BIG_ENDIAN__ */

static int __init prom_count_smt_threads(void)
{
	phandle node;
	char type[64];
	unsigned int plen;

	/* Pick up the first CPU node we can find */
	for (node = 0; prom_next_node(&node); ) {
		type[0] = 0;
		prom_getprop(node, "device_type", type, sizeof(type));

		if (strcmp(type, "cpu"))
			continue;
		/*
		 * There is an entry for each smt thread, each entry being
		 * 4 bytes long. All cpus should have the same number of
		 * smt threads, so return after finding the first.
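		 * For example, a core with 8 SMT threads exposes a 32-byte
		 * "ibm,ppc-interrupt-server#s" property (8 cells of 4 bytes),
		 * so the plen >>= 2 below yields 8.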
848 */ 849 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s"); 850 if (plen == PROM_ERROR) 851 break; 852 plen >>= 2; 853 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen); 854 855 /* Sanity check */ 856 if (plen < 1 || plen > 64) { 857 prom_printf("Threads per core %lu out of bounds, assuming 1\n", 858 (unsigned long)plen); 859 return 1; 860 } 861 return plen; 862 } 863 prom_debug("No threads found, assuming 1 per core\n"); 864 865 return 1; 866 867 } 868 869 870 static void __init prom_send_capabilities(void) 871 { 872 ihandle root; 873 prom_arg_t ret; 874 u32 cores; 875 unsigned char *ptcores; 876 877 root = call_prom("open", 1, 1, ADDR("/")); 878 if (root != 0) { 879 /* We need to tell the FW about the number of cores we support. 880 * 881 * To do that, we count the number of threads on the first core 882 * (we assume this is the same for all cores) and use it to 883 * divide NR_CPUS. 884 */ 885 886 /* The core value may start at an odd address. If such a word 887 * access is made at a cache line boundary, this leads to an 888 * exception which may not be handled at this time. 889 * Forcing a per byte access to avoid exception. 890 */ 891 ptcores = &ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET]; 892 cores = 0; 893 cores |= ptcores[0] << 24; 894 cores |= ptcores[1] << 16; 895 cores |= ptcores[2] << 8; 896 cores |= ptcores[3]; 897 if (cores != NR_CPUS) { 898 prom_printf("WARNING ! " 899 "ibm_architecture_vec structure inconsistent: %lu!\n", 900 cores); 901 } else { 902 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()); 903 prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n", 904 cores, NR_CPUS); 905 ptcores[0] = (cores >> 24) & 0xff; 906 ptcores[1] = (cores >> 16) & 0xff; 907 ptcores[2] = (cores >> 8) & 0xff; 908 ptcores[3] = cores & 0xff; 909 } 910 911 /* try calling the ibm,client-architecture-support method */ 912 prom_printf("Calling ibm,client-architecture-support..."); 913 if (call_prom_ret("call-method", 3, 2, &ret, 914 ADDR("ibm,client-architecture-support"), 915 root, 916 ADDR(ibm_architecture_vec)) == 0) { 917 /* the call exists... */ 918 if (ret) 919 prom_printf("\nWARNING: ibm,client-architecture" 920 "-support call FAILED!\n"); 921 call_prom("close", 1, 0, root); 922 prom_printf(" done\n"); 923 return; 924 } 925 call_prom("close", 1, 0, root); 926 prom_printf(" not implemented\n"); 927 } 928 929 #ifdef __BIG_ENDIAN__ 930 { 931 ihandle elfloader; 932 933 /* no ibm,client-architecture-support call, try the old way */ 934 elfloader = call_prom("open", 1, 1, 935 ADDR("/packages/elf-loader")); 936 if (elfloader == 0) { 937 prom_printf("couldn't open /packages/elf-loader\n"); 938 return; 939 } 940 call_prom("call-method", 3, 1, ADDR("process-elf-header"), 941 elfloader, ADDR(&fake_elf)); 942 call_prom("close", 1, 0, elfloader); 943 } 944 #endif /* __BIG_ENDIAN__ */ 945 } 946 #endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */ 947 948 /* 949 * Memory allocation strategy... our layout is normally: 950 * 951 * at 14Mb or more we have vmlinux, then a gap and initrd. In some 952 * rare cases, initrd might end up being before the kernel though. 953 * We assume this won't override the final kernel at 0, we have no 954 * provision to handle that in this version, but it should hopefully 955 * never happen. 
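 *
 * Roughly, within the RMO (illustrative only):
 *
 *   0 .. [vmlinux @ 14MB+] .. [initrd] alloc_bottom -->  free  <-- alloc_top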
956 * 957 * alloc_top is set to the top of RMO, eventually shrink down if the 958 * TCEs overlap 959 * 960 * alloc_bottom is set to the top of kernel/initrd 961 * 962 * from there, allocations are done this way : rtas is allocated 963 * topmost, and the device-tree is allocated from the bottom. We try 964 * to grow the device-tree allocation as we progress. If we can't, 965 * then we fail, we don't currently have a facility to restart 966 * elsewhere, but that shouldn't be necessary. 967 * 968 * Note that calls to reserve_mem have to be done explicitly, memory 969 * allocated with either alloc_up or alloc_down isn't automatically 970 * reserved. 971 */ 972 973 974 /* 975 * Allocates memory in the RMO upward from the kernel/initrd 976 * 977 * When align is 0, this is a special case, it means to allocate in place 978 * at the current location of alloc_bottom or fail (that is basically 979 * extending the previous allocation). Used for the device-tree flattening 980 */ 981 static unsigned long __init alloc_up(unsigned long size, unsigned long align) 982 { 983 unsigned long base = alloc_bottom; 984 unsigned long addr = 0; 985 986 if (align) 987 base = _ALIGN_UP(base, align); 988 prom_debug("alloc_up(%x, %x)\n", size, align); 989 if (ram_top == 0) 990 prom_panic("alloc_up() called with mem not initialized\n"); 991 992 if (align) 993 base = _ALIGN_UP(alloc_bottom, align); 994 else 995 base = alloc_bottom; 996 997 for(; (base + size) <= alloc_top; 998 base = _ALIGN_UP(base + 0x100000, align)) { 999 prom_debug(" trying: 0x%x\n\r", base); 1000 addr = (unsigned long)prom_claim(base, size, 0); 1001 if (addr != PROM_ERROR && addr != 0) 1002 break; 1003 addr = 0; 1004 if (align == 0) 1005 break; 1006 } 1007 if (addr == 0) 1008 return 0; 1009 alloc_bottom = addr + size; 1010 1011 prom_debug(" -> %x\n", addr); 1012 prom_debug(" alloc_bottom : %x\n", alloc_bottom); 1013 prom_debug(" alloc_top : %x\n", alloc_top); 1014 prom_debug(" alloc_top_hi : %x\n", alloc_top_high); 1015 prom_debug(" rmo_top : %x\n", rmo_top); 1016 prom_debug(" ram_top : %x\n", ram_top); 1017 1018 return addr; 1019 } 1020 1021 /* 1022 * Allocates memory downward, either from top of RMO, or if highmem 1023 * is set, from the top of RAM. Note that this one doesn't handle 1024 * failures. It does claim memory if highmem is not set. 1025 */ 1026 static unsigned long __init alloc_down(unsigned long size, unsigned long align, 1027 int highmem) 1028 { 1029 unsigned long base, addr = 0; 1030 1031 prom_debug("alloc_down(%x, %x, %s)\n", size, align, 1032 highmem ? "(high)" : "(low)"); 1033 if (ram_top == 0) 1034 prom_panic("alloc_down() called with mem not initialized\n"); 1035 1036 if (highmem) { 1037 /* Carve out storage for the TCE table. */ 1038 addr = _ALIGN_DOWN(alloc_top_high - size, align); 1039 if (addr <= alloc_bottom) 1040 return 0; 1041 /* Will we bump into the RMO ? If yes, check out that we 1042 * didn't overlap existing allocations there, if we did, 1043 * we are dead, we must be the first in town ! 
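		 * ("First" is detected below by alloc_top still sitting at
		 * rmo_top: if nothing has been carved out of the RMO yet we
		 * can lower both to the new address, otherwise we give up.)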
		 */
		if (addr < rmo_top) {
			/* Good, we are first */
			if (alloc_top == rmo_top)
				alloc_top = rmo_top = addr;
			else
				return 0;
		}
		alloc_top_high = addr;
		goto bail;
	}

	base = _ALIGN_DOWN(alloc_top - size, align);
	for (; base > alloc_bottom;
	     base = _ALIGN_DOWN(base - 0x100000, align)) {
		prom_debug(" trying: 0x%x\n\r", base);
		addr = (unsigned long)prom_claim(base, size, 0);
		if (addr != PROM_ERROR && addr != 0)
			break;
		addr = 0;
	}
	if (addr == 0)
		return 0;
	alloc_top = addr;

 bail:
	prom_debug(" -> %x\n", addr);
	prom_debug(" alloc_bottom : %x\n", alloc_bottom);
	prom_debug(" alloc_top : %x\n", alloc_top);
	prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
	prom_debug(" rmo_top : %x\n", rmo_top);
	prom_debug(" ram_top : %x\n", ram_top);

	return addr;
}

/*
 * Parse a "reg" cell
 */
static unsigned long __init prom_next_cell(int s, cell_t **cellp)
{
	cell_t *p = *cellp;
	unsigned long r = 0;

	/* Ignore more than 2 cells */
	while (s > sizeof(unsigned long) / 4) {
		p++;
		s--;
	}
	r = be32_to_cpu(*p++);
#ifdef CONFIG_PPC64
	if (s > 1) {
		r <<= 32;
		r |= be32_to_cpu(*(p++));
	}
#endif
	*cellp = p;
	return r;
}

/*
 * Very dumb function for adding to the memory reserve list, but
 * we don't need anything smarter at this point
 *
 * XXX Eventually check for collisions. They should NEVER happen.
 * If problems seem to show up, it would be a good start to track
 * them down.
 */
static void __init reserve_mem(u64 base, u64 size)
{
	u64 top = base + size;
	unsigned long cnt = mem_reserve_cnt;

	if (size == 0)
		return;

	/* We need to always keep one empty entry so that we
	 * have our terminator with "size" set to 0 since we are
	 * dumb and just copy this entire array to the boot params
	 */
	base = _ALIGN_DOWN(base, PAGE_SIZE);
	top = _ALIGN_UP(top, PAGE_SIZE);
	size = top - base;

	if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
		prom_panic("Memory reserve map exhausted !\n");
	mem_reserve_map[cnt].base = cpu_to_be64(base);
	mem_reserve_map[cnt].size = cpu_to_be64(size);
	mem_reserve_cnt = cnt + 1;
}

/*
 * Initialize memory allocation mechanism, parse "memory" nodes and
 * obtain that way the top of memory and RMO to set up our local allocator
 */
static void __init prom_init_mem(void)
{
	phandle node;
	char *path, type[64];
	unsigned int plen;
	cell_t *p, *endp;
	__be32 val;
	u32 rac, rsc;

	/*
	 * We iterate the memory nodes to find
	 * 1) top of RMO (first node)
	 * 2) top of memory
	 */
	val = cpu_to_be32(2);
	prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
	rac = be32_to_cpu(val);
	val = cpu_to_be32(1);
	prom_getprop(prom.root, "#size-cells", &val, sizeof(val));
	rsc = be32_to_cpu(val);
	prom_debug("root_addr_cells: %x\n", rac);
	prom_debug("root_size_cells: %x\n", rsc);

	prom_debug("scanning memory:\n");
	path = prom_scratch;

	for (node = 0; prom_next_node(&node); ) {
		type[0] = 0;
		prom_getprop(node, "device_type", type, sizeof(type));

		if (type[0] == 0) {
			/*
			 * CHRP Longtrail machines have no device_type
			 * on the memory node, so check the name
instead... 1173 */ 1174 prom_getprop(node, "name", type, sizeof(type)); 1175 } 1176 if (strcmp(type, "memory")) 1177 continue; 1178 1179 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf)); 1180 if (plen > sizeof(regbuf)) { 1181 prom_printf("memory node too large for buffer !\n"); 1182 plen = sizeof(regbuf); 1183 } 1184 p = regbuf; 1185 endp = p + (plen / sizeof(cell_t)); 1186 1187 #ifdef DEBUG_PROM 1188 memset(path, 0, PROM_SCRATCH_SIZE); 1189 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1); 1190 prom_debug(" node %s :\n", path); 1191 #endif /* DEBUG_PROM */ 1192 1193 while ((endp - p) >= (rac + rsc)) { 1194 unsigned long base, size; 1195 1196 base = prom_next_cell(rac, &p); 1197 size = prom_next_cell(rsc, &p); 1198 1199 if (size == 0) 1200 continue; 1201 prom_debug(" %x %x\n", base, size); 1202 if (base == 0 && (of_platform & PLATFORM_LPAR)) 1203 rmo_top = size; 1204 if ((base + size) > ram_top) 1205 ram_top = base + size; 1206 } 1207 } 1208 1209 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000); 1210 1211 /* 1212 * If prom_memory_limit is set we reduce the upper limits *except* for 1213 * alloc_top_high. This must be the real top of RAM so we can put 1214 * TCE's up there. 1215 */ 1216 1217 alloc_top_high = ram_top; 1218 1219 if (prom_memory_limit) { 1220 if (prom_memory_limit <= alloc_bottom) { 1221 prom_printf("Ignoring mem=%x <= alloc_bottom.\n", 1222 prom_memory_limit); 1223 prom_memory_limit = 0; 1224 } else if (prom_memory_limit >= ram_top) { 1225 prom_printf("Ignoring mem=%x >= ram_top.\n", 1226 prom_memory_limit); 1227 prom_memory_limit = 0; 1228 } else { 1229 ram_top = prom_memory_limit; 1230 rmo_top = min(rmo_top, prom_memory_limit); 1231 } 1232 } 1233 1234 /* 1235 * Setup our top alloc point, that is top of RMO or top of 1236 * segment 0 when running non-LPAR. 1237 * Some RS64 machines have buggy firmware where claims up at 1238 * 1GB fail. Cap at 768MB as a workaround. 1239 * Since 768MB is plenty of room, and we need to cap to something 1240 * reasonable on 32-bit, cap at 768MB on all machines. 1241 */ 1242 if (!rmo_top) 1243 rmo_top = ram_top; 1244 rmo_top = min(0x30000000ul, rmo_top); 1245 alloc_top = rmo_top; 1246 alloc_top_high = ram_top; 1247 1248 /* 1249 * Check if we have an initrd after the kernel but still inside 1250 * the RMO. If we do move our bottom point to after it. 
1251 */ 1252 if (prom_initrd_start && 1253 prom_initrd_start < rmo_top && 1254 prom_initrd_end > alloc_bottom) 1255 alloc_bottom = PAGE_ALIGN(prom_initrd_end); 1256 1257 prom_printf("memory layout at init:\n"); 1258 prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit); 1259 prom_printf(" alloc_bottom : %x\n", alloc_bottom); 1260 prom_printf(" alloc_top : %x\n", alloc_top); 1261 prom_printf(" alloc_top_hi : %x\n", alloc_top_high); 1262 prom_printf(" rmo_top : %x\n", rmo_top); 1263 prom_printf(" ram_top : %x\n", ram_top); 1264 } 1265 1266 static void __init prom_close_stdin(void) 1267 { 1268 __be32 val; 1269 ihandle stdin; 1270 1271 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) { 1272 stdin = be32_to_cpu(val); 1273 call_prom("close", 1, 0, stdin); 1274 } 1275 } 1276 1277 #ifdef CONFIG_PPC_POWERNV 1278 1279 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL 1280 static u64 __initdata prom_opal_base; 1281 static u64 __initdata prom_opal_entry; 1282 #endif 1283 1284 /* 1285 * Allocate room for and instantiate OPAL 1286 */ 1287 static void __init prom_instantiate_opal(void) 1288 { 1289 phandle opal_node; 1290 ihandle opal_inst; 1291 u64 base, entry; 1292 u64 size = 0, align = 0x10000; 1293 __be64 val64; 1294 u32 rets[2]; 1295 1296 prom_debug("prom_instantiate_opal: start...\n"); 1297 1298 opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal")); 1299 prom_debug("opal_node: %x\n", opal_node); 1300 if (!PHANDLE_VALID(opal_node)) 1301 return; 1302 1303 val64 = 0; 1304 prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64)); 1305 size = be64_to_cpu(val64); 1306 if (size == 0) 1307 return; 1308 val64 = 0; 1309 prom_getprop(opal_node, "opal-runtime-alignment", &val64,sizeof(val64)); 1310 align = be64_to_cpu(val64); 1311 1312 base = alloc_down(size, align, 0); 1313 if (base == 0) { 1314 prom_printf("OPAL allocation failed !\n"); 1315 return; 1316 } 1317 1318 opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal")); 1319 if (!IHANDLE_VALID(opal_inst)) { 1320 prom_printf("opening opal package failed (%x)\n", opal_inst); 1321 return; 1322 } 1323 1324 prom_printf("instantiating opal at 0x%x...", base); 1325 1326 if (call_prom_ret("call-method", 4, 3, rets, 1327 ADDR("load-opal-runtime"), 1328 opal_inst, 1329 base >> 32, base & 0xffffffff) != 0 1330 || (rets[0] == 0 && rets[1] == 0)) { 1331 prom_printf(" failed\n"); 1332 return; 1333 } 1334 entry = (((u64)rets[0]) << 32) | rets[1]; 1335 1336 prom_printf(" done\n"); 1337 1338 reserve_mem(base, size); 1339 1340 prom_debug("opal base = 0x%x\n", base); 1341 prom_debug("opal align = 0x%x\n", align); 1342 prom_debug("opal entry = 0x%x\n", entry); 1343 prom_debug("opal size = 0x%x\n", (long)size); 1344 1345 prom_setprop(opal_node, "/ibm,opal", "opal-base-address", 1346 &base, sizeof(base)); 1347 prom_setprop(opal_node, "/ibm,opal", "opal-entry-address", 1348 &entry, sizeof(entry)); 1349 1350 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL 1351 prom_opal_base = base; 1352 prom_opal_entry = entry; 1353 #endif 1354 prom_debug("prom_instantiate_opal: end...\n"); 1355 } 1356 1357 #endif /* CONFIG_PPC_POWERNV */ 1358 1359 /* 1360 * Allocate room for and instantiate RTAS 1361 */ 1362 static void __init prom_instantiate_rtas(void) 1363 { 1364 phandle rtas_node; 1365 ihandle rtas_inst; 1366 u32 base, entry = 0; 1367 __be32 val; 1368 u32 size = 0; 1369 1370 prom_debug("prom_instantiate_rtas: start...\n"); 1371 1372 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas")); 1373 prom_debug("rtas_node: %x\n", rtas_node); 1374 if (!PHANDLE_VALID(rtas_node)) 
1375 return; 1376 1377 val = 0; 1378 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size)); 1379 size = be32_to_cpu(val); 1380 if (size == 0) 1381 return; 1382 1383 base = alloc_down(size, PAGE_SIZE, 0); 1384 if (base == 0) 1385 prom_panic("Could not allocate memory for RTAS\n"); 1386 1387 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas")); 1388 if (!IHANDLE_VALID(rtas_inst)) { 1389 prom_printf("opening rtas package failed (%x)\n", rtas_inst); 1390 return; 1391 } 1392 1393 prom_printf("instantiating rtas at 0x%x...", base); 1394 1395 if (call_prom_ret("call-method", 3, 2, &entry, 1396 ADDR("instantiate-rtas"), 1397 rtas_inst, base) != 0 1398 || entry == 0) { 1399 prom_printf(" failed\n"); 1400 return; 1401 } 1402 prom_printf(" done\n"); 1403 1404 reserve_mem(base, size); 1405 1406 val = cpu_to_be32(base); 1407 prom_setprop(rtas_node, "/rtas", "linux,rtas-base", 1408 &val, sizeof(val)); 1409 val = cpu_to_be32(entry); 1410 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry", 1411 &val, sizeof(val)); 1412 1413 /* Check if it supports "query-cpu-stopped-state" */ 1414 if (prom_getprop(rtas_node, "query-cpu-stopped-state", 1415 &val, sizeof(val)) != PROM_ERROR) 1416 rtas_has_query_cpu_stopped = true; 1417 1418 prom_debug("rtas base = 0x%x\n", base); 1419 prom_debug("rtas entry = 0x%x\n", entry); 1420 prom_debug("rtas size = 0x%x\n", (long)size); 1421 1422 prom_debug("prom_instantiate_rtas: end...\n"); 1423 } 1424 1425 #ifdef CONFIG_PPC64 1426 /* 1427 * Allocate room for and instantiate Stored Measurement Log (SML) 1428 */ 1429 static void __init prom_instantiate_sml(void) 1430 { 1431 phandle ibmvtpm_node; 1432 ihandle ibmvtpm_inst; 1433 u32 entry = 0, size = 0, succ = 0; 1434 u64 base; 1435 __be32 val; 1436 1437 prom_debug("prom_instantiate_sml: start...\n"); 1438 1439 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm")); 1440 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node); 1441 if (!PHANDLE_VALID(ibmvtpm_node)) 1442 return; 1443 1444 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm")); 1445 if (!IHANDLE_VALID(ibmvtpm_inst)) { 1446 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst); 1447 return; 1448 } 1449 1450 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported", 1451 &val, sizeof(val)) != PROM_ERROR) { 1452 if (call_prom_ret("call-method", 2, 2, &succ, 1453 ADDR("reformat-sml-to-efi-alignment"), 1454 ibmvtpm_inst) != 0 || succ == 0) { 1455 prom_printf("Reformat SML to EFI alignment failed\n"); 1456 return; 1457 } 1458 1459 if (call_prom_ret("call-method", 2, 2, &size, 1460 ADDR("sml-get-allocated-size"), 1461 ibmvtpm_inst) != 0 || size == 0) { 1462 prom_printf("SML get allocated size failed\n"); 1463 return; 1464 } 1465 } else { 1466 if (call_prom_ret("call-method", 2, 2, &size, 1467 ADDR("sml-get-handover-size"), 1468 ibmvtpm_inst) != 0 || size == 0) { 1469 prom_printf("SML get handover size failed\n"); 1470 return; 1471 } 1472 } 1473 1474 base = alloc_down(size, PAGE_SIZE, 0); 1475 if (base == 0) 1476 prom_panic("Could not allocate memory for sml\n"); 1477 1478 prom_printf("instantiating sml at 0x%x...", base); 1479 1480 memset((void *)base, 0, size); 1481 1482 if (call_prom_ret("call-method", 4, 2, &entry, 1483 ADDR("sml-handover"), 1484 ibmvtpm_inst, size, base) != 0 || entry == 0) { 1485 prom_printf("SML handover failed\n"); 1486 return; 1487 } 1488 prom_printf(" done\n"); 1489 1490 reserve_mem(base, size); 1491 1492 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base", 1493 &base, sizeof(base)); 1494 prom_setprop(ibmvtpm_node, 
"/vdevice/vtpm", "linux,sml-size", 1495 &size, sizeof(size)); 1496 1497 prom_debug("sml base = 0x%x\n", base); 1498 prom_debug("sml size = 0x%x\n", (long)size); 1499 1500 prom_debug("prom_instantiate_sml: end...\n"); 1501 } 1502 1503 /* 1504 * Allocate room for and initialize TCE tables 1505 */ 1506 #ifdef __BIG_ENDIAN__ 1507 static void __init prom_initialize_tce_table(void) 1508 { 1509 phandle node; 1510 ihandle phb_node; 1511 char compatible[64], type[64], model[64]; 1512 char *path = prom_scratch; 1513 u64 base, align; 1514 u32 minalign, minsize; 1515 u64 tce_entry, *tce_entryp; 1516 u64 local_alloc_top, local_alloc_bottom; 1517 u64 i; 1518 1519 if (prom_iommu_off) 1520 return; 1521 1522 prom_debug("starting prom_initialize_tce_table\n"); 1523 1524 /* Cache current top of allocs so we reserve a single block */ 1525 local_alloc_top = alloc_top_high; 1526 local_alloc_bottom = local_alloc_top; 1527 1528 /* Search all nodes looking for PHBs. */ 1529 for (node = 0; prom_next_node(&node); ) { 1530 compatible[0] = 0; 1531 type[0] = 0; 1532 model[0] = 0; 1533 prom_getprop(node, "compatible", 1534 compatible, sizeof(compatible)); 1535 prom_getprop(node, "device_type", type, sizeof(type)); 1536 prom_getprop(node, "model", model, sizeof(model)); 1537 1538 if ((type[0] == 0) || (strstr(type, "pci") == NULL)) 1539 continue; 1540 1541 /* Keep the old logic intact to avoid regression. */ 1542 if (compatible[0] != 0) { 1543 if ((strstr(compatible, "python") == NULL) && 1544 (strstr(compatible, "Speedwagon") == NULL) && 1545 (strstr(compatible, "Winnipeg") == NULL)) 1546 continue; 1547 } else if (model[0] != 0) { 1548 if ((strstr(model, "ython") == NULL) && 1549 (strstr(model, "peedwagon") == NULL) && 1550 (strstr(model, "innipeg") == NULL)) 1551 continue; 1552 } 1553 1554 if (prom_getprop(node, "tce-table-minalign", &minalign, 1555 sizeof(minalign)) == PROM_ERROR) 1556 minalign = 0; 1557 if (prom_getprop(node, "tce-table-minsize", &minsize, 1558 sizeof(minsize)) == PROM_ERROR) 1559 minsize = 4UL << 20; 1560 1561 /* 1562 * Even though we read what OF wants, we just set the table 1563 * size to 4 MB. This is enough to map 2GB of PCI DMA space. 1564 * By doing this, we avoid the pitfalls of trying to DMA to 1565 * MMIO space and the DMA alias hole. 1566 * 1567 * On POWER4, firmware sets the TCE region by assuming 1568 * each TCE table is 8MB. Using this memory for anything 1569 * else will impact performance, so we always allocate 8MB. 1570 * Anton 1571 */ 1572 if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p)) 1573 minsize = 8UL << 20; 1574 else 1575 minsize = 4UL << 20; 1576 1577 /* Align to the greater of the align or size */ 1578 align = max(minalign, minsize); 1579 base = alloc_down(minsize, align, 1); 1580 if (base == 0) 1581 prom_panic("ERROR, cannot find space for TCE table.\n"); 1582 if (base < local_alloc_bottom) 1583 local_alloc_bottom = base; 1584 1585 /* It seems OF doesn't null-terminate the path :-( */ 1586 memset(path, 0, PROM_SCRATCH_SIZE); 1587 /* Call OF to setup the TCE hardware */ 1588 if (call_prom("package-to-path", 3, 1, node, 1589 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) { 1590 prom_printf("package-to-path failed\n"); 1591 } 1592 1593 /* Save away the TCE table attributes for later use. 
*/ 1594 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base)); 1595 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize)); 1596 1597 prom_debug("TCE table: %s\n", path); 1598 prom_debug("\tnode = 0x%x\n", node); 1599 prom_debug("\tbase = 0x%x\n", base); 1600 prom_debug("\tsize = 0x%x\n", minsize); 1601 1602 /* Initialize the table to have a one-to-one mapping 1603 * over the allocated size. 1604 */ 1605 tce_entryp = (u64 *)base; 1606 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) { 1607 tce_entry = (i << PAGE_SHIFT); 1608 tce_entry |= 0x3; 1609 *tce_entryp = tce_entry; 1610 } 1611 1612 prom_printf("opening PHB %s", path); 1613 phb_node = call_prom("open", 1, 1, path); 1614 if (phb_node == 0) 1615 prom_printf("... failed\n"); 1616 else 1617 prom_printf("... done\n"); 1618 1619 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"), 1620 phb_node, -1, minsize, 1621 (u32) base, (u32) (base >> 32)); 1622 call_prom("close", 1, 0, phb_node); 1623 } 1624 1625 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom); 1626 1627 /* These are only really needed if there is a memory limit in 1628 * effect, but we don't know so export them always. */ 1629 prom_tce_alloc_start = local_alloc_bottom; 1630 prom_tce_alloc_end = local_alloc_top; 1631 1632 /* Flag the first invalid entry */ 1633 prom_debug("ending prom_initialize_tce_table\n"); 1634 } 1635 #endif /* __BIG_ENDIAN__ */ 1636 #endif /* CONFIG_PPC64 */ 1637 1638 /* 1639 * With CHRP SMP we need to use the OF to start the other processors. 1640 * We can't wait until smp_boot_cpus (the OF is trashed by then) 1641 * so we have to put the processors into a holding pattern controlled 1642 * by the kernel (not OF) before we destroy the OF. 1643 * 1644 * This uses a chunk of low memory, puts some holding pattern 1645 * code there and sends the other processors off to there until 1646 * smp_boot_cpus tells them to do something. The holding pattern 1647 * checks that address until its cpu # is there, when it is that 1648 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care 1649 * of setting those values. 1650 * 1651 * We also use physical address 0x4 here to tell when a cpu 1652 * is in its holding pattern code. 1653 * 1654 * -- Cort 1655 */ 1656 /* 1657 * We want to reference the copy of __secondary_hold_* in the 1658 * 0 - 0x100 address range 1659 */ 1660 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff) 1661 1662 static void __init prom_hold_cpus(void) 1663 { 1664 unsigned long i; 1665 phandle node; 1666 char type[64]; 1667 unsigned long *spinloop 1668 = (void *) LOW_ADDR(__secondary_hold_spinloop); 1669 unsigned long *acknowledge 1670 = (void *) LOW_ADDR(__secondary_hold_acknowledge); 1671 unsigned long secondary_hold = LOW_ADDR(__secondary_hold); 1672 1673 /* 1674 * On pseries, if RTAS supports "query-cpu-stopped-state", 1675 * we skip this stage, the CPUs will be started by the 1676 * kernel using RTAS. 
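	 * (rtas_has_query_cpu_stopped was set earlier, in
	 * prom_instantiate_rtas(), when the "query-cpu-stopped-state"
	 * token was found in the /rtas node.)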
1677 */ 1678 if ((of_platform == PLATFORM_PSERIES || 1679 of_platform == PLATFORM_PSERIES_LPAR) && 1680 rtas_has_query_cpu_stopped) { 1681 prom_printf("prom_hold_cpus: skipped\n"); 1682 return; 1683 } 1684 1685 prom_debug("prom_hold_cpus: start...\n"); 1686 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop); 1687 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop); 1688 prom_debug(" 1) acknowledge = 0x%x\n", 1689 (unsigned long)acknowledge); 1690 prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge); 1691 prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold); 1692 1693 /* Set the common spinloop variable, so all of the secondary cpus 1694 * will block when they are awakened from their OF spinloop. 1695 * This must occur for both SMP and non SMP kernels, since OF will 1696 * be trashed when we move the kernel. 1697 */ 1698 *spinloop = 0; 1699 1700 /* look for cpus */ 1701 for (node = 0; prom_next_node(&node); ) { 1702 unsigned int cpu_no; 1703 __be32 reg; 1704 1705 type[0] = 0; 1706 prom_getprop(node, "device_type", type, sizeof(type)); 1707 if (strcmp(type, "cpu") != 0) 1708 continue; 1709 1710 /* Skip non-configured cpus. */ 1711 if (prom_getprop(node, "status", type, sizeof(type)) > 0) 1712 if (strcmp(type, "okay") != 0) 1713 continue; 1714 1715 reg = cpu_to_be32(-1); /* make sparse happy */ 1716 prom_getprop(node, "reg", ®, sizeof(reg)); 1717 cpu_no = be32_to_cpu(reg); 1718 1719 prom_debug("cpu hw idx = %lu\n", cpu_no); 1720 1721 /* Init the acknowledge var which will be reset by 1722 * the secondary cpu when it awakens from its OF 1723 * spinloop. 1724 */ 1725 *acknowledge = (unsigned long)-1; 1726 1727 if (cpu_no != prom.cpu) { 1728 /* Primary Thread of non-boot cpu or any thread */ 1729 prom_printf("starting cpu hw idx %lu... ", cpu_no); 1730 call_prom("start-cpu", 3, 0, node, 1731 secondary_hold, cpu_no); 1732 1733 for (i = 0; (i < 100000000) && 1734 (*acknowledge == ((unsigned long)-1)); i++ ) 1735 mb(); 1736 1737 if (*acknowledge == cpu_no) 1738 prom_printf("done\n"); 1739 else 1740 prom_printf("failed: %x\n", *acknowledge); 1741 } 1742 #ifdef CONFIG_SMP 1743 else 1744 prom_printf("boot cpu hw idx %lu\n", cpu_no); 1745 #endif /* CONFIG_SMP */ 1746 } 1747 1748 prom_debug("prom_hold_cpus: end...\n"); 1749 } 1750 1751 1752 static void __init prom_init_client_services(unsigned long pp) 1753 { 1754 /* Get a handle to the prom entry point before anything else */ 1755 prom_entry = pp; 1756 1757 /* get a handle for the stdout device */ 1758 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen")); 1759 if (!PHANDLE_VALID(prom.chosen)) 1760 prom_panic("cannot find chosen"); /* msg won't be printed :( */ 1761 1762 /* get device tree root */ 1763 prom.root = call_prom("finddevice", 1, 1, ADDR("/")); 1764 if (!PHANDLE_VALID(prom.root)) 1765 prom_panic("cannot find device tree root"); /* msg won't be printed :( */ 1766 1767 prom.mmumap = 0; 1768 } 1769 1770 #ifdef CONFIG_PPC32 1771 /* 1772 * For really old powermacs, we need to map things we claim. 1773 * For that, we need the ihandle of the mmu. 1774 * Also, on the longtrail, we need to work around other bugs. 
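 * Whatever we detect is recorded in of_workarounds: OF_WA_CLAIM means
 * claim physical and virtual separately and then map by hand (see
 * prom_claim()), and OF_WA_LONGTRAIL additionally routes setprop through
 * the "interpret" service (see prom_setprop()).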
1775 */ 1776 static void __init prom_find_mmu(void) 1777 { 1778 phandle oprom; 1779 char version[64]; 1780 1781 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom")); 1782 if (!PHANDLE_VALID(oprom)) 1783 return; 1784 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0) 1785 return; 1786 version[sizeof(version) - 1] = 0; 1787 /* XXX might need to add other versions here */ 1788 if (strcmp(version, "Open Firmware, 1.0.5") == 0) 1789 of_workarounds = OF_WA_CLAIM; 1790 else if (strncmp(version, "FirmWorks,3.", 12) == 0) { 1791 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL; 1792 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim"); 1793 } else 1794 return; 1795 prom.memory = call_prom("open", 1, 1, ADDR("/memory")); 1796 prom_getprop(prom.chosen, "mmu", &prom.mmumap, 1797 sizeof(prom.mmumap)); 1798 prom.mmumap = be32_to_cpu(prom.mmumap); 1799 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap)) 1800 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */ 1801 } 1802 #else 1803 #define prom_find_mmu() 1804 #endif 1805 1806 static void __init prom_init_stdout(void) 1807 { 1808 char *path = of_stdout_device; 1809 char type[16]; 1810 phandle stdout_node; 1811 __be32 val; 1812 1813 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0) 1814 prom_panic("cannot find stdout"); 1815 1816 prom.stdout = be32_to_cpu(val); 1817 1818 /* Get the full OF pathname of the stdout device */ 1819 memset(path, 0, 256); 1820 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255); 1821 prom_printf("OF stdout device is: %s\n", of_stdout_device); 1822 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path", 1823 path, strlen(path) + 1); 1824 1825 /* instance-to-package fails on PA-Semi */ 1826 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); 1827 if (stdout_node != PROM_ERROR) { 1828 val = cpu_to_be32(stdout_node); 1829 prom_setprop(prom.chosen, "/chosen", "linux,stdout-package", 1830 &val, sizeof(val)); 1831 1832 /* If it's a display, note it */ 1833 memset(type, 0, sizeof(type)); 1834 prom_getprop(stdout_node, "device_type", type, sizeof(type)); 1835 if (strcmp(type, "display") == 0) 1836 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0); 1837 } 1838 } 1839 1840 static int __init prom_find_machine_type(void) 1841 { 1842 char compat[256]; 1843 int len, i = 0; 1844 #ifdef CONFIG_PPC64 1845 phandle rtas; 1846 int x; 1847 #endif 1848 1849 /* Look for a PowerMac or a Cell */ 1850 len = prom_getprop(prom.root, "compatible", 1851 compat, sizeof(compat)-1); 1852 if (len > 0) { 1853 compat[len] = 0; 1854 while (i < len) { 1855 char *p = &compat[i]; 1856 int sl = strlen(p); 1857 if (sl == 0) 1858 break; 1859 if (strstr(p, "Power Macintosh") || 1860 strstr(p, "MacRISC")) 1861 return PLATFORM_POWERMAC; 1862 #ifdef CONFIG_PPC64 1863 /* We must make sure we don't detect the IBM Cell 1864 * blades as pSeries due to some firmware issues, 1865 * so we do it here. 1866 */ 1867 if (strstr(p, "IBM,CBEA") || 1868 strstr(p, "IBM,CPBW-1.0")) 1869 return PLATFORM_GENERIC; 1870 #endif /* CONFIG_PPC64 */ 1871 i += sl + 1; 1872 } 1873 } 1874 #ifdef CONFIG_PPC64 1875 /* Try to detect OPAL */ 1876 if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal")))) 1877 return PLATFORM_OPAL; 1878 1879 /* Try to figure out if it's an IBM pSeries or any other 1880 * PAPR compliant platform. We assume it is if : 1881 * - /device_type is "chrp" (please, do NOT use that for future 1882 * non-IBM designs ! 
 * - it has /rtas
 */
	len = prom_getprop(prom.root, "device_type",
			   compat, sizeof(compat)-1);
	if (len <= 0)
		return PLATFORM_GENERIC;
	if (strcmp(compat, "chrp"))
		return PLATFORM_GENERIC;

	/* Default to pSeries. We need to know if we are running LPAR */
	rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
	if (!PHANDLE_VALID(rtas))
		return PLATFORM_GENERIC;
	x = prom_getproplen(rtas, "ibm,hypertas-functions");
	if (x != PROM_ERROR) {
		prom_debug("Hypertas detected, assuming LPAR !\n");
		return PLATFORM_PSERIES_LPAR;
	}
	return PLATFORM_PSERIES;
#else
	return PLATFORM_GENERIC;
#endif
}

static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
{
	return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
}

/*
 * If we have a display that we don't know how to drive,
 * we will want to try to execute OF's open method for it
 * later. However, OF will probably fall over if we do that
 * after we've taken over the MMU.
 * So we check whether we will need to open the display,
 * and if so, open it now.
 */
static void __init prom_check_displays(void)
{
	char type[16], *path;
	phandle node;
	ihandle ih;
	int i;

	static unsigned char default_colors[] = {
		0x00, 0x00, 0x00,
		0x00, 0x00, 0xaa,
		0x00, 0xaa, 0x00,
		0x00, 0xaa, 0xaa,
		0xaa, 0x00, 0x00,
		0xaa, 0x00, 0xaa,
		0xaa, 0xaa, 0x00,
		0xaa, 0xaa, 0xaa,
		0x55, 0x55, 0x55,
		0x55, 0x55, 0xff,
		0x55, 0xff, 0x55,
		0x55, 0xff, 0xff,
		0xff, 0x55, 0x55,
		0xff, 0x55, 0xff,
		0xff, 0xff, 0x55,
		0xff, 0xff, 0xff
	};
	const unsigned char *clut;

	prom_debug("Looking for displays\n");
	for (node = 0; prom_next_node(&node); ) {
		memset(type, 0, sizeof(type));
		prom_getprop(node, "device_type", type, sizeof(type));
		if (strcmp(type, "display") != 0)
			continue;

		/* It seems OF doesn't null-terminate the path :-( */
		path = prom_scratch;
		memset(path, 0, PROM_SCRATCH_SIZE);

		/*
		 * leave some room at the end of the path for appending extra
		 * arguments
		 */
		if (call_prom("package-to-path", 3, 1, node, path,
			      PROM_SCRATCH_SIZE-10) == PROM_ERROR)
			continue;
		prom_printf("found display : %s, opening... ", path);

		ih = call_prom("open", 1, 1, path);
		if (ih == 0) {
			prom_printf("failed\n");
			continue;
		}

		/* Success */
		prom_printf("done\n");
		prom_setprop(node, path, "linux,opened", NULL, 0);

		/* Setup a usable color table when the appropriate
		 * method is available.
Should update this to set-colors */ 1979 clut = default_colors; 1980 for (i = 0; i < 16; i++, clut += 3) 1981 if (prom_set_color(ih, i, clut[0], clut[1], 1982 clut[2]) != 0) 1983 break; 1984 1985 #ifdef CONFIG_LOGO_LINUX_CLUT224 1986 clut = PTRRELOC(logo_linux_clut224.clut); 1987 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3) 1988 if (prom_set_color(ih, i + 32, clut[0], clut[1], 1989 clut[2]) != 0) 1990 break; 1991 #endif /* CONFIG_LOGO_LINUX_CLUT224 */ 1992 1993 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX 1994 if (prom_getprop(node, "linux,boot-display", NULL, 0) != 1995 PROM_ERROR) { 1996 u32 width, height, pitch, addr; 1997 1998 prom_printf("Setting btext !\n"); 1999 prom_getprop(node, "width", &width, 4); 2000 prom_getprop(node, "height", &height, 4); 2001 prom_getprop(node, "linebytes", &pitch, 4); 2002 prom_getprop(node, "address", &addr, 4); 2003 prom_printf("W=%d H=%d LB=%d addr=0x%x\n", 2004 width, height, pitch, addr); 2005 btext_setup_display(width, height, 8, pitch, addr); 2006 } 2007 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */ 2008 } 2009 } 2010 2011 2012 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */ 2013 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end, 2014 unsigned long needed, unsigned long align) 2015 { 2016 void *ret; 2017 2018 *mem_start = _ALIGN(*mem_start, align); 2019 while ((*mem_start + needed) > *mem_end) { 2020 unsigned long room, chunk; 2021 2022 prom_debug("Chunk exhausted, claiming more at %x...\n", 2023 alloc_bottom); 2024 room = alloc_top - alloc_bottom; 2025 if (room > DEVTREE_CHUNK_SIZE) 2026 room = DEVTREE_CHUNK_SIZE; 2027 if (room < PAGE_SIZE) 2028 prom_panic("No memory for flatten_device_tree " 2029 "(no room)\n"); 2030 chunk = alloc_up(room, 0); 2031 if (chunk == 0) 2032 prom_panic("No memory for flatten_device_tree " 2033 "(claim failed)\n"); 2034 *mem_end = chunk + room; 2035 } 2036 2037 ret = (void *)*mem_start; 2038 *mem_start += needed; 2039 2040 return ret; 2041 } 2042 2043 #define dt_push_token(token, mem_start, mem_end) do { \ 2044 void *room = make_room(mem_start, mem_end, 4, 4); \ 2045 *(__be32 *)room = cpu_to_be32(token); \ 2046 } while(0) 2047 2048 static unsigned long __init dt_find_string(char *str) 2049 { 2050 char *s, *os; 2051 2052 s = os = (char *)dt_string_start; 2053 s += 4; 2054 while (s < (char *)dt_string_end) { 2055 if (strcmp(s, str) == 0) 2056 return s - os; 2057 s += strlen(s) + 1; 2058 } 2059 return 0; 2060 } 2061 2062 /* 2063 * The Open Firmware 1275 specification states properties must be 31 bytes or 2064 * less, however not all firmwares obey this. Make it 64 bytes to be safe. 2065 */ 2066 #define MAX_PROPERTY_NAME 64 2067 2068 static void __init scan_dt_build_strings(phandle node, 2069 unsigned long *mem_start, 2070 unsigned long *mem_end) 2071 { 2072 char *prev_name, *namep, *sstart; 2073 unsigned long soff; 2074 phandle child; 2075 2076 sstart = (char *)dt_string_start; 2077 2078 /* get and store all property names */ 2079 prev_name = ""; 2080 for (;;) { 2081 /* 64 is max len of name including nul. 
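		 * (MAX_PROPERTY_NAME above; the space claimed below is handed
		 *  back again when the name is already in the string table or
		 *  there are no more properties.)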
*/ 2082 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1); 2083 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) { 2084 /* No more nodes: unwind alloc */ 2085 *mem_start = (unsigned long)namep; 2086 break; 2087 } 2088 2089 /* skip "name" */ 2090 if (strcmp(namep, "name") == 0) { 2091 *mem_start = (unsigned long)namep; 2092 prev_name = "name"; 2093 continue; 2094 } 2095 /* get/create string entry */ 2096 soff = dt_find_string(namep); 2097 if (soff != 0) { 2098 *mem_start = (unsigned long)namep; 2099 namep = sstart + soff; 2100 } else { 2101 /* Trim off some if we can */ 2102 *mem_start = (unsigned long)namep + strlen(namep) + 1; 2103 dt_string_end = *mem_start; 2104 } 2105 prev_name = namep; 2106 } 2107 2108 /* do all our children */ 2109 child = call_prom("child", 1, 1, node); 2110 while (child != 0) { 2111 scan_dt_build_strings(child, mem_start, mem_end); 2112 child = call_prom("peer", 1, 1, child); 2113 } 2114 } 2115 2116 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, 2117 unsigned long *mem_end) 2118 { 2119 phandle child; 2120 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path; 2121 unsigned long soff; 2122 unsigned char *valp; 2123 static char pname[MAX_PROPERTY_NAME]; 2124 int l, room, has_phandle = 0; 2125 2126 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end); 2127 2128 /* get the node's full name */ 2129 namep = (char *)*mem_start; 2130 room = *mem_end - *mem_start; 2131 if (room > 255) 2132 room = 255; 2133 l = call_prom("package-to-path", 3, 1, node, namep, room); 2134 if (l >= 0) { 2135 /* Didn't fit? Get more room. */ 2136 if (l >= room) { 2137 if (l >= *mem_end - *mem_start) 2138 namep = make_room(mem_start, mem_end, l+1, 1); 2139 call_prom("package-to-path", 3, 1, node, namep, l); 2140 } 2141 namep[l] = '\0'; 2142 2143 /* Fixup an Apple bug where they have bogus \0 chars in the 2144 * middle of the path in some properties, and extract 2145 * the unit name (everything after the last '/'). 
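		 * (The copy below skips NUL bytes and restarts the output
		 *  pointer at every '/', so namep is left holding only the
		 *  final path component.)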
2146 */ 2147 for (lp = p = namep, ep = namep + l; p < ep; p++) { 2148 if (*p == '/') 2149 lp = namep; 2150 else if (*p != 0) 2151 *lp++ = *p; 2152 } 2153 *lp = 0; 2154 *mem_start = _ALIGN((unsigned long)lp + 1, 4); 2155 } 2156 2157 /* get it again for debugging */ 2158 path = prom_scratch; 2159 memset(path, 0, PROM_SCRATCH_SIZE); 2160 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1); 2161 2162 /* get and store all properties */ 2163 prev_name = ""; 2164 sstart = (char *)dt_string_start; 2165 for (;;) { 2166 if (call_prom("nextprop", 3, 1, node, prev_name, 2167 pname) != 1) 2168 break; 2169 2170 /* skip "name" */ 2171 if (strcmp(pname, "name") == 0) { 2172 prev_name = "name"; 2173 continue; 2174 } 2175 2176 /* find string offset */ 2177 soff = dt_find_string(pname); 2178 if (soff == 0) { 2179 prom_printf("WARNING: Can't find string index for" 2180 " <%s>, node %s\n", pname, path); 2181 break; 2182 } 2183 prev_name = sstart + soff; 2184 2185 /* get length */ 2186 l = call_prom("getproplen", 2, 1, node, pname); 2187 2188 /* sanity checks */ 2189 if (l == PROM_ERROR) 2190 continue; 2191 2192 /* push property head */ 2193 dt_push_token(OF_DT_PROP, mem_start, mem_end); 2194 dt_push_token(l, mem_start, mem_end); 2195 dt_push_token(soff, mem_start, mem_end); 2196 2197 /* push property content */ 2198 valp = make_room(mem_start, mem_end, l, 4); 2199 call_prom("getprop", 4, 1, node, pname, valp, l); 2200 *mem_start = _ALIGN(*mem_start, 4); 2201 2202 if (!strcmp(pname, "phandle")) 2203 has_phandle = 1; 2204 } 2205 2206 /* Add a "linux,phandle" property if no "phandle" property already 2207 * existed (can happen with OPAL) 2208 */ 2209 if (!has_phandle) { 2210 soff = dt_find_string("linux,phandle"); 2211 if (soff == 0) 2212 prom_printf("WARNING: Can't find string index for" 2213 " <linux-phandle> node %s\n", path); 2214 else { 2215 dt_push_token(OF_DT_PROP, mem_start, mem_end); 2216 dt_push_token(4, mem_start, mem_end); 2217 dt_push_token(soff, mem_start, mem_end); 2218 valp = make_room(mem_start, mem_end, 4, 4); 2219 *(__be32 *)valp = cpu_to_be32(node); 2220 } 2221 } 2222 2223 /* do all our children */ 2224 child = call_prom("child", 1, 1, node); 2225 while (child != 0) { 2226 scan_dt_build_struct(child, mem_start, mem_end); 2227 child = call_prom("peer", 1, 1, child); 2228 } 2229 2230 dt_push_token(OF_DT_END_NODE, mem_start, mem_end); 2231 } 2232 2233 static void __init flatten_device_tree(void) 2234 { 2235 phandle root; 2236 unsigned long mem_start, mem_end, room; 2237 struct boot_param_header *hdr; 2238 char *namep; 2239 u64 *rsvmap; 2240 2241 /* 2242 * Check how much room we have between alloc top & bottom (+/- a 2243 * few pages), crop to 1MB, as this is our "chunk" size 2244 */ 2245 room = alloc_top - alloc_bottom - 0x4000; 2246 if (room > DEVTREE_CHUNK_SIZE) 2247 room = DEVTREE_CHUNK_SIZE; 2248 prom_debug("starting device tree allocs at %x\n", alloc_bottom); 2249 2250 /* Now try to claim that */ 2251 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE); 2252 if (mem_start == 0) 2253 prom_panic("Can't allocate initial device-tree chunk\n"); 2254 mem_end = mem_start + room; 2255 2256 /* Get root of tree */ 2257 root = call_prom("peer", 1, 1, (phandle)0); 2258 if (root == (phandle)0) 2259 prom_panic ("couldn't get device tree root\n"); 2260 2261 /* Build header and make room for mem rsv map */ 2262 mem_start = _ALIGN(mem_start, 4); 2263 hdr = make_room(&mem_start, &mem_end, 2264 sizeof(struct boot_param_header), 4); 2265 dt_header_start = (unsigned long)hdr; 2266 rsvmap = 
make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8); 2267 2268 /* Start of strings */ 2269 mem_start = PAGE_ALIGN(mem_start); 2270 dt_string_start = mem_start; 2271 mem_start += 4; /* hole */ 2272 2273 /* Add "linux,phandle" in there, we'll need it */ 2274 namep = make_room(&mem_start, &mem_end, 16, 1); 2275 strcpy(namep, "linux,phandle"); 2276 mem_start = (unsigned long)namep + strlen(namep) + 1; 2277 2278 /* Build string array */ 2279 prom_printf("Building dt strings...\n"); 2280 scan_dt_build_strings(root, &mem_start, &mem_end); 2281 dt_string_end = mem_start; 2282 2283 /* Build structure */ 2284 mem_start = PAGE_ALIGN(mem_start); 2285 dt_struct_start = mem_start; 2286 prom_printf("Building dt structure...\n"); 2287 scan_dt_build_struct(root, &mem_start, &mem_end); 2288 dt_push_token(OF_DT_END, &mem_start, &mem_end); 2289 dt_struct_end = PAGE_ALIGN(mem_start); 2290 2291 /* Finish header */ 2292 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu); 2293 hdr->magic = cpu_to_be32(OF_DT_HEADER); 2294 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start); 2295 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start); 2296 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start); 2297 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start); 2298 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start); 2299 hdr->version = cpu_to_be32(OF_DT_VERSION); 2300 /* Version 16 is not backward compatible */ 2301 hdr->last_comp_version = cpu_to_be32(0x10); 2302 2303 /* Copy the reserve map in */ 2304 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map)); 2305 2306 #ifdef DEBUG_PROM 2307 { 2308 int i; 2309 prom_printf("reserved memory map:\n"); 2310 for (i = 0; i < mem_reserve_cnt; i++) 2311 prom_printf(" %x - %x\n", 2312 be64_to_cpu(mem_reserve_map[i].base), 2313 be64_to_cpu(mem_reserve_map[i].size)); 2314 } 2315 #endif 2316 /* Bump mem_reserve_cnt to cause further reservations to fail 2317 * since it's too late. 2318 */ 2319 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE; 2320 2321 prom_printf("Device tree strings 0x%x -> 0x%x\n", 2322 dt_string_start, dt_string_end); 2323 prom_printf("Device tree struct 0x%x -> 0x%x\n", 2324 dt_struct_start, dt_struct_end); 2325 } 2326 2327 #ifdef CONFIG_PPC_MAPLE 2328 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property. 2329 * The values are bad, and it doesn't even have the right number of cells. 
*/ 2330 static void __init fixup_device_tree_maple(void) 2331 { 2332 phandle isa; 2333 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */ 2334 u32 isa_ranges[6]; 2335 char *name; 2336 2337 name = "/ht@0/isa@4"; 2338 isa = call_prom("finddevice", 1, 1, ADDR(name)); 2339 if (!PHANDLE_VALID(isa)) { 2340 name = "/ht@0/isa@6"; 2341 isa = call_prom("finddevice", 1, 1, ADDR(name)); 2342 rloc = 0x01003000; /* IO space; PCI device = 6 */ 2343 } 2344 if (!PHANDLE_VALID(isa)) 2345 return; 2346 2347 if (prom_getproplen(isa, "ranges") != 12) 2348 return; 2349 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges)) 2350 == PROM_ERROR) 2351 return; 2352 2353 if (isa_ranges[0] != 0x1 || 2354 isa_ranges[1] != 0xf4000000 || 2355 isa_ranges[2] != 0x00010000) 2356 return; 2357 2358 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n"); 2359 2360 isa_ranges[0] = 0x1; 2361 isa_ranges[1] = 0x0; 2362 isa_ranges[2] = rloc; 2363 isa_ranges[3] = 0x0; 2364 isa_ranges[4] = 0x0; 2365 isa_ranges[5] = 0x00010000; 2366 prom_setprop(isa, name, "ranges", 2367 isa_ranges, sizeof(isa_ranges)); 2368 } 2369 2370 #define CPC925_MC_START 0xf8000000 2371 #define CPC925_MC_LENGTH 0x1000000 2372 /* The values for memory-controller don't have right number of cells */ 2373 static void __init fixup_device_tree_maple_memory_controller(void) 2374 { 2375 phandle mc; 2376 u32 mc_reg[4]; 2377 char *name = "/hostbridge@f8000000"; 2378 u32 ac, sc; 2379 2380 mc = call_prom("finddevice", 1, 1, ADDR(name)); 2381 if (!PHANDLE_VALID(mc)) 2382 return; 2383 2384 if (prom_getproplen(mc, "reg") != 8) 2385 return; 2386 2387 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac)); 2388 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc)); 2389 if ((ac != 2) || (sc != 2)) 2390 return; 2391 2392 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR) 2393 return; 2394 2395 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH) 2396 return; 2397 2398 prom_printf("Fixing up bogus hostbridge on Maple...\n"); 2399 2400 mc_reg[0] = 0x0; 2401 mc_reg[1] = CPC925_MC_START; 2402 mc_reg[2] = 0x0; 2403 mc_reg[3] = CPC925_MC_LENGTH; 2404 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg)); 2405 } 2406 #else 2407 #define fixup_device_tree_maple() 2408 #define fixup_device_tree_maple_memory_controller() 2409 #endif 2410 2411 #ifdef CONFIG_PPC_CHRP 2412 /* 2413 * Pegasos and BriQ lacks the "ranges" property in the isa node 2414 * Pegasos needs decimal IRQ 14/15, not hexadecimal 2415 * Pegasos has the IDE configured in legacy mode, but advertised as native 2416 */ 2417 static void __init fixup_device_tree_chrp(void) 2418 { 2419 phandle ph; 2420 u32 prop[6]; 2421 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */ 2422 char *name; 2423 int rc; 2424 2425 name = "/pci@80000000/isa@c"; 2426 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2427 if (!PHANDLE_VALID(ph)) { 2428 name = "/pci@ff500000/isa@6"; 2429 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2430 rloc = 0x01003000; /* IO space; PCI device = 6 */ 2431 } 2432 if (PHANDLE_VALID(ph)) { 2433 rc = prom_getproplen(ph, "ranges"); 2434 if (rc == 0 || rc == PROM_ERROR) { 2435 prom_printf("Fixing up missing ISA range on Pegasos...\n"); 2436 2437 prop[0] = 0x1; 2438 prop[1] = 0x0; 2439 prop[2] = rloc; 2440 prop[3] = 0x0; 2441 prop[4] = 0x0; 2442 prop[5] = 0x00010000; 2443 prom_setprop(ph, name, "ranges", prop, sizeof(prop)); 2444 } 2445 } 2446 2447 name = "/pci@80000000/ide@C,1"; 2448 ph = call_prom("finddevice", 1, 1, ADDR(name)); 2449 if (PHANDLE_VALID(ph)) { 2450 
prom_printf("Fixing up IDE interrupt on Pegasos...\n"); 2451 prop[0] = 14; 2452 prop[1] = 0x0; 2453 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32)); 2454 prom_printf("Fixing up IDE class-code on Pegasos...\n"); 2455 rc = prom_getprop(ph, "class-code", prop, sizeof(u32)); 2456 if (rc == sizeof(u32)) { 2457 prop[0] &= ~0x5; 2458 prom_setprop(ph, name, "class-code", prop, sizeof(u32)); 2459 } 2460 } 2461 } 2462 #else 2463 #define fixup_device_tree_chrp() 2464 #endif 2465 2466 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC) 2467 static void __init fixup_device_tree_pmac(void) 2468 { 2469 phandle u3, i2c, mpic; 2470 u32 u3_rev; 2471 u32 interrupts[2]; 2472 u32 parent; 2473 2474 /* Some G5s have a missing interrupt definition, fix it up here */ 2475 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000")); 2476 if (!PHANDLE_VALID(u3)) 2477 return; 2478 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000")); 2479 if (!PHANDLE_VALID(i2c)) 2480 return; 2481 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000")); 2482 if (!PHANDLE_VALID(mpic)) 2483 return; 2484 2485 /* check if proper rev of u3 */ 2486 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev)) 2487 == PROM_ERROR) 2488 return; 2489 if (u3_rev < 0x35 || u3_rev > 0x39) 2490 return; 2491 /* does it need fixup ? */ 2492 if (prom_getproplen(i2c, "interrupts") > 0) 2493 return; 2494 2495 prom_printf("fixing up bogus interrupts for u3 i2c...\n"); 2496 2497 /* interrupt on this revision of u3 is number 0 and level */ 2498 interrupts[0] = 0; 2499 interrupts[1] = 1; 2500 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts", 2501 &interrupts, sizeof(interrupts)); 2502 parent = (u32)mpic; 2503 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent", 2504 &parent, sizeof(parent)); 2505 } 2506 #else 2507 #define fixup_device_tree_pmac() 2508 #endif 2509 2510 #ifdef CONFIG_PPC_EFIKA 2511 /* 2512 * The MPC5200 FEC driver requires an phy-handle property to tell it how 2513 * to talk to the phy. If the phy-handle property is missing, then this 2514 * function is called to add the appropriate nodes and link it to the 2515 * ethernet node. 2516 */ 2517 static void __init fixup_device_tree_efika_add_phy(void) 2518 { 2519 u32 node; 2520 char prop[64]; 2521 int rv; 2522 2523 /* Check if /builtin/ethernet exists - bail if it doesn't */ 2524 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet")); 2525 if (!PHANDLE_VALID(node)) 2526 return; 2527 2528 /* Check if the phy-handle property exists - bail if it does */ 2529 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop)); 2530 if (!rv) 2531 return; 2532 2533 /* 2534 * At this point the ethernet device doesn't have a phy described. 
 * Now we need to add the missing phy node and linkage
 */

	/* Check for an MDIO bus node - if missing then create one */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
	if (!PHANDLE_VALID(node)) {
		prom_printf("Adding Ethernet MDIO node\n");
		call_prom("interpret", 1, 1,
			" s\" /builtin\" find-device"
			" new-device"
				" 1 encode-int s\" #address-cells\" property"
				" 0 encode-int s\" #size-cells\" property"
				" s\" mdio\" device-name"
				" s\" fsl,mpc5200b-mdio\" encode-string"
				" s\" compatible\" property"
				" 0xf0003000 0x400 reg"
				" 0x2 encode-int"
				" 0x5 encode-int encode+"
				" 0x3 encode-int encode+"
				" s\" interrupts\" property"
			" finish-device");
	}

	/* Check for a PHY device node - if missing then create one and
	 * give its phandle to the ethernet node */
	node = call_prom("finddevice", 1, 1,
			 ADDR("/builtin/mdio/ethernet-phy"));
	if (!PHANDLE_VALID(node)) {
		prom_printf("Adding Ethernet PHY node\n");
		call_prom("interpret", 1, 1,
			" s\" /builtin/mdio\" find-device"
			" new-device"
				" s\" ethernet-phy\" device-name"
				" 0x10 encode-int s\" reg\" property"
				" my-self"
				" ihandle>phandle"
			" finish-device"
			" s\" /builtin/ethernet\" find-device"
				" encode-int"
				" s\" phy-handle\" property"
			" device-end");
	}
}

static void __init fixup_device_tree_efika(void)
{
	int sound_irq[3] = { 2, 2, 0 };
	int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
				3,4,0, 3,5,0, 3,6,0, 3,7,0,
				3,8,0, 3,9,0, 3,10,0, 3,11,0,
				3,12,0, 3,13,0, 3,14,0, 3,15,0 };
	u32 node;
	char prop[64];
	int rv, len;

	/* Check if we're really running on an EFIKA */
	node = call_prom("finddevice", 1, 1, ADDR("/"));
	if (!PHANDLE_VALID(node))
		return;

	rv = prom_getprop(node, "model", prop, sizeof(prop));
	if (rv == PROM_ERROR)
		return;
	if (strcmp(prop, "EFIKA5K2"))
		return;

	prom_printf("Applying EFIKA device tree fixups\n");

	/* Claiming to be 'chrp' is death */
	node = call_prom("finddevice", 1, 1, ADDR("/"));
	rv = prom_getprop(node, "device_type", prop, sizeof(prop));
	if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
		prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));

	/* CODEGEN,description is exposed in /proc/cpuinfo so
	   fix that too */
	rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
	if (rv != PROM_ERROR && (strstr(prop, "CHRP")))
		prom_setprop(node, "/", "CODEGEN,description",
			     "Efika 5200B PowerPC System",
			     sizeof("Efika 5200B PowerPC System"));

	/* Fixup bestcomm interrupts property */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
	if (PHANDLE_VALID(node)) {
		len = prom_getproplen(node, "interrupts");
		if (len == 12) {
			prom_printf("Fixing bestcomm interrupts property\n");
			prom_setprop(node, "/builtin/bestcomm", "interrupts",
				     bcomm_irq, sizeof(bcomm_irq));
		}
	}

	/* Fixup sound interrupts property */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
	if (PHANDLE_VALID(node)) {
		rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
		if (rv == PROM_ERROR) {
			prom_printf("Adding sound interrupts property\n");
			prom_setprop(node, "/builtin/sound", "interrupts",
				     sound_irq, sizeof(sound_irq));
		}
	}

	/*
Make sure ethernet phy-handle property exists */ 2640 fixup_device_tree_efika_add_phy(); 2641 } 2642 #else 2643 #define fixup_device_tree_efika() 2644 #endif 2645 2646 static void __init fixup_device_tree(void) 2647 { 2648 fixup_device_tree_maple(); 2649 fixup_device_tree_maple_memory_controller(); 2650 fixup_device_tree_chrp(); 2651 fixup_device_tree_pmac(); 2652 fixup_device_tree_efika(); 2653 } 2654 2655 static void __init prom_find_boot_cpu(void) 2656 { 2657 __be32 rval; 2658 ihandle prom_cpu; 2659 phandle cpu_pkg; 2660 2661 rval = 0; 2662 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0) 2663 return; 2664 prom_cpu = be32_to_cpu(rval); 2665 2666 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu); 2667 2668 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval)); 2669 prom.cpu = be32_to_cpu(rval); 2670 2671 prom_debug("Booting CPU hw index = %lu\n", prom.cpu); 2672 } 2673 2674 static void __init prom_check_initrd(unsigned long r3, unsigned long r4) 2675 { 2676 #ifdef CONFIG_BLK_DEV_INITRD 2677 if (r3 && r4 && r4 != 0xdeadbeef) { 2678 __be64 val; 2679 2680 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3; 2681 prom_initrd_end = prom_initrd_start + r4; 2682 2683 val = cpu_to_be64(prom_initrd_start); 2684 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start", 2685 &val, sizeof(val)); 2686 val = cpu_to_be64(prom_initrd_end); 2687 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end", 2688 &val, sizeof(val)); 2689 2690 reserve_mem(prom_initrd_start, 2691 prom_initrd_end - prom_initrd_start); 2692 2693 prom_debug("initrd_start=0x%x\n", prom_initrd_start); 2694 prom_debug("initrd_end=0x%x\n", prom_initrd_end); 2695 } 2696 #endif /* CONFIG_BLK_DEV_INITRD */ 2697 } 2698 2699 #ifdef CONFIG_PPC64 2700 #ifdef CONFIG_RELOCATABLE 2701 static void reloc_toc(void) 2702 { 2703 } 2704 2705 static void unreloc_toc(void) 2706 { 2707 } 2708 #else 2709 static void __reloc_toc(unsigned long offset, unsigned long nr_entries) 2710 { 2711 unsigned long i; 2712 unsigned long *toc_entry; 2713 2714 /* Get the start of the TOC by using r2 directly. */ 2715 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry)); 2716 2717 for (i = 0; i < nr_entries; i++) { 2718 *toc_entry = *toc_entry + offset; 2719 toc_entry++; 2720 } 2721 } 2722 2723 static void reloc_toc(void) 2724 { 2725 unsigned long offset = reloc_offset(); 2726 unsigned long nr_entries = 2727 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); 2728 2729 __reloc_toc(offset, nr_entries); 2730 2731 mb(); 2732 } 2733 2734 static void unreloc_toc(void) 2735 { 2736 unsigned long offset = reloc_offset(); 2737 unsigned long nr_entries = 2738 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); 2739 2740 mb(); 2741 2742 __reloc_toc(-offset, nr_entries); 2743 } 2744 #endif 2745 #endif 2746 2747 /* 2748 * We enter here early on, when the Open Firmware prom is still 2749 * handling exceptions and the MMU hash table for us. 
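 * (r3/r4 may describe an initrd, pp is the Open Firmware client-interface
 * entry point handed to prom_init_client_services(), and kbase is the
 * address the kernel image is currently running at.)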
2750 */ 2751 2752 unsigned long __init prom_init(unsigned long r3, unsigned long r4, 2753 unsigned long pp, 2754 unsigned long r6, unsigned long r7, 2755 unsigned long kbase) 2756 { 2757 unsigned long hdr; 2758 2759 #ifdef CONFIG_PPC32 2760 unsigned long offset = reloc_offset(); 2761 reloc_got2(offset); 2762 #else 2763 reloc_toc(); 2764 #endif 2765 2766 /* 2767 * First zero the BSS 2768 */ 2769 memset(&__bss_start, 0, __bss_stop - __bss_start); 2770 2771 /* 2772 * Init interface to Open Firmware, get some node references, 2773 * like /chosen 2774 */ 2775 prom_init_client_services(pp); 2776 2777 /* 2778 * See if this OF is old enough that we need to do explicit maps 2779 * and other workarounds 2780 */ 2781 prom_find_mmu(); 2782 2783 /* 2784 * Init prom stdout device 2785 */ 2786 prom_init_stdout(); 2787 2788 prom_printf("Preparing to boot %s", linux_banner); 2789 2790 /* 2791 * Get default machine type. At this point, we do not differentiate 2792 * between pSeries SMP and pSeries LPAR 2793 */ 2794 of_platform = prom_find_machine_type(); 2795 prom_printf("Detected machine type: %x\n", of_platform); 2796 2797 #ifndef CONFIG_NONSTATIC_KERNEL 2798 /* Bail if this is a kdump kernel. */ 2799 if (PHYSICAL_START > 0) 2800 prom_panic("Error: You can't boot a kdump kernel from OF!\n"); 2801 #endif 2802 2803 /* 2804 * Check for an initrd 2805 */ 2806 prom_check_initrd(r3, r4); 2807 2808 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) 2809 /* 2810 * On pSeries, inform the firmware about our capabilities 2811 */ 2812 if (of_platform == PLATFORM_PSERIES || 2813 of_platform == PLATFORM_PSERIES_LPAR) 2814 prom_send_capabilities(); 2815 #endif 2816 2817 /* 2818 * Copy the CPU hold code 2819 */ 2820 if (of_platform != PLATFORM_POWERMAC) 2821 copy_and_flush(0, kbase, 0x100, 0); 2822 2823 /* 2824 * Do early parsing of command line 2825 */ 2826 early_cmdline_parse(); 2827 2828 /* 2829 * Initialize memory management within prom_init 2830 */ 2831 prom_init_mem(); 2832 2833 /* 2834 * Determine which cpu is actually running right _now_ 2835 */ 2836 prom_find_boot_cpu(); 2837 2838 /* 2839 * Initialize display devices 2840 */ 2841 prom_check_displays(); 2842 2843 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__) 2844 /* 2845 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else 2846 * that uses the allocator, we need to make sure we get the top of memory 2847 * available for us here... 2848 */ 2849 if (of_platform == PLATFORM_PSERIES) 2850 prom_initialize_tce_table(); 2851 #endif 2852 2853 /* 2854 * On non-powermacs, try to instantiate RTAS. PowerMacs don't 2855 * have a usable RTAS implementation. 2856 */ 2857 if (of_platform != PLATFORM_POWERMAC && 2858 of_platform != PLATFORM_OPAL) 2859 prom_instantiate_rtas(); 2860 2861 #ifdef CONFIG_PPC_POWERNV 2862 if (of_platform == PLATFORM_OPAL) 2863 prom_instantiate_opal(); 2864 #endif /* CONFIG_PPC_POWERNV */ 2865 2866 #ifdef CONFIG_PPC64 2867 /* instantiate sml */ 2868 prom_instantiate_sml(); 2869 #endif 2870 2871 /* 2872 * On non-powermacs, put all CPUs in spin-loops. 
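	 * (prom_hold_cpus() parks the secondary CPUs in a spin loop for the
	 *  kernel to release later during SMP bring-up.)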
	 *
	 * PowerMacs use a different mechanism to spin CPUs
	 *
	 * (This must be done after instantiating RTAS)
	 */
	if (of_platform != PLATFORM_POWERMAC &&
	    of_platform != PLATFORM_OPAL)
		prom_hold_cpus();

	/*
	 * Fill in some info for use by the kernel later on
	 */
	if (prom_memory_limit) {
		__be64 val = cpu_to_be64(prom_memory_limit);
		prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
			     &val, sizeof(val));
	}
#ifdef CONFIG_PPC64
	if (prom_iommu_off)
		prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
			     NULL, 0);

	if (prom_iommu_force_on)
		prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
			     NULL, 0);

	if (prom_tce_alloc_start) {
		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
			     &prom_tce_alloc_start,
			     sizeof(prom_tce_alloc_start));
		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
			     &prom_tce_alloc_end,
			     sizeof(prom_tce_alloc_end));
	}
#endif

	/*
	 * Fixup any known bugs in the device-tree
	 */
	fixup_device_tree();

	/*
	 * Now finally create the flattened device-tree
	 */
	prom_printf("copying OF device tree...\n");
	flatten_device_tree();

	/*
	 * In case stdin is USB and still active on IBM machines...
	 * Unfortunately quiesce crashes on some powermacs if we have
	 * closed stdin already (in particular the powerbook 101). It
	 * appears that the OPAL version of OFW doesn't like it either.
	 */
	if (of_platform != PLATFORM_POWERMAC &&
	    of_platform != PLATFORM_OPAL)
		prom_close_stdin();

	/*
	 * Call OF "quiesce" method to shut down pending DMAs from
	 * devices etc...
	 */
	prom_printf("Quiescing Open Firmware ...\n");
	call_prom("quiesce", 0, 0);

	/*
	 * And finally, call the kernel passing it the flattened device
	 * tree and NULL as r5, thus triggering the new entry point which
	 * is common to us and kexec
	 */
	hdr = dt_header_start;

	/* Don't print anything after quiesce under OPAL, it crashes OFW */
	if (of_platform != PLATFORM_OPAL) {
		prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
		prom_debug("->dt_header_start=0x%x\n", hdr);
	}

#ifdef CONFIG_PPC32
	reloc_got2(-offset);
#else
	unreloc_toc();
#endif

#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* OPAL early debug gets the OPAL base & entry in r8 and r9 */
	__start(hdr, kbase, 0, 0, 0,
		prom_opal_base, prom_opal_entry);
#else
	__start(hdr, kbase, 0, 0, 0, 0, 0);
#endif

	return 0;
}