1 /* 2 * Procedures for creating, accessing and interpreting the device tree. 3 * 4 * Paul Mackerras August 1996. 5 * Copyright (C) 1996-2005 Paul Mackerras. 6 * 7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. 8 * {engebret|bergner}@us.ibm.com 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public License 12 * as published by the Free Software Foundation; either version 13 * 2 of the License, or (at your option) any later version. 14 */ 15 16 #undef DEBUG 17 18 #include <stdarg.h> 19 #include <linux/kernel.h> 20 #include <linux/string.h> 21 #include <linux/init.h> 22 #include <linux/threads.h> 23 #include <linux/spinlock.h> 24 #include <linux/types.h> 25 #include <linux/pci.h> 26 #include <linux/stringify.h> 27 #include <linux/delay.h> 28 #include <linux/initrd.h> 29 #include <linux/bitops.h> 30 #include <linux/module.h> 31 #include <linux/kexec.h> 32 #include <linux/debugfs.h> 33 #include <linux/irq.h> 34 35 #include <asm/prom.h> 36 #include <asm/rtas.h> 37 #include <asm/lmb.h> 38 #include <asm/page.h> 39 #include <asm/processor.h> 40 #include <asm/irq.h> 41 #include <asm/io.h> 42 #include <asm/kdump.h> 43 #include <asm/smp.h> 44 #include <asm/system.h> 45 #include <asm/mmu.h> 46 #include <asm/pgtable.h> 47 #include <asm/pci.h> 48 #include <asm/iommu.h> 49 #include <asm/btext.h> 50 #include <asm/sections.h> 51 #include <asm/machdep.h> 52 #include <asm/pSeries_reconfig.h> 53 #include <asm/pci-bridge.h> 54 #include <asm/kexec.h> 55 #include <asm/system.h> 56 57 #ifdef DEBUG 58 #define DBG(fmt...) printk(KERN_ERR fmt) 59 #else 60 #define DBG(fmt...) 
#endif


/* #address-cells / #size-cells of the flat tree's root node, cached by
 * early_init_dt_scan_root() and used when walking "reg"-style properties. */
static int __initdata dt_root_addr_cells;
static int __initdata dt_root_size_cells;

#ifdef CONFIG_PPC64
/* Set from /chosen properties in early_init_dt_scan_chosen(). */
int __initdata iommu_is_off;
int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;
#endif

/* One 32-bit cell of the flattened device tree. */
typedef u32 cell_t;

#if 0
static struct boot_param_header *initial_boot_params __initdata;
#else
struct boot_param_header *initial_boot_params;
#endif

/* Head of the global list linking all nodes through their allnext field. */
static struct device_node *allnodes = NULL;

/* use when traversing tree through the allnext, child, sibling,
 * or parent members of struct device_node.
 */
static DEFINE_RWLOCK(devtree_lock);

/* export that to outside world */
struct device_node *of_chosen;

/* Return a pointer into the flat tree's strings block at @offset. */
static inline char *find_flat_dt_string(u32 offset)
{
	return ((char *)initial_boot_params) +
		initial_boot_params->off_dt_strings + offset;
}

/**
 * This function is used to scan the flattened device-tree, it is
 * used to extract the memory informations at boot before we can
 * unflatten the tree.
 *
 * @it is invoked for each OF_DT_BEGIN_NODE with the node's structure
 * offset, its unit name (last path component), and its depth; scanning
 * stops early if @it returns non-zero, and that value is returned.
 */
int __init of_scan_flat_dt(int (*it)(unsigned long node,
				     const char *uname, int depth,
				     void *data),
			   void *data)
{
	unsigned long p = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	int rc = 0;
	int depth = -1;

	do {
		u32 tag = *((u32 *)p);
		char *pathp;

		p += 4;
		if (tag == OF_DT_END_NODE) {
			depth --;
			continue;
		}
		if (tag == OF_DT_NOP)
			continue;
		if (tag == OF_DT_END)
			break;
		if (tag == OF_DT_PROP) {
			u32 sz = *((u32 *)p);
			p += 8;
			/* flat-tree versions < 0x10 align 8-byte-or-larger
			 * property values to an 8-byte boundary */
			if (initial_boot_params->version < 0x10)
				p = _ALIGN(p, sz >= 8 ? 8 : 4);
			p += sz;
			p = _ALIGN(p, 4);
			continue;
		}
		if (tag != OF_DT_BEGIN_NODE) {
			printk(KERN_WARNING "Invalid tag %x scanning flattened"
			       " device tree !\n", tag);
			return -EINVAL;
		}
		depth++;
		pathp = (char *)p;
		p = _ALIGN(p + strlen(pathp) + 1, 4);
		/* older trees store the full path here; pass only the last
		 * path component to the callback */
		if ((*pathp) == '/') {
			char *lp, *np;
			for (lp = NULL, np = pathp; *np; np++)
				if ((*np) == '/')
					lp = np+1;
			if (lp != NULL)
				pathp = lp;
		}
		rc = it(p, pathp, depth, data);
		if (rc != 0)
			break;
	} while(1);

	return rc;
}

/* Return the structure-block offset just past the root node's name,
 * i.e. the "node" argument a scan callback would get for the root. */
unsigned long __init of_get_flat_dt_root(void)
{
	unsigned long p = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;

	while(*((u32 *)p) == OF_DT_NOP)
		p += 4;
	BUG_ON (*((u32 *)p) != OF_DT_BEGIN_NODE);
	p += 4;
	return _ALIGN(p + strlen((char *)p) + 1, 4);
}

/**
 * This function can be used within scan_flattened_dt callback to get
 * access to properties.
 *
 * Returns a pointer to the value of property @name of the node at
 * structure offset @node, or NULL if absent; if @size is non-NULL it
 * receives the property length in bytes.
 */
void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
				 unsigned long *size)
{
	unsigned long p = node;

	do {
		u32 tag = *((u32 *)p);
		u32 sz, noff;
		const char *nstr;

		p += 4;
		if (tag == OF_DT_NOP)
			continue;
		/* any non-property tag ends this node's property list */
		if (tag != OF_DT_PROP)
			return NULL;

		sz = *((u32 *)p);
		noff = *((u32 *)(p + 4));
		p += 8;
		/* flat-tree versions < 0x10 align large values to 8 bytes */
		if (initial_boot_params->version < 0x10)
			p = _ALIGN(p, sz >= 8 ? 8 : 4);

		nstr = find_flat_dt_string(noff);
		if (nstr == NULL) {
			printk(KERN_WARNING "Can't find property index"
			       " name !\n");
			return NULL;
		}
		if (strcmp(name, nstr) == 0) {
			if (size)
				*size = sz;
			return (void *)p;
		}
		p += sz;
		p = _ALIGN(p, 4);
	} while(1);
}

/* Return 1 if the flat-tree node's "compatible" list contains an entry
 * whose prefix matches @compat (case-insensitive), else 0. */
int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
{
	const char* cp;
	unsigned long cplen, l;

	cp = of_get_flat_dt_prop(node, "compatible", &cplen);
	if (cp == NULL)
		return 0;
	while (cplen > 0) {
		if (strncasecmp(cp, compat, strlen(compat)) == 0)
			return 1;
		l = strlen(cp) + 1;
		cp += l;
		cplen -= l;
	}

	return 0;
}

/* Bump-allocate @size bytes at alignment @align from the region tracked
 * by *@mem.  Used both to measure (first pass) and to carve out (second
 * pass) storage during unflattening; no freeing ever happens. */
static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
				       unsigned long align)
{
	void *res;

	*mem = _ALIGN(*mem, align);
	res = (void *)*mem;
	*mem += size;

	return res;
}

/**
 * Unflatten one node (recursively including its children) from the flat
 * blob at *@p into memory carved from @mem via unflatten_dt_alloc().
 *
 * Called twice: with @allnextpp == NULL it only advances *@p and @mem to
 * compute the total size needed; with @allnextpp set it actually builds
 * the device_node/property structures and links them into the allnext
 * list.  @fpsize carries the accumulated full-path length for version
 * 0x10 trees (which store unit names, not full paths).  Returns the
 * updated allocation cursor.
 */
static unsigned long __init unflatten_dt_node(unsigned long mem,
					      unsigned long *p,
					      struct device_node *dad,
					      struct device_node ***allnextpp,
					      unsigned long fpsize)
{
	struct device_node *np;
	struct property *pp, **prev_pp = NULL;
	char *pathp;
	u32 tag;
	unsigned int l, allocl;
	int has_name = 0;
	int new_format = 0;

	tag = *((u32 *)(*p));
	if (tag != OF_DT_BEGIN_NODE) {
		printk("Weird tag at start of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	pathp = (char *)*p;
	l = allocl = strlen(pathp) + 1;
	*p = _ALIGN(*p + l, 4);

	/* version 0x10 has a more compact unit name here instead of the full
	 * path. we accumulate the full path size using "fpsize", we'll rebuild
	 * it later. We detect this because the first character of the name is
	 * not '/'.
	 */
	if ((*pathp) != '/') {
		new_format = 1;
		if (fpsize == 0) {
			/* root node: special case. fpsize accounts for path
			 * plus terminating zero. root node only has '/', so
			 * fpsize should be 2, but we want to avoid the first
			 * level nodes to have two '/' so we use fpsize 1 here
			 */
			fpsize = 1;
			allocl = 2;
		} else {
			/* account for '/' and path size minus terminal 0
			 * already in 'l'
			 */
			fpsize += l;
			allocl = fpsize;
		}
	}


	/* full_name storage is tacked onto the end of the device_node */
	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
				__alignof__(struct device_node));
	if (allnextpp) {
		memset(np, 0, sizeof(*np));
		np->full_name = ((char*)np) + sizeof(struct device_node);
		if (new_format) {
			char *p = np->full_name;
			/* rebuild full path for new format */
			if (dad && dad->parent) {
				strcpy(p, dad->full_name);
#ifdef DEBUG
				if ((strlen(p) + l + 1) != allocl) {
					DBG("%s: p: %d, l: %d, a: %d\n",
					    pathp, (int)strlen(p), l, allocl);
				}
#endif
				p += strlen(p);
			}
			*(p++) = '/';
			memcpy(p, pathp, l);
		} else
			memcpy(np->full_name, pathp, l);
		prev_pp = &np->properties;
		**allnextpp = np;
		*allnextpp = &np->allnext;
		if (dad != NULL) {
			np->parent = dad;
			/* we temporarily use the next field as `last_child'*/
			if (dad->next == 0)
				dad->child = np;
			else
				dad->next->sibling = np;
			dad->next = np;
		}
		kref_init(&np->kref);
	}
	/* walk this node's property list */
	while(1) {
		u32 sz, noff;
		char *pname;

		tag = *((u32 *)(*p));
		if (tag == OF_DT_NOP) {
			*p += 4;
			continue;
		}
		if (tag != OF_DT_PROP)
			break;
		*p += 4;
		sz = *((u32 *)(*p));
		noff = *((u32 *)((*p) + 4));
		*p += 8;
		if (initial_boot_params->version < 0x10)
			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);

		pname = find_flat_dt_string(noff);
		if (pname == NULL) {
			printk("Can't find property name in list !\n");
			break;
		}
		if (strcmp(pname, "name") == 0)
			has_name = 1;
		l = strlen(pname) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
					__alignof__(struct property));
		if (allnextpp) {
			/* ibm,phandle overrides linux,phandle if both exist */
			if (strcmp(pname, "linux,phandle") == 0) {
				np->node = *((u32 *)*p);
				if (np->linux_phandle == 0)
					np->linux_phandle = np->node;
			}
			if (strcmp(pname, "ibm,phandle") == 0)
				np->linux_phandle = *((u32 *)*p);
			/* property value points straight into the blob */
			pp->name = pname;
			pp->length = sz;
			pp->value = (void *)*p;
			*prev_pp = pp;
			prev_pp = &pp->next;
		}
		*p = _ALIGN((*p) + sz, 4);
	}
	/* with version 0x10 we may not have the name property, recreate
	 * it here from the unit name if absent
	 */
	if (!has_name) {
		char *p = pathp, *ps = pathp, *pa = NULL;
		int sz;

		/* ps -> start of last path component, pa -> '@' (if any) */
		while (*p) {
			if ((*p) == '@')
				pa = p;
			if ((*p) == '/')
				ps = p + 1;
			p++;
		}
		if (pa < ps)
			pa = p;
		sz = (pa - ps) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
					__alignof__(struct property));
		if (allnextpp) {
			pp->name = "name";
			pp->length = sz;
			pp->value = pp + 1;
			*prev_pp = pp;
			prev_pp = &pp->next;
			memcpy(pp->value, ps, sz - 1);
			((char *)pp->value)[sz - 1] = 0;
			DBG("fixed up name for %s -> %s\n", pathp,
			    (char *)pp->value);
		}
	}
	if (allnextpp) {
		*prev_pp = NULL;
		np->name = of_get_property(np, "name", NULL);
		np->type = of_get_property(np, "device_type", NULL);

		if (!np->name)
			np->name = "<NULL>";
		if (!np->type)
			np->type = "<NULL>";
	}
	/* recurse into children until this node's END_NODE tag */
	while (tag == OF_DT_BEGIN_NODE) {
		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
		tag = *((u32 *)(*p));
	}
	if (tag != OF_DT_END_NODE) {
		printk("Weird tag at end of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	return mem;
}

/* Parse the "mem=" boot argument into memory_limit (page-aligned).
 * Returns non-zero (error) when the argument has no value. */
static int __init early_parse_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = PAGE_ALIGN(memparse(p, &p));
	DBG("memory limit = 0x%lx\n", memory_limit);

	return 0;
}
early_param("mem", early_parse_mem);

/*
 * The device tree may be allocated below our memory limit, or inside the
 * crash kernel region for kdump. If so, move it out now.
 */
static void move_device_tree(void)
{
	unsigned long start, size;
	void *p;

	DBG("-> move_device_tree\n");

	start = __pa(initial_boot_params);
	size = initial_boot_params->totalsize;

	if ((memory_limit && (start + size) > memory_limit) ||
			overlaps_crashkernel(start, size)) {
		/* copy the whole blob into RMO memory and repoint at it */
		p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size));
		memcpy(p, initial_boot_params, size);
		initial_boot_params = (struct boot_param_header *)p;
		DBG("Moved device tree to 0x%p\n", p);
	}

	DBG("<- move_device_tree\n");
}

/**
 * unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used (this used to be done by finish_device_tree)
 */
void __init unflatten_device_tree(void)
{
	unsigned long start, mem, size;
	struct device_node **allnextp = &allnodes;

	DBG(" -> unflatten_device_tree()\n");

	/* First pass, scan for size */
	start = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
	/* round up to a multiple of 4 */
	size = (size | 3) + 1;

	DBG(" size is %lx, allocating...\n", size);

	/* Allocate memory for the expanded device tree; the extra 4 bytes
	 * hold an overflow-detection sentinel */
	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
	mem = (unsigned long) __va(mem);

	((u32 *)mem)[size / 4] = 0xdeadbeef;

	DBG(" unflattening %lx...\n", mem);

	/* Second pass, do actual unflattening */
	start = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
	if (*((u32 *)start) != OF_DT_END)
		printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
	/* second pass must not have consumed more than the first measured */
	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
		       ((u32 *)mem)[size / 4] );
	*allnextp = NULL;

	/* Get pointer to OF "/chosen" node for use everywhere */
	of_chosen = of_find_node_by_path("/chosen");
	if (of_chosen == NULL)
		of_chosen = of_find_node_by_path("/chosen@0");

	DBG(" <- unflatten_device_tree()\n");
}

/*
 * ibm,pa-features is a per-cpu property that contains a string of
 * attribute descriptors, each of which has a 2 byte header plus up
 * to 254 bytes worth of processor attribute bits. First header
 * byte specifies the number of bytes following the header.
 * Second header byte is an "attribute-specifier" type, of which
 * zero is the only currently-defined value.
517 * Implementation: Pass in the byte and bit offset for the feature 518 * that we are interested in. The function will return -1 if the 519 * pa-features property is missing, or a 1/0 to indicate if the feature 520 * is supported/not supported. Note that the bit numbers are 521 * big-endian to match the definition in PAPR. 522 */ 523 static struct ibm_pa_feature { 524 unsigned long cpu_features; /* CPU_FTR_xxx bit */ 525 unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */ 526 unsigned char pabyte; /* byte number in ibm,pa-features */ 527 unsigned char pabit; /* bit number (big-endian) */ 528 unsigned char invert; /* if 1, pa bit set => clear feature */ 529 } ibm_pa_features[] __initdata = { 530 {0, PPC_FEATURE_HAS_MMU, 0, 0, 0}, 531 {0, PPC_FEATURE_HAS_FPU, 0, 1, 0}, 532 {CPU_FTR_SLB, 0, 0, 2, 0}, 533 {CPU_FTR_CTRL, 0, 0, 3, 0}, 534 {CPU_FTR_NOEXECUTE, 0, 0, 6, 0}, 535 {CPU_FTR_NODSISRALIGN, 0, 1, 1, 1}, 536 #if 0 537 /* put this back once we know how to test if firmware does 64k IO */ 538 {CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, 539 #endif 540 {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, 541 }; 542 543 static void __init scan_features(unsigned long node, unsigned char *ftrs, 544 unsigned long tablelen, 545 struct ibm_pa_feature *fp, 546 unsigned long ft_size) 547 { 548 unsigned long i, len, bit; 549 550 /* find descriptor with type == 0 */ 551 for (;;) { 552 if (tablelen < 3) 553 return; 554 len = 2 + ftrs[0]; 555 if (tablelen < len) 556 return; /* descriptor 0 not found */ 557 if (ftrs[1] == 0) 558 break; 559 tablelen -= len; 560 ftrs += len; 561 } 562 563 /* loop over bits we know about */ 564 for (i = 0; i < ft_size; ++i, ++fp) { 565 if (fp->pabyte >= ftrs[0]) 566 continue; 567 bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1; 568 if (bit ^ fp->invert) { 569 cur_cpu_spec->cpu_features |= fp->cpu_features; 570 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs; 571 } else { 572 cur_cpu_spec->cpu_features &= ~fp->cpu_features; 573 
cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs; 574 } 575 } 576 } 577 578 static void __init check_cpu_pa_features(unsigned long node) 579 { 580 unsigned char *pa_ftrs; 581 unsigned long tablelen; 582 583 pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen); 584 if (pa_ftrs == NULL) 585 return; 586 587 scan_features(node, pa_ftrs, tablelen, 588 ibm_pa_features, ARRAY_SIZE(ibm_pa_features)); 589 } 590 591 static struct feature_property { 592 const char *name; 593 u32 min_value; 594 unsigned long cpu_feature; 595 unsigned long cpu_user_ftr; 596 } feature_properties[] __initdata = { 597 #ifdef CONFIG_ALTIVEC 598 {"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC}, 599 {"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC}, 600 #endif /* CONFIG_ALTIVEC */ 601 #ifdef CONFIG_PPC64 602 {"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP}, 603 {"ibm,purr", 1, CPU_FTR_PURR, 0}, 604 {"ibm,spurr", 1, CPU_FTR_SPURR, 0}, 605 #endif /* CONFIG_PPC64 */ 606 }; 607 608 static void __init check_cpu_feature_properties(unsigned long node) 609 { 610 unsigned long i; 611 struct feature_property *fp = feature_properties; 612 const u32 *prop; 613 614 for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) { 615 prop = of_get_flat_dt_prop(node, fp->name, NULL); 616 if (prop && *prop >= fp->min_value) { 617 cur_cpu_spec->cpu_features |= fp->cpu_feature; 618 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr; 619 } 620 } 621 } 622 623 static int __init early_init_dt_scan_cpus(unsigned long node, 624 const char *uname, int depth, 625 void *data) 626 { 627 static int logical_cpuid = 0; 628 char *type = of_get_flat_dt_prop(node, "device_type", NULL); 629 const u32 *prop; 630 const u32 *intserv; 631 int i, nthreads; 632 unsigned long len; 633 int found = 0; 634 635 /* We are scanning "cpu" nodes only */ 636 if (type == NULL || strcmp(type, "cpu") != 0) 637 return 0; 638 639 /* Get physical cpuid */ 640 intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len); 641 
if (intserv) { 642 nthreads = len / sizeof(int); 643 } else { 644 intserv = of_get_flat_dt_prop(node, "reg", NULL); 645 nthreads = 1; 646 } 647 648 /* 649 * Now see if any of these threads match our boot cpu. 650 * NOTE: This must match the parsing done in smp_setup_cpu_maps. 651 */ 652 for (i = 0; i < nthreads; i++) { 653 /* 654 * version 2 of the kexec param format adds the phys cpuid of 655 * booted proc. 656 */ 657 if (initial_boot_params && initial_boot_params->version >= 2) { 658 if (intserv[i] == 659 initial_boot_params->boot_cpuid_phys) { 660 found = 1; 661 break; 662 } 663 } else { 664 /* 665 * Check if it's the boot-cpu, set it's hw index now, 666 * unfortunately this format did not support booting 667 * off secondary threads. 668 */ 669 if (of_get_flat_dt_prop(node, 670 "linux,boot-cpu", NULL) != NULL) { 671 found = 1; 672 break; 673 } 674 } 675 676 #ifdef CONFIG_SMP 677 /* logical cpu id is always 0 on UP kernels */ 678 logical_cpuid++; 679 #endif 680 } 681 682 if (found) { 683 DBG("boot cpu: logical %d physical %d\n", logical_cpuid, 684 intserv[i]); 685 boot_cpuid = logical_cpuid; 686 set_hard_smp_processor_id(boot_cpuid, intserv[i]); 687 688 /* 689 * PAPR defines "logical" PVR values for cpus that 690 * meet various levels of the architecture: 691 * 0x0f000001 Architecture version 2.04 692 * 0x0f000002 Architecture version 2.05 693 * If the cpu-version property in the cpu node contains 694 * such a value, we call identify_cpu again with the 695 * logical PVR value in order to use the cpu feature 696 * bits appropriate for the architecture level. 697 * 698 * A POWER6 partition in "POWER6 architected" mode 699 * uses the 0x0f000002 PVR value; in POWER5+ mode 700 * it uses 0x0f000001. 
701 */ 702 prop = of_get_flat_dt_prop(node, "cpu-version", NULL); 703 if (prop && (*prop & 0xff000000) == 0x0f000000) 704 identify_cpu(0, *prop); 705 } 706 707 check_cpu_feature_properties(node); 708 check_cpu_pa_features(node); 709 710 #ifdef CONFIG_PPC_PSERIES 711 if (nthreads > 1) 712 cur_cpu_spec->cpu_features |= CPU_FTR_SMT; 713 else 714 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT; 715 #endif 716 717 return 0; 718 } 719 720 #ifdef CONFIG_BLK_DEV_INITRD 721 static void __init early_init_dt_check_for_initrd(unsigned long node) 722 { 723 unsigned long l; 724 u32 *prop; 725 726 DBG("Looking for initrd properties... "); 727 728 prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l); 729 if (prop) { 730 initrd_start = (unsigned long)__va(of_read_ulong(prop, l/4)); 731 732 prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l); 733 if (prop) { 734 initrd_end = (unsigned long) 735 __va(of_read_ulong(prop, l/4)); 736 initrd_below_start_ok = 1; 737 } else { 738 initrd_start = 0; 739 } 740 } 741 742 DBG("initrd_start=0x%lx initrd_end=0x%lx\n", initrd_start, initrd_end); 743 } 744 #else 745 static inline void early_init_dt_check_for_initrd(unsigned long node) 746 { 747 } 748 #endif /* CONFIG_BLK_DEV_INITRD */ 749 750 static int __init early_init_dt_scan_chosen(unsigned long node, 751 const char *uname, int depth, void *data) 752 { 753 unsigned long *lprop; 754 unsigned long l; 755 char *p; 756 757 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname); 758 759 if (depth != 1 || 760 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) 761 return 0; 762 763 #ifdef CONFIG_PPC64 764 /* check if iommu is forced on or off */ 765 if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL) 766 iommu_is_off = 1; 767 if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL) 768 iommu_force_on = 1; 769 #endif 770 771 /* mem=x on the command line is the preferred mechanism */ 772 lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL); 
773 if (lprop) 774 memory_limit = *lprop; 775 776 #ifdef CONFIG_PPC64 777 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL); 778 if (lprop) 779 tce_alloc_start = *lprop; 780 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL); 781 if (lprop) 782 tce_alloc_end = *lprop; 783 #endif 784 785 #ifdef CONFIG_KEXEC 786 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL); 787 if (lprop) 788 crashk_res.start = *lprop; 789 790 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL); 791 if (lprop) 792 crashk_res.end = crashk_res.start + *lprop - 1; 793 #endif 794 795 early_init_dt_check_for_initrd(node); 796 797 /* Retreive command line */ 798 p = of_get_flat_dt_prop(node, "bootargs", &l); 799 if (p != NULL && l > 0) 800 strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE)); 801 802 #ifdef CONFIG_CMDLINE 803 if (p == NULL || l == 0 || (l == 1 && (*p) == 0)) 804 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); 805 #endif /* CONFIG_CMDLINE */ 806 807 DBG("Command line is: %s\n", cmd_line); 808 809 /* break now */ 810 return 1; 811 } 812 813 static int __init early_init_dt_scan_root(unsigned long node, 814 const char *uname, int depth, void *data) 815 { 816 u32 *prop; 817 818 if (depth != 0) 819 return 0; 820 821 prop = of_get_flat_dt_prop(node, "#size-cells", NULL); 822 dt_root_size_cells = (prop == NULL) ? 1 : *prop; 823 DBG("dt_root_size_cells = %x\n", dt_root_size_cells); 824 825 prop = of_get_flat_dt_prop(node, "#address-cells", NULL); 826 dt_root_addr_cells = (prop == NULL) ? 2 : *prop; 827 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells); 828 829 /* break now */ 830 return 1; 831 } 832 833 static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp) 834 { 835 cell_t *p = *cellp; 836 837 *cellp = p + s; 838 return of_read_ulong(p, s); 839 } 840 841 #ifdef CONFIG_PPC_PSERIES 842 /* 843 * Interpret the ibm,dynamic-memory property in the 844 * /ibm,dynamic-reconfiguration-memory node. 
 * This contains a list of memory blocks along with NUMA affinity
 * information.
 */
static int __init early_init_dt_scan_drconf_memory(unsigned long node)
{
	cell_t *dm, *ls;
	unsigned long l, n;
	unsigned long base, size, lmb_size, flags;

	ls = (cell_t *)of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
	if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t))
		return 0;
	lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);

	dm = (cell_t *)of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
	if (dm == NULL || l < sizeof(cell_t))
		return 0;

	n = *dm++;	/* number of entries */
	/* each entry: address (dt_root_addr_cells) + 4 extra cells */
	if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(cell_t))
		return 0;

	for (; n != 0; --n) {
		base = dt_mem_next_cell(dt_root_addr_cells, &dm);
		flags = dm[3];
		/* skip DRC index, pad, assoc. list index, flags */
		dm += 4;
		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((flags & 0x80) || !(flags & 0x8))
			continue;
		size = lmb_size;
		/* with iommu off, only memory below 2GB is usable */
		if (iommu_is_off) {
			if (base >= 0x80000000ul)
				continue;
			if ((base + size) > 0x80000000ul)
				size = 0x80000000ul - base;
		}
		lmb_add(base, size);
	}
	lmb_dump_all();
	return 0;
}
#else
#define early_init_dt_scan_drconf_memory(node)	0
#endif /* CONFIG_PPC_PSERIES */

/*
 * Scan callback for memory nodes: adds each base/size range from
 * linux,usable-memory (preferred) or reg to the LMB allocator.
 */
static int __init early_init_dt_scan_memory(unsigned long node,
					    const char *uname, int depth, void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	cell_t *reg, *endp;
	unsigned long l;

	/* Look for the ibm,dynamic-reconfiguration-memory node */
	if (depth == 1 &&
	    strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
		return early_init_dt_scan_drconf_memory(node);

	/* We are scanning "memory" nodes only */
	if (type == NULL) {
		/*
		 * The longtrail doesn't have a device_type on the
		 * /memory node, so look for the node called /memory@0.
		 */
		if (depth != 1 || strcmp(uname, "memory@0") != 0)
			return 0;
	} else if (strcmp(type, "memory") != 0)
		return 0;

	reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
	if (reg == NULL)
		reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
	if (reg == NULL)
		return 0;

	endp = reg + (l / sizeof(cell_t));

	DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
	    uname, l, reg[0], reg[1], reg[2], reg[3]);

	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
		unsigned long base, size;

		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
		size = dt_mem_next_cell(dt_root_size_cells, &reg);

		if (size == 0)
			continue;
		DBG(" - %lx , %lx\n", base, size);
#ifdef CONFIG_PPC64
		/* with iommu off, only memory below 2GB is usable */
		if (iommu_is_off) {
			if (base >= 0x80000000ul)
				continue;
			if ((base + size) > 0x80000000ul)
				size = 0x80000000ul - base;
		}
#endif
		lmb_add(base, size);
	}
	return 0;
}

/*
 * Reserve the regions listed in the flat tree's memory reserve map,
 * plus the blob itself and (if present) the initrd.  Handles old
 * 32-bit kexec-produced reserve maps on CONFIG_PPC32.
 */
static void __init early_reserve_mem(void)
{
	u64 base, size;
	u64 *reserve_map;
	unsigned long self_base;
	unsigned long self_size;

	reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
					initial_boot_params->off_mem_rsvmap);

	/* before we do anything, lets reserve the dt blob */
	self_base = __pa((unsigned long)initial_boot_params);
	self_size = initial_boot_params->totalsize;
	lmb_reserve(self_base, self_size);

#ifdef CONFIG_BLK_DEV_INITRD
	/* then reserve the initrd, if any */
	if (initrd_start && (initrd_end > initrd_start))
		lmb_reserve(__pa(initrd_start), initrd_end - initrd_start);
#endif /* CONFIG_BLK_DEV_INITRD */

#ifdef CONFIG_PPC32
	/*
	 * Handle the case where we might be booting from an old kexec
	 * image that setup the mem_rsvmap as pairs of 32-bit values
	 */
	if (*reserve_map > 0xffffffffull) {
		u32 base_32, size_32;
		u32 *reserve_map_32 = (u32 *)reserve_map;

		while (1) {
			base_32 = *(reserve_map_32++);
			size_32 = *(reserve_map_32++);
			if (size_32 == 0)
				break;
			/* skip if the reservation is for the blob */
			if (base_32 == self_base && size_32 == self_size)
				continue;
			DBG("reserving: %x -> %x\n", base_32, size_32);
			lmb_reserve(base_32, size_32);
		}
		return;
	}
#endif
	/* reserve map is terminated by a zero-sized entry */
	while (1) {
		base = *(reserve_map++);
		size = *(reserve_map++);
		if (size == 0)
			break;
		DBG("reserving: %llx -> %llx\n", base, size);
		lmb_reserve(base, size);
	}

#if 0
	DBG("memory reserved, lmbs :\n");
	lmb_dump_all();
#endif
}

/*
 * Main early-boot entry: takes the flat device tree pointer from the
 * boot wrapper, scans it for RTAS/chosen/memory/cpu information, sets
 * up the LMB allocator and its reservations, and relocates the blob
 * out of reserved regions if necessary.
 */
void __init early_init_devtree(void *params)
{
	DBG(" -> early_init_devtree(%p)\n", params);

	/* Setup flat device-tree pointer */
	initial_boot_params = params;

#ifdef CONFIG_PPC_RTAS
	/* Some machines might need RTAS info for debugging, grab it now. */
	of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
#endif

	/* Retrieve various informations from the /chosen node of the
	 * device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);

	/* Scan memory nodes and rebuild LMBs */
	lmb_init();
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);

	/* Save command line for /proc/cmdline and then parse parameters */
	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
	parse_early_param();

	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
	lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
	reserve_kdump_trampoline();
	reserve_crashkernel();
	early_reserve_mem();

	lmb_enforce_memory_limit(memory_limit);
	lmb_analyze();

	DBG("Phys. mem: %lx\n", lmb_phys_mem_size());

	/* We may need to relocate the flat tree, do it now.
	 * FIXME .. and the initrd too? */
	move_device_tree();

	DBG("Scanning CPUs ...\n");

	/* Retreive CPU related informations from the flat tree
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);

	DBG(" <- early_init_devtree()\n");
}

#undef printk

/* Return #address-cells in effect for @np, i.e. the value on the
 * nearest ancestor that defines it; defaults to 1 at the root. */
int of_n_addr_cells(struct device_node* np)
{
	const int *ip;
	do {
		if (np->parent)
			np = np->parent;
		ip = of_get_property(np, "#address-cells", NULL);
		if (ip != NULL)
			return *ip;
	} while (np->parent);
	/* No #address-cells property for the root node, default to 1 */
	return 1;
}
EXPORT_SYMBOL(of_n_addr_cells);

/* Return #size-cells in effect for @np, same lookup rule as above. */
int of_n_size_cells(struct device_node* np)
{
	const int* ip;
	do {
		if (np->parent)
			np = np->parent;
		ip = of_get_property(np, "#size-cells", NULL);
		if (ip != NULL)
			return *ip;
	} while (np->parent);
	/* No #size-cells property for the root node, default to 1 */
	return 1;
}
EXPORT_SYMBOL(of_n_size_cells);

/** Checks if the given "compat" string matches one of the strings in
 * the device's "compatible" property
 */
int of_device_is_compatible(const struct device_node *device,
		const char *compat)
{
	const char* cp;
	int cplen, l;

	cp = of_get_property(device, "compatible", &cplen);
	if (cp == NULL)
		return 0;
	while (cplen > 0) {
		/* prefix match, case-insensitive, per list entry */
		if (strncasecmp(cp, compat, strlen(compat)) == 0)
			return 1;
		l = strlen(cp) + 1;
		cp += l;
		cplen -= l;
	}

	return 0;
}
EXPORT_SYMBOL(of_device_is_compatible);


/**
 * Indicates whether the root node has a given value in its
 * compatible property.
1119 */ 1120 int machine_is_compatible(const char *compat) 1121 { 1122 struct device_node *root; 1123 int rc = 0; 1124 1125 root = of_find_node_by_path("/"); 1126 if (root) { 1127 rc = of_device_is_compatible(root, compat); 1128 of_node_put(root); 1129 } 1130 return rc; 1131 } 1132 EXPORT_SYMBOL(machine_is_compatible); 1133 1134 /******* 1135 * 1136 * New implementation of the OF "find" APIs, return a refcounted 1137 * object, call of_node_put() when done. The device tree and list 1138 * are protected by a rw_lock. 1139 * 1140 * Note that property management will need some locking as well, 1141 * this isn't dealt with yet. 1142 * 1143 *******/ 1144 1145 /** 1146 * of_find_node_by_name - Find a node by its "name" property 1147 * @from: The node to start searching from or NULL, the node 1148 * you pass will not be searched, only the next one 1149 * will; typically, you pass what the previous call 1150 * returned. of_node_put() will be called on it 1151 * @name: The name string to match against 1152 * 1153 * Returns a node pointer with refcount incremented, use 1154 * of_node_put() on it when done. 1155 */ 1156 struct device_node *of_find_node_by_name(struct device_node *from, 1157 const char *name) 1158 { 1159 struct device_node *np; 1160 1161 read_lock(&devtree_lock); 1162 np = from ? from->allnext : allnodes; 1163 for (; np != NULL; np = np->allnext) 1164 if (np->name != NULL && strcasecmp(np->name, name) == 0 1165 && of_node_get(np)) 1166 break; 1167 of_node_put(from); 1168 read_unlock(&devtree_lock); 1169 return np; 1170 } 1171 EXPORT_SYMBOL(of_find_node_by_name); 1172 1173 /** 1174 * of_find_node_by_type - Find a node by its "device_type" property 1175 * @from: The node to start searching from, or NULL to start searching 1176 * the entire device tree. The node you pass will not be 1177 * searched, only the next one will; typically, you pass 1178 * what the previous call returned. of_node_put() will be 1179 * called on from for you. 
1180 * @type: The type string to match against 1181 * 1182 * Returns a node pointer with refcount incremented, use 1183 * of_node_put() on it when done. 1184 */ 1185 struct device_node *of_find_node_by_type(struct device_node *from, 1186 const char *type) 1187 { 1188 struct device_node *np; 1189 1190 read_lock(&devtree_lock); 1191 np = from ? from->allnext : allnodes; 1192 for (; np != 0; np = np->allnext) 1193 if (np->type != 0 && strcasecmp(np->type, type) == 0 1194 && of_node_get(np)) 1195 break; 1196 of_node_put(from); 1197 read_unlock(&devtree_lock); 1198 return np; 1199 } 1200 EXPORT_SYMBOL(of_find_node_by_type); 1201 1202 /** 1203 * of_find_compatible_node - Find a node based on type and one of the 1204 * tokens in its "compatible" property 1205 * @from: The node to start searching from or NULL, the node 1206 * you pass will not be searched, only the next one 1207 * will; typically, you pass what the previous call 1208 * returned. of_node_put() will be called on it 1209 * @type: The type string to match "device_type" or NULL to ignore 1210 * @compatible: The string to match to one of the tokens in the device 1211 * "compatible" list. 1212 * 1213 * Returns a node pointer with refcount incremented, use 1214 * of_node_put() on it when done. 1215 */ 1216 struct device_node *of_find_compatible_node(struct device_node *from, 1217 const char *type, const char *compatible) 1218 { 1219 struct device_node *np; 1220 1221 read_lock(&devtree_lock); 1222 np = from ? 
from->allnext : allnodes; 1223 for (; np != 0; np = np->allnext) { 1224 if (type != NULL 1225 && !(np->type != 0 && strcasecmp(np->type, type) == 0)) 1226 continue; 1227 if (of_device_is_compatible(np, compatible) && of_node_get(np)) 1228 break; 1229 } 1230 of_node_put(from); 1231 read_unlock(&devtree_lock); 1232 return np; 1233 } 1234 EXPORT_SYMBOL(of_find_compatible_node); 1235 1236 /** 1237 * of_find_node_by_path - Find a node matching a full OF path 1238 * @path: The full path to match 1239 * 1240 * Returns a node pointer with refcount incremented, use 1241 * of_node_put() on it when done. 1242 */ 1243 struct device_node *of_find_node_by_path(const char *path) 1244 { 1245 struct device_node *np = allnodes; 1246 1247 read_lock(&devtree_lock); 1248 for (; np != 0; np = np->allnext) { 1249 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0 1250 && of_node_get(np)) 1251 break; 1252 } 1253 read_unlock(&devtree_lock); 1254 return np; 1255 } 1256 EXPORT_SYMBOL(of_find_node_by_path); 1257 1258 /** 1259 * of_find_node_by_phandle - Find a node given a phandle 1260 * @handle: phandle of the node to find 1261 * 1262 * Returns a node pointer with refcount incremented, use 1263 * of_node_put() on it when done. 1264 */ 1265 struct device_node *of_find_node_by_phandle(phandle handle) 1266 { 1267 struct device_node *np; 1268 1269 read_lock(&devtree_lock); 1270 for (np = allnodes; np != 0; np = np->allnext) 1271 if (np->linux_phandle == handle) 1272 break; 1273 of_node_get(np); 1274 read_unlock(&devtree_lock); 1275 return np; 1276 } 1277 EXPORT_SYMBOL(of_find_node_by_phandle); 1278 1279 /** 1280 * of_find_all_nodes - Get next node in global list 1281 * @prev: Previous node or NULL to start iteration 1282 * of_node_put() will be called on it 1283 * 1284 * Returns a node pointer with refcount incremented, use 1285 * of_node_put() on it when done. 
1286 */ 1287 struct device_node *of_find_all_nodes(struct device_node *prev) 1288 { 1289 struct device_node *np; 1290 1291 read_lock(&devtree_lock); 1292 np = prev ? prev->allnext : allnodes; 1293 for (; np != 0; np = np->allnext) 1294 if (of_node_get(np)) 1295 break; 1296 of_node_put(prev); 1297 read_unlock(&devtree_lock); 1298 return np; 1299 } 1300 EXPORT_SYMBOL(of_find_all_nodes); 1301 1302 /** 1303 * of_get_parent - Get a node's parent if any 1304 * @node: Node to get parent 1305 * 1306 * Returns a node pointer with refcount incremented, use 1307 * of_node_put() on it when done. 1308 */ 1309 struct device_node *of_get_parent(const struct device_node *node) 1310 { 1311 struct device_node *np; 1312 1313 if (!node) 1314 return NULL; 1315 1316 read_lock(&devtree_lock); 1317 np = of_node_get(node->parent); 1318 read_unlock(&devtree_lock); 1319 return np; 1320 } 1321 EXPORT_SYMBOL(of_get_parent); 1322 1323 /** 1324 * of_get_next_child - Iterate a node childs 1325 * @node: parent node 1326 * @prev: previous child of the parent node, or NULL to get first 1327 * 1328 * Returns a node pointer with refcount incremented, use 1329 * of_node_put() on it when done. 1330 */ 1331 struct device_node *of_get_next_child(const struct device_node *node, 1332 struct device_node *prev) 1333 { 1334 struct device_node *next; 1335 1336 read_lock(&devtree_lock); 1337 next = prev ? prev->sibling : node->child; 1338 for (; next != 0; next = next->sibling) 1339 if (of_node_get(next)) 1340 break; 1341 of_node_put(prev); 1342 read_unlock(&devtree_lock); 1343 return next; 1344 } 1345 EXPORT_SYMBOL(of_get_next_child); 1346 1347 /** 1348 * of_node_get - Increment refcount of a node 1349 * @node: Node to inc refcount, NULL is supported to 1350 * simplify writing of callers 1351 * 1352 * Returns node. 
 */
struct device_node *of_node_get(struct device_node *node)
{
	if (node)
		kref_get(&node->kref);
	return node;
}
EXPORT_SYMBOL(of_node_get);

/* Map a kref embedded in a device_node back to the containing node. */
static inline struct device_node * kref_to_device_node(struct kref *kref)
{
	return container_of(kref, struct device_node, kref);
}

/**
 * of_node_release - release a dynamically allocated node
 * @kref: kref element of the node to be released
 *
 * In of_node_put() this function is passed to kref_put()
 * as the destructor.
 */
static void of_node_release(struct kref *kref)
{
	struct device_node *node = kref_to_device_node(kref);
	struct property *prop = node->properties;

	/* We should never be releasing nodes that haven't been detached. */
	if (!of_node_check_flag(node, OF_DETACHED)) {
		printk("WARNING: Bad of_node_put() on %s\n", node->full_name);
		dump_stack();
		/* Re-arm the refcount so the node stays usable and a
		 * later put does not underflow. */
		kref_init(&node->kref);
		return;
	}

	/* Only OF_DYNAMIC nodes own their memory; anything else is
	 * left alone here. */
	if (!of_node_check_flag(node, OF_DYNAMIC))
		return;

	/* Free the live property list first; when it is exhausted,
	 * switch to draining the "dead properties" list (properties
	 * parked there by prom_remove_property()/prom_update_property()). */
	while (prop) {
		struct property *next = prop->next;
		kfree(prop->name);
		kfree(prop->value);
		kfree(prop);
		prop = next;

		if (!prop) {
			prop = node->deadprops;
			node->deadprops = NULL;
		}
	}
	kfree(node->full_name);
	kfree(node->data);
	kfree(node);
}

/**
 * of_node_put - Decrement refcount of a node
 * @node:	Node to dec refcount, NULL is supported to
 *		simplify writing of callers
 *
 * Frees the node via of_node_release() when the count reaches zero.
 */
void of_node_put(struct device_node *node)
{
	if (node)
		kref_put(&node->kref, of_node_release);
}
EXPORT_SYMBOL(of_node_put);

/*
 * Plug a device node into the tree and global list.
1422 */ 1423 void of_attach_node(struct device_node *np) 1424 { 1425 write_lock(&devtree_lock); 1426 np->sibling = np->parent->child; 1427 np->allnext = allnodes; 1428 np->parent->child = np; 1429 allnodes = np; 1430 write_unlock(&devtree_lock); 1431 } 1432 1433 /* 1434 * "Unplug" a node from the device tree. The caller must hold 1435 * a reference to the node. The memory associated with the node 1436 * is not freed until its refcount goes to zero. 1437 */ 1438 void of_detach_node(const struct device_node *np) 1439 { 1440 struct device_node *parent; 1441 1442 write_lock(&devtree_lock); 1443 1444 parent = np->parent; 1445 if (!parent) 1446 goto out_unlock; 1447 1448 if (allnodes == np) 1449 allnodes = np->allnext; 1450 else { 1451 struct device_node *prev; 1452 for (prev = allnodes; 1453 prev->allnext != np; 1454 prev = prev->allnext) 1455 ; 1456 prev->allnext = np->allnext; 1457 } 1458 1459 if (parent->child == np) 1460 parent->child = np->sibling; 1461 else { 1462 struct device_node *prevsib; 1463 for (prevsib = np->parent->child; 1464 prevsib->sibling != np; 1465 prevsib = prevsib->sibling) 1466 ; 1467 prevsib->sibling = np->sibling; 1468 } 1469 1470 of_node_set_flag(np, OF_DETACHED); 1471 1472 out_unlock: 1473 write_unlock(&devtree_lock); 1474 } 1475 1476 #ifdef CONFIG_PPC_PSERIES 1477 /* 1478 * Fix up the uninitialized fields in a new device node: 1479 * name, type and pci-specific fields 1480 */ 1481 1482 static int of_finish_dynamic_node(struct device_node *node) 1483 { 1484 struct device_node *parent = of_get_parent(node); 1485 int err = 0; 1486 const phandle *ibm_phandle; 1487 1488 node->name = of_get_property(node, "name", NULL); 1489 node->type = of_get_property(node, "device_type", NULL); 1490 1491 if (!node->name) 1492 node->name = "<NULL>"; 1493 if (!node->type) 1494 node->type = "<NULL>"; 1495 1496 if (!parent) { 1497 err = -ENODEV; 1498 goto out; 1499 } 1500 1501 /* We don't support that function on PowerMac, at least 1502 * not yet 1503 */ 1504 if 
(machine_is(powermac)) 1505 return -ENODEV; 1506 1507 /* fix up new node's linux_phandle field */ 1508 if ((ibm_phandle = of_get_property(node, "ibm,phandle", NULL))) 1509 node->linux_phandle = *ibm_phandle; 1510 1511 out: 1512 of_node_put(parent); 1513 return err; 1514 } 1515 1516 static int prom_reconfig_notifier(struct notifier_block *nb, 1517 unsigned long action, void *node) 1518 { 1519 int err; 1520 1521 switch (action) { 1522 case PSERIES_RECONFIG_ADD: 1523 err = of_finish_dynamic_node(node); 1524 if (err < 0) { 1525 printk(KERN_ERR "finish_node returned %d\n", err); 1526 err = NOTIFY_BAD; 1527 } 1528 break; 1529 default: 1530 err = NOTIFY_DONE; 1531 break; 1532 } 1533 return err; 1534 } 1535 1536 static struct notifier_block prom_reconfig_nb = { 1537 .notifier_call = prom_reconfig_notifier, 1538 .priority = 10, /* This one needs to run first */ 1539 }; 1540 1541 static int __init prom_reconfig_setup(void) 1542 { 1543 return pSeries_reconfig_notifier_register(&prom_reconfig_nb); 1544 } 1545 __initcall(prom_reconfig_setup); 1546 #endif 1547 1548 struct property *of_find_property(const struct device_node *np, 1549 const char *name, 1550 int *lenp) 1551 { 1552 struct property *pp; 1553 1554 read_lock(&devtree_lock); 1555 for (pp = np->properties; pp != 0; pp = pp->next) 1556 if (strcmp(pp->name, name) == 0) { 1557 if (lenp != 0) 1558 *lenp = pp->length; 1559 break; 1560 } 1561 read_unlock(&devtree_lock); 1562 1563 return pp; 1564 } 1565 EXPORT_SYMBOL(of_find_property); 1566 1567 /* 1568 * Find a property with a given name for a given node 1569 * and return the value. 1570 */ 1571 const void *of_get_property(const struct device_node *np, const char *name, 1572 int *lenp) 1573 { 1574 struct property *pp = of_find_property(np,name,lenp); 1575 return pp ? 
pp->value : NULL; 1576 } 1577 EXPORT_SYMBOL(of_get_property); 1578 1579 /* 1580 * Add a property to a node 1581 */ 1582 int prom_add_property(struct device_node* np, struct property* prop) 1583 { 1584 struct property **next; 1585 1586 prop->next = NULL; 1587 write_lock(&devtree_lock); 1588 next = &np->properties; 1589 while (*next) { 1590 if (strcmp(prop->name, (*next)->name) == 0) { 1591 /* duplicate ! don't insert it */ 1592 write_unlock(&devtree_lock); 1593 return -1; 1594 } 1595 next = &(*next)->next; 1596 } 1597 *next = prop; 1598 write_unlock(&devtree_lock); 1599 1600 #ifdef CONFIG_PROC_DEVICETREE 1601 /* try to add to proc as well if it was initialized */ 1602 if (np->pde) 1603 proc_device_tree_add_prop(np->pde, prop); 1604 #endif /* CONFIG_PROC_DEVICETREE */ 1605 1606 return 0; 1607 } 1608 1609 /* 1610 * Remove a property from a node. Note that we don't actually 1611 * remove it, since we have given out who-knows-how-many pointers 1612 * to the data using get-property. Instead we just move the property 1613 * to the "dead properties" list, so it won't be found any more. 1614 */ 1615 int prom_remove_property(struct device_node *np, struct property *prop) 1616 { 1617 struct property **next; 1618 int found = 0; 1619 1620 write_lock(&devtree_lock); 1621 next = &np->properties; 1622 while (*next) { 1623 if (*next == prop) { 1624 /* found the node */ 1625 *next = prop->next; 1626 prop->next = np->deadprops; 1627 np->deadprops = prop; 1628 found = 1; 1629 break; 1630 } 1631 next = &(*next)->next; 1632 } 1633 write_unlock(&devtree_lock); 1634 1635 if (!found) 1636 return -ENODEV; 1637 1638 #ifdef CONFIG_PROC_DEVICETREE 1639 /* try to remove the proc node as well */ 1640 if (np->pde) 1641 proc_device_tree_remove_prop(np->pde, prop); 1642 #endif /* CONFIG_PROC_DEVICETREE */ 1643 1644 return 0; 1645 } 1646 1647 /* 1648 * Update a property in a node. 
Note that we don't actually 1649 * remove it, since we have given out who-knows-how-many pointers 1650 * to the data using get-property. Instead we just move the property 1651 * to the "dead properties" list, and add the new property to the 1652 * property list 1653 */ 1654 int prom_update_property(struct device_node *np, 1655 struct property *newprop, 1656 struct property *oldprop) 1657 { 1658 struct property **next; 1659 int found = 0; 1660 1661 write_lock(&devtree_lock); 1662 next = &np->properties; 1663 while (*next) { 1664 if (*next == oldprop) { 1665 /* found the node */ 1666 newprop->next = oldprop->next; 1667 *next = newprop; 1668 oldprop->next = np->deadprops; 1669 np->deadprops = oldprop; 1670 found = 1; 1671 break; 1672 } 1673 next = &(*next)->next; 1674 } 1675 write_unlock(&devtree_lock); 1676 1677 if (!found) 1678 return -ENODEV; 1679 1680 #ifdef CONFIG_PROC_DEVICETREE 1681 /* try to add to proc as well if it was initialized */ 1682 if (np->pde) 1683 proc_device_tree_update_prop(np->pde, newprop, oldprop); 1684 #endif /* CONFIG_PROC_DEVICETREE */ 1685 1686 return 0; 1687 } 1688 1689 1690 /* Find the device node for a given logical cpu number, also returns the cpu 1691 * local thread number (index in ibm,interrupt-server#s) if relevant and 1692 * asked for (non NULL) 1693 */ 1694 struct device_node *of_get_cpu_node(int cpu, unsigned int *thread) 1695 { 1696 int hardid; 1697 struct device_node *np; 1698 1699 hardid = get_hard_smp_processor_id(cpu); 1700 1701 for_each_node_by_type(np, "cpu") { 1702 const u32 *intserv; 1703 unsigned int plen, t; 1704 1705 /* Check for ibm,ppc-interrupt-server#s. 
If it doesn't exist 1706 * fallback to "reg" property and assume no threads 1707 */ 1708 intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", 1709 &plen); 1710 if (intserv == NULL) { 1711 const u32 *reg = of_get_property(np, "reg", NULL); 1712 if (reg == NULL) 1713 continue; 1714 if (*reg == hardid) { 1715 if (thread) 1716 *thread = 0; 1717 return np; 1718 } 1719 } else { 1720 plen /= sizeof(u32); 1721 for (t = 0; t < plen; t++) { 1722 if (hardid == intserv[t]) { 1723 if (thread) 1724 *thread = t; 1725 return np; 1726 } 1727 } 1728 } 1729 } 1730 return NULL; 1731 } 1732 EXPORT_SYMBOL(of_get_cpu_node); 1733 1734 #if defined(CONFIG_DEBUG_FS) && defined(DEBUG) 1735 static struct debugfs_blob_wrapper flat_dt_blob; 1736 1737 static int __init export_flat_device_tree(void) 1738 { 1739 struct dentry *d; 1740 1741 flat_dt_blob.data = initial_boot_params; 1742 flat_dt_blob.size = initial_boot_params->totalsize; 1743 1744 d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR, 1745 powerpc_debugfs_root, &flat_dt_blob); 1746 if (!d) 1747 return 1; 1748 1749 return 0; 1750 } 1751 __initcall(export_flat_device_tree); 1752 #endif 1753