/* mdesc.c: Sun4V machine description handling.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/lmb.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>

#include <asm/cpudata.h>
#include <asm/hypervisor.h>
#include <asm/mdesc.h>
#include <asm/prom.h>
#include <asm/oplib.h>
#include <asm/smp.h>

/* Unlike the OBP device tree, the machine description is a full-on
 * DAG.  An arbitrary number of ARCs are possible from one
 * node to other nodes and thus we can't use the OBP device_node
 * data structure to represent these nodes inside of the kernel.
 *
 * Actually, it isn't even a DAG, because there are back pointers
 * which create cycles in the graph.
 *
 * mdesc_hdr and mdesc_elem describe the layout of the data structure
 * we get from the Hypervisor.
 */
struct mdesc_hdr {
        u32 version;    /* Transport version */
        u32 node_sz;    /* node block size */
        u32 name_sz;    /* name block size */
        u32 data_sz;    /* data block size */
} __attribute__((aligned(16)));

struct mdesc_elem {
        u8 tag;
#define MD_LIST_END     0x00
#define MD_NODE         0x4e
#define MD_NODE_END     0x45
#define MD_NOOP         0x20
#define MD_PROP_ARC     0x61
#define MD_PROP_VAL     0x76
#define MD_PROP_STR     0x73
#define MD_PROP_DATA    0x64
        u8 name_len;
        u16 resv;
        u32 name_offset;
        union {
                struct {
                        u32 data_len;
                        u32 data_offset;
                } data;
                u64 val;
        } d;
};

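/* A rough sketch of the MD image these structures describe (see
 * node_block()/name_block()/data_block() below): the 16-byte header is
 * followed by the node block (an array of 16-byte mdesc_elem entries),
 * then the name block, then the data block.
 *
 *   +-----------+---------------------+------------+------------+
 *   | mdesc_hdr | node block          | name block | data block |
 *   | (16 bytes)| (node_sz bytes,     | (name_sz)  | (data_sz)  |
 *   |           |  16-byte elements)  |            |            |
 *   +-----------+---------------------+------------+------------+
 *
 * The u64 "node" values used throughout this file are simply indices
 * into that element array, so the number of elements is node_sz / 16.
 * For an MD_NODE element, d.val holds the index of the next node; for
 * an MD_PROP_ARC element it holds the index of the node the arc points
 * to (see mdesc_node_by_name() and mdesc_arc_target() below).
 */
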
struct mdesc_mem_ops {
        struct mdesc_handle *(*alloc)(unsigned int mdesc_size);
        void (*free)(struct mdesc_handle *handle);
};

struct mdesc_handle {
        struct list_head        list;
        struct mdesc_mem_ops    *mops;
        void                    *self_base;
        atomic_t                refcnt;
        unsigned int            handle_size;
        struct mdesc_hdr        mdesc;
};

static void mdesc_handle_init(struct mdesc_handle *hp,
                              unsigned int handle_size,
                              void *base)
{
        BUG_ON(((unsigned long)&hp->mdesc) & (16UL - 1));

        memset(hp, 0, handle_size);
        INIT_LIST_HEAD(&hp->list);
        hp->self_base = base;
        atomic_set(&hp->refcnt, 1);
        hp->handle_size = handle_size;
}

static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size)
{
        unsigned int handle_size, alloc_size;
        struct mdesc_handle *hp;
        unsigned long paddr;

        handle_size = (sizeof(struct mdesc_handle) -
                       sizeof(struct mdesc_hdr) +
                       mdesc_size);
        alloc_size = PAGE_ALIGN(handle_size);

        paddr = lmb_alloc(alloc_size, PAGE_SIZE);

        hp = NULL;
        if (paddr) {
                hp = __va(paddr);
                mdesc_handle_init(hp, handle_size, hp);
        }
        return hp;
}

static void mdesc_lmb_free(struct mdesc_handle *hp)
{
        unsigned int alloc_size, handle_size = hp->handle_size;
        unsigned long start, end;

        BUG_ON(atomic_read(&hp->refcnt) != 0);
        BUG_ON(!list_empty(&hp->list));

        alloc_size = PAGE_ALIGN(handle_size);

        start = (unsigned long) hp;
        end = start + alloc_size;

        while (start < end) {
                struct page *p;

                p = virt_to_page(start);
                ClearPageReserved(p);
                __free_page(p);
                start += PAGE_SIZE;
        }
}

static struct mdesc_mem_ops lmb_mdesc_ops = {
        .alloc = mdesc_lmb_alloc,
        .free  = mdesc_lmb_free,
};

static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
{
        unsigned int handle_size;
        void *base;

        handle_size = (sizeof(struct mdesc_handle) -
                       sizeof(struct mdesc_hdr) +
                       mdesc_size);

        base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
        if (base) {
                struct mdesc_handle *hp;
                unsigned long addr;

                addr = (unsigned long)base;
                addr = (addr + 15UL) & ~15UL;
                hp = (struct mdesc_handle *) addr;

                mdesc_handle_init(hp, handle_size, base);
                return hp;
        }

        return NULL;
}

static void mdesc_kfree(struct mdesc_handle *hp)
{
        BUG_ON(atomic_read(&hp->refcnt) != 0);
        BUG_ON(!list_empty(&hp->list));

        kfree(hp->self_base);
}

static struct mdesc_mem_ops kmalloc_mdesc_memops = {
        .alloc = mdesc_kmalloc,
        .free  = mdesc_kfree,
};

static struct mdesc_handle *mdesc_alloc(unsigned int mdesc_size,
                                        struct mdesc_mem_ops *mops)
{
        struct mdesc_handle *hp = mops->alloc(mdesc_size);

        if (hp)
                hp->mops = mops;

        return hp;
}

static void mdesc_free(struct mdesc_handle *hp)
{
        hp->mops->free(hp);
}

static struct mdesc_handle *cur_mdesc;
static LIST_HEAD(mdesc_zombie_list);
static DEFINE_SPINLOCK(mdesc_lock);

struct mdesc_handle *mdesc_grab(void)
{
        struct mdesc_handle *hp;
        unsigned long flags;

        spin_lock_irqsave(&mdesc_lock, flags);
        hp = cur_mdesc;
        if (hp)
                atomic_inc(&hp->refcnt);
        spin_unlock_irqrestore(&mdesc_lock, flags);

        return hp;
}
EXPORT_SYMBOL(mdesc_grab);

void mdesc_release(struct mdesc_handle *hp)
{
        unsigned long flags;

        spin_lock_irqsave(&mdesc_lock, flags);
        if (atomic_dec_and_test(&hp->refcnt)) {
                list_del_init(&hp->list);
                hp->mops->free(hp);
        }
        spin_unlock_irqrestore(&mdesc_lock, flags);
}
EXPORT_SYMBOL(mdesc_release);

static DEFINE_MUTEX(mdesc_mutex);
static struct mdesc_notifier_client *client_list;

void mdesc_register_notifier(struct mdesc_notifier_client *client)
{
        u64 node;

        mutex_lock(&mdesc_mutex);
        client->next = client_list;
        client_list = client;

        mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
                client->add(cur_mdesc, node);

        mutex_unlock(&mdesc_mutex);
}

static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node)
{
        const u64 *id;
        u64 a;

        id = NULL;
        mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
                u64 target;

                target = mdesc_arc_target(hp, a);
                id = mdesc_get_property(hp, target,
                                        "cfg-handle", NULL);
                if (id)
                        break;
        }

        return id;
}

/* Run 'func' on nodes which are in A but not in B.  */
static void invoke_on_missing(const char *name,
                              struct mdesc_handle *a,
                              struct mdesc_handle *b,
                              void (*func)(struct mdesc_handle *, u64))
{
        u64 node;

        mdesc_for_each_node_by_name(a, node, name) {
                int found = 0, is_vdc_port = 0;
                const char *name_prop;
                const u64 *id;
                u64 fnode;

                name_prop = mdesc_get_property(a, node, "name", NULL);
                if (name_prop && !strcmp(name_prop, "vdc-port")) {
                        is_vdc_port = 1;
                        id = parent_cfg_handle(a, node);
                } else
                        id = mdesc_get_property(a, node, "id", NULL);

                if (!id) {
                        printk(KERN_ERR "MD: Cannot find ID for %s node.\n",
                               (name_prop ? name_prop : name));
                        continue;
                }

                mdesc_for_each_node_by_name(b, fnode, name) {
                        const u64 *fid;

                        if (is_vdc_port) {
                                name_prop = mdesc_get_property(b, fnode,
                                                               "name", NULL);
                                if (!name_prop ||
                                    strcmp(name_prop, "vdc-port"))
                                        continue;
                                fid = parent_cfg_handle(b, fnode);
                                if (!fid) {
                                        printk(KERN_ERR "MD: Cannot find ID "
                                               "for vdc-port node.\n");
                                        continue;
                                }
                        } else
                                fid = mdesc_get_property(b, fnode,
                                                         "id", NULL);

                        if (*id == *fid) {
                                found = 1;
                                break;
                        }
                }
                if (!found)
                        func(a, node);
        }
}

static void notify_one(struct mdesc_notifier_client *p,
                       struct mdesc_handle *old_hp,
                       struct mdesc_handle *new_hp)
{
        invoke_on_missing(p->node_name, old_hp, new_hp, p->remove);
        invoke_on_missing(p->node_name, new_hp, old_hp, p->add);
}

static void mdesc_notify_clients(struct mdesc_handle *old_hp,
                                 struct mdesc_handle *new_hp)
{
        struct mdesc_notifier_client *p = client_list;

        while (p) {
                notify_one(p, old_hp, new_hp);
                p = p->next;
        }
}

void mdesc_update(void)
{
        unsigned long len, real_len, status;
        struct mdesc_handle *hp, *orig_hp;
        unsigned long flags;

        mutex_lock(&mdesc_mutex);

        (void) sun4v_mach_desc(0UL, 0UL, &len);

        hp = mdesc_alloc(len, &kmalloc_mdesc_memops);
        if (!hp) {
                printk(KERN_ERR "MD: mdesc alloc fails\n");
                goto out;
        }

        status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
        if (status != HV_EOK || real_len > len) {
                printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
                       status);
                atomic_dec(&hp->refcnt);
                mdesc_free(hp);
                goto out;
        }

        spin_lock_irqsave(&mdesc_lock, flags);
        orig_hp = cur_mdesc;
        cur_mdesc = hp;
        spin_unlock_irqrestore(&mdesc_lock, flags);

        mdesc_notify_clients(orig_hp, hp);

        spin_lock_irqsave(&mdesc_lock, flags);
        if (atomic_dec_and_test(&orig_hp->refcnt))
                mdesc_free(orig_hp);
        else
                list_add(&orig_hp->list, &mdesc_zombie_list);
        spin_unlock_irqrestore(&mdesc_lock, flags);

out:
        mutex_unlock(&mdesc_mutex);
}

static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
        return (struct mdesc_elem *) (mdesc + 1);
}

static void *name_block(struct mdesc_hdr *mdesc)
{
        return ((void *) node_block(mdesc)) + mdesc->node_sz;
}

static void *data_block(struct mdesc_hdr *mdesc)
{
        return ((void *) name_block(mdesc)) + mdesc->name_sz;
}

u64 mdesc_node_by_name(struct mdesc_handle *hp,
                       u64 from_node, const char *name)
{
        struct mdesc_elem *ep = node_block(&hp->mdesc);
        const char *names = name_block(&hp->mdesc);
        u64 last_node = hp->mdesc.node_sz / 16;
        u64 ret;

        if (from_node == MDESC_NODE_NULL) {
                ret = from_node = 0;
        } else if (from_node >= last_node) {
                return MDESC_NODE_NULL;
        } else {
                ret = ep[from_node].d.val;
        }

        while (ret < last_node) {
                if (ep[ret].tag != MD_NODE)
                        return MDESC_NODE_NULL;
                if (!strcmp(names + ep[ret].name_offset, name))
                        break;
                ret = ep[ret].d.val;
        }
        if (ret >= last_node)
                ret = MDESC_NODE_NULL;
        return ret;
}
EXPORT_SYMBOL(mdesc_node_by_name);

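/* Callers normally iterate with the mdesc_for_each_node_by_name() and
 * mdesc_for_each_arc() helpers used throughout this file (assumed here
 * to be the wrapper macros declared in <asm/mdesc.h>).  The node loop
 * is roughly equivalent to this sketch:
 *
 *      u64 node;
 *
 *      for (node = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
 *           node != MDESC_NODE_NULL;
 *           node = mdesc_node_by_name(hp, node, "cpu")) {
 *              ... inspect properties and arcs of 'node' ...
 *      }
 */
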
const void *mdesc_get_property(struct mdesc_handle *hp, u64 node,
                               const char *name, int *lenp)
{
        const char *names = name_block(&hp->mdesc);
        u64 last_node = hp->mdesc.node_sz / 16;
        void *data = data_block(&hp->mdesc);
        struct mdesc_elem *ep;

        if (node == MDESC_NODE_NULL || node >= last_node)
                return NULL;

        ep = node_block(&hp->mdesc) + node;
        ep++;
        for (; ep->tag != MD_NODE_END; ep++) {
                void *val = NULL;
                int len = 0;

                switch (ep->tag) {
                case MD_PROP_VAL:
                        val = &ep->d.val;
                        len = 8;
                        break;

                case MD_PROP_STR:
                case MD_PROP_DATA:
                        val = data + ep->d.data.data_offset;
                        len = ep->d.data.data_len;
                        break;

                default:
                        break;
                }
                if (!val)
                        continue;

                if (!strcmp(names + ep->name_offset, name)) {
                        if (lenp)
                                *lenp = len;
                        return val;
                }
        }

        return NULL;
}
EXPORT_SYMBOL(mdesc_get_property);

u64 mdesc_next_arc(struct mdesc_handle *hp, u64 from, const char *arc_type)
{
        struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
        const char *names = name_block(&hp->mdesc);
        u64 last_node = hp->mdesc.node_sz / 16;

        if (from == MDESC_NODE_NULL || from >= last_node)
                return MDESC_NODE_NULL;

        ep = base + from;

        ep++;
        for (; ep->tag != MD_NODE_END; ep++) {
                if (ep->tag != MD_PROP_ARC)
                        continue;

                if (strcmp(names + ep->name_offset, arc_type))
                        continue;

                return ep - base;
        }

        return MDESC_NODE_NULL;
}
EXPORT_SYMBOL(mdesc_next_arc);

u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc)
{
        struct mdesc_elem *ep, *base = node_block(&hp->mdesc);

        ep = base + arc;

        return ep->d.val;
}
EXPORT_SYMBOL(mdesc_arc_target);

const char *mdesc_node_name(struct mdesc_handle *hp, u64 node)
{
        struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
        const char *names = name_block(&hp->mdesc);
        u64 last_node = hp->mdesc.node_sz / 16;

        if (node == MDESC_NODE_NULL || node >= last_node)
                return NULL;

        ep = base + node;
        if (ep->tag != MD_NODE)
                return NULL;

        return names + ep->name_offset;
}
EXPORT_SYMBOL(mdesc_node_name);

static void __init report_platform_properties(void)
{
        struct mdesc_handle *hp = mdesc_grab();
        u64 pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
        const char *s;
        const u64 *v;

        if (pn == MDESC_NODE_NULL) {
                prom_printf("No platform node in machine-description.\n");
                prom_halt();
        }

        s = mdesc_get_property(hp, pn, "banner-name", NULL);
        printk("PLATFORM: banner-name [%s]\n", s);
        s = mdesc_get_property(hp, pn, "name", NULL);
        printk("PLATFORM: name [%s]\n", s);

        v = mdesc_get_property(hp, pn, "hostid", NULL);
        if (v)
                printk("PLATFORM: hostid [%08llx]\n", *v);
        v = mdesc_get_property(hp, pn, "serial#", NULL);
        if (v)
                printk("PLATFORM: serial# [%08llx]\n", *v);
        v = mdesc_get_property(hp, pn, "stick-frequency", NULL);
        printk("PLATFORM: stick-frequency [%08llx]\n", *v);
        v = mdesc_get_property(hp, pn, "mac-address", NULL);
        if (v)
                printk("PLATFORM: mac-address [%llx]\n", *v);
        v = mdesc_get_property(hp, pn, "watchdog-resolution", NULL);
        if (v)
                printk("PLATFORM: watchdog-resolution [%llu ms]\n", *v);
        v = mdesc_get_property(hp, pn, "watchdog-max-timeout", NULL);
        if (v)
                printk("PLATFORM: watchdog-max-timeout [%llu ms]\n", *v);
        v = mdesc_get_property(hp, pn, "max-cpus", NULL);
        if (v)
                printk("PLATFORM: max-cpus [%llu]\n", *v);

#ifdef CONFIG_SMP
        {
                int max_cpu, i;

                if (v) {
                        max_cpu = *v;
                        if (max_cpu > NR_CPUS)
                                max_cpu = NR_CPUS;
                } else {
                        max_cpu = NR_CPUS;
                }
                for (i = 0; i < max_cpu; i++)
                        set_cpu_possible(i, true);
        }
#endif

        mdesc_release(hp);
}

static void __devinit fill_in_one_cache(cpuinfo_sparc *c,
                                        struct mdesc_handle *hp,
                                        u64 mp)
{
        const u64 *level = mdesc_get_property(hp, mp, "level", NULL);
        const u64 *size = mdesc_get_property(hp, mp, "size", NULL);
        const u64 *line_size = mdesc_get_property(hp, mp, "line-size", NULL);
        const char *type;
        int type_len;

        type = mdesc_get_property(hp, mp, "type", &type_len);

        switch (*level) {
        case 1:
                if (of_find_in_proplist(type, "instn", type_len)) {
                        c->icache_size = *size;
                        c->icache_line_size = *line_size;
                } else if (of_find_in_proplist(type, "data", type_len)) {
                        c->dcache_size = *size;
                        c->dcache_line_size = *line_size;
                }
                break;

        case 2:
                c->ecache_size = *size;
                c->ecache_line_size = *line_size;
                break;

        default:
                break;
        }

        if (*level == 1) {
                u64 a;

                mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
                        u64 target = mdesc_arc_target(hp, a);
                        const char *name = mdesc_node_name(hp, target);

                        if (!strcmp(name, "cache"))
                                fill_in_one_cache(c, hp, target);
                }
        }
}

static void __devinit mark_core_ids(struct mdesc_handle *hp, u64 mp,
                                    int core_id)
{
        u64 a;

        mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
                u64 t = mdesc_arc_target(hp, a);
                const char *name;
                const u64 *id;

                name = mdesc_node_name(hp, t);
                if (!strcmp(name, "cpu")) {
                        id = mdesc_get_property(hp, t, "id", NULL);
                        if (*id < NR_CPUS)
                                cpu_data(*id).core_id = core_id;
                } else {
                        u64 j;

                        mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) {
                                u64 n = mdesc_arc_target(hp, j);
                                const char *n_name;

                                n_name = mdesc_node_name(hp, n);
                                if (strcmp(n_name, "cpu"))
                                        continue;

                                id = mdesc_get_property(hp, n, "id", NULL);
                                if (*id < NR_CPUS)
                                        cpu_data(*id).core_id = core_id;
                        }
                }
        }
}

static void __devinit set_core_ids(struct mdesc_handle *hp)
{
        int idx;
        u64 mp;

        idx = 1;
        mdesc_for_each_node_by_name(hp, mp, "cache") {
                const u64 *level;
                const char *type;
                int len;

                level = mdesc_get_property(hp, mp, "level", NULL);
                if (*level != 1)
                        continue;

                type = mdesc_get_property(hp, mp, "type", &len);
                if (!of_find_in_proplist(type, "instn", len))
                        continue;

                mark_core_ids(hp, mp, idx);

                idx++;
        }
}

static void __devinit mark_proc_ids(struct mdesc_handle *hp, u64 mp,
                                    int proc_id)
{
        u64 a;

        mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
                u64 t = mdesc_arc_target(hp, a);
                const char *name;
                const u64 *id;

                name = mdesc_node_name(hp, t);
                if (strcmp(name, "cpu"))
                        continue;

                id = mdesc_get_property(hp, t, "id", NULL);
                if (*id < NR_CPUS)
                        cpu_data(*id).proc_id = proc_id;
        }
}

static void __devinit __set_proc_ids(struct mdesc_handle *hp,
                                     const char *exec_unit_name)
{
        int idx;
        u64 mp;

        idx = 0;
        mdesc_for_each_node_by_name(hp, mp, exec_unit_name) {
                const char *type;
                int len;

                type = mdesc_get_property(hp, mp, "type", &len);
                if (!of_find_in_proplist(type, "int", len) &&
                    !of_find_in_proplist(type, "integer", len))
                        continue;

                mark_proc_ids(hp, mp, idx);

                idx++;
        }
}

static void __devinit set_proc_ids(struct mdesc_handle *hp)
{
        __set_proc_ids(hp, "exec_unit");
        __set_proc_ids(hp, "exec-unit");
}

static void __devinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
                                         unsigned char def)
{
        u64 val;

        if (!p)
                goto use_default;
        val = *p;

        if (!val || val >= 64)
                goto use_default;

        *mask = ((1U << val) * 64U) - 1U;
        return;

use_default:
        *mask = ((1U << def) * 64U) - 1U;
}

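/* A worked example of the mask computed above (illustrative only): a
 * "q-cpu-mondo-#bits" value of 7 describes a queue of 2^7 = 128
 * entries; each entry is presumably 64 bytes (hence the "* 64U"), so
 * the resulting byte-offset mask is ((1U << 7) * 64U) - 1U = 0x1fff.
 */
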
static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
                                     struct trap_per_cpu *tb)
{
        const u64 *val;

        val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL);
        get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7);

        val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL);
        get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7);

        val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL);
        get_one_mondo_bits(val, &tb->resum_qmask, 6);

        val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL);
        get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
}

void __cpuinit mdesc_fill_in_cpu_data(cpumask_t mask)
{
        struct mdesc_handle *hp = mdesc_grab();
        u64 mp;

        ncpus_probed = 0;
        mdesc_for_each_node_by_name(hp, mp, "cpu") {
                const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
                const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
                struct trap_per_cpu *tb;
                cpuinfo_sparc *c;
                int cpuid;
                u64 a;

                ncpus_probed++;

                cpuid = *id;

#ifdef CONFIG_SMP
                if (cpuid >= NR_CPUS) {
                        printk(KERN_WARNING "Ignoring CPU %d which is "
                               ">= NR_CPUS (%d)\n",
                               cpuid, NR_CPUS);
                        continue;
                }
                if (!cpu_isset(cpuid, mask))
                        continue;
#else
                /* On uniprocessor we only want the values for the
                 * real physical cpu the kernel booted onto, however
                 * cpu_data() only has one entry at index 0.
                 */
                if (cpuid != real_hard_smp_processor_id())
                        continue;
                cpuid = 0;
#endif

                c = &cpu_data(cpuid);
                c->clock_tick = *cfreq;

                tb = &trap_block[cpuid];
                get_mondo_data(hp, mp, tb);

                mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
                        u64 j, t = mdesc_arc_target(hp, a);
                        const char *t_name;

                        t_name = mdesc_node_name(hp, t);
                        if (!strcmp(t_name, "cache")) {
                                fill_in_one_cache(c, hp, t);
                                continue;
                        }

                        mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
                                u64 n = mdesc_arc_target(hp, j);
                                const char *n_name;

                                n_name = mdesc_node_name(hp, n);
                                if (!strcmp(n_name, "cache"))
                                        fill_in_one_cache(c, hp, n);
                        }
                }

#ifdef CONFIG_SMP
                cpu_set(cpuid, cpu_present_map);
#endif

                c->core_id = 0;
                c->proc_id = -1;
        }

#ifdef CONFIG_SMP
        sparc64_multi_core = 1;
#endif

        set_core_ids(hp);
        set_proc_ids(hp);

        smp_fill_in_sib_core_maps();

        mdesc_release(hp);
}

static ssize_t mdesc_read(struct file *file, char __user *buf,
                          size_t len, loff_t *offp)
{
        struct mdesc_handle *hp = mdesc_grab();
        int err;

        if (!hp)
                return -ENODEV;

        err = hp->handle_size;
        if (len < hp->handle_size)
                err = -EMSGSIZE;
        else if (copy_to_user(buf, &hp->mdesc, hp->handle_size))
                err = -EFAULT;
        mdesc_release(hp);

        return err;
}

static const struct file_operations mdesc_fops = {
        .read  = mdesc_read,
        .owner = THIS_MODULE,
};

static struct miscdevice mdesc_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name  = "mdesc",
        .fops  = &mdesc_fops,
};

static int __init mdesc_misc_init(void)
{
        return misc_register(&mdesc_misc);
}

__initcall(mdesc_misc_init);

void __init sun4v_mdesc_init(void)
{
        struct mdesc_handle *hp;
        unsigned long len, real_len, status;
        cpumask_t mask;

        (void) sun4v_mach_desc(0UL, 0UL, &len);

        printk("MDESC: Size is %lu bytes.\n", len);

        hp = mdesc_alloc(len, &lmb_mdesc_ops);
        if (hp == NULL) {
                prom_printf("MDESC: alloc of %lu bytes failed.\n", len);
                prom_halt();
        }

        status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
        if (status != HV_EOK || real_len > len) {
                prom_printf("sun4v_mach_desc fails, err(%lu), "
                            "len(%lu), real_len(%lu)\n",
                            status, len, real_len);
                mdesc_free(hp);
                prom_halt();
        }

        cur_mdesc = hp;

        report_platform_properties();

        cpus_setall(mask);
        mdesc_fill_in_cpu_data(mask);
}