// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>
#include <asm/drmem.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int primary_domain_index;
static int n_mem_addr_cells, n_mem_size_cells;

#define FORM0_AFFINITY 0
#define FORM1_AFFINITY 1
#define FORM2_AFFINITY 2
static int affinity_form;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
static int numa_distance_table[MAX_NUMNODES][MAX_NUMNODES] = {
	[0 ... MAX_NUMNODES - 1] = { [0 ... MAX_NUMNODES - 1] = -1 }
};
static int numa_id_index_table[MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = NUMA_NO_NODE };

/*
 * Allocate node_to_cpumask_map based on the number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for_each_node(node)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}

static int __init fake_numa_create_new_node(unsigned long end_pfn,
					    unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id, iff we started creating NUMA nodes.
	 * We want to continue from where we left off the last time.
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
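	/*
	 * Illustrative note (my reading of this function, not a comment from
	 * the original source): "numa=fake=" takes a comma-separated list of
	 * memory boundaries parsed by memparse(), e.g. "numa=fake=1G,4G".
	 * Each time the end of the range being mapped crosses the next
	 * boundary, a new fake node id is handed out, so memory is split
	 * into fake nodes at roughly the 1G and 4G marks in that example.
	 */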
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		pr_debug("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}

static void __init reset_numa_cpu_lookup_table(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		numa_cpu_lookup_table[cpu] = -1;
}

void map_cpu_to_node(int cpu, int node)
{
	update_numa_cpu_lookup_table(cpu, node);

	if (!cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		pr_debug("adding cpu %d to node %d\n", cpu, node);
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
	}
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
		pr_debug("removing cpu %lu from node %d\n", cpu, node);
	} else {
		pr_warn("Warning: cpu %lu not found in node %d\n", cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

static int __associativity_to_nid(const __be32 *associativity,
				  int max_array_sz)
{
	int nid;
	/*
	 * primary_domain_index is a 1-based array index.
	 */
	int index = primary_domain_index - 1;

	if (!numa_enabled || index >= max_array_sz)
		return NUMA_NO_NODE;

	nid = of_read_number(&associativity[index], 1);

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= nr_node_ids)
		nid = NUMA_NO_NODE;
	return nid;
}

/*
 * Returns nid in the range [0..nr_node_ids - 1], or -1 if no useful NUMA
 * information is found.
 */
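/*
 * Worked example (illustrative values, not taken from the original source):
 * with primary_domain_index == 4, an "ibm,associativity" property of
 * { 4, 0, 0, 0, 2 } has a leading length cell of 4 followed by four domain
 * ids, and associativity_to_nid() below resolves it to nid 2.
 */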
static int associativity_to_nid(const __be32 *associativity)
{
	int array_sz = of_read_number(associativity, 1);

	/* Skip the first element in the associativity array */
	return __associativity_to_nid((associativity + 1), array_sz);
}

static int __cpu_form2_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	int dist;
	int node1, node2;

	node1 = associativity_to_nid(cpu1_assoc);
	node2 = associativity_to_nid(cpu2_assoc);

	dist = numa_distance_table[node1][node2];
	if (dist <= LOCAL_DISTANCE)
		return 0;
	else if (dist <= REMOTE_DISTANCE)
		return 1;
	else
		return 2;
}

static int __cpu_form1_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	int dist = 0;

	int i, index;

	for (i = 0; i < distance_ref_points_depth; i++) {
		index = be32_to_cpu(distance_ref_points[i]);
		if (cpu1_assoc[index] == cpu2_assoc[index])
			break;
		dist++;
	}

	return dist;
}

int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	/* We should not get called with FORM0 */
	VM_WARN_ON(affinity_form == FORM0_AFFINITY);
	if (affinity_form == FORM1_AFFINITY)
		return __cpu_form1_relative_distance(cpu1_assoc, cpu2_assoc);
	return __cpu_form2_relative_distance(cpu1_assoc, cpu2_assoc);
}

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (affinity_form == FORM2_AFFINITY)
		return numa_distance_table[a][b];
	else if (affinity_form == FORM0_AFFINITY)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
EXPORT_SYMBOL(__node_distance);

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = NUMA_NO_NODE;
	const __be32 *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	int nid = NUMA_NO_NODE;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		device = of_get_next_parent(device);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL(of_node_to_nid);

static void __initialize_form1_numa_distance(const __be32 *associativity,
					     int max_array_sz)
{
	int i, nid;

	if (affinity_form != FORM1_AFFINITY)
		return;

	nid = __associativity_to_nid(associativity, max_array_sz);
	if (nid != NUMA_NO_NODE) {
		for (i = 0; i < distance_ref_points_depth; i++) {
			const __be32 *entry;
			int index = be32_to_cpu(distance_ref_points[i]) - 1;

			/*
			 * Broken hierarchy: return with a broken distance table.
			 */
			if (WARN(index >= max_array_sz, "Broken ibm,associativity property"))
				return;

			entry = &associativity[index];
			distance_lookup_table[nid][i] = of_read_number(entry, 1);
		}
	}
}

static void initialize_form1_numa_distance(const __be32 *associativity)
{
	int array_sz;

	array_sz = of_read_number(associativity, 1);
	/* Skip the first element in the associativity array */
	__initialize_form1_numa_distance(associativity + 1, array_sz);
}

/*
 * Used to update distance information w.r.t. a newly added node.
 */
void update_numa_distance(struct device_node *node)
{
	int nid;

	if (affinity_form == FORM0_AFFINITY)
		return;
	else if (affinity_form == FORM1_AFFINITY) {
		const __be32 *associativity;

		associativity = of_get_associativity(node);
		if (!associativity)
			return;

		initialize_form1_numa_distance(associativity);
		return;
	}

	/* FORM2 affinity */
	nid = of_node_to_nid_single(node);
	if (nid == NUMA_NO_NODE)
		return;

	/*
	 * With FORM2 we expect the NUMA distance of all possible NUMA
	 * nodes to be provided during boot.
	 */
	WARN(numa_distance_table[nid][nid] == -1,
	     "NUMA distance details for node %d not provided\n", nid);
}

/*
 * ibm,numa-lookup-index-table = {N, domainid1, domainid2, ..... domainidN}
 * ibm,numa-distance-table = { N, 1, 2, 4, 5, 1, 6, .... N elements}
 */
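/*
 * Worked example (illustrative values, not from the original source): with
 * ibm,numa-lookup-index-table = { 2, 0, 4 } and
 * ibm,numa-distance-table = { 4, 10, 20, 20, 10 }, the parser below reads
 * two node ids (0 and 4) and four byte-sized distances, filling
 * numa_distance_table[0][0] = 10, [0][4] = 20, [4][0] = 20 and [4][4] = 10.
 */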
static void __init initialize_form2_numa_distance_lookup_table(void)
{
	int i, j;
	struct device_node *root;
	const __u8 *form2_distances;
	const __be32 *numa_lookup_index;
	int form2_distances_length;
	int max_numa_index, distance_index;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	numa_lookup_index = of_get_property(root, "ibm,numa-lookup-index-table", NULL);
	max_numa_index = of_read_number(&numa_lookup_index[0], 1);

	/* The first element of the array is the size, encoded as an int */
	form2_distances = of_get_property(root, "ibm,numa-distance-table", NULL);
	form2_distances_length = of_read_number((const __be32 *)&form2_distances[0], 1);
	/* Skip the size, which is an encoded int */
	form2_distances += sizeof(__be32);

	pr_debug("form2_distances_len = %d, numa_dist_indexes_len = %d\n",
		 form2_distances_length, max_numa_index);

	for (i = 0; i < max_numa_index; i++)
		/* +1 skips max_numa_index at the start of the property */
		numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1);

	if (form2_distances_length != max_numa_index * max_numa_index) {
		WARN(1, "Wrong NUMA distance information\n");
		form2_distances = NULL; // don't use it
	}
	distance_index = 0;
	for (i = 0; i < max_numa_index; i++) {
		for (j = 0; j < max_numa_index; j++) {
			int nodeA = numa_id_index_table[i];
			int nodeB = numa_id_index_table[j];
			int dist;

			if (form2_distances)
				dist = form2_distances[distance_index++];
			else if (nodeA == nodeB)
				dist = LOCAL_DISTANCE;
			else
				dist = REMOTE_DISTANCE;
			numa_distance_table[nodeA][nodeB] = dist;
			pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, dist);
		}
	}

	of_node_put(root);
}

static int __init find_primary_domain_index(void)
{
	int index;
	struct device_node *root;

	/*
	 * Check for which form of affinity.
	 */
	if (firmware_has_feature(FW_FEATURE_OPAL)) {
		affinity_form = FORM1_AFFINITY;
	} else if (firmware_has_feature(FW_FEATURE_FORM2_AFFINITY)) {
		pr_debug("Using form 2 affinity\n");
		affinity_form = FORM2_AFFINITY;
	} else if (firmware_has_feature(FW_FEATURE_FORM1_AFFINITY)) {
		pr_debug("Using form 1 affinity\n");
		affinity_form = FORM1_AFFINITY;
	} else
		affinity_form = FORM0_AFFINITY;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
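	/*
	 * Example (illustrative, not from the original source): a form 1
	 * "ibm,associativity-reference-points" of { 4, 4 } yields a
	 * primary_domain_index of 4, i.e. the fourth entry of each
	 * ibm,associativity array identifies the node.
	 */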
	distance_ref_points = of_get_property(root,
					      "ibm,associativity-reference-points",
					      &distance_ref_points_depth);

	if (!distance_ref_points) {
		pr_debug("ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);
	if (affinity_form == FORM0_AFFINITY) {
		if (distance_ref_points_depth < 2) {
			pr_warn("short ibm,associativity-reference-points\n");
			goto err;
		}

		index = of_read_number(&distance_ref_points[1], 1);
	} else {
		/*
		 * Both FORM1 and FORM2 affinity find the primary domain details
		 * at the same offset.
		 */
		index = of_read_number(distance_ref_points, 1);
	}
	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		pr_warn("distance array capped at %d entries\n",
			MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return index;

err:
	of_node_put(root);
	return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long read_n_cells(int n, const __be32 **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | of_read_number(*buf, 1);
		(*buf)++;
	}
	return result;
}

struct assoc_arrays {
	u32 n_arrays;
	u32 array_sz;
	const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct assoc_arrays *aa)
{
	struct device_node *memory;
	const __be32 *prop;
	u32 len;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!memory)
		return -1;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int)) {
		of_node_put(memory);
		return -1;
	}

	aa->n_arrays = of_read_number(prop++, 1);
	aa->array_sz = of_read_number(prop++, 1);

	of_node_put(memory);

	/*
	 * Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}
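/*
 * Worked example (illustrative values, not from the original source): an
 * "ibm,associativity-lookup-arrays" property of
 * { 2, 4,  0, 0, 0, 1,  0, 0, 0, 2 } describes two arrays of four cells
 * each. An LMB with aa_index == 1 uses the second array { 0, 0, 0, 2 } and,
 * with primary_domain_index == 4, resolves to nid 2.
 */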
static int __init get_nid_and_numa_distance(struct drmem_lmb *lmb)
{
	struct assoc_arrays aa = { .arrays = NULL };
	int default_nid = NUMA_NO_NODE;
	int nid = default_nid;
	int rc, index;

	if ((primary_domain_index < 0) || !numa_enabled)
		return default_nid;

	rc = of_get_assoc_arrays(&aa);
	if (rc)
		return default_nid;

	if (primary_domain_index <= aa.array_sz &&
	    !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
		const __be32 *associativity;

		index = lmb->aa_index * aa.array_sz;
		associativity = &aa.arrays[index];
		nid = __associativity_to_nid(associativity, aa.array_sz);
		if (nid > 0 && affinity_form == FORM1_AFFINITY) {
			/*
			 * Lookup-array associativity entries do not have the
			 * array length as their first element.
			 */
			__initialize_form1_numa_distance(associativity, aa.array_sz);
		}
	}
	return nid;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
int of_drconf_to_nid_single(struct drmem_lmb *lmb)
{
	struct assoc_arrays aa = { .arrays = NULL };
	int default_nid = NUMA_NO_NODE;
	int nid = default_nid;
	int rc, index;

	if ((primary_domain_index < 0) || !numa_enabled)
		return default_nid;

	rc = of_get_assoc_arrays(&aa);
	if (rc)
		return default_nid;

	if (primary_domain_index <= aa.array_sz &&
	    !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
		const __be32 *associativity;

		index = lmb->aa_index * aa.array_sz;
		associativity = &aa.arrays[index];
		nid = __associativity_to_nid(associativity, aa.array_sz);
	}
	return nid;
}

#ifdef CONFIG_PPC_SPLPAR

static int __vphn_get_associativity(long lcpu, __be32 *associativity)
{
	long rc, hwid;

	/*
	 * On a shared LPAR, the device tree will not have node associativity.
	 * At this time lppaca, or its __old_status field, may not be updated,
	 * so the kernel cannot detect whether it is running on a shared LPAR.
	 * Hence request an explicit associativity irrespective of whether the
	 * LPAR is shared or dedicated, and use the device tree property as a
	 * fallback. cpu_to_phys_id is only valid between
	 * smp_setup_cpu_maps() and smp_setup_pacas().
	 */
	if (firmware_has_feature(FW_FEATURE_VPHN)) {
		if (cpu_to_phys_id)
			hwid = cpu_to_phys_id[lcpu];
		else
			hwid = get_hard_smp_processor_id(lcpu);

		rc = hcall_vphn(hwid, VPHN_FLAG_VCPU, associativity);
		if (rc == H_SUCCESS)
			return 0;
	}

	return -1;
}

static int vphn_get_nid(long lcpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};

	if (!__vphn_get_associativity(lcpu, associativity))
		return associativity_to_nid(associativity);

	return NUMA_NO_NODE;
}
#else

static int __vphn_get_associativity(long lcpu, __be32 *associativity)
{
	return -1;
}

static int vphn_get_nid(long unused)
{
	return NUMA_NO_NODE;
}
#endif /* CONFIG_PPC_SPLPAR */

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
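/*
 * Summary of the resolution order implemented below (descriptive note, not a
 * comment from the original source): reuse the node of the first thread in
 * the core if one is already known, otherwise try a VPHN query, then the
 * "ibm,associativity" property of the cpu's device tree node, and finally
 * fall back to first_online_node.
 */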
static int numa_setup_cpu(unsigned long lcpu)
{
	struct device_node *cpu;
	int fcpu = cpu_first_thread_sibling(lcpu);
	int nid = NUMA_NO_NODE;

	if (!cpu_present(lcpu)) {
		set_cpu_numa_node(lcpu, first_online_node);
		return first_online_node;
	}

	/*
	 * The cpu-to-node binding is the same for all threads in a core.
	 * So if a valid mapping is already available for the first thread
	 * in the core, use it directly instead of querying the firmware:
	 * it represents the most recent mapping notified to us by the
	 * platform (eg: VPHN).
	 */
	nid = numa_cpu_lookup_table[fcpu];
	if (nid >= 0) {
		map_cpu_to_node(lcpu, nid);
		return nid;
	}

	nid = vphn_get_nid(lcpu);
	if (nid != NUMA_NO_NODE)
		goto out_present;

	cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		if (cpu_present(lcpu))
			goto out_present;
		else
			goto out;
	}

	nid = of_node_to_nid_single(cpu);
	of_node_put(cpu);

out_present:
	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	/*
	 * Update for the first thread of the core. All threads of a core
	 * have to be part of the same node. This not only avoids querying
	 * for every other thread in the core, but also avoids a case where
	 * a virtual node associativity change causes subsequent threads of
	 * a core to be associated with a different nid. However, if the
	 * first thread is already online, expect it to have a valid mapping.
	 */
	if (fcpu != lcpu) {
		WARN_ON(cpu_online(fcpu));
		map_cpu_to_node(fcpu, nid);
	}

	map_cpu_to_node(lcpu, nid);
out:
	return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
	int base, sibling, i;

	/* Verify that all the threads in the core belong to the same node */
	base = cpu_first_thread_sibling(cpu);

	for (i = 0; i < threads_per_core; i++) {
		sibling = base + i;

		if (sibling == cpu || cpu_is_offline(sibling))
			continue;

		if (cpu_to_node(sibling) != node) {
			WARN(1, "CPU thread siblings %d and %d don't belong"
				" to the same node!\n", cpu, sibling);
			break;
		}
	}
}

/* Must run before sched domains notifier. */
static int ppc_numa_cpu_prepare(unsigned int cpu)
{
	int nid;

	nid = numa_setup_cpu(cpu);
	verify_cpu_node_mapping(cpu, nid);
	return 0;
}

static int ppc_numa_cpu_dead(unsigned int cpu)
{
	return 0;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit. Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */
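	/*
	 * Example (illustrative numbers, not from the original source): with
	 * memblock_end_of_DRAM() at 4GB, a region starting at 3GB with size
	 * 2GB is truncated to 1GB, and a region starting at 5GB is discarded
	 * (the function returns 0).
	 */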
	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}

/*
 * Reads the counter for a given entry in the
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in the linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) pairs.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static int __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
				       const __be32 **usm,
				       void *data)
{
	unsigned int ranges, is_kexec_kdump = 0;
	unsigned long base, size, sz;
	int nid;

	/*
	 * Skip this block if the reserved bit is set in flags (0x80)
	 * or if the block is not assigned to this partition (0x8)
	 */
	if ((lmb->flags & DRCONF_MEM_RESERVED)
	    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
		return 0;

	if (*usm)
		is_kexec_kdump = 1;

	base = lmb->base_addr;
	size = drmem_lmb_size();
	ranges = 1;

	if (is_kexec_kdump) {
		ranges = read_usm_ranges(usm);
		if (!ranges) /* there are no (base, size) pairs */
			return 0;
	}

	do {
		if (is_kexec_kdump) {
			base = read_n_cells(n_mem_addr_cells, usm);
			size = read_n_cells(n_mem_size_cells, usm);
		}

		nid = get_nid_and_numa_distance(lmb);
		fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
					  &nid);
		node_set_online(nid);
		sz = numa_enforce_memory_limit(base, size);
		if (sz)
			memblock_set_node(base, sz, &memblock.memory, nid);
	} while (--ranges);

	return 0;
}

static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;
	const __be32 *associativity;

	if (numa_enabled == 0) {
		pr_warn("disabled by user\n");
		return -1;
	}

	primary_domain_index = find_primary_domain_index();

	if (primary_domain_index < 0) {
		/*
		 * If we fail to parse primary_domain_index from the device
		 * tree, mark NUMA as disabled and boot with NUMA disabled.
		 */
		numa_enabled = false;
		return primary_domain_index;
	}

	pr_debug("associativity depth for CPU/Memory: %d\n", primary_domain_index);

	/*
	 * If it is FORM2, initialize the distance table here.
	 */
	if (affinity_form == FORM2_AFFINITY)
		initialize_form2_numa_distance_lookup_table();

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		__be32 vphn_assoc[VPHN_ASSOC_BUFSIZE];
		struct device_node *cpu;
		int nid = NUMA_NO_NODE;

		memset(vphn_assoc, 0, VPHN_ASSOC_BUFSIZE * sizeof(__be32));

		if (__vphn_get_associativity(i, vphn_assoc) == 0) {
			nid = associativity_to_nid(vphn_assoc);
			initialize_form1_numa_distance(vphn_assoc);
		} else {

			/*
			 * Don't fall back to default_nid yet -- we will plug
			 * cpus into nodes once the memory scan has discovered
			 * the topology.
			 */
			cpu = of_get_cpu_node(i, NULL);
			BUG_ON(!cpu);

			associativity = of_get_associativity(cpu);
			if (associativity) {
				nid = associativity_to_nid(associativity);
				initialize_form1_numa_distance(associativity);
			}
			of_node_put(cpu);
		}

		/* node_set_online() is UB if 'nid' is negative */
		if (likely(nid >= 0))
			node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
					      "linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties. If none, then
		 * everything goes to default_nid.
		 */
		associativity = of_get_associativity(memory);
		if (associativity) {
			nid = associativity_to_nid(associativity);
			initialize_form1_numa_distance(associativity);
		} else
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		size = numa_enforce_memory_limit(start, size);
		if (size)
			memblock_set_node(start, size, &memblock.memory, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		walk_drmem_lmbs(memory, NULL, numa_setup_drmem_lmb);
		of_node_put(memory);
	}

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	int i;

	pr_debug("Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram);
	pr_debug("Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20);

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, nid);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (!numa_enabled)
		return;

	for_each_online_node(node) {
		pr_info("Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
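		/*
		 * Illustrative note (not from the original source): a node
		 * whose cpus are 0-7 and 16-23 is printed as " 0-7 16-23"
		 * after the "Node %d CPUs:" header; the loop below emits the
		 * first cpu of each contiguous run and closes the run with
		 * "-<last cpu>" once a gap is reached.
		 */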
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					     node_to_cpumask_map[node])) {
				if (count == 0)
					pr_cont(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					pr_cont("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			pr_cont("-%u", nr_cpu_ids - 1);
		pr_cont("\n");
	}
}

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
	u64 spanned_pages = end_pfn - start_pfn;
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	u64 nd_pa;
	void *nd;
	int tnid;

	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa)
		panic("Cannot allocate %zu bytes for node %d data\n",
		      nd_size, nid);

	nd = __va(nd_pa);

	/* report and initialize */
	pr_info(" NODE_DATA [mem %#010Lx-%#010Lx]\n",
		nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}

static void __init find_possible_nodes(void)
{
	struct device_node *rtas;
	const __be32 *domains = NULL;
	int prop_length, max_nodes;
	u32 i;

	if (!numa_enabled)
		return;

	rtas = of_find_node_by_path("/rtas");
	if (!rtas)
		return;

	/*
	 * ibm,current-associativity-domains is a fairly recent property. If
	 * it doesn't exist, then fall back to ibm,max-associativity-domains.
	 * Current denotes what the platform can support compared to max
	 * which denotes what the Hypervisor can support.
	 *
	 * If the LPAR is migratable, new nodes might be activated after an
	 * LPM, so we should consider the max number in that case.
	 */
	if (!of_get_property(of_root, "ibm,migratable-partition", NULL))
		domains = of_get_property(rtas,
					  "ibm,current-associativity-domains",
					  &prop_length);
	if (!domains) {
		domains = of_get_property(rtas, "ibm,max-associativity-domains",
					  &prop_length);
		if (!domains)
			goto out;
	}

	max_nodes = of_read_number(&domains[primary_domain_index], 1);
	pr_info("Partition configured for %d NUMA nodes.\n", max_nodes);

	for (i = 0; i < max_nodes; i++) {
		if (!node_possible(i))
			node_set(i, node_possible_map);
	}

	prop_length /= sizeof(int);
	if (prop_length > primary_domain_index + 2)
		coregroup_enabled = 1;

out:
	of_node_put(rtas);
}

void __init mem_topology_setup(void)
{
	int cpu;

	/*
	 * Linux/mm assumes node 0 to be online at boot. However this is not
	 * true on PowerPC, where node 0 is like any other node: it could be
	 * a cpuless, memoryless node. So force node 0 to be offline for now.
	 * This prevents a cpuless, memoryless node 0 from showing up
	 * unnecessarily as online. If a node has cpus or memory that need
	 * to be online, it will be marked online anyway.
	 */
	node_set_offline(0);

	if (parse_numa_properties())
		setup_nonnuma();

	/*
	 * Modify the set of possible NUMA nodes to reflect information
	 * available about the set of online nodes, and the set of nodes
	 * that we expect to make use of for this platform's affinity
	 * calculations.
	 */
	nodes_and(node_possible_map, node_possible_map, node_online_map);

	find_possible_nodes();

	setup_node_to_cpumask_map();

	reset_numa_cpu_lookup_table();

	for_each_possible_cpu(cpu) {
		/*
		 * Powerpc with CONFIG_NUMA always used to have a node 0,
		 * even if it was memoryless or cpuless. For all cpus that
		 * are possible but not present, cpu_to_node() would point
		 * to node 0. To remove a cpuless, memoryless dummy node,
		 * powerpc needs to make sure cpu_to_node() for all possible
		 * but not present cpus is set to a proper node.
		 */
		numa_setup_cpu(cpu);
	}
}

void __init initmem_init(void)
{
	int nid;

	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	memblock_dump_all();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		setup_node_data(nid, start_pfn, end_pfn);
	}

	sparse_init();

	/*
	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
	 * even before we online them, so that we can use cpu_to_{node,mem}
	 * early in boot, cf. smp_prepare_cpus().
	 * _nocalls() + manual invocation is used because cpuhp is not yet
	 * initialized for the boot CPU.
	 */
	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
{
	struct drmem_lmb *lmb;
	unsigned long lmb_size;
	int nid = NUMA_NO_NODE;

	lmb_size = drmem_lmb_size();

	for_each_drmem_lmb(lmb) {
		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((lmb->flags & DRCONF_MEM_RESERVED)
		    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < lmb->base_addr)
		    || (scn_addr >= (lmb->base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(lmb);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = NUMA_NO_NODE;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}

/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid;

	if (!numa_enabled)
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	struct device_node *dn = NULL;
	const __be64 *lrdr = NULL;

	dn = of_find_node_by_path("/rtas");
	if (dn) {
		lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
		of_node_put(dn);
		if (lrdr)
			return be64_to_cpup(lrdr);
	}

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		of_node_put(memory);
		return drmem_lmb_memory_max();
	}
	return 0;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
static int topology_inited;

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long vphn_get_associativity(unsigned long cpu,
				   __be32 *associativity)
{
	long rc;

	rc = hcall_vphn(get_hard_smp_processor_id(cpu),
			VPHN_FLAG_VCPU, associativity);

	switch (rc) {
	case H_SUCCESS:
		pr_debug("VPHN hcall succeeded. Reset polling...\n");
		goto out;

	case H_FUNCTION:
		pr_err_ratelimited("VPHN unsupported. Disabling polling...\n");
		break;
	case H_HARDWARE:
		pr_err_ratelimited("hcall_vphn() experienced a hardware fault "
				   "preventing VPHN. Disabling polling...\n");
		break;
	case H_PARAMETER:
		pr_err_ratelimited("hcall_vphn() was passed an invalid parameter. "
				   "Disabling polling...\n");
" 1415 "Disabling polling...\n"); 1416 break; 1417 default: 1418 pr_err_ratelimited("hcall_vphn() returned %ld. Disabling polling...\n" 1419 , rc); 1420 break; 1421 } 1422 out: 1423 return rc; 1424 } 1425 1426 int find_and_online_cpu_nid(int cpu) 1427 { 1428 __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0}; 1429 int new_nid; 1430 1431 /* Use associativity from first thread for all siblings */ 1432 if (vphn_get_associativity(cpu, associativity)) 1433 return cpu_to_node(cpu); 1434 1435 new_nid = associativity_to_nid(associativity); 1436 if (new_nid < 0 || !node_possible(new_nid)) 1437 new_nid = first_online_node; 1438 1439 if (NODE_DATA(new_nid) == NULL) { 1440 #ifdef CONFIG_MEMORY_HOTPLUG 1441 /* 1442 * Need to ensure that NODE_DATA is initialized for a node from 1443 * available memory (see memblock_alloc_try_nid). If unable to 1444 * init the node, then default to nearest node that has memory 1445 * installed. Skip onlining a node if the subsystems are not 1446 * yet initialized. 1447 */ 1448 if (!topology_inited || try_online_node(new_nid)) 1449 new_nid = first_online_node; 1450 #else 1451 /* 1452 * Default to using the nearest node that has memory installed. 1453 * Otherwise, it would be necessary to patch the kernel MM code 1454 * to deal with more memoryless-node error conditions. 1455 */ 1456 new_nid = first_online_node; 1457 #endif 1458 } 1459 1460 pr_debug("%s:%d cpu %d nid %d\n", __FUNCTION__, __LINE__, 1461 cpu, new_nid); 1462 return new_nid; 1463 } 1464 1465 int cpu_to_coregroup_id(int cpu) 1466 { 1467 __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0}; 1468 int index; 1469 1470 if (cpu < 0 || cpu > nr_cpu_ids) 1471 return -1; 1472 1473 if (!coregroup_enabled) 1474 goto out; 1475 1476 if (!firmware_has_feature(FW_FEATURE_VPHN)) 1477 goto out; 1478 1479 if (vphn_get_associativity(cpu, associativity)) 1480 goto out; 1481 1482 index = of_read_number(associativity, 1); 1483 if (index > primary_domain_index + 1) 1484 return of_read_number(&associativity[index - 1], 1); 1485 1486 out: 1487 return cpu_to_core_id(cpu); 1488 } 1489 1490 static int topology_update_init(void) 1491 { 1492 topology_inited = 1; 1493 return 0; 1494 } 1495 device_initcall(topology_update_init); 1496 #endif /* CONFIG_PPC_SPLPAR */ 1497