/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
        unsigned int node;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES)
                setup_nr_node_ids();

        /* allocate the map */
        for_each_node(node)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}
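/*
 * Illustrative example (hypothetical sizes): booting with
 * "numa=fake=1G,2G,4G" makes fake_numa_create_new_node() below carve
 * physical memory at the 1GiB, 2GiB and 4GiB boundaries, yielding fake
 * nodes 1, 2 and 3 on top of node 0. Each size is parsed with
 * memparse() and must not be smaller than the previous boundary, or no
 * further fake nodes are created.
 */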
static int __init fake_numa_create_new_node(unsigned long end_pfn,
                                            unsigned int *nid)
{
        unsigned long long mem;
        char *p = cmdline;
        static unsigned int fake_nid;
        static unsigned long long curr_boundary;

        /*
         * Modify the node id only if we have already started creating
         * NUMA nodes; we want to continue from where we left off last time.
         */
        if (fake_nid)
                *nid = fake_nid;
        /*
         * In case there are no more arguments to parse, the
         * node_id should be the same as the last fake node id
         * (we've handled this above).
         */
        if (!p)
                return 0;

        mem = memparse(p, &p);
        if (!mem)
                return 0;

        if (mem < curr_boundary)
                return 0;

        curr_boundary = mem;

        if ((end_pfn << PAGE_SHIFT) > mem) {
                /*
                 * Skip commas and spaces
                 */
                while (*p == ',' || *p == ' ' || *p == '\t')
                        p++;

                cmdline = p;
                fake_nid++;
                *nid = fake_nid;
                dbg("created new fake_node with id %d\n", fake_nid);
                return 1;
        }
        return 0;
}

static void reset_numa_cpu_lookup_table(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                numa_cpu_lookup_table[cpu] = -1;
}

static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
        numa_cpu_lookup_table[cpu] = node;
}

static void map_cpu_to_node(int cpu, int node)
{
        update_numa_cpu_lookup_table(cpu, node);

        dbg("adding cpu %d to node %d\n", cpu, node);

        if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
                cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
        int node = numa_cpu_lookup_table[cpu];

        dbg("removing cpu %lu from node %d\n", cpu, node);

        if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
                cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
        } else {
                printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
                       cpu, node);
        }
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
        return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools).
 */
static const __be32 *of_get_usable_memory(struct device_node *memory)
{
        const __be32 *prop;
        u32 len;
        prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return NULL;
        return prop;
}

int __node_distance(int a, int b)
{
        int i;
        int distance = LOCAL_DISTANCE;

        if (!form1_affinity)
                return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

        for (i = 0; i < distance_ref_points_depth; i++) {
                if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
                        break;

                /* Double the distance for each NUMA level */
                distance *= 2;
        }

        return distance;
}
EXPORT_SYMBOL(__node_distance);

static void initialize_distance_lookup_table(int nid,
                const __be32 *associativity)
{
        int i;

        if (!form1_affinity)
                return;

        for (i = 0; i < distance_ref_points_depth; i++) {
                const __be32 *entry;

                entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
                distance_lookup_table[nid][i] = of_read_number(entry, 1);
        }
}
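/*
 * Worked example (hypothetical values) for __node_distance() above,
 * assuming a distance_ref_points_depth of 2: two nodes whose
 * distance_lookup_table entries match at the first reference point are
 * LOCAL_DISTANCE (10) apart; nodes that only match at the second point
 * are 20 apart; nodes that match at neither are 40 apart. Each level
 * without a match doubles the distance.
 */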
/*
 * Returns the nid in the range [0..MAX_NUMNODES-1], or -1 if no useful
 * NUMA information is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
        int nid = -1;

        if (min_common_depth == -1)
                goto out;

        if (of_read_number(associativity, 1) >= min_common_depth)
                nid = of_read_number(&associativity[min_common_depth], 1);

        /* POWER4 LPAR uses 0xffff as invalid node */
        if (nid == 0xffff || nid >= MAX_NUMNODES)
                nid = -1;

        if (nid > 0 &&
            of_read_number(associativity, 1) >= distance_ref_points_depth) {
                /*
                 * Skip the length field and send start of associativity array
                 */
                initialize_distance_lookup_table(nid, associativity + 1);
        }

out:
        return nid;
}

/*
 * Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
        int nid = -1;
        const __be32 *tmp;

        tmp = of_get_associativity(device);
        if (tmp)
                nid = associativity_to_nid(tmp);
        return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
        int nid = -1;

        of_node_get(device);
        while (device) {
                nid = of_node_to_nid_single(device);
                if (nid != -1)
                        break;

                device = of_get_next_parent(device);
        }
        of_node_put(device);

        return nid;
}
EXPORT_SYMBOL(of_node_to_nid);
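/*
 * Illustrative device tree fragment (hypothetical values) for form 1
 * affinity:
 *
 *      ibm,associativity-reference-points = <0x4 0x2>;
 *
 * find_min_common_depth() below would return 4, so a device with
 * ibm,associativity = <4 0 0 0 1> (the first cell is the length) is
 * placed on node 1 by associativity_to_nid() above. With form 0
 * affinity the second reference point would be used instead.
 */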
static int __init find_min_common_depth(void)
{
        int depth;
        struct device_node *root;

        if (firmware_has_feature(FW_FEATURE_OPAL))
                root = of_find_node_by_path("/ibm,opal");
        else
                root = of_find_node_by_path("/rtas");
        if (!root)
                root = of_find_node_by_path("/");

        /*
         * This property is a set of 32-bit integers, each representing
         * an index into the ibm,associativity nodes.
         *
         * With form 0 affinity the first integer is for an SMP configuration
         * (should be all 0's) and the second is for a normal NUMA
         * configuration. We have only one level of NUMA.
         *
         * With form 1 affinity the first integer is the most significant
         * NUMA boundary and the following are progressively less significant
         * boundaries. There can be more than one level of NUMA.
         */
        distance_ref_points = of_get_property(root,
                                        "ibm,associativity-reference-points",
                                        &distance_ref_points_depth);

        if (!distance_ref_points) {
                dbg("NUMA: ibm,associativity-reference-points not found.\n");
                goto err;
        }

        distance_ref_points_depth /= sizeof(int);

        if (firmware_has_feature(FW_FEATURE_OPAL) ||
            firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
                dbg("Using form 1 affinity\n");
                form1_affinity = 1;
        }

        if (form1_affinity) {
                depth = of_read_number(distance_ref_points, 1);
        } else {
                if (distance_ref_points_depth < 2) {
                        printk(KERN_WARNING "NUMA: "
                                "short ibm,associativity-reference-points\n");
                        goto err;
                }

                depth = of_read_number(&distance_ref_points[1], 1);
        }

        /*
         * Warn and cap if the hardware supports more than
         * MAX_DISTANCE_REF_POINTS domains.
         */
        if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
                printk(KERN_WARNING "NUMA: distance array capped at "
                        "%d entries\n", MAX_DISTANCE_REF_POINTS);
                distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
        }

        of_node_put(root);
        return depth;

err:
        of_node_put(root);
        return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
        struct device_node *memory = NULL;

        memory = of_find_node_by_type(memory, "memory");
        if (!memory)
                panic("numa.c: No memory nodes found!");

        *n_addr_cells = of_n_addr_cells(memory);
        *n_size_cells = of_n_size_cells(memory);
        of_node_put(memory);
}

static unsigned long read_n_cells(int n, const __be32 **buf)
{
        unsigned long result = 0;

        while (n--) {
                result = (result << 32) | of_read_number(*buf, 1);
                (*buf)++;
        }
        return result;
}

/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
{
        const __be32 *cp;

        drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

        cp = *cellp;
        drmem->drc_index = of_read_number(cp, 1);
        drmem->reserved = of_read_number(&cp[1], 1);
        drmem->aa_index = of_read_number(&cp[2], 1);
        drmem->flags = of_read_number(&cp[3], 1);

        *cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a number N indicating
 * the number of entries, followed by N memblock list entries. Each memblock
 * list entry contains information as laid out in the of_drconf_cell struct.
 */
static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
{
        const __be32 *prop;
        u32 len, entries;

        prop = of_get_property(memory, "ibm,dynamic-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;

        entries = of_read_number(prop++, 1);

        /* Now that we know the number of entries, revalidate the size
         * of the property read in to ensure we have everything
         */
        if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
                return 0;

        *dm = prop;
        return entries;
}

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
        const __be32 *prop;
        u32 len;

        prop = of_get_property(memory, "ibm,lmb-size", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;

        return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
        u32 n_arrays;
        u32 array_sz;
        const __be32 *arrays;
};
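/*
 * Illustrative layout (hypothetical values) of the property parsed by
 * of_get_assoc_arrays() below, with two arrays of two domains each:
 *
 *      ibm,associativity-lookup-arrays = <2 2  0 0  0 1>;
 *
 * An LMB whose aa_index is 1 selects the second array <0 1>; with a
 * min_common_depth of 2, of_drconf_to_nid_single() reads the array's
 * second cell and places the LMB on node 1.
 */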
/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
                               struct assoc_arrays *aa)
{
        const __be32 *prop;
        u32 len;

        prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
        if (!prop || len < 2 * sizeof(unsigned int))
                return -1;

        aa->n_arrays = of_read_number(prop++, 1);
        aa->array_sz = of_read_number(prop++, 1);

        /* Now that we know the number of arrays and size of each array,
         * revalidate the size of the property read in.
         */
        if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
                return -1;

        aa->arrays = prop;
        return 0;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
                                   struct assoc_arrays *aa)
{
        int default_nid = 0;
        int nid = default_nid;
        int index;

        if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
            !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
            drmem->aa_index < aa->n_arrays) {
                index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
                nid = of_read_number(&aa->arrays[index], 1);

                if (nid == 0xffff || nid >= MAX_NUMNODES)
                        nid = default_nid;

                if (nid > 0) {
                        index = drmem->aa_index * aa->array_sz;
                        initialize_distance_lookup_table(nid,
                                                        &aa->arrays[index]);
                }
        }

        return nid;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
        int nid = -1;
        struct device_node *cpu;

        /*
         * If a valid cpu-to-node mapping is already available, use it
         * directly instead of querying the firmware, since it represents
         * the most recent mapping notified to us by the platform (eg: VPHN).
         */
        if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
                map_cpu_to_node(lcpu, nid);
                return nid;
        }

        cpu = of_get_cpu_node(lcpu, NULL);

        if (!cpu) {
                WARN_ON(1);
                if (cpu_present(lcpu))
                        goto out_present;
                else
                        goto out;
        }

        nid = of_node_to_nid_single(cpu);

out_present:
        if (nid < 0 || !node_online(nid))
                nid = first_online_node;

        map_cpu_to_node(lcpu, nid);
        of_node_put(cpu);
out:
        return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
        int base, sibling, i;

        /* Verify that all the threads in the core belong to the same node */
        base = cpu_first_thread_sibling(cpu);

        for (i = 0; i < threads_per_core; i++) {
                sibling = base + i;

                if (sibling == cpu || cpu_is_offline(sibling))
                        continue;

                if (cpu_to_node(sibling) != node) {
                        WARN(1, "CPU thread siblings %d and %d don't belong"
                                " to the same node!\n", cpu, sibling);
                        break;
                }
        }
}

/* Must run before sched domains notifier. */
static int ppc_numa_cpu_prepare(unsigned int cpu)
{
        int nid;

        nid = numa_setup_cpu(cpu);
        verify_cpu_node_mapping(cpu, nid);
        return 0;
}

static int ppc_numa_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
        unmap_cpu_from_node(cpu);
#endif
        return 0;
}
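/*
 * Worked example (hypothetical values) for the helper below: if usable
 * memory ends at 4GiB (e.g. after booting with mem=4G), a region
 * starting at 3.5GiB with a size of 1GiB is truncated to 512MiB, and a
 * region starting at or above 4GiB is discarded entirely.
 */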
/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                      unsigned long size)
{
        /*
         * We use memblock_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
         * having memory holes below the limit. Also, in the case of
         * iommu_is_off, memory_limit is not set but is implicitly enforced.
         */

        if (start + size <= memblock_end_of_DRAM())
                return size;

        if (start >= memblock_end_of_DRAM())
                return 0;

        return memblock_end_of_DRAM() - start;
}

/*
 * Reads the counter for a given entry in the
 * linux,drconf-usable-memory property.
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
        /*
         * For each lmb in ibm,dynamic-memory, a corresponding
         * entry in the linux,drconf-usable-memory property contains
         * a counter followed by that many (base, size) pairs.
         * Read the counter from linux,drconf-usable-memory.
         */
        return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
        const __be32 *uninitialized_var(dm), *usm;
        unsigned int n, rc, ranges, is_kexec_kdump = 0;
        unsigned long lmb_size, base, size, sz;
        int nid;
        struct assoc_arrays aa = { .arrays = NULL };

        n = of_get_drconf_memory(memory, &dm);
        if (!n)
                return;

        lmb_size = of_get_lmb_size(memory);
        if (!lmb_size)
                return;

        rc = of_get_assoc_arrays(memory, &aa);
        if (rc)
                return;

        /* check if this is a kexec/kdump kernel */
        usm = of_get_usable_memory(memory);
        if (usm != NULL)
                is_kexec_kdump = 1;

        for (; n != 0; --n) {
                struct of_drconf_cell drmem;

                read_drconf_cell(&drmem, &dm);

                /*
                 * Skip this block if the reserved bit is set in flags (0x80)
                 * or if the block is not assigned to this partition (0x8).
                 */
                if ((drmem.flags & DRCONF_MEM_RESERVED)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;

                base = drmem.base_addr;
                size = lmb_size;
                ranges = 1;

                if (is_kexec_kdump) {
                        ranges = read_usm_ranges(&usm);
                        if (!ranges) /* there are no (base, size) pairs */
                                continue;
                }
                do {
                        if (is_kexec_kdump) {
                                base = read_n_cells(n_mem_addr_cells, &usm);
                                size = read_n_cells(n_mem_size_cells, &usm);
                        }
                        nid = of_drconf_to_nid_single(&drmem, &aa);
                        fake_numa_create_new_node(
                                                ((base + size) >> PAGE_SHIFT),
                                                   &nid);
                        node_set_online(nid);
                        sz = numa_enforce_memory_limit(base, size);
                        if (sz)
                                memblock_set_node(base, sz,
                                                  &memblock.memory, nid);
                } while (--ranges);
        }
}
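/*
 * Illustrative walk (hypothetical values) of parse_drconf_memory()
 * above: with a 256MiB LMB size, an ibm,dynamic-memory entry at base
 * 0x10000000 whose aa_index selects a lookup array naming node 1 marks
 * node 1 online and hands [0x10000000, 0x20000000) to memblock for
 * that node; entries with DRCONF_MEM_RESERVED set or DRCONF_MEM_ASSIGNED
 * clear are skipped.
 */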
static int __init parse_numa_properties(void)
{
        struct device_node *memory;
        int default_nid = 0;
        unsigned long i;

        if (numa_enabled == 0) {
                printk(KERN_WARNING "NUMA disabled by user\n");
                return -1;
        }

        min_common_depth = find_min_common_depth();

        if (min_common_depth < 0)
                return min_common_depth;

        dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

        /*
         * Even though we connect cpus to numa domains later in SMP
         * init, we need to know the node ids now. This is because
         * each node to be onlined must have NODE_DATA etc backing it.
         */
        for_each_present_cpu(i) {
                struct device_node *cpu;
                int nid;

                cpu = of_get_cpu_node(i, NULL);
                BUG_ON(!cpu);
                nid = of_node_to_nid_single(cpu);
                of_node_put(cpu);

                /*
                 * Don't fall back to default_nid yet -- we will plug
                 * cpus into nodes once the memory scan has discovered
                 * the topology.
                 */
                if (nid < 0)
                        continue;
                node_set_online(nid);
        }

        get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

        for_each_node_by_type(memory, "memory") {
                unsigned long start;
                unsigned long size;
                int nid;
                int ranges;
                const __be32 *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory,
                        "linux,usable-memory", &len);
                if (!memcell_buf || len <= 0)
                        memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
                /* these are order-sensitive, and modify the buffer pointer */
                start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                size = read_n_cells(n_mem_size_cells, &memcell_buf);

                /*
                 * Assumption: either all memory nodes or none will
                 * have associativity properties. If none, then
                 * everything goes to default_nid.
                 */
                nid = of_node_to_nid_single(memory);
                if (nid < 0)
                        nid = default_nid;

                fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
                node_set_online(nid);

                size = numa_enforce_memory_limit(start, size);
                if (size)
                        memblock_set_node(start, size, &memblock.memory, nid);

                if (--ranges)
                        goto new_range;
        }

        /*
         * Now do the same thing for each MEMBLOCK listed in the
         * ibm,dynamic-memory property in the
         * ibm,dynamic-reconfiguration-memory node.
         */
        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory)
                parse_drconf_memory(memory);

        return 0;
}

static void __init setup_nonnuma(void)
{
        unsigned long top_of_ram = memblock_end_of_DRAM();
        unsigned long total_ram = memblock_phys_mem_size();
        unsigned long start_pfn, end_pfn;
        unsigned int nid = 0;
        struct memblock_region *reg;

        printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);

        for_each_memblock(memory, reg) {
                start_pfn = memblock_region_memory_base_pfn(reg);
                end_pfn = memblock_region_memory_end_pfn(reg);

                fake_numa_create_new_node(end_pfn, &nid);
                memblock_set_node(PFN_PHYS(start_pfn),
                                  PFN_PHYS(end_pfn - start_pfn),
                                  &memblock.memory, nid);
                node_set_online(nid);
        }
}
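/*
 * dump_numa_cpu_topology() below prints one line per online node,
 * folding consecutive CPU ids into ranges, e.g. (illustrative):
 *
 *      Node 0 CPUs: 0-31
 *      Node 1 CPUs: 32-63
 */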
void __init dump_numa_cpu_topology(void)
{
        unsigned int node;
        unsigned int cpu, count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                pr_info("Node %d CPUs:", node);

                count = 0;
                /*
                 * If we used a CPU iterator here we would miss printing
                 * the holes in the cpumap.
                 */
                for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
                        if (cpumask_test_cpu(cpu,
                                        node_to_cpumask_map[node])) {
                                if (count == 0)
                                        pr_cont(" %u", cpu);
                                ++count;
                        } else {
                                if (count > 1)
                                        pr_cont("-%u", cpu - 1);
                                count = 0;
                        }
                }

                if (count > 1)
                        pr_cont("-%u", nr_cpu_ids - 1);
                pr_cont("\n");
        }
}

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
        u64 spanned_pages = end_pfn - start_pfn;
        const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
        u64 nd_pa;
        void *nd;
        int tnid;

        nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
        nd = __va(nd_pa);

        /* report and initialize */
        pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
                nd_pa, nd_pa + nd_size - 1);
        tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
        if (tnid != nid)
                pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);

        node_data[nid] = nd;
        memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
        NODE_DATA(nid)->node_id = nid;
        NODE_DATA(nid)->node_start_pfn = start_pfn;
        NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}

void __init initmem_init(void)
{
        int nid, cpu;

        max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn;

        if (parse_numa_properties())
                setup_nonnuma();

        memblock_dump_all();

        /*
         * Reduce the possible NUMA nodes to the online NUMA nodes,
         * since we do not support node hotplug. This ensures that we
         * lower the maximum NUMA node ID to what is actually present.
         */
        nodes_and(node_possible_map, node_possible_map, node_online_map);

        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn;

                get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
                setup_node_data(nid, start_pfn, end_pfn);
                sparse_memory_present_with_active_regions(nid);
        }

        sparse_init();

        setup_node_to_cpumask_map();

        reset_numa_cpu_lookup_table();

        /*
         * We need the numa_cpu_lookup_table to be accurate for all CPUs,
         * even before we online them, so that we can use cpu_to_{node,mem}
         * early in boot, cf. smp_prepare_cpus().
         * _nocalls() + manual invocation is used because cpuhp is not yet
         * initialized for the boot CPU.
         */
        cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
                                  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
        for_each_present_cpu(cpu)
                numa_setup_cpu(cpu);
}

static int __init early_numa(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "off"))
                numa_enabled = 0;

        if (strstr(p, "debug"))
                numa_debug = 1;

        p = strstr(p, "fake=");
        if (p)
                cmdline = p + strlen("fake=");

        return 0;
}
early_param("numa", early_numa);

static bool topology_updates_enabled = true;

static int __init early_topology_updates(char *p)
{
        if (!p)
                return 0;

        if (!strcmp(p, "off")) {
                pr_info("Disabling topology updates\n");
                topology_updates_enabled = false;
        }

        return 0;
}
early_param("topology_updates", early_topology_updates);
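/*
 * Usage note (illustrative): booting with "topology_updates=off"
 * prevents topology_update_init() below from starting VPHN/PRRN
 * polling, while "numa=off", "numa=debug" and "numa=fake=<sizes>" are
 * handled by early_numa() above.
 */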
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
                                     unsigned long scn_addr)
{
        const __be32 *dm;
        unsigned int drconf_cell_cnt, rc;
        unsigned long lmb_size;
        struct assoc_arrays aa;
        int nid = -1;

        drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
        if (!drconf_cell_cnt)
                return -1;

        lmb_size = of_get_lmb_size(memory);
        if (!lmb_size)
                return -1;

        rc = of_get_assoc_arrays(memory, &aa);
        if (rc)
                return -1;

        for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
                struct of_drconf_cell drmem;

                read_drconf_cell(&drmem, &dm);

                /*
                 * Skip this block if it is reserved or not assigned to
                 * this partition.
                 */
                if ((drmem.flags & DRCONF_MEM_RESERVED)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;

                if ((scn_addr < drmem.base_addr)
                    || (scn_addr >= (drmem.base_addr + lmb_size)))
                        continue;

                nid = of_drconf_to_nid_single(&drmem, &aa);
                break;
        }

        return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory;
        int nid = -1;

        for_each_node_by_type(memory, "memory") {
                unsigned long start, size;
                int ranges;
                const __be32 *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

                while (ranges--) {
                        start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                        size = read_n_cells(n_mem_size_cells, &memcell_buf);

                        if ((scn_addr < start) || (scn_addr >= (start + size)))
                                continue;

                        nid = of_node_to_nid_single(memory);
                        break;
                }

                if (nid >= 0)
                        break;
        }

        of_node_put(memory);

        return nid;
}
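/*
 * Illustrative example (hypothetical values, assuming one address cell
 * and one size cell): a device tree node memory@20000000 with
 *
 *      reg = <0x20000000 0x10000000>;
 *
 * covers [0x20000000, 0x30000000), so a hot added section at
 * 0x28000000 resolves to that node's associativity in the helper above.
 */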
/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not a memblock. It is assumed
 * that sections are fully contained within a single memblock.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory = NULL;
        int nid;

        if (!numa_enabled || (min_common_depth < 0))
                return first_online_node;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
                of_node_put(memory);
        } else {
                nid = hot_add_node_scn_to_nid(scn_addr);
        }

        if (nid < 0 || !node_possible(nid))
                nid = first_online_node;

        return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
        struct device_node *memory = NULL;
        struct device_node *dn = NULL;
        unsigned int drconf_cell_cnt = 0;
        u64 lmb_size = 0;
        const __be32 *dm = NULL;
        const __be64 *lrdr = NULL;
        struct of_drconf_cell drmem;

        dn = of_find_node_by_path("/rtas");
        if (dn) {
                lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
                of_node_put(dn);
                if (lrdr)
                        return be64_to_cpup(lrdr);
        }

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
                lmb_size = of_get_lmb_size(memory);

                /* Advance to the last cell, each cell has 6 32-bit integers */
                dm += (drconf_cell_cnt - 1) * 6;
                read_drconf_cell(&drmem, &dm);
                of_node_put(memory);
                return drmem.base_addr + lmb_size;
        }
        return 0;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
        return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR

#include "vphn.h"

struct topology_update_data {
        struct topology_update_data *next;
        unsigned int cpu;
        int old_nid;
        int new_nid;
};

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);

/*
 * Snapshot the hypervisor's current values of the associativity change
 * counters for each cpu, so that later polling can detect changes.
 */
static void setup_cpu_associativity_change_counters(void)
{
        int cpu;

        /* The VPHN feature supports a maximum of 8 reference points */
        BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

        for_each_possible_cpu(cpu) {
                int i;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

                for (i = 0; i < distance_ref_points_depth; i++)
                        counts[i] = hypervisor_counts[i];
        }
}
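/*
 * Illustrative example (hypothetical counts): if the snapshot taken
 * above holds {3, 7} for a cpu and the hypervisor's VPA later reports
 * {3, 8}, the second associativity level has changed, and the polling
 * code below flags that cpu and its siblings for a topology update.
 */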
/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
        int cpu;
        cpumask_t *changes = &cpu_associativity_changes_mask;

        for_each_possible_cpu(cpu) {
                int i, changed = 0;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

                for (i = 0; i < distance_ref_points_depth; i++) {
                        if (hypervisor_counts[i] != counts[i]) {
                                counts[i] = hypervisor_counts[i];
                                changed = 1;
                        }
                }
                if (changed) {
                        cpumask_or(changes, changes, cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                }
        }

        return cpumask_weight(changes);
}

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, __be32 *associativity)
{
        long rc;
        long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
        u64 flags = 1;
        int hwcpu = get_hard_smp_processor_id(cpu);

        rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
        vphn_unpack_associativity(retbuf, associativity);

        return rc;
}

static long vphn_get_associativity(unsigned long cpu,
                                   __be32 *associativity)
{
        long rc;

        rc = hcall_vphn(cpu, associativity);

        switch (rc) {
        case H_FUNCTION:
                printk(KERN_INFO
                        "VPHN is not supported. Disabling polling...\n");
                stop_topology_update();
                break;
        case H_HARDWARE:
                printk(KERN_ERR
                        "hcall_vphn() experienced a hardware fault "
                        "preventing VPHN. Disabling polling...\n");
                stop_topology_update();
        }

        return rc;
}

/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
        struct topology_update_data *update;
        unsigned long cpu;

        if (!data)
                return -EINVAL;

        cpu = smp_processor_id();

        for (update = data; update; update = update->next) {
                int new_nid = update->new_nid;
                if (cpu != update->cpu)
                        continue;

                unmap_cpu_from_node(cpu);
                map_cpu_to_node(cpu, new_nid);
                set_cpu_numa_node(cpu, new_nid);
                set_cpu_numa_mem(cpu, local_memory_node(new_nid));
                vdso_getcpu_init();
        }

        return 0;
}

static int update_lookup_table(void *data)
{
        struct topology_update_data *update;

        if (!data)
                return -EINVAL;

        /*
         * Upon topology update, the numa-cpu lookup table needs to be updated
         * for all threads in the core, including offline CPUs, to ensure that
         * future hotplug operations respect the cpu-to-node associativity
         * properly.
         */
        for (update = data; update; update = update->next) {
                int nid, base, j;

                nid = update->new_nid;
                base = cpu_first_thread_sibling(update->cpu);

                for (j = 0; j < threads_per_core; j++) {
                        update_numa_cpu_lookup_table(base + j, nid);
                }
        }

        return 0;
}
/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 *
 * cpus_locked says whether we already hold cpu_hotplug_lock.
 */
int numa_update_cpu_topology(bool cpus_locked)
{
        unsigned int cpu, sibling, changed = 0;
        struct topology_update_data *updates, *ud;
        __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
        cpumask_t updated_cpus;
        struct device *dev;
        int weight, new_nid, i = 0;

        if (!prrn_enabled && !vphn_enabled)
                return 0;

        weight = cpumask_weight(&cpu_associativity_changes_mask);
        if (!weight)
                return 0;

        updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
        if (!updates)
                return 0;

        cpumask_clear(&updated_cpus);

        for_each_cpu(cpu, &cpu_associativity_changes_mask) {
                /*
                 * If siblings aren't flagged for changes, the updates list
                 * will be too short. Skip this cpu on this pass and flag
                 * its siblings so they are handled on the next update.
                 */
                if (!cpumask_subset(cpu_sibling_mask(cpu),
                                        &cpu_associativity_changes_mask)) {
                        pr_info("Sibling bits not set for associativity "
                                        "change, cpu%d\n", cpu);
                        cpumask_or(&cpu_associativity_changes_mask,
                                        &cpu_associativity_changes_mask,
                                        cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                        continue;
                }

                /* Use associativity from first thread for all siblings */
                vphn_get_associativity(cpu, associativity);
                new_nid = associativity_to_nid(associativity);
                if (new_nid < 0 || !node_online(new_nid))
                        new_nid = first_online_node;

                if (new_nid == numa_cpu_lookup_table[cpu]) {
                        cpumask_andnot(&cpu_associativity_changes_mask,
                                        &cpu_associativity_changes_mask,
                                        cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                        continue;
                }

                for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
                        ud = &updates[i++];
                        ud->cpu = sibling;
                        ud->new_nid = new_nid;
                        ud->old_nid = numa_cpu_lookup_table[sibling];
                        cpumask_set_cpu(sibling, &updated_cpus);
                        if (i < weight)
                                ud->next = &updates[i];
                }
                cpu = cpu_last_thread_sibling(cpu);
        }

        pr_debug("Topology update for the following CPUs:\n");
        if (cpumask_weight(&updated_cpus)) {
                for (ud = &updates[0]; ud; ud = ud->next) {
                        pr_debug("cpu %d moving from node %d "
                                          "to %d\n", ud->cpu,
                                          ud->old_nid, ud->new_nid);
                }
        }

        /*
         * In cases where we have nothing to update (because the updates list
         * is too short or because the new topology is the same as the old
         * one), skip invoking update_cpu_topology() via stop-machine(). This
         * is necessary (and not just a fast-path optimization) since
         * stop-machine can end up electing a random CPU to run
         * update_cpu_topology(), and thus trick us into setting up incorrect
         * cpu-node mappings (since 'updates' is kzalloc()'ed).
         *
         * For the same reason, we skip all of the updating below as well.
         */
        if (!cpumask_weight(&updated_cpus))
                goto out;

        if (cpus_locked)
                stop_machine_cpuslocked(update_cpu_topology, &updates[0],
                                        &updated_cpus);
        else
                stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

        /*
         * Update the numa-cpu lookup table with the new mappings, even for
         * offline CPUs. It is best to perform this update from the stop-
         * machine context.
         */
        if (cpus_locked)
                stop_machine_cpuslocked(update_lookup_table, &updates[0],
                                        cpumask_of(raw_smp_processor_id()));
        else
                stop_machine(update_lookup_table, &updates[0],
                             cpumask_of(raw_smp_processor_id()));

        for (ud = &updates[0]; ud; ud = ud->next) {
                unregister_cpu_under_node(ud->cpu, ud->old_nid);
                register_cpu_under_node(ud->cpu, ud->new_nid);

                dev = get_cpu_device(ud->cpu);
                if (dev)
                        kobject_uevent(&dev->kobj, KOBJ_CHANGE);
                cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
                changed = 1;
        }

out:
        kfree(updates);
        return changed;
}

int arch_update_cpu_topology(void)
{
        lockdep_assert_cpus_held();
        return numa_update_cpu_topology(true);
}

static void topology_work_fn(struct work_struct *work)
{
        rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

static void topology_schedule_update(void)
{
        schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
        if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
                topology_schedule_update();
        else if (vphn_enabled) {
                if (update_cpu_associativity_changes_mask() > 0)
                        topology_schedule_update();
                reset_topology_timer();
        }
}
static struct timer_list topology_timer =
        TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void reset_topology_timer(void)
{
        topology_timer.data = 0;
        topology_timer.expires = jiffies + 60 * HZ;
        mod_timer(&topology_timer, topology_timer.expires);
}

#ifdef CONFIG_SMP

static void stage_topology_update(int core_id)
{
        cpumask_or(&cpu_associativity_changes_mask,
                &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
        reset_topology_timer();
}

static int dt_update_callback(struct notifier_block *nb,
                                unsigned long action, void *data)
{
        struct of_reconfig_data *update = data;
        int rc = NOTIFY_DONE;

        switch (action) {
        case OF_RECONFIG_UPDATE_PROPERTY:
                if (!of_prop_cmp(update->dn->type, "cpu") &&
                    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
                        u32 core_id;
                        of_property_read_u32(update->dn, "reg", &core_id);
                        stage_topology_update(core_id);
                        rc = NOTIFY_OK;
                }
                break;
        }

        return rc;
}

static struct notifier_block dt_update_nb = {
        .notifier_call = dt_update_callback,
};

#endif
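/*
 * Usage note (illustrative): when VPHN polling is active, the deferrable
 * timer above re-arms itself every 60 seconds (jiffies + 60 * HZ).
 * Polling can also be toggled at run time through the procfs interface
 * created below, e.g.:
 *
 *      echo off > /proc/powerpc/topology_updates
 */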
/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
        int rc = 0;

        if (firmware_has_feature(FW_FEATURE_PRRN)) {
                if (!prrn_enabled) {
                        prrn_enabled = 1;
                        vphn_enabled = 0;
#ifdef CONFIG_SMP
                        rc = of_reconfig_notifier_register(&dt_update_nb);
#endif
                }
        } else if (firmware_has_feature(FW_FEATURE_VPHN) &&
                   lppaca_shared_proc(get_lppaca())) {
                if (!vphn_enabled) {
                        prrn_enabled = 0;
                        vphn_enabled = 1;
                        setup_cpu_associativity_change_counters();
                        init_timer_deferrable(&topology_timer);
                        reset_topology_timer();
                }
        }

        return rc;
}

/*
 * Disable polling for associativity changes (PRRN or VPHN).
 */
int stop_topology_update(void)
{
        int rc = 0;

        if (prrn_enabled) {
                prrn_enabled = 0;
#ifdef CONFIG_SMP
                rc = of_reconfig_notifier_unregister(&dt_update_nb);
#endif
        } else if (vphn_enabled) {
                vphn_enabled = 0;
                rc = del_timer_sync(&topology_timer);
        }

        return rc;
}

int prrn_is_enabled(void)
{
        return prrn_enabled;
}

static int topology_read(struct seq_file *file, void *v)
{
        if (vphn_enabled || prrn_enabled)
                seq_puts(file, "on\n");
        else
                seq_puts(file, "off\n");

        return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
        return single_open(file, topology_read, NULL);
}

static ssize_t topology_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *off)
{
        char kbuf[4]; /* "on" or "off" plus null. */
        int read_len;

        read_len = count < 3 ? count : 3;
        if (copy_from_user(kbuf, buf, read_len))
                return -EINVAL;

        kbuf[read_len] = '\0';

        if (!strncmp(kbuf, "on", 2))
                start_topology_update();
        else if (!strncmp(kbuf, "off", 3))
                stop_topology_update();
        else
                return -EINVAL;

        return count;
}

static const struct file_operations topology_ops = {
        .read = seq_read,
        .write = topology_write,
        .open = topology_open,
        .release = single_release
};

static int topology_update_init(void)
{
        /* Do not poll for changes if disabled at boot */
        if (topology_updates_enabled)
                start_topology_update();

        if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
                return -ENOMEM;

        return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */