/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/system.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const unsigned int *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

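/*
 * Support for the "numa=fake=<size>[,<size>...]" command line option (see
 * early_numa() near the end of this file).  Each value parsed by memparse()
 * is an ascending address boundary: whenever the memory being registered
 * extends past the next boundary, a new fake node id is handed out through
 * *nid.  Returns 1 if a new fake node was created, 0 otherwise.
 */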
static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id only if we have started creating NUMA nodes;
	 * we want to continue from where we left off last time.
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}

/*
 * get_active_region_work_fn - A helper function for get_node_active_region
 * Returns datax set to the start_pfn and end_pfn if they contain
 * the initial value of datax->start_pfn between them
 * @start_pfn: start page (inclusive) of region to check
 * @end_pfn: end page (exclusive) of region to check
 * @datax: comes in with ->start_pfn set to value to search for and
 *	goes out with active range if it contains it
 * Returns 1 if search value is in range else 0
 */
static int __init get_active_region_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
{
	struct node_active_region *data;

	data = (struct node_active_region *)datax;

	if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
		data->start_pfn = start_pfn;
		data->end_pfn = end_pfn;
		return 1;
	}
	return 0;
}

/*
 * get_node_active_region - Return active region containing start_pfn
 * Active range returned is empty if none found.
 * @start_pfn: The page to return the region for.
 * @node_ar: Returned set to the active region containing start_pfn
 */
static void __init get_node_active_region(unsigned long start_pfn,
					  struct node_active_region *node_ar)
{
	int nid = early_pfn_to_nid(start_pfn);

	node_ar->nid = nid;
	node_ar->start_pfn = start_pfn;
	node_ar->end_pfn = start_pfn;
	work_with_active_regions(nid, get_active_region_work_fn, node_ar);
}

static void map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const u32 *of_get_usable_memory(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;
	return prop;
}

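/*
 * With form 1 affinity the distance between two nodes is derived from their
 * associativity arrays: starting at LOCAL_DISTANCE, the distance doubles for
 * each leading reference point at which the two nodes' domains differ,
 * stopping at the first level they share.  Without form 1 affinity every
 * pair of nodes is reported as LOCAL_DISTANCE apart.
 */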
int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (!form1_affinity)
		return distance;

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}

static void initialize_distance_lookup_table(int nid,
		const unsigned int *associativity)
{
	int i;

	if (!form1_affinity)
		return;

	for (i = 0; i < distance_ref_points_depth; i++) {
		distance_lookup_table[nid][i] =
			associativity[distance_ref_points[i]];
	}
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int associativity_to_nid(const unsigned int *associativity)
{
	int nid = -1;

	if (min_common_depth == -1)
		goto out;

	if (associativity[0] >= min_common_depth)
		nid = associativity[min_common_depth];

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;

	if (nid > 0 && associativity[0] >= distance_ref_points_depth)
		initialize_distance_lookup_table(nid, associativity);

out:
	return nid;
}

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const unsigned int *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
	int depth;
	struct device_node *rtas_root;
	struct device_node *chosen;
	const char *vec5;

	rtas_root = of_find_node_by_path("/rtas");

	if (!rtas_root)
		return -1;

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(rtas_root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);

#define VEC5_AFFINITY_BYTE	5
#define VEC5_AFFINITY		0x80
	chosen = of_find_node_by_path("/chosen");
	if (chosen) {
		vec5 = of_get_property(chosen, "ibm,architecture-vec-5", NULL);
		if (vec5 && (vec5[VEC5_AFFINITY_BYTE] & VEC5_AFFINITY)) {
			dbg("Using form 1 affinity\n");
			form1_affinity = 1;
		}
	}

	if (form1_affinity) {
		depth = distance_ref_points[0];
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}

		depth = distance_ref_points[1];
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(rtas_root);
	return depth;

err:
	of_node_put(rtas_root);
	return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}

struct of_drconf_cell {
	u64	base_addr;
	u32	drc_index;
	u32	reserved;
	u32	aa_index;
	u32	flags;
};

#define DRCONF_MEM_ASSIGNED	0x00000008
#define DRCONF_MEM_AI_INVALID	0x00000040
#define DRCONF_MEM_RESERVED	0x00000080

/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
{
	const u32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = cp[0];
	drmem->reserved = cp[1];
	drmem->aa_index = cp[2];
	drmem->flags = cp[3];

	*cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a number N of memblock
 * list entries followed by N memblock list entries. Each memblock list entry
 * contains information as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
	const u32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = *prop++;

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything.
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
	u32	n_arrays;
	u32	array_sz;
	const u32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = *prop++;
	aa->array_sz = *prop++;

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = aa->arrays[index];

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;
	}

	return nid;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;
out:
	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}

static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.  Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}

/*
 * Reads the counter for a given entry in the
 * linux,drconf-usable-memory property.
 */
static inline int __init read_usm_ranges(const u32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) duples.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const u32 *dm, *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa;

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) duples */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
				((base + size) >> PAGE_SHIFT), &nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				add_active_range(nid, base >> PAGE_SHIFT,
						 (base >> PAGE_SHIFT)
						 + (sz >> PAGE_SHIFT));
		} while (--ranges);
	}
}

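/*
 * Walk the cpu and memory nodes of the device tree, online each NUMA node
 * that backs a present cpu or a memory range, and register every usable
 * memory range as an active region.  Returns 0 on success and a negative
 * value if NUMA setup is not possible, in which case the caller falls back
 * to setup_nonnuma().
 */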
static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_active_range(nid, start >> PAGE_SHIFT,
				(start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		add_active_range(nid, start_pfn, end_pfn);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", nr_cpu_ids - 1);
		printk("\n");
	}
}

static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < memblock_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, satisfying the memblock or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the virtual address of the memory.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	void *ret;
	int new_nid;
	unsigned long ret_paddr;

	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret_paddr)
		ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());

	if (!ret_paddr)
		panic("numa.c: cannot allocate %lu bytes for node %d",
		      size, nid);

	ret = __va(ret_paddr);

	/*
	 * We initialize the nodes in numeric order: 0, 1, 2...
	 * and hand over control from the MEMBLOCK allocator to the
	 * bootmem allocator.  If this function is called for
	 * node 5, then we know that all nodes <5 are using the
	 * bootmem allocator instead of the MEMBLOCK allocator.
	 *
	 * So, check the nid from which this allocation came
	 * and double check to see if we need to use bootmem
	 * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
	 * since it would be useless.
	 */
	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		dbg("alloc_bootmem %p %lx\n", ret, size);
	}

	memset(ret, 0, size);
	return ret;
}

static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

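/*
 * Mark every memblock.reserved range that intersects this node as reserved
 * in the node's bootmem allocator.  A reserved range may span several
 * active (non-hole) regions, so walk it one active region at a time and
 * only call reserve_bootmem_node() for the pieces that actually belong to
 * this nid.
 */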
static void mark_reserved_regions_for_nid(int nid)
{
	struct pglist_data *node = NODE_DATA(nid);
	struct memblock_region *reg;

	for_each_memblock(reserved, reg) {
		unsigned long physbase = reg->base;
		unsigned long size = reg->size;
		unsigned long start_pfn = physbase >> PAGE_SHIFT;
		unsigned long end_pfn = PFN_UP(physbase + size);
		struct node_active_region node_ar;
		unsigned long node_end_pfn = node->node_start_pfn +
					     node->node_spanned_pages;

		/*
		 * Check to make sure that this memblock.reserved area is
		 * within the bounds of the node that we care about.
		 * Checking the nid of the start and end points is not
		 * sufficient because the reserved area could span the
		 * entire node.
		 */
		if (end_pfn <= node->node_start_pfn ||
		    start_pfn >= node_end_pfn)
			continue;

		get_node_active_region(start_pfn, &node_ar);
		while (start_pfn < end_pfn &&
			node_ar.start_pfn < node_ar.end_pfn) {
			unsigned long reserve_size = size;
			/*
			 * if reserved region extends past active region
			 * then trim size to active region
			 */
			if (end_pfn > node_ar.end_pfn)
				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
					- physbase;
			/*
			 * Only worry about *this* node, others may not
			 * yet have valid NODE_DATA().
			 */
			if (node_ar.nid == nid) {
				dbg("reserve_bootmem %lx %lx nid=%d\n",
					physbase, reserve_size, node_ar.nid);
				reserve_bootmem_node(NODE_DATA(node_ar.nid),
						physbase, reserve_size,
						BOOTMEM_DEFAULT);
			}
			/*
			 * if reserved region is contained in the active region
			 * then done.
			 */
			if (end_pfn <= node_ar.end_pfn)
				break;

			/*
			 * reserved region extends past the active region
			 *   get next active region that contains this
			 *   reserved region
			 */
			start_pfn = node_ar.end_pfn;
			physbase = start_pfn << PAGE_SHIFT;
			size = size - reserve_size;
			get_node_active_region(start_pfn, &node_ar);
		}
	}
}

void __init do_init_bootmem(void)
{
	int nid;

	min_low_pfn = 0;
	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		void *bootmem_vaddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/*
		 * Allocate the node structure node local if possible
		 *
		 * Be careful moving this around, as it relies on all
		 * previous nodes' bootmem to be initialized and have
		 * all reserved areas marked.
		 */
		NODE_DATA(nid) = careful_zallocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_vaddr = careful_zallocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);

		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

		init_bootmem_node(NODE_DATA(nid),
				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);
		/*
		 * Be very careful about moving this around.  Future
		 * calls to careful_zallocation() depend on this getting
		 * done correctly.
		 */
		mark_reserved_regions_for_nid(nid);
		sparse_memory_present_with_active_regions(nid);
	}

	init_bootmem_done = 1;

	/*
	 * Now bootmem is initialised we can create the node to cpumask
	 * lookup tables and setup the cpu callback to populate them.
	 */
	setup_node_to_cpumask_map();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const u32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid = -1;

	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		of_node_put(memory);
		if (nid >= 0)
			break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid, found = 0;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	if (NODE_DATA(nid)->node_spanned_pages)
		return nid;

	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages) {
			found = 1;
			break;
		}
	}

	BUG_ON(!found);
	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	unsigned int drconf_cell_cnt = 0;
	u64 lmb_size = 0;
	const u32 *dm = NULL;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
		lmb_size = of_get_lmb_size(memory);
		of_node_put(memory);
	}
	return lmb_size * drconf_cell_cnt;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static void set_topology_timer(void);

/*
 * Take a local snapshot of the associativity change counters that the
 * hypervisor maintains for each cpu.
 */
static void setup_cpu_associativity_change_counters(void)
{
	int cpu;

	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
}

/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
	int cpu, nr_cpus = 0;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	cpumask_clear(changes);

	for_each_possible_cpu(cpu) {
		int i, changed = 0;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
		}
		if (changed) {
			cpumask_set_cpu(cpu, changes);
			nr_cpus++;
		}
	}

	return nr_cpus;
}

/*
 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
 * the complete property we have to add the length in the first cell.
 */
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)

/*
 * Convert the associativity domain numbers returned from the hypervisor
 * to the sequence they would appear in the ibm,associativity property.
 */
static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
{
	int i, nr_assoc_doms = 0;
	const u16 *field = (const u16 *) packed;

#define VPHN_FIELD_UNUSED	(0xffff)
#define VPHN_FIELD_MSB		(0x8000)
#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)

	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
		if (*field == VPHN_FIELD_UNUSED) {
			/* All significant fields processed, and remaining
			 * fields contain the reserved value of all 1's.
			 * Just store them.
			 */
			unpacked[i] = *((u32 *)field);
			field += 2;
		} else if (*field & VPHN_FIELD_MSB) {
			/* Data is in the lower 15 bits of this field */
			unpacked[i] = *field & VPHN_FIELD_MASK;
			field++;
			nr_assoc_doms++;
		} else {
			/* Data is in the lower 15 bits of this field
			 * concatenated with the next 16 bit field
			 */
			unpacked[i] = *((u32 *)field);
			field += 2;
			nr_assoc_doms++;
		}
	}

	/* The first cell contains the length of the property */
	unpacked[0] = nr_assoc_doms;

	return nr_assoc_doms;
}

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
{
	long rc;
	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
	u64 flags = 1;
	int hwcpu = get_hard_smp_processor_id(cpu);

	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
	vphn_unpack_associativity(retbuf, associativity);

	return rc;
}

static long vphn_get_associativity(unsigned long cpu,
				   unsigned int *associativity)
{
	long rc;

	rc = hcall_vphn(cpu, associativity);

	switch (rc) {
	case H_FUNCTION:
		printk(KERN_INFO
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_HARDWARE:
		printk(KERN_ERR
			"hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		stop_topology_update();
	}

	return rc;
}

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed.
 */
int arch_update_cpu_topology(void)
{
	int cpu, nid, old_nid;
	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
	struct sys_device *sysdev;

	for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
		vphn_get_associativity(cpu, associativity);
		nid = associativity_to_nid(associativity);

		if (nid < 0 || !node_online(nid))
			nid = first_online_node;

		old_nid = numa_cpu_lookup_table[cpu];

		/* Disable hotplug while we update the cpu
		 * masks and sysfs.
		 */
		get_online_cpus();
		unregister_cpu_under_node(cpu, old_nid);
		unmap_cpu_from_node(cpu);
		map_cpu_to_node(cpu, nid);
		register_cpu_under_node(cpu, nid);
		put_online_cpus();

		sysdev = get_cpu_sysdev(cpu);
		if (sysdev)
			kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
	}

	return 1;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (!vphn_enabled)
		return;
	if (update_cpu_associativity_changes_mask() > 0)
		topology_schedule_update();
	set_topology_timer();
}
static struct timer_list topology_timer =
	TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void set_topology_timer(void)
{
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	add_timer(&topology_timer);
}

/*
 * Start polling for VPHN associativity changes.
 */
int start_topology_update(void)
{
	int rc = 0;

	if (firmware_has_feature(FW_FEATURE_VPHN) &&
	    get_lppaca()->shared_proc) {
		vphn_enabled = 1;
		setup_cpu_associativity_change_counters();
		init_timer_deferrable(&topology_timer);
		set_topology_timer();
		rc = 1;
	}

	return rc;
}
__initcall(start_topology_update);

/*
 * Disable polling for VPHN associativity changes.
 */
int stop_topology_update(void)
{
	vphn_enabled = 0;
	return del_timer_sync(&topology_timer);
}
#endif /* CONFIG_PPC_SPLPAR */