/* Common code for 32 and 64-bit NUMA */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820/api.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo
#ifndef CONFIG_MEMORY_HOTPLUG
__initdata
#endif
;

static int numa_distance_cnt;
static u8 *numa_distance;

static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
#endif
	return 0;
}
early_param("numa", numa_setup);

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}
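
/*
 * Illustrative note (not in the original source): numa_set_node() above is
 * phase-aware.  Before the per-cpu areas exist it writes the early
 * x86_cpu_to_node_map array; afterwards it writes the real per-cpu
 * variable, so callers can use it the same way in both phases, e.g.:
 *
 *	numa_set_node(cpu, nid);	// valid early or late
 *	numa_clear_node(cpu);		// shorthand for NUMA_NO_NODE
 */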

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
			nid, start, end - 1);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}

/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}

/* Allocate NODE_DATA for a node on the local memory */
static void __init alloc_node_data(int nid)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	void *nd;
	int tnid;

	/*
	 * Allocate node data.  Try node-local memory and then any node.
	 * Never allocate in DMA zone.
	 */
	nd_pa = memblock_phys_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
					      MEMBLOCK_ALLOC_ACCESSIBLE);
		if (!nd_pa) {
			pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
			       nd_size, nid);
			return;
		}
	}
	nd = __va(nd_pa);

	/* report and initialize */
	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));

	node_set_online(nid);
}
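
/*
 * Illustrative example (made-up addresses, not from the original source)
 * of what numa_cleanup_meminfo() below does: two blocks on the same nid
 * whose joining hole doesn't overlap memory on another node are merged,
 *
 *	node 0 [mem 0x00000000-0x0fffffff] + [mem 0x10000000-0x1fffffff]
 *		-> node 0 [mem 0x00000000-0x1fffffff]
 *
 * while blocks that overlap across *different* nids make the whole
 * configuration invalid and the function returns -EINVAL.
 */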

/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = PFN_PHYS(max_pfn);
	int i, j, k;

	/* first, trim all entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* make sure all blocks are inside the limits */
		bi->start = max(bi->start, low);
		bi->end = min(bi->end, high);

		/* and there's no empty or non-existent block */
		if (bi->start >= bi->end ||
		    !memblock_overlaps_region(&memblock.memory,
					      bi->start, bi->end - bi->start))
			numa_remove_memblk_from(i--, mi);
	}

	/* merge neighboring / overlapping entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks.  Whine
			 * about but allow overlaps of the same nid.  They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
					       bi->nid, bi->start, bi->end - 1,
					       bj->nid, bj->start, bj->end - 1);
					return -EINVAL;
				}
				pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
					bi->nid, bi->start, bi->end - 1,
					bj->start, bj->end - 1);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = min(bi->start, bj->start);
			end = max(bi->end, bj->end);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
			       bi->nid, bi->start, bi->end - 1, bj->start,
			       bj->end - 1, start, end - 1);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	/* clear unused ones */
	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}

/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}
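
/*
 * Layout note with an illustrative example (values per the ACPI SLIT
 * convention, LOCAL_DISTANCE == 10 and REMOTE_DISTANCE == 20): the
 * distance table below is a flat cnt x cnt byte matrix indexed as
 * numa_distance[from * numa_distance_cnt + to].  For two nodes the
 * freshly allocated table is
 *
 *	{ 10, 20,
 *	  20, 10 }
 *
 * and numa_set_distance(0, 1, 21) would overwrite only the [0][1] slot.
 */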

/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
		memblock_free(__pa(numa_distance), size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	/* enable table creation */
}

static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
				      size, PAGE_SIZE);
	if (!phys) {
		pr_warn("Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
	memblock_reserve(phys, size);

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}

/**
 * numa_set_distance - Set NUMA distance from one NUMA node to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.  If the distance
 * table doesn't exist, one large enough to accommodate all the currently
 * known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation, or @distance doesn't make sense, the call
 * is ignored.  This allows specific NUMA config implementations to stay
 * simple.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
	    from < 0 || to < 0) {
		pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}

int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);
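
/*
 * Worked example for the 1M slack check below (assuming the usual x86
 * PAGE_SHIFT of 12): 1 << (20 - PAGE_SHIFT) == 1 << 8 == 256 pages,
 * i.e. exactly 1MB.  A NUMA layout is therefore rejected only when the
 * e820 map contains at least 1MB more usable RAM than the node
 * descriptions cover.
 */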

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	u64 numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		u64 s = mi->blk[i].start >> PAGE_SHIFT;
		u64 e = mi->blk[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((s64)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}
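
/*
 * Illustrative note (not from the original source): if the kernel image
 * and initrd were loaded into node 0's memory, the pass below clears the
 * MEMBLOCK_HOTPLUG flag for *every* numa_meminfo block on node 0, so the
 * whole node, not just the reserved ranges, becomes non-hotpluggable.
 */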

/*
 * Mark all currently memblock-reserved physical memory (which covers the
 * kernel's own memory ranges) as hot-unpluggable.
 */
static void __init numa_clear_kernel_node_hotplug(void)
{
	nodemask_t reserved_nodemask = NODE_MASK_NONE;
	struct memblock_region *mb_region;
	int i;

	/*
	 * We have to do some preprocessing of memblock regions, to
	 * make them suitable for reservation.
	 *
	 * At this time, all memory regions reserved by memblock are
	 * used by the kernel, but those regions are not split up
	 * along node boundaries yet, and don't necessarily have their
	 * node ID set yet either.
	 *
	 * So iterate over all memory known to the x86 architecture,
	 * and use those ranges to set the nid in memblock.reserved.
	 * This will split up the memblock regions along node
	 * boundaries and will set the node IDs as well.
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;
		int ret;

		ret = memblock_set_node(mb->start, mb->end - mb->start,
					&memblock.reserved, mb->nid);
		WARN_ON_ONCE(ret);
	}

	/*
	 * Now go over all reserved memblock regions, to construct a
	 * node mask of all kernel reserved memory areas.
	 *
	 * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
	 *   numa_meminfo might not include all memblock.reserved
	 *   memory ranges, because quirks such as trim_snb_memory()
	 *   reserve specific pages for Sandy Bridge graphics. ]
	 */
	for_each_memblock(reserved, mb_region) {
		if (mb_region->nid != MAX_NUMNODES)
			node_set(mb_region->nid, reserved_nodemask);
	}

	/*
	 * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
	 * belonging to the reserved node mask.
	 *
	 * Note that this will include memory regions that reside
	 * on nodes that contain kernel memory - entire nodes
	 * become hot-unpluggable:
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;

		if (!node_isset(mb->nid, reserved_nodemask))
			continue;

		memblock_clear_hotplug(mb->start, mb->end - mb->start);
	}
}

static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	unsigned long uninitialized_var(pfn_align);
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *mb = &mi->blk[i];

		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.memory, mb->nid);
	}

	/*
	 * Very early on, the kernel has to use some memory, e.g. to load
	 * the kernel image.  We cannot prevent this anyway, so any node
	 * the kernel resides in must be un-hotpluggable.
	 *
	 * And when we come here, allocating node data won't fail.
	 */
	numa_clear_kernel_node_hotplug();

	/*
	 * If the sections array is going to be used for pfn -> nid
	 * mapping, check whether its granularity is fine enough.
	 */
#ifdef NODE_NOT_IN_PAGE_FLAGS
	pfn_align = node_map_pfn_alignment();
	if (pfn_align && pfn_align < PAGES_PER_SECTION) {
		printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
		       PFN_PHYS(pfn_align) >> 20,
		       PFN_PHYS(PAGES_PER_SECTION) >> 20);
		return -EINVAL;
	}
#endif
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start >= end)
			continue;

		/*
		 * Don't confuse VM with a node that doesn't have the
		 * minimum amount of memory:
		 */
		if (end && (end - start) < NODE_MIN_SIZE)
			continue;

		alloc_node_data(nid);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}
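
/*
 * Illustrative example for the size filter above (NODE_MIN_SIZE is an
 * x86 constant, 4MB at the time of writing - treat the exact value as
 * an assumption): a hypothetical node whose blocks span only
 * [mem 0x100000000-0x1001fffff] (2MB) gets no NODE_DATA and is not
 * brought online.
 */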

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this, fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node_in(rr, node_online_map);
	}
}

static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
				  MAX_NUMNODES));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
				  MAX_NUMNODES));
	/* In case that parsing SRAT failed. */
	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;

	/*
	 * We reset memblock back to the top-down direction here because,
	 * if we configured ACPI_NUMA, we have parsed SRAT in init_func().
	 * It is OK to have the reset here even if we didn't configure
	 * ACPI_NUMA, or if ACPI NUMA init failed and fell back to dummy
	 * NUMA init.
	 */
	memblock_set_bottom_up(false);

	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();

	return 0;
}

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds.  The
 * last fallback is the dummy single-node config encompassing all memory,
 * which never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}

	numa_init(dummy_numa_init);
}

static void __init init_memory_less_node(int nid)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};

	/* Allocate and initialize node data. Memory-less node is now online. */
	alloc_node_data(nid);
	free_area_init_node(nid, zones_size, 0, zholes_size);

	/*
	 * All zonelists will be built later in start_kernel() after per cpu
	 * areas are initialized.
	 */
}
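
/*
 * Ordering note (a summary, not new behaviour): x86_numa_init() runs
 * first and onlines the parsed nodes (or the dummy node 0), then
 * init_cpu_to_node() below runs before the per-cpu areas are set up and
 * onlines any memory-less node a CPU points at via
 * init_memory_less_node().
 */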

/*
 * Set up early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the fake-node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[] is
 * already initialized in a round-robin manner by numa_init_array()
 * prior to this call, and that initialization is good enough for
 * the fake NUMA cases.
 *
 * Called before the per_cpu areas are set up.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;

		if (!node_online(node))
			init_memory_less_node(node);

		numa_set_node(cpu, node);
	}
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
		       "cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
		       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
	       enable ? "numa_add_cpu" : "numa_remove_cpu",
	       cpu, node, cpumask_pr_args(mask));
}

# ifndef CONFIG_NUMA_EMU
static void numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif	/* !CONFIG_NUMA_EMU */
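
/*
 * Note on the two builds above: with CONFIG_DEBUG_PER_CPU_MAPS,
 * numa_add_cpu()/numa_remove_cpu() go through debug_cpumask_set_cpu(),
 * which validates the node and logs the resulting mask, instead of the
 * bare cpumask_set_cpu()/cpumask_clear_cpu() used by the fast variants.
 */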

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): node >= nr_node_ids(%d)\n",
		       node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (node_to_cpumask_map[node] == NULL) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): no node_to_cpumask_map!\n",
		       node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_MEMORY_HOTPLUG
int memory_add_physaddr_to_nid(u64 start)
{
	struct numa_meminfo *mi = &numa_meminfo;
	int nid = mi->blk[0].nid;
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			nid = mi->blk[i].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif