/* Common code for 32 and 64-bit NUMA */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int __initdata numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo
#ifndef CONFIG_MEMORY_HOTPLUG
__initdata
#endif
;

static int numa_distance_cnt;
static u8 *numa_distance;

static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
#endif
	return 0;
}
early_param("numa", numa_setup);

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
			   nid, start, end - 1);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("NUMA: too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}

/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}

/* Allocate NODE_DATA for a node on the local memory */
static void __init alloc_node_data(int nid)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	void *nd;
	int tnid;

	/*
	 * Allocate node data.  Try node-local memory and then any node.
	 * Never allocate in DMA zone.
	 */
	nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
					      MEMBLOCK_ALLOC_ACCESSIBLE);
		if (!nd_pa) {
			pr_err("Cannot find %zu bytes in node %d\n",
			       nd_size, nid);
			return;
		}
	}
	nd = __va(nd_pa);

	/* report and initialize */
	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));

	node_set_online(nid);
}

/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = PFN_PHYS(max_pfn);
	int i, j, k;

	/* first, trim all entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* make sure all blocks are inside the limits */
		bi->start = max(bi->start, low);
		bi->end = min(bi->end, high);

		/* and there's no empty or non-existent block */
		if (bi->start >= bi->end ||
		    !memblock_overlaps_region(&memblock.memory,
			bi->start, bi->end - bi->start))
			numa_remove_memblk_from(i--, mi);
	}

	/* merge neighboring / overlapping entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks.  Whine
			 * about but allow overlaps of the same nid.  They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
					       bi->nid, bi->start, bi->end - 1,
					       bj->nid, bj->start, bj->end - 1);
					return -EINVAL;
				}
				pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
					   bi->nid, bi->start, bi->end - 1,
					   bj->start, bj->end - 1);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = min(bi->start, bj->start);
			end = max(bi->end, bj->end);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
			       bi->nid, bi->start, bi->end - 1, bj->start,
			       bj->end - 1, start, end - 1);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	/* clear unused ones */
	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}

/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}

/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
		memblock_free(__pa(numa_distance), size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	/* enable table creation */
}

static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
				      size, PAGE_SIZE);
	if (!phys) {
		pr_warning("NUMA: Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
	memblock_reserve(phys, size);

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}

/**
 * numa_set_distance - Set NUMA distance from one node to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.  If the distance
 * table doesn't exist, one large enough to accommodate all the currently
 * known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation, or if @distance doesn't make sense, the
 * call is ignored.
 * This is to allow simplification of specific NUMA config implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
	    from < 0 || to < 0) {
		pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}

int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	u64 numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		u64 s = mi->blk[i].start >> PAGE_SHIFT;
		u64 e = mi->blk[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((s64)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}

/*
 * Mark all currently memblock-reserved physical memory (which covers the
 * kernel's own memory ranges) as hot-unpluggable.
 */
static void __init numa_clear_kernel_node_hotplug(void)
{
	nodemask_t reserved_nodemask = NODE_MASK_NONE;
	struct memblock_region *mb_region;
	int i;

	/*
	 * We have to do some preprocessing of memblock regions, to
	 * make them suitable for reservation.
	 *
	 * At this time, all memory regions reserved by memblock are
	 * used by the kernel, but those regions are not split up
	 * along node boundaries yet, and don't necessarily have their
	 * node ID set yet either.
	 *
	 * So iterate over all memory known to the x86 architecture,
	 * and use those ranges to set the nid in memblock.reserved.
	 * This will split up the memblock regions along node
	 * boundaries and will set the node IDs as well.
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;
		int ret;

		ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
		WARN_ON_ONCE(ret);
	}

	/*
	 * Now go over all reserved memblock regions, to construct a
	 * node mask of all kernel reserved memory areas.
	 *
	 * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
	 *   numa_meminfo might not include all memblock.reserved
	 *   memory ranges, because quirks such as trim_snb_memory()
	 *   reserve specific pages for Sandy Bridge graphics. ]
	 */
	for_each_memblock(reserved, mb_region) {
		if (mb_region->nid != MAX_NUMNODES)
			node_set(mb_region->nid, reserved_nodemask);
	}

	/*
	 * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
	 * belonging to the reserved node mask.
	 *
	 * Note that this will include memory regions that reside
	 * on nodes that contain kernel memory - entire nodes
	 * become hot-unpluggable:
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;

		if (!node_isset(mb->nid, reserved_nodemask))
			continue;

		memblock_clear_hotplug(mb->start, mb->end - mb->start);
	}
}

static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	unsigned long uninitialized_var(pfn_align);
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *mb = &mi->blk[i];

		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.memory, mb->nid);
	}

	/*
	 * Very early on, the kernel has to use some memory, for example
	 * to load the kernel image.  We cannot prevent this anyway, so
	 * any node the kernel resides in should be un-hotpluggable.
	 *
	 * And by the time we get here, allocating node data won't fail.
	 */
	numa_clear_kernel_node_hotplug();

	/*
	 * If the sections array is going to be used for pfn -> nid mapping,
	 * check whether its granularity is fine enough.
	 */
#ifdef NODE_NOT_IN_PAGE_FLAGS
	pfn_align = node_map_pfn_alignment();
	if (pfn_align && pfn_align < PAGES_PER_SECTION) {
		printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
		       PFN_PHYS(pfn_align) >> 20,
		       PFN_PHYS(PAGES_PER_SECTION) >> 20);
		return -EINVAL;
	}
#endif
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start >= end)
			continue;

		/*
		 * Don't confuse VM with a node that doesn't have the
		 * minimum amount of memory:
		 */
		if (end && (end - start) < NODE_MIN_SIZE)
			continue;

		alloc_node_data(nid);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this, fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet.  We round-robin over the
 * existing nodes.
 */
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}

static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
				  MAX_NUMNODES));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
				  MAX_NUMNODES));
	/* In case that parsing SRAT failed. */
	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;

	/*
	 * We reset memblock back to the top-down direction
	 * here because if we configured ACPI_NUMA, we have
	 * parsed SRAT in init_func().  It is ok to have the
	 * reset here even if we didn't configure ACPI_NUMA
	 * or ACPI NUMA init fails and falls back to dummy
	 * NUMA init.
	 */
	memblock_set_bottom_up(false);

	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();

	return 0;
}

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds.  The
 * last fallback is a dummy single-node config encompassing the whole of
 * memory, which never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}

	numa_init(dummy_numa_init);
}

static __init int find_near_online_node(int node)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;

	for_each_online_node(n) {
		val = node_distance(node, n);

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	return best_node;
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the fake-node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round-robin manner by numa_init_array(),
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;
		if (!node_online(node))
			node = find_near_online_node(node);
		numa_set_node(cpu, node);
	}
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
		       "cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
		       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
	       enable ? "numa_add_cpu" : "numa_remove_cpu",
	       cpu, node, cpumask_pr_args(mask));
	return;
}

# ifndef CONFIG_NUMA_EMU
static void numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif	/* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
		       node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (node_to_cpumask_map[node] == NULL) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): no node_to_cpumask_map!\n",
		       node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_MEMORY_HOTPLUG
int memory_add_physaddr_to_nid(u64 start)
{
	struct numa_meminfo *mi = &numa_meminfo;
	int nid = mi->blk[0].nid;
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			nid = mi->blk[i].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif