// SPDX-License-Identifier: GPL-2.0-only
/* Common code for 32 and 64-bit NUMA */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>
#include <linux/sort.h>

#include <asm/e820/api.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo __initdata_or_meminfo;
static struct numa_meminfo numa_reserved_meminfo __initdata_or_meminfo;

static int numa_distance_cnt;
static u8 *numa_distance;

static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
	if (!strncmp(opt, "fake=", 5))
		return numa_emu_cmdline(opt + 5);
	if (!strncmp(opt, "noacpi", 6))
		disable_srat();
	if (!strncmp(opt, "nohmat", 6))
		disable_hmat();
	return 0;
}
early_param("numa", numa_setup);

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}
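
/*
 * Usage sketch (illustration only; the real callers are later in this
 * file): the typical pattern for wiring a CPU to its node is to resolve
 * the node from the CPU's APIC ID and then record it, which is what
 * init_cpu_to_node() below does for every possible CPU:
 *
 *	int node = numa_cpu_node(cpu);
 *
 *	if (node != NUMA_NO_NODE)
 *		numa_set_node(cpu, node);
 *
 * CPUs whose node is still unknown afterwards are handed out round-robin
 * by numa_init_array().
 */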

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}

static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
			nid, start, end - 1);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}

/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

/**
 * numa_move_tail_memblk - Move a numa_memblk from one numa_meminfo to another
 * @dst: numa_meminfo to append block to
 * @idx: Index of memblk to remove
 * @src: numa_meminfo to remove memblk from
 */
static void __init numa_move_tail_memblk(struct numa_meminfo *dst, int idx,
					 struct numa_meminfo *src)
{
	dst->blk[dst->nr_blks++] = src->blk[idx];
	numa_remove_memblk_from(idx, src);
}

/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}
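
/*
 * Usage sketch: numa_add_memblk() is how enumeration code (e.g. the ACPI
 * SRAT parser) describes memory to this file. The addresses below are made
 * up; a two-node machine with 4G per node could be described as:
 *
 *	numa_add_memblk(0, 0x000000000, 0x100000000);	// node 0: 0-4G
 *	numa_add_memblk(1, 0x100000000, 0x200000000);	// node 1: 4-8G
 *
 * End addresses are exclusive. dummy_numa_init() later in this file does
 * the single-node variant of this, covering [0, PFN_PHYS(max_pfn)).
 */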

/* Allocate NODE_DATA for a node on the local memory */
static void __init alloc_node_data(int nid)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	void *nd;
	int tnid;

	/*
	 * Allocate node data. Try node-local memory and then any node.
	 * Never allocate in DMA zone.
	 */
	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
		       nd_size, nid);
		return;
	}
	nd = __va(nd_pa);

	/* report and initialize */
	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));

	node_set_online(nid);
}

/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks. Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = PFN_PHYS(max_pfn);
	int i, j, k;

	/* first, trim all entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* move / save reserved memory ranges */
		if (!memblock_overlaps_region(&memblock.memory,
					      bi->start, bi->end - bi->start)) {
			numa_move_tail_memblk(&numa_reserved_meminfo, i--, mi);
			continue;
		}

		/* make sure all non-reserved blocks are inside the limits */
		bi->start = max(bi->start, low);

		/* preserve info for non-RAM areas above 'max_pfn': */
		if (bi->end > high) {
			numa_add_memblk_to(bi->nid, high, bi->end,
					   &numa_reserved_meminfo);
			bi->end = high;
		}

		/* and there's no empty block */
		if (bi->start >= bi->end)
			numa_remove_memblk_from(i--, mi);
	}

	/* merge neighboring / overlapping entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks. Whine
			 * about but allow overlaps of the same nid. They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
					       bi->nid, bi->start, bi->end - 1,
					       bj->nid, bj->start, bj->end - 1);
					return -EINVAL;
				}
				pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
					bi->nid, bi->start, bi->end - 1,
					bj->start, bj->end - 1);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = min(bi->start, bj->start);
			end = max(bi->end, bj->end);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
			       bi->nid, bi->start, bi->end - 1, bj->start,
			       bj->end - 1, start, end - 1);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	/* clear unused ones */
	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}

/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}

/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed. The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
		memblock_free(numa_distance, size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	/* enable table creation */
}

static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_phys_alloc_range(size, PAGE_SIZE, 0,
					 PFN_PHYS(max_pfn_mapped));
	if (!phys) {
		pr_warn("Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}
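
/*
 * For illustration: with cnt == 3 the freshly allocated table above is
 * filled with the default distances (LOCAL_DISTANCE on the diagonal,
 * REMOTE_DISTANCE everywhere else), i.e. with the usual 10/20 values:
 *
 *	to:      0   1   2
 *	from 0: 10  20  20
 *	from 1: 20  10  20
 *	from 2: 20  20  10
 *
 * Firmware-specific code (e.g. the ACPI SLIT parser) then overrides
 * individual entries via numa_set_distance() below.
 */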

/**
 * numa_set_distance - Set NUMA distance from one NUMA to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance. If the distance
 * table doesn't exist, one which is large enough to accommodate all the
 * currently known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation or @distance doesn't make sense, the call
 * is ignored.
 * This is to allow simplification of specific NUMA config implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
			from < 0 || to < 0) {
		pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}

int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common). Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	u64 numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		u64 s = mi->blk[i].start >> PAGE_SHIFT;
		u64 e = mi->blk[i].end >> PAGE_SHIFT;

		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((s64)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}

/*
 * Mark all currently memblock-reserved physical memory (which covers the
 * kernel's own memory ranges) as hot-unpluggable.
 */
static void __init numa_clear_kernel_node_hotplug(void)
{
	nodemask_t reserved_nodemask = NODE_MASK_NONE;
	struct memblock_region *mb_region;
	int i;

	/*
	 * We have to do some preprocessing of memblock regions, to
	 * make them suitable for reservation.
	 *
	 * At this time, all memory regions reserved by memblock are
	 * used by the kernel, but those regions are not split up
	 * along node boundaries yet, and don't necessarily have their
	 * node ID set yet either.
	 *
	 * So iterate over all memory known to the x86 architecture,
	 * and use those ranges to set the nid in memblock.reserved.
	 * This will split up the memblock regions along node
	 * boundaries and will set the node IDs as well.
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;
		int ret;

		ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
		WARN_ON_ONCE(ret);
	}

	/*
	 * Now go over all reserved memblock regions, to construct a
	 * node mask of all kernel reserved memory areas.
	 *
	 * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
	 *   numa_meminfo might not include all memblock.reserved
	 *   memory ranges, because quirks such as trim_snb_memory()
	 *   reserve specific pages for Sandy Bridge graphics. ]
	 */
	for_each_reserved_mem_region(mb_region) {
		int nid = memblock_get_region_node(mb_region);

		if (nid != MAX_NUMNODES)
			node_set(nid, reserved_nodemask);
	}

	/*
	 * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
	 * belonging to the reserved node mask.
	 *
	 * Note that this will include memory regions that reside
	 * on nodes that contain kernel memory - entire nodes
	 * become hot-unpluggable:
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;

		if (!node_isset(mb->nid, reserved_nodemask))
			continue;

		memblock_clear_hotplug(mb->start, mb->end - mb->start);
	}
}

static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *mb = &mi->blk[i];

		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.memory, mb->nid);
	}

	/*
	 * Very early on, the kernel has to use some memory, e.g. to load
	 * the kernel image. We cannot prevent this anyway. So any node the
	 * kernel resides in should be un-hotpluggable.
	 *
	 * And when we come here, alloc node data won't fail.
	 */
	numa_clear_kernel_node_hotplug();

	/*
	 * If the sections array is going to be used for pfn -> nid mapping,
	 * check whether its granularity is fine enough.
	 */
	if (IS_ENABLED(NODE_NOT_IN_PAGE_FLAGS)) {
		unsigned long pfn_align = node_map_pfn_alignment();

		if (pfn_align && pfn_align < PAGES_PER_SECTION) {
			pr_warn("Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
				PFN_PHYS(pfn_align) >> 20,
				PFN_PHYS(PAGES_PER_SECTION) >> 20);
			return -EINVAL;
		}
	}
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start >= end)
			continue;

		/*
		 * Don't confuse VM with a node that doesn't have the
		 * minimum amount of memory:
		 */
		if (end && (end - start) < NODE_MIN_SIZE)
			continue;

		alloc_node_data(nid);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
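/*
 * Worked example for the round-robin fallback below (hypothetical layout):
 * with nodes 0 and 1 online and CPUs 4-7 lacking an APIC-derived node,
 * numa_init_array() assigns them nodes 0, 1, 0, 1 in order, so every
 * possible CPU ends up with some valid node before the real topology is
 * known.
 */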
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node_in(rr, node_online_map);
	}
}

static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
				  MAX_NUMNODES));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
				  MAX_NUMNODES));
	/* In case that parsing SRAT failed. */
	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;

	/*
	 * We reset memblock back to the top-down direction
	 * here because if we configured ACPI_NUMA, we have
	 * parsed SRAT in init_func(). It is ok to have the
	 * reset here even if we didn't configure ACPI_NUMA
	 * or the ACPI NUMA init fails and falls back to the
	 * dummy NUMA init.
	 */
	memblock_set_bottom_up(false);

	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();

	return 0;
}

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory. This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds. The
 * last fallback is the dummy single-node config, which encompasses all
 * memory and never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}

	numa_init(dummy_numa_init);
}

/*
 * A node may exist which has one or more Generic Initiators but no CPUs and no
 * memory.
 *
 * This function must be called after init_cpu_to_node(), to ensure that any
 * memoryless CPU nodes have already been brought online, and before the
 * node_data[nid] is needed for zone list setup in build_all_zonelists().
 *
 * When this function is called, any nodes containing either memory and/or CPUs
 * will already be online and there is no need to do anything extra, even if
 * they also contain one or more Generic Initiators.
 */
void __init init_gi_nodes(void)
{
	int nid;

	/*
	 * Exclude this node from
	 *   bringup_nonboot_cpus
	 *    cpu_up
	 *     __try_online_node
	 *      register_one_node
	 * because node_subsys is not initialized yet.
	 * TODO remove dependency on node_online
	 */
	for_each_node_state(nid, N_GENERIC_INITIATOR)
		if (!node_online(nid))
			node_set_online(nid);
}

/*
 * Set up early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the fake-node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round-robin manner by numa_init_array(),
 * prior to this call, and that initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are set up.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;

		/*
		 * Exclude this node from
		 *   bringup_nonboot_cpus
		 *    cpu_up
		 *     __try_online_node
		 *      register_one_node
		 * because node_subsys is not initialized yet.
		 * TODO remove dependency on node_online
		 */
		if (!node_online(node))
			node_set_online(node);

		numa_set_node(cpu, node);
	}
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */

#else /* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!cpumask_available(mask)) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu",
		cpu, node, cpumask_pr_args(mask));
	return;
}

# ifndef CONFIG_NUMA_EMU
static void numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif	/* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if ((unsigned)node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (!cpumask_available(node_to_cpumask_map[node])) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_NUMA_KEEP_MEMINFO
static int meminfo_to_nid(struct numa_meminfo *mi, u64 start)
{
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			return mi->blk[i].nid;
	return NUMA_NO_NODE;
}
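
/*
 * Illustrative example (hypothetical values): given a numa_meminfo entry
 * { .start = 0x100000000, .end = 0x200000000, .nid = 1 }, meminfo_to_nid()
 * maps any address in [0x100000000, 0x200000000) to node 1. The end address
 * is exclusive, so 0x200000000 itself falls through to NUMA_NO_NODE unless
 * another block covers it.
 */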

int phys_to_target_node(phys_addr_t start)
{
	int nid = meminfo_to_nid(&numa_meminfo, start);

	/*
	 * Prefer online nodes, but if reserved memory might be
	 * hot-added continue the search with reserved ranges.
	 */
	if (nid != NUMA_NO_NODE)
		return nid;

	return meminfo_to_nid(&numa_reserved_meminfo, start);
}
EXPORT_SYMBOL_GPL(phys_to_target_node);

int memory_add_physaddr_to_nid(u64 start)
{
	int nid = meminfo_to_nid(&numa_meminfo, start);

	if (nid == NUMA_NO_NODE)
		nid = numa_meminfo.blk[0].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);

static int __init cmp_memblk(const void *a, const void *b)
{
	const struct numa_memblk *ma = *(const struct numa_memblk **)a;
	const struct numa_memblk *mb = *(const struct numa_memblk **)b;

	/* compare, don't subtract: a u64 difference truncated to int can flip sign */
	return (ma->start > mb->start) - (ma->start < mb->start);
}

static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;

/**
 * numa_fill_memblks - Fill gaps in numa_meminfo memblks
 * @start: address to begin fill
 * @end: address to end fill
 *
 * Find and extend numa_meminfo memblks to cover the @start-@end
 * physical address range, such that the first memblk includes
 * @start, the last memblk includes @end, and any gaps in between
 * are filled.
 *
 * RETURNS:
 * 0		  : Success
 * NUMA_NO_MEMBLK : No memblk exists in @start-@end range
 */
int __init numa_fill_memblks(u64 start, u64 end)
{
	struct numa_memblk **blk = &numa_memblk_list[0];
	struct numa_meminfo *mi = &numa_meminfo;
	int count = 0;
	u64 prev_end;

	/*
	 * Create a list of pointers to numa_meminfo memblks that
	 * overlap start, end. Exclude (start == bi->end) since
	 * end addresses in both a CFMWS range and a memblk range
	 * are exclusive.
	 *
	 * This list of pointers is used to make in-place changes
	 * that fill out the numa_meminfo memblks.
	 */
	for (int i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		if (start < bi->end && end >= bi->start) {
			blk[count] = &mi->blk[i];
			count++;
		}
	}
	if (!count)
		return NUMA_NO_MEMBLK;

	/* Sort the list of pointers in memblk->start order */
	sort(&blk[0], count, sizeof(blk[0]), cmp_memblk, NULL);

	/* Make sure the first/last memblks include start/end */
	blk[0]->start = min(blk[0]->start, start);
	blk[count - 1]->end = max(blk[count - 1]->end, end);

	/*
	 * Fill any gaps by tracking the previous memblk's
	 * end address and backfilling to it if needed.
	 */
	prev_end = blk[0]->end;
	for (int i = 1; i < count; i++) {
		struct numa_memblk *curr = blk[i];

		if (prev_end >= curr->start) {
			if (prev_end < curr->end)
				prev_end = curr->end;
		} else {
			curr->start = prev_end;
			prev_end = curr->end;
		}
	}
	return 0;
}

#endif
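
/*
 * Worked example for numa_fill_memblks() above (hypothetical values, not
 * real addresses): given memblks [0x000-0x100) on node 0 and [0x300-0x400)
 * on node 1, a fill request for [0x000-0x400) keeps the first block as is
 * and pulls the second block's start down to the previous block's end, so
 * the two blocks cover the whole window with no gap:
 *
 *	before:	[0x000-0x100) nid=0	[0x300-0x400) nid=1
 *	after:	[0x000-0x100) nid=0	[0x100-0x400) nid=1
 */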