// SPDX-License-Identifier: GPL-2.0-only
/* Common code for 32 and 64-bit NUMA */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>
#include <linux/sort.h>

#include <asm/e820/api.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo __initdata_or_meminfo;
static struct numa_meminfo numa_reserved_meminfo __initdata_or_meminfo;

static int numa_distance_cnt;
static u8 *numa_distance;
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
	if (!strncmp(opt, "fake=", 5))
		return numa_emu_cmdline(opt + 5);
	if (!strncmp(opt, "noacpi", 6))
		disable_srat();
	if (!strncmp(opt, "nohmat", 6))
		disable_hmat();
	return 0;
}
early_param("numa", numa_setup);
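
/*
 * Illustrative kernel command lines handled by numa_setup() above (a
 * sketch of the documented options, not an exhaustive list; the node
 * count below is an arbitrary example):
 *
 *	numa=off	disable NUMA, fall through to dummy_numa_init()
 *	numa=fake=4	split memory into 4 emulated nodes (NUMA emulation)
 *	numa=noacpi	ignore the ACPI SRAT table (disable_srat())
 *	numa=nohmat	ignore the ACPI HMAT table (disable_hmat())
 */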

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}

static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
			nid, start, end - 1);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}

/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

/**
 * numa_move_tail_memblk - Move a numa_memblk from one numa_meminfo to another
 * @dst: numa_meminfo to append block to
 * @idx: Index of memblk to remove
 * @src: numa_meminfo to remove memblk from
 */
static void __init numa_move_tail_memblk(struct numa_meminfo *dst, int idx,
					 struct numa_meminfo *src)
{
	dst->blk[dst->nr_blks++] = src->blk[idx];
	numa_remove_memblk_from(idx, src);
}

/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}
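
/*
 * Example (hypothetical values): a firmware affinity parser that found
 * a 4 GiB range belonging to proximity domain 1 would register it as
 *
 *	numa_add_memblk(1, 0x100000000ULL, 0x200000000ULL);
 *
 * i.e. [4 GiB, 8 GiB) on node 1 -- @end is exclusive, as the zero
 * length check in numa_add_memblk_to() implies.
 */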

/* Allocate NODE_DATA for a node on the local memory */
static void __init alloc_node_data(int nid)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	void *nd;
	int tnid;

	/*
	 * Allocate node data.  Try node-local memory and then any node.
	 * Never allocate in DMA zone.
	 */
	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
		       nd_size, nid);
		return;
	}
	nd = __va(nd_pa);

	/* report and initialize */
	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));

	node_set_online(nid);
}

/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = PFN_PHYS(max_pfn);
	int i, j, k;

	/* first, trim all entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* move / save reserved memory ranges */
		if (!memblock_overlaps_region(&memblock.memory,
					      bi->start, bi->end - bi->start)) {
			numa_move_tail_memblk(&numa_reserved_meminfo, i--, mi);
			continue;
		}

		/* make sure all non-reserved blocks are inside the limits */
		bi->start = max(bi->start, low);

		/* preserve info for non-RAM areas above 'max_pfn': */
		if (bi->end > high) {
			numa_add_memblk_to(bi->nid, high, bi->end,
					   &numa_reserved_meminfo);
			bi->end = high;
		}

		/* and there's no empty block */
		if (bi->start >= bi->end)
			numa_remove_memblk_from(i--, mi);
	}

	/* merge neighboring / overlapping entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks.  Whine
			 * about but allow overlaps of the same nid.  They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
					       bi->nid, bi->start, bi->end - 1,
					       bj->nid, bj->start, bj->end - 1);
					return -EINVAL;
				}
				pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
					bi->nid, bi->start, bi->end - 1,
					bj->start, bj->end - 1);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = min(bi->start, bj->start);
			end = max(bi->end, bj->end);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
			       bi->nid, bi->start, bi->end - 1, bj->start,
			       bj->end - 1, start, end - 1);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	/* clear unused ones */
	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}
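
/*
 * Worked example for the merge pass above (hypothetical layout): two
 * node 0 blocks [0x0, 0x1000) and [0x1000, 0x2000), with no block of
 * another node in between, are joined into a single block
 * [0x0, 0x2000) and a "Node 0 ... + ... -> ..." line is logged.
 * The same two adjacent ranges on *different* nids are left alone:
 * they neither overlap (no -EINVAL) nor share a nid (no merge).
 */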

/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}

/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
		memblock_free(numa_distance, size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	/* enable table creation */
}

static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_phys_alloc_range(size, PAGE_SIZE, 0,
					 PFN_PHYS(max_pfn_mapped));
	if (!phys) {
		pr_warn("Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}

/**
 * numa_set_distance - Set the NUMA distance from one NUMA node to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.  If the distance
 * table doesn't exist, one large enough to accommodate all the currently
 * known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation, or if @distance doesn't make sense, the
 * call is ignored.  This allows simplification of specific NUMA config
 * implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
	    from < 0 || to < 0) {
		pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}
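
/*
 * Example (hypothetical distances): a SLIT-style parser for a two-node
 * system would typically fill the table as
 *
 *	numa_set_distance(0, 0, LOCAL_DISTANCE);	(== 10)
 *	numa_set_distance(0, 1, 21);
 *	numa_set_distance(1, 0, 21);
 *	numa_set_distance(1, 1, LOCAL_DISTANCE);
 *
 * after which __node_distance(0, 1) returns 21.  The first call also
 * triggers the lazy numa_alloc_distance() above.
 */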

int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

/*
 * Mark all currently memblock-reserved physical memory (which covers the
 * kernel's own memory ranges) as hot-unpluggable.
 */
static void __init numa_clear_kernel_node_hotplug(void)
{
	nodemask_t reserved_nodemask = NODE_MASK_NONE;
	struct memblock_region *mb_region;
	int i;

	/*
	 * We have to do some preprocessing of memblock regions, to
	 * make them suitable for reservation.
	 *
	 * At this time, all memory regions reserved by memblock are
	 * used by the kernel, but those regions are not split up
	 * along node boundaries yet, and don't necessarily have their
	 * node ID set yet either.
	 *
	 * So iterate over all memory known to the x86 architecture,
	 * and use those ranges to set the nid in memblock.reserved.
	 * This will split up the memblock regions along node
	 * boundaries and will set the node IDs as well.
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;
		int ret;

		ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
		WARN_ON_ONCE(ret);
	}

	/*
	 * Now go over all reserved memblock regions, to construct a
	 * node mask of all kernel reserved memory areas.
	 *
	 * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
	 *   numa_meminfo might not include all memblock.reserved
	 *   memory ranges, because quirks such as trim_snb_memory()
	 *   reserve specific pages for Sandy Bridge graphics. ]
	 */
	for_each_reserved_mem_region(mb_region) {
		int nid = memblock_get_region_node(mb_region);

		if (nid != NUMA_NO_NODE)
			node_set(nid, reserved_nodemask);
	}

	/*
	 * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
	 * belonging to the reserved node mask.
	 *
	 * Note that this will include memory regions that reside
	 * on nodes that contain kernel memory - entire nodes
	 * become hot-unpluggable:
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;

		if (!node_isset(mb->nid, reserved_nodemask))
			continue;

		memblock_clear_hotplug(mb->start, mb->end - mb->start);
	}
}

static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *mb = &mi->blk[i];

		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.memory, mb->nid);
	}

	/*
	 * Very early on, the kernel has to use some memory, e.g. to load
	 * the kernel image.  We cannot prevent this anyway, so any node
	 * the kernel resides in must stay un-hotpluggable.
	 *
	 * Also, by the time we get here, allocating node data won't fail.
	 */
	numa_clear_kernel_node_hotplug();

	/*
	 * If the sections array is going to be used for pfn -> nid
	 * mapping, check whether its granularity is fine enough.
	 */
	if (IS_ENABLED(NODE_NOT_IN_PAGE_FLAGS)) {
		unsigned long pfn_align = node_map_pfn_alignment();

		if (pfn_align && pfn_align < PAGES_PER_SECTION) {
			pr_warn("Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
				PFN_PHYS(pfn_align) >> 20,
				PFN_PHYS(PAGES_PER_SECTION) >> 20);
			return -EINVAL;
		}
	}

	if (!memblock_validate_numa_coverage(SZ_1M))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start >= end)
			continue;

		alloc_node_data(nid);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU.  This breaks the 1:1 cpu->node
 * mapping.  To avoid this, fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet.  We round robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node_in(rr, node_online_map);
	}
}

static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
				  NUMA_NO_NODE));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
				  NUMA_NO_NODE));
	/* In case that parsing SRAT failed. */
	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;

	/*
	 * We reset memblock back to the top-down direction here because
	 * if we configured ACPI_NUMA, we have parsed SRAT in init_func().
	 * It is OK to have the reset here even if we didn't configure
	 * ACPI_NUMA, or if the ACPI NUMA init fails and we fall back to
	 * dummy NUMA init.
	 */
	memblock_set_bottom_up(false);

	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();

	return 0;
}

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds.  The
 * last fallback is dummy single node config encompassing whole memory and
 * never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}

	numa_init(dummy_numa_init);
}

/*
 * A node may exist which has one or more Generic Initiators but no CPUs and no
 * memory.
 *
 * This function must be called after init_cpu_to_node(), to ensure that any
 * memoryless CPU nodes have already been brought online, and before the
 * node_data[nid] is needed for zone list setup in build_all_zonelists().
 *
 * When this function is called, any nodes containing memory and/or CPUs
 * will already be online and there is no need to do anything extra, even if
 * they also contain one or more Generic Initiators.
 */
void __init init_gi_nodes(void)
{
	int nid;

	/*
	 * Exclude this node from
	 *   bringup_nonboot_cpus
	 *    cpu_up
	 *     __try_online_node
	 *      register_one_node
	 * because node_subsys is not initialized yet.
	 * TODO remove dependency on node_online
	 */
	for_each_node_state(nid, N_GENERIC_INITIATOR)
		if (!node_online(nid))
			node_set_online(nid);
}

/*
 * Set up the early cpu_to_node mapping.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.  This means we
 * skip cpu_to_node[] initialisation for NUMA emulation and the fake node
 * case (when running a kernel compiled for NUMA on a non-NUMA box), which
 * is OK because cpu_to_node[] is already initialized in a round robin
 * manner by numa_init_array() prior to this call, and that initialization
 * is good enough for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;

		/*
		 * Exclude this node from
		 *   bringup_nonboot_cpus
		 *    cpu_up
		 *     __try_online_node
		 *      register_one_node
		 * because node_subsys is not initialized yet.
		 * TODO remove dependency on node_online
		 */
		if (!node_online(node))
			node_set_online(node);

		numa_set_node(cpu, node);
	}
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */

#else /* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
		       "cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
		       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!cpumask_available(mask)) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
	       enable ? "numa_add_cpu" : "numa_remove_cpu",
	       cpu, node, cpumask_pr_args(mask));
}

# ifndef CONFIG_NUMA_EMU
static void numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif	/* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if ((unsigned)node >= nr_node_ids) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n",
		       node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (!cpumask_available(node_to_cpumask_map[node])) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): no node_to_cpumask_map!\n",
		       node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_NUMA_KEEP_MEMINFO
static int meminfo_to_nid(struct numa_meminfo *mi, u64 start)
{
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			return mi->blk[i].nid;
	return NUMA_NO_NODE;
}

int phys_to_target_node(phys_addr_t start)
{
	int nid = meminfo_to_nid(&numa_meminfo, start);

	/*
	 * Prefer online nodes, but if reserved memory might be
	 * hot-added continue the search with reserved ranges.
	 */
	if (nid != NUMA_NO_NODE)
		return nid;

	return meminfo_to_nid(&numa_reserved_meminfo, start);
}
EXPORT_SYMBOL_GPL(phys_to_target_node);
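
/*
 * Usage sketch (hypothetical address): for a firmware-described range
 * that is not plain RAM yet -- e.g. a to-be-hot-added region that was
 * saved into numa_reserved_meminfo by numa_cleanup_meminfo() --
 * phys_to_target_node(0x2000000000ULL) falls back to the reserved
 * meminfo and returns that range's nid instead of NUMA_NO_NODE.
 */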

int memory_add_physaddr_to_nid(u64 start)
{
	int nid = meminfo_to_nid(&numa_meminfo, start);

	if (nid == NUMA_NO_NODE)
		nid = numa_meminfo.blk[0].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);

#endif

static int __init cmp_memblk(const void *a, const void *b)
{
	const struct numa_memblk *ma = *(const struct numa_memblk **)a;
	const struct numa_memblk *mb = *(const struct numa_memblk **)b;

	return (ma->start > mb->start) - (ma->start < mb->start);
}

static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;

/**
 * numa_fill_memblks - Fill gaps in numa_meminfo memblks
 * @start: address to begin fill
 * @end: address to end fill
 *
 * Find and extend numa_meminfo memblks to cover the physical
 * address range @start-@end
 *
 * RETURNS:
 * 0              : Success
 * NUMA_NO_MEMBLK : No memblks exist in address range @start-@end
 */
int __init numa_fill_memblks(u64 start, u64 end)
{
	struct numa_memblk **blk = &numa_memblk_list[0];
	struct numa_meminfo *mi = &numa_meminfo;
	int count = 0;
	u64 prev_end;

	/*
	 * Create a list of pointers to numa_meminfo memblks that
	 * overlap start, end.  The list is used to make in-place
	 * changes that fill out the numa_meminfo memblks.
	 */
	for (int i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		if (memblock_addrs_overlap(start, end - start, bi->start,
					   bi->end - bi->start)) {
			blk[count] = &mi->blk[i];
			count++;
		}
	}
	if (!count)
		return NUMA_NO_MEMBLK;

	/* Sort the list of pointers in memblk->start order */
	sort(&blk[0], count, sizeof(blk[0]), cmp_memblk, NULL);

	/* Make sure the first/last memblks include start/end */
	blk[0]->start = min(blk[0]->start, start);
	blk[count - 1]->end = max(blk[count - 1]->end, end);

	/*
	 * Fill any gaps by tracking the previous memblk's end address
	 * and backfilling to it if needed.
	 */
	prev_end = blk[0]->end;
	for (int i = 1; i < count; i++) {
		struct numa_memblk *curr = blk[i];

		if (prev_end >= curr->start) {
			if (prev_end < curr->end)
				prev_end = curr->end;
		} else {
			curr->start = prev_end;
			prev_end = curr->end;
		}
	}
	return 0;
}
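
/*
 * Worked example (hypothetical blocks): given memblks [0x0, 0x1000) on
 * node 0 and [0x3000, 0x4000) on node 1, numa_fill_memblks(0x0, 0x4000)
 * extends the node 1 block downwards to [0x1000, 0x4000), closing the
 * [0x1000, 0x3000) gap while leaving the node 0 block untouched.
 */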