xref: /openbmc/linux/arch/x86/mm/numa.c (revision 32a363d0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Common code for 32-bit and 64-bit NUMA */
3 #include <linux/acpi.h>
4 #include <linux/kernel.h>
5 #include <linux/mm.h>
6 #include <linux/string.h>
7 #include <linux/init.h>
8 #include <linux/memblock.h>
9 #include <linux/mmzone.h>
10 #include <linux/ctype.h>
11 #include <linux/nodemask.h>
12 #include <linux/sched.h>
13 #include <linux/topology.h>
14 
15 #include <asm/e820/api.h>
16 #include <asm/proto.h>
17 #include <asm/dma.h>
18 #include <asm/amd_nb.h>
19 
20 #include "numa_internal.h"
21 
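/* Non-zero when NUMA has been disabled with the "numa=off" boot option. */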
22 int numa_off;
23 nodemask_t numa_nodes_parsed __initdata;
24 
25 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
26 EXPORT_SYMBOL(node_data);
27 
28 static struct numa_meminfo numa_meminfo __initdata_or_meminfo;
29 static struct numa_meminfo numa_reserved_meminfo __initdata_or_meminfo;
30 
31 static int numa_distance_cnt;
32 static u8 *numa_distance;
33 
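/*
 * Parse the early "numa=" boot option: "numa=off" disables NUMA, "numa=fake=..."
 * is handed to the NUMA emulation code, and "numa=noacpi" ignores the ACPI SRAT.
 */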
34 static __init int numa_setup(char *opt)
35 {
36 	if (!opt)
37 		return -EINVAL;
38 	if (!strncmp(opt, "off", 3))
39 		numa_off = 1;
40 #ifdef CONFIG_NUMA_EMU
41 	if (!strncmp(opt, "fake=", 5))
42 		numa_emu_cmdline(opt + 5);
43 #endif
44 #ifdef CONFIG_ACPI_NUMA
45 	if (!strncmp(opt, "noacpi", 6))
46 		acpi_numa = -1;
47 #endif
48 	return 0;
49 }
50 early_param("numa", numa_setup);
51 
52 /*
53  * apicid, cpu, node mappings
54  */
55 s16 __apicid_to_node[MAX_LOCAL_APIC] = {
56 	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
57 };
58 
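/* Return the parsed NUMA node for @cpu's APIC ID, or NUMA_NO_NODE if unknown. */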
59 int numa_cpu_node(int cpu)
60 {
61 	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
62 
63 	if (apicid != BAD_APICID)
64 		return __apicid_to_node[apicid];
65 	return NUMA_NO_NODE;
66 }
67 
68 cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
69 EXPORT_SYMBOL(node_to_cpumask_map);
70 
71 /*
72  * Map cpu index to node index
73  */
74 DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
75 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
76 
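/*
 * Record the cpu -> node mapping, using the early map before the per-cpu
 * areas exist and the per-cpu variable afterwards.
 */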
77 void numa_set_node(int cpu, int node)
78 {
79 	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
80 
81 	/* early setting, no percpu area yet */
82 	if (cpu_to_node_map) {
83 		cpu_to_node_map[cpu] = node;
84 		return;
85 	}
86 
87 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
88 	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
89 		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
90 		dump_stack();
91 		return;
92 	}
93 #endif
94 	per_cpu(x86_cpu_to_node_map, cpu) = node;
95 
96 	set_cpu_numa_node(cpu, node);
97 }
98 
99 void numa_clear_node(int cpu)
100 {
101 	numa_set_node(cpu, NUMA_NO_NODE);
102 }
103 
104 /*
105  * Allocate node_to_cpumask_map based on number of available nodes
106  * Requires node_possible_map to be valid.
107  *
108  * Note: cpumask_of_node() is not valid until after this is done.
109  * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
110  */
111 void __init setup_node_to_cpumask_map(void)
112 {
113 	unsigned int node;
114 
115 	/* setup nr_node_ids if not done yet */
116 	if (nr_node_ids == MAX_NUMNODES)
117 		setup_nr_node_ids();
118 
119 	/* allocate the map */
120 	for (node = 0; node < nr_node_ids; node++)
121 		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
122 
123 	/* cpumask_of_node() will now work */
124 	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
125 }
126 
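/*
 * Append the [@start, @end) range for @nid to @mi.  Zero-length blocks are
 * ignored, invalid ones are warned about and dropped, and -EINVAL is
 * returned once the table is full.
 */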
127 static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
128 				     struct numa_meminfo *mi)
129 {
130 	/* ignore zero length blks */
131 	if (start == end)
132 		return 0;
133 
134 	/* whine about and ignore invalid blks */
135 	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
136 		pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
137 			nid, start, end - 1);
138 		return 0;
139 	}
140 
141 	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
142 		pr_err("too many memblk ranges\n");
143 		return -EINVAL;
144 	}
145 
146 	mi->blk[mi->nr_blks].start = start;
147 	mi->blk[mi->nr_blks].end = end;
148 	mi->blk[mi->nr_blks].nid = nid;
149 	mi->nr_blks++;
150 	return 0;
151 }
152 
153 /**
154  * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
155  * @idx: Index of memblk to remove
156  * @mi: numa_meminfo to remove memblk from
157  *
158  * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
159  * decrementing @mi->nr_blks.
160  */
161 void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
162 {
163 	mi->nr_blks--;
164 	memmove(&mi->blk[idx], &mi->blk[idx + 1],
165 		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
166 }
167 
168 /**
169  * numa_move_tail_memblk - Move a numa_memblk from one numa_meminfo to another
170  * @dst: numa_meminfo to append block to
171  * @idx: Index of memblk to remove
172  * @src: numa_meminfo to remove memblk from
173  */
174 static void __init numa_move_tail_memblk(struct numa_meminfo *dst, int idx,
175 					 struct numa_meminfo *src)
176 {
177 	dst->blk[dst->nr_blks++] = src->blk[idx];
178 	numa_remove_memblk_from(idx, src);
179 }
180 
181 /**
182  * numa_add_memblk - Add one numa_memblk to numa_meminfo
183  * @nid: NUMA node ID of the new memblk
184  * @start: Start address of the new memblk
185  * @end: End address of the new memblk
186  *
187  * Add a new memblk to the default numa_meminfo.
188  *
189  * RETURNS:
190  * 0 on success, -errno on failure.
191  */
192 int __init numa_add_memblk(int nid, u64 start, u64 end)
193 {
194 	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
195 }
196 
197 /* Allocate NODE_DATA for a node on the local memory */
198 static void __init alloc_node_data(int nid)
199 {
200 	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
201 	u64 nd_pa;
202 	void *nd;
203 	int tnid;
204 
205 	/*
206 	 * Allocate node data.  Try node-local memory and then any node.
207 	 * Never allocate in DMA zone.
208 	 */
209 	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
210 	if (!nd_pa) {
211 		pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
212 		       nd_size, nid);
213 		return;
214 	}
215 	nd = __va(nd_pa);
216 
217 	/* report and initialize */
218 	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
219 	       nd_pa, nd_pa + nd_size - 1);
220 	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
221 	if (tnid != nid)
222 		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);
223 
224 	node_data[nid] = nd;
225 	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
226 
227 	node_set_online(nid);
228 }
229 
230 /**
231  * numa_cleanup_meminfo - Cleanup a numa_meminfo
232  * @mi: numa_meminfo to clean up
233  *
234  * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
235  * conflicts and clear unused memblks.
236  *
237  * RETURNS:
238  * 0 on success, -errno on failure.
239  */
240 int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
241 {
242 	const u64 low = 0;
243 	const u64 high = PFN_PHYS(max_pfn);
244 	int i, j, k;
245 
246 	/* first, trim all entries */
247 	for (i = 0; i < mi->nr_blks; i++) {
248 		struct numa_memblk *bi = &mi->blk[i];
249 
250 		/* move / save reserved memory ranges */
251 		if (!memblock_overlaps_region(&memblock.memory,
252 					bi->start, bi->end - bi->start)) {
253 			numa_move_tail_memblk(&numa_reserved_meminfo, i--, mi);
254 			continue;
255 		}
256 
257 		/* make sure all non-reserved blocks are inside the limits */
258 		bi->start = max(bi->start, low);
259 		bi->end = min(bi->end, high);
260 
261 		/* and there's no empty block */
262 		if (bi->start >= bi->end)
263 			numa_remove_memblk_from(i--, mi);
264 	}
265 
266 	/* merge neighboring / overlapping entries */
267 	for (i = 0; i < mi->nr_blks; i++) {
268 		struct numa_memblk *bi = &mi->blk[i];
269 
270 		for (j = i + 1; j < mi->nr_blks; j++) {
271 			struct numa_memblk *bj = &mi->blk[j];
272 			u64 start, end;
273 
274 			/*
275 			 * See whether there are overlapping blocks.  Whine
276 			 * about but allow overlaps of the same nid.  They
277 			 * will be merged below.
278 			 */
279 			if (bi->end > bj->start && bi->start < bj->end) {
280 				if (bi->nid != bj->nid) {
281 					pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
282 					       bi->nid, bi->start, bi->end - 1,
283 					       bj->nid, bj->start, bj->end - 1);
284 					return -EINVAL;
285 				}
286 				pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
287 					bi->nid, bi->start, bi->end - 1,
288 					bj->start, bj->end - 1);
289 			}
290 
291 			/*
292 			 * Join together blocks on the same node when the
293 			 * hole between them doesn't overlap with memory on
294 			 * other nodes.
295 			 */
296 			if (bi->nid != bj->nid)
297 				continue;
298 			start = min(bi->start, bj->start);
299 			end = max(bi->end, bj->end);
300 			for (k = 0; k < mi->nr_blks; k++) {
301 				struct numa_memblk *bk = &mi->blk[k];
302 
303 				if (bi->nid == bk->nid)
304 					continue;
305 				if (start < bk->end && end > bk->start)
306 					break;
307 			}
308 			if (k < mi->nr_blks)
309 				continue;
310 			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
311 			       bi->nid, bi->start, bi->end - 1, bj->start,
312 			       bj->end - 1, start, end - 1);
313 			bi->start = start;
314 			bi->end = end;
315 			numa_remove_memblk_from(j--, mi);
316 		}
317 	}
318 
319 	/* clear unused ones */
320 	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
321 		mi->blk[i].start = mi->blk[i].end = 0;
322 		mi->blk[i].nid = NUMA_NO_NODE;
323 	}
324 
325 	return 0;
326 }
327 
328 /*
329  * Set the nodes which have memory in @mi in *@nodemask.
330  */
331 static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
332 					      const struct numa_meminfo *mi)
333 {
334 	int i;
335 
336 	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
337 		if (mi->blk[i].start != mi->blk[i].end &&
338 		    mi->blk[i].nid != NUMA_NO_NODE)
339 			node_set(mi->blk[i].nid, *nodemask);
340 }
341 
342 /**
343  * numa_reset_distance - Reset NUMA distance table
344  *
345  * The current table is freed.  The next numa_set_distance() call will
346  * create a new one.
347  */
348 void __init numa_reset_distance(void)
349 {
350 	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);
351 
352 	/* numa_distance could be 1LU marking allocation failure, test cnt */
353 	if (numa_distance_cnt)
354 		memblock_free(__pa(numa_distance), size);
355 	numa_distance_cnt = 0;
356 	numa_distance = NULL;	/* enable table creation */
357 }
358 
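/*
 * Size the distance table from the highest node seen so far, allocate it
 * from memblock and fill it with the default LOCAL/REMOTE distances.  On
 * allocation failure numa_distance is set to a sentinel so that further
 * numa_set_distance() calls are ignored until numa_reset_distance().
 */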
359 static int __init numa_alloc_distance(void)
360 {
361 	nodemask_t nodes_parsed;
362 	size_t size;
363 	int i, j, cnt = 0;
364 	u64 phys;
365 
366 	/* size the new table and allocate it */
367 	nodes_parsed = numa_nodes_parsed;
368 	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);
369 
370 	for_each_node_mask(i, nodes_parsed)
371 		cnt = i;
372 	cnt++;
373 	size = cnt * cnt * sizeof(numa_distance[0]);
374 
375 	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
376 				      size, PAGE_SIZE);
377 	if (!phys) {
378 		pr_warn("Warning: can't allocate distance table!\n");
379 		/* don't retry until explicitly reset */
380 		numa_distance = (void *)1LU;
381 		return -ENOMEM;
382 	}
383 	memblock_reserve(phys, size);
384 
385 	numa_distance = __va(phys);
386 	numa_distance_cnt = cnt;
387 
388 	/* fill with the default distances */
389 	for (i = 0; i < cnt; i++)
390 		for (j = 0; j < cnt; j++)
391 			numa_distance[i * cnt + j] = i == j ?
392 				LOCAL_DISTANCE : REMOTE_DISTANCE;
393 	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);
394 
395 	return 0;
396 }
397 
398 /**
399  * numa_set_distance - Set the NUMA distance from one node to another
400  * @from: the 'from' node to set distance
401  * @to: the 'to' node to set distance
402  * @distance: NUMA distance
403  *
404  * Set the distance from node @from to @to to @distance.  If the distance
405  * table doesn't exist, one large enough to accommodate all the currently
406  * known nodes will be created.
407  *
408  * If such a table cannot be allocated, a warning is printed and further
409  * calls are ignored until the distance table is reset with
410  * numa_reset_distance().
411  *
412  * If @from or @to is higher than the highest known node or lower than zero
413  * at the time of table creation, or @distance doesn't make sense, the call
414  * is ignored.
415  * This is to allow simplification of specific NUMA config implementations.
416  */
417 void __init numa_set_distance(int from, int to, int distance)
418 {
419 	if (!numa_distance && numa_alloc_distance() < 0)
420 		return;
421 
422 	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
423 			from < 0 || to < 0) {
424 		pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
425 			     from, to, distance);
426 		return;
427 	}
428 
429 	if ((u8)distance != distance ||
430 	    (from == to && distance != LOCAL_DISTANCE)) {
431 		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
432 			     from, to, distance);
433 		return;
434 	}
435 
436 	numa_distance[from * numa_distance_cnt + to] = distance;
437 }
438 
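/*
 * Look up the distance between two nodes, falling back to the default
 * local/remote distances when either node is outside the table.
 */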
439 int __node_distance(int from, int to)
440 {
441 	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
442 		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
443 	return numa_distance[from * numa_distance_cnt + to];
444 }
445 EXPORT_SYMBOL(__node_distance);
446 
447 /*
448  * Sanity check to catch more bad NUMA configurations (they are amazingly
449  * common).  Make sure the nodes cover all memory.
450  */
451 static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
452 {
453 	u64 numaram, e820ram;
454 	int i;
455 
456 	numaram = 0;
457 	for (i = 0; i < mi->nr_blks; i++) {
458 		u64 s = mi->blk[i].start >> PAGE_SHIFT;
459 		u64 e = mi->blk[i].end >> PAGE_SHIFT;
460 		numaram += e - s;
461 		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
462 		if ((s64)numaram < 0)
463 			numaram = 0;
464 	}
465 
466 	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
467 
468 	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
469 	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
470 		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
471 		       (numaram << PAGE_SHIFT) >> 20,
472 		       (e820ram << PAGE_SHIFT) >> 20);
473 		return false;
474 	}
475 	return true;
476 }
477 
478 /*
479  * Mark all currently memblock-reserved physical memory (which covers the
480  * kernel's own memory ranges) as not hotpluggable.
481  */
482 static void __init numa_clear_kernel_node_hotplug(void)
483 {
484 	nodemask_t reserved_nodemask = NODE_MASK_NONE;
485 	struct memblock_region *mb_region;
486 	int i;
487 
488 	/*
489 	 * We have to do some preprocessing of memblock regions, to
490 	 * make them suitable for reservation.
491 	 *
492 	 * At this time, all memory regions reserved by memblock are
493 	 * used by the kernel, but those regions are not split up
494 	 * along node boundaries yet, and don't necessarily have their
495 	 * node ID set yet either.
496 	 *
497 	 * So iterate over all memory known to the x86 architecture,
498 	 * and use those ranges to set the nid in memblock.reserved.
499 	 * This will split up the memblock regions along node
500 	 * boundaries and will set the node IDs as well.
501 	 */
502 	for (i = 0; i < numa_meminfo.nr_blks; i++) {
503 		struct numa_memblk *mb = numa_meminfo.blk + i;
504 		int ret;
505 
506 		ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
507 		WARN_ON_ONCE(ret);
508 	}
509 
510 	/*
511 	 * Now go over all reserved memblock regions, to construct a
512 	 * node mask of all kernel reserved memory areas.
513 	 *
514 	 * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
515 	 *   numa_meminfo might not include all memblock.reserved
516 	 *   memory ranges, because quirks such as trim_snb_memory()
517 	 *   reserve specific pages for Sandy Bridge graphics. ]
518 	 */
519 	for_each_memblock(reserved, mb_region) {
520 		if (mb_region->nid != MAX_NUMNODES)
521 			node_set(mb_region->nid, reserved_nodemask);
522 	}
523 
524 	/*
525 	 * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
526 	 * belonging to the reserved node mask.
527 	 *
528 	 * Note that this will include memory regions that reside
529 	 * on nodes that contain kernel memory - entire nodes
530 	 * become hot-unpluggable:
531 	 */
532 	for (i = 0; i < numa_meminfo.nr_blks; i++) {
533 		struct numa_memblk *mb = numa_meminfo.blk + i;
534 
535 		if (!node_isset(mb->nid, reserved_nodemask))
536 			continue;
537 
538 		memblock_clear_hotplug(mb->start, mb->end - mb->start);
539 	}
540 }
541 
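/*
 * Propagate the sanitized meminfo into memblock (setting node IDs), keep the
 * kernel's nodes non-hotpluggable, sanity-check memory coverage and section
 * alignment, and finally allocate NODE_DATA for every node that has memory.
 */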
542 static int __init numa_register_memblks(struct numa_meminfo *mi)
543 {
544 	unsigned long uninitialized_var(pfn_align);
545 	int i, nid;
546 
547 	/* Account for nodes with cpus and no memory */
548 	node_possible_map = numa_nodes_parsed;
549 	numa_nodemask_from_meminfo(&node_possible_map, mi);
550 	if (WARN_ON(nodes_empty(node_possible_map)))
551 		return -EINVAL;
552 
553 	for (i = 0; i < mi->nr_blks; i++) {
554 		struct numa_memblk *mb = &mi->blk[i];
555 		memblock_set_node(mb->start, mb->end - mb->start,
556 				  &memblock.memory, mb->nid);
557 	}
558 
559 	/*
560 	 * Very early in boot the kernel has to use some memory, e.g. for
561 	 * loading the kernel image. We cannot prevent this anyway, so any
562 	 * node the kernel resides in must stay non-hotpluggable.
563 	 *
564 	 * And by the time we get here, allocating node data won't fail.
565 	 */
566 	numa_clear_kernel_node_hotplug();
567 
568 	/*
569 	 * If the sections array is going to be used for the pfn -> nid
570 	 * mapping, check whether its granularity is fine enough.
571 	 */
572 #ifdef NODE_NOT_IN_PAGE_FLAGS
573 	pfn_align = node_map_pfn_alignment();
574 	if (pfn_align && pfn_align < PAGES_PER_SECTION) {
575 		printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
576 		       PFN_PHYS(pfn_align) >> 20,
577 		       PFN_PHYS(PAGES_PER_SECTION) >> 20);
578 		return -EINVAL;
579 	}
580 #endif
581 	if (!numa_meminfo_cover_memory(mi))
582 		return -EINVAL;
583 
584 	/* Finally register nodes. */
585 	for_each_node_mask(nid, node_possible_map) {
586 		u64 start = PFN_PHYS(max_pfn);
587 		u64 end = 0;
588 
589 		for (i = 0; i < mi->nr_blks; i++) {
590 			if (nid != mi->blk[i].nid)
591 				continue;
592 			start = min(mi->blk[i].start, start);
593 			end = max(mi->blk[i].end, end);
594 		}
595 
596 		if (start >= end)
597 			continue;
598 
599 		/*
600 		 * Don't confuse VM with a node that doesn't have the
601 		 * minimum amount of memory:
602 		 */
603 		if (end && (end - start) < NODE_MIN_SIZE)
604 			continue;
605 
606 		alloc_node_data(nid);
607 	}
608 
609 	/* Dump memblock with node info and return. */
610 	memblock_dump_all();
611 	return 0;
612 }
613 
614 /*
615  * There are unfortunately some poorly designed mainboards around that
616  * only connect memory to a single CPU. This breaks the 1:1 cpu->node
617  * mapping. To avoid this, fill in the mapping for all possible CPUs,
618  * since the number of CPUs is not known yet. We round-robin over the
619  * existing nodes.
620  */
621 static void __init numa_init_array(void)
622 {
623 	int rr, i;
624 
625 	rr = first_node(node_online_map);
626 	for (i = 0; i < nr_cpu_ids; i++) {
627 		if (early_cpu_to_node(i) != NUMA_NO_NODE)
628 			continue;
629 		numa_set_node(i, rr);
630 		rr = next_node_in(rr, node_online_map);
631 	}
632 }
633 
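/*
 * Common NUMA init path: reset all previously parsed state and the distance
 * table, run @init_func (ACPI, AMD or dummy), sanitize and register the
 * resulting meminfo, then fix up the cpu -> node mappings.
 */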
634 static int __init numa_init(int (*init_func)(void))
635 {
636 	int i;
637 	int ret;
638 
639 	for (i = 0; i < MAX_LOCAL_APIC; i++)
640 		set_apicid_to_node(i, NUMA_NO_NODE);
641 
642 	nodes_clear(numa_nodes_parsed);
643 	nodes_clear(node_possible_map);
644 	nodes_clear(node_online_map);
645 	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
646 	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
647 				  MAX_NUMNODES));
648 	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
649 				  MAX_NUMNODES));
650 	/* In case parsing the SRAT failed. */
651 	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
652 	numa_reset_distance();
653 
654 	ret = init_func();
655 	if (ret < 0)
656 		return ret;
657 
658 	/*
659 	 * We reset memblock back to the top-down direction
660 	 * here because if we configured ACPI_NUMA, we have
661 	 * parsed SRAT in init_func(). It is OK to have the
662 	 * reset here even if we didn't configure ACPI_NUMA,
663 	 * or if ACPI NUMA init fails and we fall back to the
664 	 * dummy NUMA init.
665 	 */
666 	memblock_set_bottom_up(false);
667 
668 	ret = numa_cleanup_meminfo(&numa_meminfo);
669 	if (ret < 0)
670 		return ret;
671 
672 	numa_emulation(&numa_meminfo, numa_distance_cnt);
673 
674 	ret = numa_register_memblks(&numa_meminfo);
675 	if (ret < 0)
676 		return ret;
677 
678 	for (i = 0; i < nr_cpu_ids; i++) {
679 		int nid = early_cpu_to_node(i);
680 
681 		if (nid == NUMA_NO_NODE)
682 			continue;
683 		if (!node_online(nid))
684 			numa_clear_node(i);
685 	}
686 	numa_init_array();
687 
688 	return 0;
689 }
690 
691 /**
692  * dummy_numa_init - Fallback dummy NUMA init
693  *
694  * Used if there's no underlying NUMA architecture, NUMA initialization
695  * fails, or NUMA is disabled on the command line.
696  *
697  * Must online at least one node and add memory blocks that cover all
698  * allowed memory.  This function must not fail.
699  */
700 static int __init dummy_numa_init(void)
701 {
702 	printk(KERN_INFO "%s\n",
703 	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
704 	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
705 	       0LLU, PFN_PHYS(max_pfn) - 1);
706 
707 	node_set(0, numa_nodes_parsed);
708 	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));
709 
710 	return 0;
711 }
712 
713 /**
714  * x86_numa_init - Initialize NUMA
715  *
716  * Try each configured NUMA initialization method until one succeeds.  The
717  * last fallback is a dummy single-node config encompassing the whole memory and
718  * never fails.
719  */
720 void __init x86_numa_init(void)
721 {
722 	if (!numa_off) {
723 #ifdef CONFIG_ACPI_NUMA
724 		if (!numa_init(x86_acpi_numa_init))
725 			return;
726 #endif
727 #ifdef CONFIG_AMD_NUMA
728 		if (!numa_init(amd_numa_init))
729 			return;
730 #endif
731 	}
732 
733 	numa_init(dummy_numa_init);
734 }
735 
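/*
 * Bring a memory-less (CPU-only) node online with empty zones so that CPUs
 * can still be mapped to it.
 */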
736 static void __init init_memory_less_node(int nid)
737 {
738 	unsigned long zones_size[MAX_NR_ZONES] = {0};
739 	unsigned long zholes_size[MAX_NR_ZONES] = {0};
740 
741 	/* Allocate and initialize node data. Memory-less node is now online. */
742 	alloc_node_data(nid);
743 	free_area_init_node(nid, zones_size, 0, zholes_size);
744 
745 	/*
746 	 * All zonelists will be built later in start_kernel() after per cpu
747 	 * areas are initialized.
748 	 */
749 }
750 
751 /*
752  * Set up the early cpu_to_node mapping.
753  *
754  * Populate cpu_to_node[] only if the x86_cpu_to_apicid[]
755  * and apicid_to_node[] tables have valid entries for a CPU.
756  * This means we skip cpu_to_node[] initialisation for NUMA
757  * emulation and the fake-node case (when running a kernel compiled
758  * for NUMA on a non-NUMA box), which is OK because cpu_to_node[]
759  * has already been initialized in a round-robin manner by
760  * numa_init_array() prior to this call, and that initialization is
761  * good enough for the fake NUMA cases.
762  *
763  * Called before the per_cpu areas are set up.
764  */
765 void __init init_cpu_to_node(void)
766 {
767 	int cpu;
768 	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
769 
770 	BUG_ON(cpu_to_apicid == NULL);
771 
772 	for_each_possible_cpu(cpu) {
773 		int node = numa_cpu_node(cpu);
774 
775 		if (node == NUMA_NO_NODE)
776 			continue;
777 
778 		if (!node_online(node))
779 			init_memory_less_node(node);
780 
781 		numa_set_node(cpu, node);
782 	}
783 }
784 
785 #ifndef CONFIG_DEBUG_PER_CPU_MAPS
786 
787 # ifndef CONFIG_NUMA_EMU
788 void numa_add_cpu(int cpu)
789 {
790 	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
791 }
792 
793 void numa_remove_cpu(int cpu)
794 {
795 	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
796 }
797 # endif	/* !CONFIG_NUMA_EMU */
798 
799 #else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
800 
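/*
 * Debug version of the cpu -> node lookup: warn when it is used before the
 * per-cpu areas are set up and fall back to the early mapping table.
 */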
801 int __cpu_to_node(int cpu)
802 {
803 	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
804 		printk(KERN_WARNING
805 			"cpu_to_node(%d): usage too early!\n", cpu);
806 		dump_stack();
807 		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
808 	}
809 	return per_cpu(x86_cpu_to_node_map, cpu);
810 }
811 EXPORT_SYMBOL(__cpu_to_node);
812 
813 /*
814  * Same function as cpu_to_node() but used if called before the
815  * per_cpu areas are set up.
816  */
817 int early_cpu_to_node(int cpu)
818 {
819 	if (early_per_cpu_ptr(x86_cpu_to_node_map))
820 		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
821 
822 	if (!cpu_possible(cpu)) {
823 		printk(KERN_WARNING
824 			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
825 		dump_stack();
826 		return NUMA_NO_NODE;
827 	}
828 	return per_cpu(x86_cpu_to_node_map, cpu);
829 }
830 
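/*
 * Set or clear @cpu in @node's cpumask and log the resulting mask; complain
 * if the cpumask for @node has not been allocated yet.
 */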
831 void debug_cpumask_set_cpu(int cpu, int node, bool enable)
832 {
833 	struct cpumask *mask;
834 
835 	if (node == NUMA_NO_NODE) {
836 		/* early_cpu_to_node() already emits a warning and trace */
837 		return;
838 	}
839 	mask = node_to_cpumask_map[node];
840 	if (!mask) {
841 		pr_err("node_to_cpumask_map[%i] NULL\n", node);
842 		dump_stack();
843 		return;
844 	}
845 
846 	if (enable)
847 		cpumask_set_cpu(cpu, mask);
848 	else
849 		cpumask_clear_cpu(cpu, mask);
850 
851 	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
852 		enable ? "numa_add_cpu" : "numa_remove_cpu",
853 		cpu, node, cpumask_pr_args(mask));
854 	return;
855 }
856 
857 # ifndef CONFIG_NUMA_EMU
858 static void numa_set_cpumask(int cpu, bool enable)
859 {
860 	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
861 }
862 
863 void numa_add_cpu(int cpu)
864 {
865 	numa_set_cpumask(cpu, true);
866 }
867 
868 void numa_remove_cpu(int cpu)
869 {
870 	numa_set_cpumask(cpu, false);
871 }
872 # endif	/* !CONFIG_NUMA_EMU */
873 
874 /*
875  * Returns a pointer to the bitmask of CPUs on Node 'node'.
876  */
877 const struct cpumask *cpumask_of_node(int node)
878 {
879 	if ((unsigned)node >= nr_node_ids) {
880 		printk(KERN_WARNING
881 			"cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n",
882 			node, nr_node_ids);
883 		dump_stack();
884 		return cpu_none_mask;
885 	}
886 	if (node_to_cpumask_map[node] == NULL) {
887 		printk(KERN_WARNING
888 			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
889 			node);
890 		dump_stack();
891 		return cpu_online_mask;
892 	}
893 	return node_to_cpumask_map[node];
894 }
895 EXPORT_SYMBOL(cpumask_of_node);
896 
897 #endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */
898 
899 #ifdef CONFIG_NUMA_KEEP_MEMINFO
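/* Return the node of the numa_meminfo block that contains @start, or NUMA_NO_NODE. */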
900 static int meminfo_to_nid(struct numa_meminfo *mi, u64 start)
901 {
902 	int i;
903 
904 	for (i = 0; i < mi->nr_blks; i++)
905 		if (mi->blk[i].start <= start && mi->blk[i].end > start)
906 			return mi->blk[i].nid;
907 	return NUMA_NO_NODE;
908 }
909 
910 int phys_to_target_node(phys_addr_t start)
911 {
912 	int nid = meminfo_to_nid(&numa_meminfo, start);
913 
914 	/*
915 	 * Prefer online nodes, but if reserved memory might be
916 	 * hot-added, continue the search with reserved ranges.
917 	 */
918 	if (nid != NUMA_NO_NODE)
919 		return nid;
920 
921 	return meminfo_to_nid(&numa_reserved_meminfo, start);
922 }
923 EXPORT_SYMBOL_GPL(phys_to_target_node);
924 
925 int memory_add_physaddr_to_nid(u64 start)
926 {
927 	int nid = meminfo_to_nid(&numa_meminfo, start);
928 
929 	if (nid == NUMA_NO_NODE)
930 		nid = numa_meminfo.blk[0].nid;
931 	return nid;
932 }
933 EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
934 #endif
935