xref: /openbmc/linux/arch/x86/mm/numa.c (revision b34e08d5)
1 /* Common code for 32 and 64-bit NUMA */
2 #include <linux/kernel.h>
3 #include <linux/mm.h>
4 #include <linux/string.h>
5 #include <linux/init.h>
6 #include <linux/bootmem.h>
7 #include <linux/memblock.h>
8 #include <linux/mmzone.h>
9 #include <linux/ctype.h>
10 #include <linux/module.h>
11 #include <linux/nodemask.h>
12 #include <linux/sched.h>
13 #include <linux/topology.h>
14 
15 #include <asm/e820.h>
16 #include <asm/proto.h>
17 #include <asm/dma.h>
18 #include <asm/acpi.h>
19 #include <asm/amd_nb.h>
20 
21 #include "numa_internal.h"
22 
23 int __initdata numa_off;
24 nodemask_t numa_nodes_parsed __initdata;
25 
26 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
27 EXPORT_SYMBOL(node_data);
28 
29 static struct numa_meminfo numa_meminfo
30 #ifndef CONFIG_MEMORY_HOTPLUG
31 __initdata
32 #endif
33 ;
34 
35 static int numa_distance_cnt;
36 static u8 *numa_distance;
37 
38 static __init int numa_setup(char *opt)
39 {
40 	if (!opt)
41 		return -EINVAL;
42 	if (!strncmp(opt, "off", 3))
43 		numa_off = 1;
44 #ifdef CONFIG_NUMA_EMU
45 	if (!strncmp(opt, "fake=", 5))
46 		numa_emu_cmdline(opt + 5);
47 #endif
48 #ifdef CONFIG_ACPI_NUMA
49 	if (!strncmp(opt, "noacpi", 6))
50 		acpi_numa = -1;
51 #endif
52 	return 0;
53 }
54 early_param("numa", numa_setup);
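
/*
 * Example boot-line usage (illustrative only; the option spellings are the
 * ones parsed by numa_setup() above):
 *
 *   numa=off      - disable NUMA handling, fall through to dummy_numa_init()
 *   numa=fake=4   - CONFIG_NUMA_EMU only; the argument after "fake=" is
 *                   handed to numa_emu_cmdline() (e.g. an emulated node count)
 *   numa=noacpi   - CONFIG_ACPI_NUMA only; ignore the ACPI SRAT
 */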
55 
56 /*
57  * apicid, cpu, node mappings
58  */
59 s16 __apicid_to_node[MAX_LOCAL_APIC] = {
60 	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
61 };
62 
63 int numa_cpu_node(int cpu)
64 {
65 	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
66 
67 	if (apicid != BAD_APICID)
68 		return __apicid_to_node[apicid];
69 	return NUMA_NO_NODE;
70 }
71 
72 cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
73 EXPORT_SYMBOL(node_to_cpumask_map);
74 
75 /*
76  * Map cpu index to node index
77  */
78 DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
79 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
80 
81 void numa_set_node(int cpu, int node)
82 {
83 	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
84 
85 	/* early setting, no percpu area yet */
86 	if (cpu_to_node_map) {
87 		cpu_to_node_map[cpu] = node;
88 		return;
89 	}
90 
91 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
92 	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
93 		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
94 		dump_stack();
95 		return;
96 	}
97 #endif
98 	per_cpu(x86_cpu_to_node_map, cpu) = node;
99 
100 	set_cpu_numa_node(cpu, node);
101 }
102 
103 void numa_clear_node(int cpu)
104 {
105 	numa_set_node(cpu, NUMA_NO_NODE);
106 }
107 
108 /*
109  * Allocate node_to_cpumask_map based on the number of available nodes.
110  * Requires node_possible_map to be valid.
111  *
112  * Note: cpumask_of_node() is not valid until after this is done.
113  * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
114  */
115 void __init setup_node_to_cpumask_map(void)
116 {
117 	unsigned int node;
118 
119 	/* setup nr_node_ids if not done yet */
120 	if (nr_node_ids == MAX_NUMNODES)
121 		setup_nr_node_ids();
122 
123 	/* allocate the map */
124 	for (node = 0; node < nr_node_ids; node++)
125 		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
126 
127 	/* cpumask_of_node() will now work */
128 	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
129 }
130 
131 static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
132 				     struct numa_meminfo *mi)
133 {
134 	/* ignore zero length blks */
135 	if (start == end)
136 		return 0;
137 
138 	/* whine about and ignore invalid blks */
139 	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
140 		pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
141 			   nid, start, end - 1);
142 		return 0;
143 	}
144 
145 	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
146 		pr_err("NUMA: too many memblk ranges\n");
147 		return -EINVAL;
148 	}
149 
150 	mi->blk[mi->nr_blks].start = start;
151 	mi->blk[mi->nr_blks].end = end;
152 	mi->blk[mi->nr_blks].nid = nid;
153 	mi->nr_blks++;
154 	return 0;
155 }
156 
157 /**
158  * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
159  * @idx: Index of memblk to remove
160  * @mi: numa_meminfo to remove memblk from
161  *
162  * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
163  * decrementing @mi->nr_blks.
164  */
165 void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
166 {
167 	mi->nr_blks--;
168 	memmove(&mi->blk[idx], &mi->blk[idx + 1],
169 		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
170 }
171 
172 /**
173  * numa_add_memblk - Add one numa_memblk to numa_meminfo
174  * @nid: NUMA node ID of the new memblk
175  * @start: Start address of the new memblk
176  * @end: End address of the new memblk
177  *
178  * Add a new memblk to the default numa_meminfo.
179  *
180  * RETURNS:
181  * 0 on success, -errno on failure.
182  */
183 int __init numa_add_memblk(int nid, u64 start, u64 end)
184 {
185 	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
186 }
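
/*
 * Illustrative call sequence (addresses are made up): an SRAT or other
 * platform parser reporting two ranges could do
 *
 *   numa_add_memblk(0, 0x00000000UL, 0x80000000UL);    node 0: first 2G
 *   numa_add_memblk(1, 0x80000000UL, 0x100000000ULL);  node 1: next 2G
 *
 * @end is exclusive; a block with @start == @end is silently ignored by
 * numa_add_memblk_to() above.
 */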
187 
188 /* Initialize NODE_DATA for a node on the local memory */
189 static void __init setup_node_data(int nid, u64 start, u64 end)
190 {
191 	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
192 	u64 nd_pa;
193 	void *nd;
194 	int tnid;
195 
196 	/*
197 	 * Don't confuse the VM with a node that doesn't have the
198 	 * minimum amount of memory:
199 	 */
200 	if (end && (end - start) < NODE_MIN_SIZE)
201 		return;
202 
203 	start = roundup(start, ZONE_ALIGN);
204 
205 	printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
206 	       nid, start, end - 1);
207 
208 	/*
209 	 * Allocate node data.  Try node-local memory and then any node.
210 	 * Never allocate in DMA zone.
211 	 */
212 	nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
213 	if (!nd_pa) {
214 		nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
215 					      MEMBLOCK_ALLOC_ACCESSIBLE);
216 		if (!nd_pa) {
217 			pr_err("Cannot find %zu bytes in node %d\n",
218 			       nd_size, nid);
219 			return;
220 		}
221 	}
222 	nd = __va(nd_pa);
223 
224 	/* report and initialize */
225 	printk(KERN_INFO "  NODE_DATA [mem %#010Lx-%#010Lx]\n",
226 	       nd_pa, nd_pa + nd_size - 1);
227 	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
228 	if (tnid != nid)
229 		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);
230 
231 	node_data[nid] = nd;
232 	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
233 	NODE_DATA(nid)->node_id = nid;
234 	NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT;
235 	NODE_DATA(nid)->node_spanned_pages = (end - start) >> PAGE_SHIFT;
236 
237 	node_set_online(nid);
238 }
239 
240 /**
241  * numa_cleanup_meminfo - Cleanup a numa_meminfo
242  * @mi: numa_meminfo to clean up
243  *
244  * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
245  * conflicts and clear unused memblks.
246  *
247  * RETURNS:
248  * 0 on success, -errno on failure.
249  */
250 int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
251 {
252 	const u64 low = 0;
253 	const u64 high = PFN_PHYS(max_pfn);
254 	int i, j, k;
255 
256 	/* first, trim all entries */
257 	for (i = 0; i < mi->nr_blks; i++) {
258 		struct numa_memblk *bi = &mi->blk[i];
259 
260 		/* make sure all blocks are inside the limits */
261 		bi->start = max(bi->start, low);
262 		bi->end = min(bi->end, high);
263 
264 		/* and there's no empty block */
265 		if (bi->start >= bi->end)
266 			numa_remove_memblk_from(i--, mi);
267 	}
268 
269 	/* merge neighboring / overlapping entries */
270 	for (i = 0; i < mi->nr_blks; i++) {
271 		struct numa_memblk *bi = &mi->blk[i];
272 
273 		for (j = i + 1; j < mi->nr_blks; j++) {
274 			struct numa_memblk *bj = &mi->blk[j];
275 			u64 start, end;
276 
277 			/*
278 			 * See whether there are overlapping blocks.  Whine
279 			 * about but allow overlaps of the same nid.  They
280 			 * will be merged below.
281 			 */
282 			if (bi->end > bj->start && bi->start < bj->end) {
283 				if (bi->nid != bj->nid) {
284 					pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
285 					       bi->nid, bi->start, bi->end - 1,
286 					       bj->nid, bj->start, bj->end - 1);
287 					return -EINVAL;
288 				}
289 				pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
290 					   bi->nid, bi->start, bi->end - 1,
291 					   bj->start, bj->end - 1);
292 			}
293 
294 			/*
295 			 * Join together blocks on the same node when the
296 			 * holes between them don't overlap with memory on
297 			 * other nodes.
298 			 */
299 			if (bi->nid != bj->nid)
300 				continue;
301 			start = min(bi->start, bj->start);
302 			end = max(bi->end, bj->end);
303 			for (k = 0; k < mi->nr_blks; k++) {
304 				struct numa_memblk *bk = &mi->blk[k];
305 
306 				if (bi->nid == bk->nid)
307 					continue;
308 				if (start < bk->end && end > bk->start)
309 					break;
310 			}
311 			if (k < mi->nr_blks)
312 				continue;
313 			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
314 			       bi->nid, bi->start, bi->end - 1, bj->start,
315 			       bj->end - 1, start, end - 1);
316 			bi->start = start;
317 			bi->end = end;
318 			numa_remove_memblk_from(j--, mi);
319 		}
320 	}
321 
322 	/* clear unused ones */
323 	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
324 		mi->blk[i].start = mi->blk[i].end = 0;
325 		mi->blk[i].nid = NUMA_NO_NODE;
326 	}
327 
328 	return 0;
329 }
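
/*
 * Worked example (hypothetical numbers): given two blocks on node 0,
 * [0x00000000-0x40000000) and [0x40000000-0x80000000), the merge loop above
 * picks start = 0x00000000, end = 0x80000000, finds no block of a different
 * nid inside that span, and collapses the pair into a single entry reported
 * as "Node 0 ... -> [mem 0x00000000-0x7fffffff]".
 */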
330 
331 /*
332  * Set the nodes which have memory in @mi in *@nodemask.
333  */
334 static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
335 					      const struct numa_meminfo *mi)
336 {
337 	int i;
338 
339 	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
340 		if (mi->blk[i].start != mi->blk[i].end &&
341 		    mi->blk[i].nid != NUMA_NO_NODE)
342 			node_set(mi->blk[i].nid, *nodemask);
343 }
344 
345 /**
346  * numa_reset_distance - Reset NUMA distance table
347  *
348  * The current table is freed.  The next numa_set_distance() call will
349  * create a new one.
350  */
351 void __init numa_reset_distance(void)
352 {
353 	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);
354 
355 	/* numa_distance could be 1LU marking allocation failure, test cnt */
356 	if (numa_distance_cnt)
357 		memblock_free(__pa(numa_distance), size);
358 	numa_distance_cnt = 0;
359 	numa_distance = NULL;	/* enable table creation */
360 }
361 
362 static int __init numa_alloc_distance(void)
363 {
364 	nodemask_t nodes_parsed;
365 	size_t size;
366 	int i, j, cnt = 0;
367 	u64 phys;
368 
369 	/* size the new table and allocate it */
370 	nodes_parsed = numa_nodes_parsed;
371 	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);
372 
373 	for_each_node_mask(i, nodes_parsed)
374 		cnt = i;
375 	cnt++;
376 	size = cnt * cnt * sizeof(numa_distance[0]);
377 
378 	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
379 				      size, PAGE_SIZE);
380 	if (!phys) {
381 		pr_warning("NUMA: Warning: can't allocate distance table!\n");
382 		/* don't retry until explicitly reset */
383 		numa_distance = (void *)1LU;
384 		return -ENOMEM;
385 	}
386 	memblock_reserve(phys, size);
387 
388 	numa_distance = __va(phys);
389 	numa_distance_cnt = cnt;
390 
391 	/* fill with the default distances */
392 	for (i = 0; i < cnt; i++)
393 		for (j = 0; j < cnt; j++)
394 			numa_distance[i * cnt + j] = i == j ?
395 				LOCAL_DISTANCE : REMOTE_DISTANCE;
396 	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);
397 
398 	return 0;
399 }
400 
401 /**
402  * numa_set_distance - Set the NUMA distance from one node to another
403  * @from: the 'from' node to set distance
404  * @to: the 'to' node to set distance
405  * @distance: NUMA distance
406  *
407  * Set the distance from node @from to @to to @distance.  If the distance
408  * table doesn't exist, one large enough to accommodate all the currently
409  * known nodes will be created.
410  *
411  * If such a table cannot be allocated, a warning is printed and further
412  * calls are ignored until the distance table is reset with
413  * numa_reset_distance().
414  *
415  * If @from or @to is higher than the highest known node or lower than zero
416  * at the time of table creation, or @distance doesn't make sense, the call
417  * is ignored.
418  * This is to allow simplification of specific NUMA config implementations.
419  */
420 void __init numa_set_distance(int from, int to, int distance)
421 {
422 	if (!numa_distance && numa_alloc_distance() < 0)
423 		return;
424 
425 	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
426 			from < 0 || to < 0) {
427 		pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
428 			    from, to, distance);
429 		return;
430 	}
431 
432 	if ((u8)distance != distance ||
433 	    (from == to && distance != LOCAL_DISTANCE)) {
434 		pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
435 			     from, to, distance);
436 		return;
437 	}
438 
439 	numa_distance[from * numa_distance_cnt + to] = distance;
440 }
441 
442 int __node_distance(int from, int to)
443 {
444 	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
445 		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
446 	return numa_distance[from * numa_distance_cnt + to];
447 }
448 EXPORT_SYMBOL(__node_distance);
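
/*
 * The distance table is a flat cnt * cnt byte array indexed as
 * [from * numa_distance_cnt + to].  For example (values hypothetical,
 * typically taken from the ACPI SLIT), a two-node box might be set up as:
 *
 *   numa_set_distance(0, 0, 10);    LOCAL_DISTANCE
 *   numa_set_distance(0, 1, 21);
 *   numa_set_distance(1, 0, 21);
 *   numa_set_distance(1, 1, 10);
 *
 * after which __node_distance(0, 1) returns 21.
 */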
449 
450 /*
451  * Sanity check to catch more bad NUMA configurations (they are amazingly
452  * common).  Make sure the nodes cover all memory.
453  */
454 static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
455 {
456 	u64 numaram, e820ram;
457 	int i;
458 
459 	numaram = 0;
460 	for (i = 0; i < mi->nr_blks; i++) {
461 		u64 s = mi->blk[i].start >> PAGE_SHIFT;
462 		u64 e = mi->blk[i].end >> PAGE_SHIFT;
463 		numaram += e - s;
464 		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
465 		if ((s64)numaram < 0)
466 			numaram = 0;
467 	}
468 
469 	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
470 
471 	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
472 	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
473 		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
474 		       (numaram << PAGE_SHIFT) >> 20,
475 		       (e820ram << PAGE_SHIFT) >> 20);
476 		return false;
477 	}
478 	return true;
479 }
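
/*
 * The slack above is 1 << (20 - PAGE_SHIFT) pages, i.e. 1MB worth of pages
 * (256 pages with 4K pages), so a NUMA layout is only rejected when it
 * leaves more than roughly 1MB of e820 RAM outside every node.
 */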
480 
481 static int __init numa_register_memblks(struct numa_meminfo *mi)
482 {
483 	unsigned long uninitialized_var(pfn_align);
484 	int i, nid;
485 
486 	/* Account for nodes with cpus and no memory */
487 	node_possible_map = numa_nodes_parsed;
488 	numa_nodemask_from_meminfo(&node_possible_map, mi);
489 	if (WARN_ON(nodes_empty(node_possible_map)))
490 		return -EINVAL;
491 
492 	for (i = 0; i < mi->nr_blks; i++) {
493 		struct numa_memblk *mb = &mi->blk[i];
494 		memblock_set_node(mb->start, mb->end - mb->start,
495 				  &memblock.memory, mb->nid);
496 	}
497 
498 	/*
499 	 * If the sections array is going to be used for pfn -> nid mapping, check
500 	 * whether its granularity is fine enough.
501 	 */
502 #ifdef NODE_NOT_IN_PAGE_FLAGS
503 	pfn_align = node_map_pfn_alignment();
504 	if (pfn_align && pfn_align < PAGES_PER_SECTION) {
505 		printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
506 		       PFN_PHYS(pfn_align) >> 20,
507 		       PFN_PHYS(PAGES_PER_SECTION) >> 20);
508 		return -EINVAL;
509 	}
510 #endif
511 	if (!numa_meminfo_cover_memory(mi))
512 		return -EINVAL;
513 
514 	/* Finally register nodes. */
515 	for_each_node_mask(nid, node_possible_map) {
516 		u64 start = PFN_PHYS(max_pfn);
517 		u64 end = 0;
518 
519 		for (i = 0; i < mi->nr_blks; i++) {
520 			if (nid != mi->blk[i].nid)
521 				continue;
522 			start = min(mi->blk[i].start, start);
523 			end = max(mi->blk[i].end, end);
524 		}
525 
526 		if (start < end)
527 			setup_node_data(nid, start, end);
528 	}
529 
530 	/* Dump memblock with node info and return. */
531 	memblock_dump_all();
532 	return 0;
533 }
534 
535 /*
536  * There are unfortunately some poorly designed mainboards around that
537  * only connect memory to a single CPU. This breaks the 1:1 cpu->node
538  * mapping. To avoid this, fill in the mapping for all possible CPUs,
539  * as the number of CPUs is not known yet. We round-robin the existing
540  * nodes.
541  */
542 static void __init numa_init_array(void)
543 {
544 	int rr, i;
545 
546 	rr = first_node(node_online_map);
547 	for (i = 0; i < nr_cpu_ids; i++) {
548 		if (early_cpu_to_node(i) != NUMA_NO_NODE)
549 			continue;
550 		numa_set_node(i, rr);
551 		rr = next_node(rr, node_online_map);
552 		if (rr == MAX_NUMNODES)
553 			rr = first_node(node_online_map);
554 	}
555 }
556 
557 static void __init numa_clear_kernel_node_hotplug(void)
558 {
559 	int i, nid;
560 	nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
561 	unsigned long start, end;
562 	struct memblock_type *type = &memblock.reserved;
563 
564 	/*
565 	 * At this time, all memory regions reserved by memblock are
566 	 * used by the kernel. Setting the nid in memblock.reserved will
567 	 * mark all the nodes the kernel resides in.
568 	 */
569 	for (i = 0; i < numa_meminfo.nr_blks; i++) {
570 		struct numa_memblk *mb = &numa_meminfo.blk[i];
571 		memblock_set_node(mb->start, mb->end - mb->start,
572 				  &memblock.reserved, mb->nid);
573 	}
574 
575 	/* Mark all kernel nodes. */
576 	for (i = 0; i < type->cnt; i++)
577 		node_set(type->regions[i].nid, numa_kernel_nodes);
578 
579 	/* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
580 	for (i = 0; i < numa_meminfo.nr_blks; i++) {
581 		nid = numa_meminfo.blk[i].nid;
582 		if (!node_isset(nid, numa_kernel_nodes))
583 			continue;
584 
585 		start = numa_meminfo.blk[i].start;
586 		end = numa_meminfo.blk[i].end;
587 
588 		memblock_clear_hotplug(start, end - start);
589 	}
590 }
591 
592 static int __init numa_init(int (*init_func)(void))
593 {
594 	int i;
595 	int ret;
596 
597 	for (i = 0; i < MAX_LOCAL_APIC; i++)
598 		set_apicid_to_node(i, NUMA_NO_NODE);
599 
600 	nodes_clear(numa_nodes_parsed);
601 	nodes_clear(node_possible_map);
602 	nodes_clear(node_online_map);
603 	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
604 	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
605 				  MAX_NUMNODES));
606 	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
607 				  MAX_NUMNODES));
608 	/* In case parsing the SRAT failed. */
609 	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
610 	numa_reset_distance();
611 
612 	ret = init_func();
613 	if (ret < 0)
614 		return ret;
615 
616 	/*
617 	 * We reset memblock back to the top-down direction
618 	 * here because if we configured ACPI_NUMA, we have
619 	 * parsed SRAT in init_func(). It is ok to have the
620 	 * reset here even if we didn't configure ACPI_NUMA
621 	 * or acpi numa init fails and falls back to dummy
622 	 * numa init.
623 	 */
624 	memblock_set_bottom_up(false);
625 
626 	ret = numa_cleanup_meminfo(&numa_meminfo);
627 	if (ret < 0)
628 		return ret;
629 
630 	numa_emulation(&numa_meminfo, numa_distance_cnt);
631 
632 	ret = numa_register_memblks(&numa_meminfo);
633 	if (ret < 0)
634 		return ret;
635 
636 	for (i = 0; i < nr_cpu_ids; i++) {
637 		int nid = early_cpu_to_node(i);
638 
639 		if (nid == NUMA_NO_NODE)
640 			continue;
641 		if (!node_online(nid))
642 			numa_clear_node(i);
643 	}
644 	numa_init_array();
645 
646 	/*
647 	 * Very early on, the kernel has to use some memory, e.g. for
648 	 * loading the kernel image. We cannot prevent this anyway. So any
649 	 * node the kernel resides in should be un-hotpluggable.
650 	 *
651 	 * And by the time we get here, numa_init() won't fail.
652 	 */
653 	numa_clear_kernel_node_hotplug();
654 
655 	return 0;
656 }
657 
658 /**
659  * dummy_numa_init - Fallback dummy NUMA init
660  *
661  * Used if there's no underlying NUMA architecture, NUMA initialization
662  * fails, or NUMA is disabled on the command line.
663  *
664  * Must online at least one node and add memory blocks that cover all
665  * allowed memory.  This function must not fail.
666  */
667 static int __init dummy_numa_init(void)
668 {
669 	printk(KERN_INFO "%s\n",
670 	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
671 	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
672 	       0LLU, PFN_PHYS(max_pfn) - 1);
673 
674 	node_set(0, numa_nodes_parsed);
675 	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));
676 
677 	return 0;
678 }
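
/*
 * On a non-NUMA machine with, say, 4G of RAM (max_pfn = 0x100000 with 4K
 * pages), the printks above would report something like (addresses
 * illustrative, derived from the format strings):
 *
 *   No NUMA configuration found
 *   Faking a node at [mem 0x0000000000000000-0x00000000ffffffff]
 *
 * and all memory ends up on a single node 0.
 */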
679 
680 /**
681  * x86_numa_init - Initialize NUMA
682  *
683  * Try each configured NUMA initialization method until one succeeds.  The
684  * last fallback is a dummy single-node config encompassing the whole of
685  * memory and never fails.
686  */
687 void __init x86_numa_init(void)
688 {
689 	if (!numa_off) {
690 #ifdef CONFIG_ACPI_NUMA
691 		if (!numa_init(x86_acpi_numa_init))
692 			return;
693 #endif
694 #ifdef CONFIG_AMD_NUMA
695 		if (!numa_init(amd_numa_init))
696 			return;
697 #endif
698 	}
699 
700 	numa_init(dummy_numa_init);
701 }
702 
703 static __init int find_near_online_node(int node)
704 {
705 	int n, val;
706 	int min_val = INT_MAX;
707 	int best_node = -1;
708 
709 	for_each_online_node(n) {
710 		val = node_distance(node, n);
711 
712 		if (val < min_val) {
713 			min_val = val;
714 			best_node = n;
715 		}
716 	}
717 
718 	return best_node;
719 }
720 
721 /*
722  * Setup early cpu_to_node.
723  *
724  * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
725  * and apicid_to_node[] tables have valid entries for a CPU.
726  * This means we skip cpu_to_node[] initialisation for NUMA
727  * emulation and faking node case (when running a kernel compiled
728  * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
729  * is already initialized in a round robin manner at numa_init_array,
730  * prior to this call, and this initialization is good enough
731  * for the fake NUMA cases.
732  *
733  * Called before the per_cpu areas are set up.
734  */
735 void __init init_cpu_to_node(void)
736 {
737 	int cpu;
738 	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
739 
740 	BUG_ON(cpu_to_apicid == NULL);
741 
742 	for_each_possible_cpu(cpu) {
743 		int node = numa_cpu_node(cpu);
744 
745 		if (node == NUMA_NO_NODE)
746 			continue;
747 		if (!node_online(node))
748 			node = find_near_online_node(node);
749 		numa_set_node(cpu, node);
750 	}
751 }
752 
753 #ifndef CONFIG_DEBUG_PER_CPU_MAPS
754 
755 # ifndef CONFIG_NUMA_EMU
756 void numa_add_cpu(int cpu)
757 {
758 	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
759 }
760 
761 void numa_remove_cpu(int cpu)
762 {
763 	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
764 }
765 # endif	/* !CONFIG_NUMA_EMU */
766 
767 #else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
768 
769 int __cpu_to_node(int cpu)
770 {
771 	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
772 		printk(KERN_WARNING
773 			"cpu_to_node(%d): usage too early!\n", cpu);
774 		dump_stack();
775 		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
776 	}
777 	return per_cpu(x86_cpu_to_node_map, cpu);
778 }
779 EXPORT_SYMBOL(__cpu_to_node);
780 
781 /*
782  * Same function as cpu_to_node() but used if called before the
783  * per_cpu areas are set up.
784  */
785 int early_cpu_to_node(int cpu)
786 {
787 	if (early_per_cpu_ptr(x86_cpu_to_node_map))
788 		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
789 
790 	if (!cpu_possible(cpu)) {
791 		printk(KERN_WARNING
792 			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
793 		dump_stack();
794 		return NUMA_NO_NODE;
795 	}
796 	return per_cpu(x86_cpu_to_node_map, cpu);
797 }
798 
799 void debug_cpumask_set_cpu(int cpu, int node, bool enable)
800 {
801 	struct cpumask *mask;
802 	char buf[64];
803 
804 	if (node == NUMA_NO_NODE) {
805 		/* early_cpu_to_node() already emits a warning and trace */
806 		return;
807 	}
808 	mask = node_to_cpumask_map[node];
809 	if (!mask) {
810 		pr_err("node_to_cpumask_map[%i] NULL\n", node);
811 		dump_stack();
812 		return;
813 	}
814 
815 	if (enable)
816 		cpumask_set_cpu(cpu, mask);
817 	else
818 		cpumask_clear_cpu(cpu, mask);
819 
820 	cpulist_scnprintf(buf, sizeof(buf), mask);
821 	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
822 		enable ? "numa_add_cpu" : "numa_remove_cpu",
823 		cpu, node, buf);
824 	return;
825 }
826 
827 # ifndef CONFIG_NUMA_EMU
828 static void numa_set_cpumask(int cpu, bool enable)
829 {
830 	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
831 }
832 
833 void numa_add_cpu(int cpu)
834 {
835 	numa_set_cpumask(cpu, true);
836 }
837 
838 void numa_remove_cpu(int cpu)
839 {
840 	numa_set_cpumask(cpu, false);
841 }
842 # endif	/* !CONFIG_NUMA_EMU */
843 
844 /*
845  * Returns a pointer to the bitmask of CPUs on Node 'node'.
846  */
847 const struct cpumask *cpumask_of_node(int node)
848 {
849 	if (node >= nr_node_ids) {
850 		printk(KERN_WARNING
851 			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
852 			node, nr_node_ids);
853 		dump_stack();
854 		return cpu_none_mask;
855 	}
856 	if (node_to_cpumask_map[node] == NULL) {
857 		printk(KERN_WARNING
858 			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
859 			node);
860 		dump_stack();
861 		return cpu_online_mask;
862 	}
863 	return node_to_cpumask_map[node];
864 }
865 EXPORT_SYMBOL(cpumask_of_node);
866 
867 #endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */
868 
869 #ifdef CONFIG_MEMORY_HOTPLUG
870 int memory_add_physaddr_to_nid(u64 start)
871 {
872 	struct numa_meminfo *mi = &numa_meminfo;
873 	int nid = mi->blk[0].nid;
874 	int i;
875 
876 	for (i = 0; i < mi->nr_blks; i++)
877 		if (mi->blk[i].start <= start && mi->blk[i].end > start)
878 			nid = mi->blk[i].nid;
879 	return nid;
880 }
881 EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
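
/*
 * Example (hypothetical layout): with node 0 covering [0, 0x80000000) and
 * node 1 covering [0x80000000, 0x100000000), hot-adding memory at physical
 * address 0x90000000 makes memory_add_physaddr_to_nid() return 1; an address
 * outside every recorded block falls back to mi->blk[0].nid.
 */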
882 #endif
883