xref: /openbmc/linux/arch/x86/mm/numa.c (revision 97da55fc)
/* Common code for 32 and 64-bit NUMA */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int __initdata numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo
#ifndef CONFIG_MEMORY_HOTPLUG
__initdata
#endif
;

static int numa_distance_cnt;
static u8 *numa_distance;

static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
#endif
	return 0;
}
early_param("numa", numa_setup);
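
/*
 * For illustration, the command lines handled above: "numa=off"
 * disables NUMA, "numa=fake=<spec>" hands the spec to the NUMA
 * emulation code (CONFIG_NUMA_EMU, e.g. "numa=fake=4" for four
 * emulated nodes), and "numa=noacpi" tells the ACPI SRAT parsing
 * to stand down (CONFIG_ACPI_NUMA).
 */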

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int __cpuinit numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}
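
/*
 * Example: with node_possible_map = {0, 2}, the loop above leaves
 * num == 2 and sets nr_node_ids to 3: the map is sized by the highest
 * possible node id plus one, not by the number of possible nodes.
 */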

static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
			   nid, start, end - 1);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("NUMA: too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}
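
/*
 * Note: memblk ranges are half-open, [start, end), which is why the
 * messages in this file print "end - 1" as the last byte.  A block
 * with start == end is empty and is silently dropped above.
 */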

/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}
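
/*
 * Example: with blk[] = {A, B, C} and nr_blks == 3, removing idx 1
 * memmoves C over B, leaving {A, C} and nr_blks == 2.  Callers that
 * remove while iterating compensate with the i-- / j-- pattern used
 * in numa_cleanup_meminfo() below.
 */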

/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start, u64 end)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	void *nd;
	int tnid;

	/*
	 * Don't confuse VM with a node that doesn't have the
	 * minimum amount of memory:
	 */
	if (end && (end - start) < NODE_MIN_SIZE)
		return;

	start = roundup(start, ZONE_ALIGN);

	printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
	       nid, start, end - 1);

	/*
	 * Allocate node data.  Try node-local memory and then any node.
	 * Never allocate in DMA zone.
	 */
	nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		pr_err("Cannot find %zu bytes in node %d\n",
		       nd_size, nid);
		return;
	}
	nd = __va(nd_pa);

	/* report and initialize */
	printk(KERN_INFO "  NODE_DATA [mem %#010Lx-%#010Lx]\n",
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT;
	NODE_DATA(nid)->node_spanned_pages = (end - start) >> PAGE_SHIFT;

	node_set_online(nid);
}
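
/*
 * Note: the pg_data_t is preferably carved out of the node's own
 * memory; when the node-local request cannot be satisfied it may land
 * on another node, which is what the "NODE_DATA(%d) on node %d"
 * message above reports when tnid != nid.
 */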

/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = PFN_PHYS(max_pfn);
	int i, j, k;

	/* first, trim all entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* make sure all blocks are inside the limits */
		bi->start = max(bi->start, low);
		bi->end = min(bi->end, high);

		/* and there's no empty block */
		if (bi->start >= bi->end)
			numa_remove_memblk_from(i--, mi);
	}

	/* merge neighboring / overlapping entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks.  Whine
			 * about but allow overlaps of the same nid.  They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
					       bi->nid, bi->start, bi->end - 1,
					       bj->nid, bj->start, bj->end - 1);
					return -EINVAL;
				}
				pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
					   bi->nid, bi->start, bi->end - 1,
					   bj->start, bj->end - 1);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = min(bi->start, bj->start);
			end = max(bi->end, bj->end);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
			       bi->nid, bi->start, bi->end - 1, bj->start,
			       bj->end - 1, start, end - 1);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	/* clear unused ones */
	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}
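
/*
 * Worked example: two node 0 blocks [0x0-0x1000) and [0x1000-0x2000)
 * are merged above into node 0 [0x0-0x2000).  Blocks of the same nid
 * separated by a hole are merged too, but only when no other node's
 * block overlaps the combined range; that is what the inner 'k' scan
 * checks before committing the merge.
 */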

/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}

/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/*
	 * numa_distance may hold the (void *)1LU allocation-failure
	 * marker (see numa_alloc_distance()), so test cnt instead.
	 */
	if (numa_distance_cnt)
		memblock_free(__pa(numa_distance), size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	/* enable table creation */
}

static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
				      size, PAGE_SIZE);
	if (!phys) {
		pr_warning("NUMA: Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
	memblock_reserve(phys, size);

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}
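
/*
 * The distance table is a flat cnt x cnt byte matrix indexed as
 * numa_distance[from * cnt + to].  The defaults above are the generic
 * kernel constants (LOCAL_DISTANCE on the diagonal, REMOTE_DISTANCE
 * elsewhere; 10 and 20 respectively), matching the ACPI SLIT
 * convention that a distance of 10 means local.
 */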

/**
 * numa_set_distance - Set NUMA distance from one NUMA node to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.  If the distance
 * table doesn't exist, one large enough to accommodate all the currently
 * known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation, or @distance doesn't make sense, the call
 * is ignored.  This allows specific NUMA config implementations to stay
 * simple.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
			from < 0 || to < 0) {
		pr_warn_once("NUMA: Warning: node ids are out of bounds, from=%d to=%d distance=%d\n",
			    from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}
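
/*
 * Typical caller, for illustration: the ACPI SLIT parser walks the
 * firmware locality matrix and, for each pair of proximity domains,
 * does something like
 *
 *	numa_set_distance(pxm_to_node(i), pxm_to_node(j),
 *			  slit->entry[slit->locality_count * i + j]);
 *
 * Because out-of-range nodes and nonsensical distances are ignored
 * above, such callers don't need to pre-validate firmware values.
 */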

int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	u64 numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		u64 s = mi->blk[i].start >> PAGE_SHIFT;
		u64 e = mi->blk[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((s64)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}
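
/*
 * The slack above is 1 << (20 - PAGE_SHIFT) pages, i.e. 1MB regardless
 * of page size (256 pages with 4K pages), since firmware tends to
 * steal a few pages of e820 RAM and an exact match cannot be expected.
 */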

static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	unsigned long uninitialized_var(pfn_align);
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *mb = &mi->blk[i];
		memblock_set_node(mb->start, mb->end - mb->start, mb->nid);
	}

	/*
	 * If sections array is gonna be used for pfn -> nid mapping, check
	 * whether its granularity is fine enough.
	 */
#ifdef NODE_NOT_IN_PAGE_FLAGS
	pfn_align = node_map_pfn_alignment();
	if (pfn_align && pfn_align < PAGES_PER_SECTION) {
		printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
		       PFN_PHYS(pfn_align) >> 20,
		       PFN_PHYS(PAGES_PER_SECTION) >> 20);
		return -EINVAL;
	}
#endif
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start < end)
			setup_node_data(nid, start, end);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU.  This breaks the 1:1 cpu->node
 * mapping.  To avoid this, fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet.  We round robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}
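
/*
 * Example: with nodes {0, 1} online and CPUs 0-3 still unmapped, the
 * round robin above yields cpu0->node0, cpu1->node1, cpu2->node0,
 * cpu3->node1.  CPUs that already have a node from firmware are left
 * alone.
 */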

static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;
	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();
	return 0;
}
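
/*
 * The sequence above, in short: reset all NUMA state, let init_func()
 * (ACPI, AMD, ...) fill numa_nodes_parsed and numa_meminfo, sanitize
 * the meminfo, optionally rewrite it via NUMA emulation, register the
 * result with memblock, and finally drop cpu->node mappings that point
 * at nodes which ended up offline.
 */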

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds.  The
 * last fallback is the dummy single node config encompassing the whole
 * memory, which never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_X86_NUMAQ
		if (!numa_init(numaq_numa_init))
			return;
#endif
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}

	numa_init(dummy_numa_init);
}

static __init int find_near_online_node(int node)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;

	for_each_online_node(n) {
		val = node_distance(node, n);

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	return best_node;
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the faked node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner by numa_init_array(),
 * prior to this call, and that initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are set up.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;
		if (!node_online(node))
			node = find_near_online_node(node);
		numa_set_node(cpu, node);
	}
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void __cpuinit numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;
	char buf[64];

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu",
		cpu, node, buf);
	return;
}

# ifndef CONFIG_NUMA_EMU
static void __cpuinit numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif	/* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (node_to_cpumask_map[node] == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_MEMORY_HOTPLUG
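/*
 * Illustrative use: on memory hot-add, the hotplug code asks which
 * node a new physical range belongs to.  The lookup below scans the
 * numa_meminfo table (kept past init when memory hotplug is
 * configured, see the #ifndef near the top of this file) and falls
 * back to the first block's nid if no block contains the address.
 */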
int memory_add_physaddr_to_nid(u64 start)
{
	struct numa_meminfo *mi = &numa_meminfo;
	int nid = mi->blk[0].nid;
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			nid = mi->blk[i].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif