xref: /openbmc/linux/arch/powerpc/mm/numa.c (revision bc5aa3a0)
/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) do { if (numa_debug) printk(KERN_INFO args); } while (0)

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for_each_node(node)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

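/*
 * Carve out a new fake NUMA node each time the memory scan crosses the
 * next boundary listed on the command line.  For example (hypothetical
 * sizes), booting with "numa=fake=2G,4G" puts memory below 2GB in node
 * 0, memory from 2GB up to 4GB in node 1, and anything above that in
 * node 2.
 */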
static int __init fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify the node id if we have already started creating NUMA
	 * nodes; we want to continue from where we left off last time.
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}

static void reset_numa_cpu_lookup_table(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		numa_cpu_lookup_table[cpu] = -1;
}

static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;
}

static void map_cpu_to_node(int cpu, int node)
{
	update_numa_cpu_lookup_table(cpu, node);

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const __be32 *of_get_usable_memory(struct device_node *memory)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;
	return prop;
}

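/*
 * Worked example for the form 1 path below: with a reference-point
 * depth of 2, two nodes that match at the first (most significant)
 * level are LOCAL_DISTANCE (10) apart; nodes that differ at the first
 * level but match at the second are 20 apart; nodes that differ at
 * every level end up 40 apart.
 */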
int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (!form1_affinity)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
EXPORT_SYMBOL(__node_distance);

static void initialize_distance_lookup_table(int nid,
		const __be32 *associativity)
{
	int i;

	if (!form1_affinity)
		return;

	for (i = 0; i < distance_ref_points_depth; i++) {
		const __be32 *entry;

		entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
		distance_lookup_table[nid][i] = of_read_number(entry, 1);
	}
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful NUMA
 * information is found.
 */
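/*
 * Example with hypothetical cells: given min_common_depth = 4, an
 * "ibm,associativity" property of <5 0 0 1 2 0> (a length of 5
 * followed by five domain ids) yields nid 2, because cell 0 holds the
 * length and the node id is read from index min_common_depth.
 */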
static int associativity_to_nid(const __be32 *associativity)
{
	int nid = -1;

	if (min_common_depth == -1)
		goto out;

	if (of_read_number(associativity, 1) >= min_common_depth)
		nid = of_read_number(&associativity[min_common_depth], 1);

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;

	if (nid > 0 &&
		of_read_number(associativity, 1) >= distance_ref_points_depth) {
		/*
		 * Skip the length field and pass the start of the
		 * associativity array.
		 */
		initialize_distance_lookup_table(nid, associativity + 1);
	}

out:
	return nid;
}

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const __be32 *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		device = of_get_next_parent(device);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
	int depth;
	struct device_node *root;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
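	/*
	 * For example (hypothetical values), a form 1 property of
	 * <4 2> means the fourth associativity level is the most
	 * significant NUMA boundary, so min_common_depth below becomes
	 * 4 and node ids are read from that level.
	 */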
	distance_ref_points = of_get_property(root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);

	if (firmware_has_feature(FW_FEATURE_OPAL) ||
	    firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
		dbg("Using form 1 affinity\n");
		form1_affinity = 1;
	}

	if (form1_affinity) {
		depth = of_read_number(distance_ref_points, 1);
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}

		depth = of_read_number(&distance_ref_points[1], 1);
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return depth;

err:
	of_node_put(root);
	return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

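/*
 * Example: with n = 2 and the cells <0x1 0x23456789>, read_n_cells()
 * returns 0x123456789 and leaves *buf pointing just past the two
 * cells.
 */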
static unsigned long read_n_cells(int n, const __be32 **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | of_read_number(*buf, 1);
		(*buf)++;
	}
	return result;
}

/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
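/*
 * Each entry consists of n_mem_addr_cells cells of base address
 * followed by four single-cell fields: drc_index, reserved, aa_index
 * and flags.  With the typical n_mem_addr_cells = 2 that is six cells
 * per entry.
 */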
static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
{
	const __be32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = of_read_number(cp, 1);
	drmem->reserved = of_read_number(&cp[1], 1);
	drmem->aa_index = of_read_number(&cp[2], 1);
	drmem->flags = of_read_number(&cp[3], 1);

	*cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a count N followed by
 * N memblock list entries.  Each memblock list entry contains information
 * as laid out in struct of_drconf_cell.
 */
static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
{
	const __be32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = of_read_number(prop++, 1);

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
	u32	n_arrays;
	u32	array_sz;
	const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
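/*
 * For example (hypothetical values), a property of <2 3 0 0 1 0 0 2>
 * describes N = 2 arrays of M = 3 cells each: array 0 is <0 0 1> and
 * array 1 is <0 0 2>.
 */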
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = of_read_number(prop++, 1);
	aa->array_sz = of_read_number(prop++, 1);

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
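/*
 * Continuing the hypothetical lookup-array example above: with
 * min_common_depth = 3 and drmem->aa_index = 1, the node id is read
 * from aa->arrays[1 * 3 + (3 - 1)], i.e. the last cell of array 1,
 * giving nid 2.
 */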
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = of_read_number(&aa->arrays[index], 1);

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;

		if (nid > 0) {
			index = drmem->aa_index * aa->array_sz;
			initialize_distance_lookup_table(nid,
							&aa->arrays[index]);
		}
	}

	return nid;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	int nid = -1;
	struct device_node *cpu;

	/*
	 * If a valid cpu-to-node mapping is already available, use it
	 * directly instead of querying the firmware, since it represents
	 * the most recent mapping notified to us by the platform (eg: VPHN).
	 */
	nid = numa_cpu_lookup_table[lcpu];
	if (nid >= 0) {
		map_cpu_to_node(lcpu, nid);
		return nid;
	}

	cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		if (cpu_present(lcpu))
			goto out_present;
		else
			goto out;
	}

	nid = of_node_to_nid_single(cpu);

out_present:
	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	map_cpu_to_node(lcpu, nid);
	of_node_put(cpu);
out:
	return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
	int base, sibling, i;

	/* Verify that all the threads in the core belong to the same node */
	base = cpu_first_thread_sibling(cpu);

	for (i = 0; i < threads_per_core; i++) {
		sibling = base + i;

		if (sibling == cpu || cpu_is_offline(sibling))
			continue;

		if (cpu_to_node(sibling) != node) {
			WARN(1, "CPU thread siblings %d and %d don't belong"
				" to the same node!\n", cpu, sibling);
			break;
		}
	}
}

/* Must run before sched domains notifier. */
static int ppc_numa_cpu_prepare(unsigned int cpu)
{
	int nid;

	nid = numa_setup_cpu(cpu);
	verify_cpu_node_mapping(cpu, nid);
	return 0;
}

static int ppc_numa_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	unmap_cpu_from_node(cpu);
#endif
	return 0;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.  Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}

/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory, the corresponding
	 * entry in the linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) pairs.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const __be32 *uninitialized_var(dm), *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa = { .arrays = NULL };

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/*
		 * Skip this block if the reserved bit is set in flags
		 * (0x80) or if the block is not assigned to this
		 * partition (0x8).
		 */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) pairs */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
						  &nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				memblock_set_node(base, sz,
						  &memblock.memory, nid);
		} while (--ranges);
	}
}

static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		struct device_node *cpu;
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		size = numa_enforce_memory_limit(start, size);
		if (!size) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		memblock_set_node(start, size, &memblock.memory, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, nid);
		node_set_online(nid);
	}
}

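/*
 * Print each online node and its CPU ranges, e.g. (hypothetical
 * output):
 *
 *   Node 0 CPUs: 0-7
 *   Node 1 CPUs: 8-15
 */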
void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", nr_cpu_ids - 1);
		printk("\n");
	}
}

static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < memblock_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
	u64 spanned_pages = end_pfn - start_pfn;
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	u64 nd_pa;
	void *nd;
	int tnid;

	if (spanned_pages)
		pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
			nid, start_pfn << PAGE_SHIFT,
			(end_pfn << PAGE_SHIFT) - 1);
	else
		pr_info("Initmem setup node %d\n", nid);

	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	nd = __va(nd_pa);

	/* report and initialize */
	pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
		nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}

void __init initmem_init(void)
{
	int nid, cpu;

	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	memblock_dump_all();

	/*
	 * Reduce the possible NUMA nodes to the online NUMA nodes,
	 * since we do not support node hotplug. This ensures that we
	 * lower the maximum NUMA node ID to what is actually present.
	 */
	nodes_and(node_possible_map, node_possible_map, node_online_map);

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		setup_node_data(nid, start_pfn, end_pfn);
		sparse_memory_present_with_active_regions(nid);
	}

	sparse_init();

	setup_node_to_cpumask_map();

	reset_numa_cpu_lookup_table();

	/*
	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
	 * even before we online them, so that we can use cpu_to_{node,mem}
	 * early in boot, cf. smp_prepare_cpus().
	 * _nocalls() + manual invocation is used because cpuhp is not yet
	 * initialized for the boot CPU.
	 */
	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "POWER_NUMA_PREPARE",
				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
	for_each_present_cpu(cpu)
		numa_setup_cpu(cpu);
}

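/*
 * Parse the "numa=" boot option.  As handled below, "off" disables
 * NUMA, "debug" enables the dbg() messages above, and
 * "fake=<size>[,<size>...]" hands the boundary list to
 * fake_numa_create_new_node().
 */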
static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);

static bool topology_updates_enabled = true;

static int __init early_topology_updates(char *p)
{
	if (!p)
		return 0;

	if (!strcmp(p, "off")) {
		pr_info("Disabling topology updates\n");
		topology_updates_enabled = false;
	}

	return 0;
}
early_param("topology_updates", early_topology_updates);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const __be32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = -1;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}

/*
 * Find the node associated with a hot added memory section.  A section
 * corresponds to a SPARSEMEM section, not a memblock.  It is assumed
 * that sections are fully contained within a single memblock.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid, found = 0;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	if (NODE_DATA(nid)->node_spanned_pages)
		return nid;

	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages) {
			found = 1;
			break;
		}
	}

	BUG_ON(!found);
	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	struct device_node *dn = NULL;
	unsigned int drconf_cell_cnt = 0;
	u64 lmb_size = 0;
	const __be32 *dm = NULL;
	const __be64 *lrdr = NULL;
	struct of_drconf_cell drmem;

	dn = of_find_node_by_path("/rtas");
	if (dn) {
		lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
		of_node_put(dn);
		if (lrdr)
			return be64_to_cpup(lrdr);
	}

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
		lmb_size = of_get_lmb_size(memory);

		/* Advance to the last cell; each cell has six 32-bit integers */
		dm += (drconf_cell_cnt - 1) * 6;
		read_drconf_cell(&drmem, &dm);
		of_node_put(memory);
		return drmem.base_addr + lmb_size;
	}
	return 0;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR

#include "vphn.h"

struct topology_update_data {
	struct topology_update_data *next;
	unsigned int cpu;
	int old_nid;
	int new_nid;
};

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);

/*
 * Take a snapshot of the hypervisor's current associativity change
 * counters for each cpu.
 */
static void setup_cpu_associativity_change_counters(void)
{
	int cpu;

	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
}

/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
	int cpu;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	for_each_possible_cpu(cpu) {
		int i, changed = 0;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
		}
		if (changed) {
			cpumask_or(changes, changes, cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
		}
	}

	return cpumask_weight(changes);
}

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, __be32 *associativity)
{
	long rc;
	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
	u64 flags = 1;
	int hwcpu = get_hard_smp_processor_id(cpu);

	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
	vphn_unpack_associativity(retbuf, associativity);

	return rc;
}

static long vphn_get_associativity(unsigned long cpu,
					__be32 *associativity)
{
	long rc;

	rc = hcall_vphn(cpu, associativity);

	switch (rc) {
	case H_FUNCTION:
		printk(KERN_INFO
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_HARDWARE:
		printk(KERN_ERR
			"hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		stop_topology_update();
	}

	return rc;
}

/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
	struct topology_update_data *update;
	unsigned long cpu;

	if (!data)
		return -EINVAL;

	cpu = smp_processor_id();

	for (update = data; update; update = update->next) {
		int new_nid = update->new_nid;

		if (cpu != update->cpu)
			continue;

		unmap_cpu_from_node(cpu);
		map_cpu_to_node(cpu, new_nid);
		set_cpu_numa_node(cpu, new_nid);
		set_cpu_numa_mem(cpu, local_memory_node(new_nid));
		vdso_getcpu_init();
	}

	return 0;
}

static int update_lookup_table(void *data)
{
	struct topology_update_data *update;

	if (!data)
		return -EINVAL;

	/*
	 * Upon topology update, the numa-cpu lookup table needs to be updated
	 * for all threads in the core, including offline CPUs, to ensure that
	 * future hotplug operations respect the cpu-to-node associativity
	 * properly.
	 */
	for (update = data; update; update = update->next) {
		int nid, base, j;

		nid = update->new_nid;
		base = cpu_first_thread_sibling(update->cpu);

		for (j = 0; j < threads_per_core; j++) {
			update_numa_cpu_lookup_table(base + j, nid);
		}
	}

	return 0;
}

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 */
int arch_update_cpu_topology(void)
{
	unsigned int cpu, sibling, changed = 0;
	struct topology_update_data *updates, *ud;
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	cpumask_t updated_cpus;
	struct device *dev;
	int weight, new_nid, i = 0;

	if (!prrn_enabled && !vphn_enabled)
		return 0;

	weight = cpumask_weight(&cpu_associativity_changes_mask);
	if (!weight)
		return 0;

	updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
	if (!updates)
		return 0;

	cpumask_clear(&updated_cpus);

	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
		/*
		 * If this cpu's siblings aren't all flagged for changes,
		 * the updates list would come up short. Skip the cpu on
		 * this pass and flag its whole core for the next update.
		 */
		if (!cpumask_subset(cpu_sibling_mask(cpu),
					&cpu_associativity_changes_mask)) {
			pr_info("Sibling bits not set for associativity "
					"change, cpu%d\n", cpu);
			cpumask_or(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		/* Use associativity from first thread for all siblings */
		vphn_get_associativity(cpu, associativity);
		new_nid = associativity_to_nid(associativity);
		if (new_nid < 0 || !node_online(new_nid))
			new_nid = first_online_node;

		if (new_nid == numa_cpu_lookup_table[cpu]) {
			cpumask_andnot(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
			ud = &updates[i++];
			ud->cpu = sibling;
			ud->new_nid = new_nid;
			ud->old_nid = numa_cpu_lookup_table[sibling];
			cpumask_set_cpu(sibling, &updated_cpus);
			if (i < weight)
				ud->next = &updates[i];
		}
		cpu = cpu_last_thread_sibling(cpu);
	}

	pr_debug("Topology update for the following CPUs:\n");
	if (cpumask_weight(&updated_cpus)) {
		for (ud = &updates[0]; ud; ud = ud->next) {
			pr_debug("cpu %d moving from node %d to %d\n",
				 ud->cpu, ud->old_nid, ud->new_nid);
		}
	}

	/*
	 * In cases where we have nothing to update (because the updates list
	 * is too short or because the new topology is the same as the old
	 * one), skip invoking update_cpu_topology() via stop-machine(). This
	 * is necessary (and not just a fast-path optimization) since
	 * stop-machine can end up electing a random CPU to run
	 * update_cpu_topology(), and thus trick us into setting up incorrect
	 * cpu-node mappings (since 'updates' is kzalloc()'ed).
	 *
	 * For the same reason, skip the rest of the update path as well.
	 */
	if (!cpumask_weight(&updated_cpus))
		goto out;

	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

	/*
	 * Update the numa-cpu lookup table with the new mappings, even for
	 * offline CPUs. It is best to perform this update from the stop-
	 * machine context.
	 */
	stop_machine(update_lookup_table, &updates[0],
					cpumask_of(raw_smp_processor_id()));

	for (ud = &updates[0]; ud; ud = ud->next) {
		unregister_cpu_under_node(ud->cpu, ud->old_nid);
		register_cpu_under_node(ud->cpu, ud->new_nid);

		dev = get_cpu_device(ud->cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
		changed = 1;
	}

out:
	kfree(updates);
	return changed;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

static void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
		topology_schedule_update();
	else if (vphn_enabled) {
		if (update_cpu_associativity_changes_mask() > 0)
			topology_schedule_update();
		reset_topology_timer();
	}
}
static struct timer_list topology_timer =
	TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void reset_topology_timer(void)
{
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	mod_timer(&topology_timer, topology_timer.expires);
}

#ifdef CONFIG_SMP

static void stage_topology_update(int core_id)
{
	cpumask_or(&cpu_associativity_changes_mask,
		&cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
	reset_topology_timer();
}

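/*
 * PRRN events arrive as device tree updates: when firmware rewrites
 * the "ibm,associativity" property of a cpu node, stage a topology
 * update for all threads of that core.
 */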
static int dt_update_callback(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_reconfig_data *update = data;
	int rc = NOTIFY_DONE;

	switch (action) {
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!of_prop_cmp(update->dn->type, "cpu") &&
		    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
			u32 core_id;

			of_property_read_u32(update->dn, "reg", &core_id);
			stage_topology_update(core_id);
			rc = NOTIFY_OK;
		}
		break;
	}

	return rc;
}

static struct notifier_block dt_update_nb = {
	.notifier_call = dt_update_callback,
};

#endif

/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
	int rc = 0;

	if (firmware_has_feature(FW_FEATURE_PRRN)) {
		if (!prrn_enabled) {
			prrn_enabled = 1;
			vphn_enabled = 0;
#ifdef CONFIG_SMP
			rc = of_reconfig_notifier_register(&dt_update_nb);
#endif
		}
	} else if (firmware_has_feature(FW_FEATURE_VPHN) &&
		   lppaca_shared_proc(get_lppaca())) {
		if (!vphn_enabled) {
			prrn_enabled = 0;
			vphn_enabled = 1;
			setup_cpu_associativity_change_counters();
			init_timer_deferrable(&topology_timer);
			reset_topology_timer();
		}
	}

	return rc;
}

/*
 * Stop polling for associativity changes.
 */
int stop_topology_update(void)
{
	int rc = 0;

	if (prrn_enabled) {
		prrn_enabled = 0;
#ifdef CONFIG_SMP
		rc = of_reconfig_notifier_unregister(&dt_update_nb);
#endif
	} else if (vphn_enabled) {
		vphn_enabled = 0;
		rc = del_timer_sync(&topology_timer);
	}

	return rc;
}

int prrn_is_enabled(void)
{
	return prrn_enabled;
}

static int topology_read(struct seq_file *file, void *v)
{
	if (vphn_enabled || prrn_enabled)
		seq_puts(file, "on\n");
	else
		seq_puts(file, "off\n");

	return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
	return single_open(file, topology_read, NULL);
}

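/*
 * Writing "on" or "off" to /proc/powerpc/topology_updates starts or
 * stops the polling, e.g. from a shell:
 *
 *   echo off > /proc/powerpc/topology_updates
 */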
static ssize_t topology_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *off)
{
	char kbuf[4]; /* "on" or "off" plus null. */
	int read_len;

	read_len = count < 3 ? count : 3;
	if (copy_from_user(kbuf, buf, read_len))
		return -EINVAL;

	kbuf[read_len] = '\0';

	if (!strncmp(kbuf, "on", 2))
		start_topology_update();
	else if (!strncmp(kbuf, "off", 3))
		stop_topology_update();
	else
		return -EINVAL;

	return count;
}

static const struct file_operations topology_ops = {
	.read = seq_read,
	.write = topology_write,
	.open = topology_open,
	.release = single_release
};

static int topology_update_init(void)
{
	/* Do not poll for changes if disabled at boot */
	if (topology_updates_enabled)
		start_topology_update();

	if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
		return -ENOMEM;

	return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */
1666