xref: /openbmc/linux/arch/powerpc/mm/numa.c (revision fca3aa16)
1 /*
2  * pSeries NUMA support
3  *
4  * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 #define pr_fmt(fmt) "numa: " fmt
12 
13 #include <linux/threads.h>
14 #include <linux/bootmem.h>
15 #include <linux/init.h>
16 #include <linux/mm.h>
17 #include <linux/mmzone.h>
18 #include <linux/export.h>
19 #include <linux/nodemask.h>
20 #include <linux/cpu.h>
21 #include <linux/notifier.h>
22 #include <linux/memblock.h>
23 #include <linux/of.h>
24 #include <linux/pfn.h>
25 #include <linux/cpuset.h>
26 #include <linux/node.h>
27 #include <linux/stop_machine.h>
28 #include <linux/proc_fs.h>
29 #include <linux/seq_file.h>
30 #include <linux/uaccess.h>
31 #include <linux/slab.h>
32 #include <asm/cputhreads.h>
33 #include <asm/sparsemem.h>
34 #include <asm/prom.h>
35 #include <asm/smp.h>
37 #include <asm/topology.h>
38 #include <asm/firmware.h>
39 #include <asm/paca.h>
40 #include <asm/hvcall.h>
41 #include <asm/setup.h>
42 #include <asm/vdso.h>
43 #include <asm/drmem.h>
44 
45 static int numa_enabled = 1;
46 
47 static char *cmdline __initdata;
48 
49 static int numa_debug;
50 #define dbg(args...) do { if (numa_debug) printk(KERN_INFO args); } while (0)
51 
52 int numa_cpu_lookup_table[NR_CPUS];
53 cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
54 struct pglist_data *node_data[MAX_NUMNODES];
55 
56 EXPORT_SYMBOL(numa_cpu_lookup_table);
57 EXPORT_SYMBOL(node_to_cpumask_map);
58 EXPORT_SYMBOL(node_data);
59 
60 static int min_common_depth;
61 static int n_mem_addr_cells, n_mem_size_cells;
62 static int form1_affinity;
63 
64 #define MAX_DISTANCE_REF_POINTS 4
65 static int distance_ref_points_depth;
66 static const __be32 *distance_ref_points;
67 static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
68 
69 /*
70  * Allocate node_to_cpumask_map based on number of available nodes
71  * Requires node_possible_map to be valid.
72  *
73  * Note: cpumask_of_node() is not valid until after this is done.
74  */
75 static void __init setup_node_to_cpumask_map(void)
76 {
77 	unsigned int node;
78 
79 	/* setup nr_node_ids if not done yet */
80 	if (nr_node_ids == MAX_NUMNODES)
81 		setup_nr_node_ids();
82 
83 	/* allocate the map */
84 	for_each_node(node)
85 		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
86 
87 	/* cpumask_of_node() will now work */
88 	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
89 }
90 
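/*
 * Illustrative note (not part of the original source): the boundary list
 * consumed here comes from the "numa=fake=" boot argument parsed in
 * early_numa() below.  For example, booting with something like
 * "numa=fake=1G,3G,6G" keeps memory below 1GB in node 0 and creates a new
 * fake node each time end_pfn crosses the 1GB, 3GB and 6GB boundaries, so
 * the ranges [1GB,3GB) and [3GB,6GB) land in nodes 1 and 2 respectively.
 */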
91 static int __init fake_numa_create_new_node(unsigned long end_pfn,
92 						unsigned int *nid)
93 {
94 	unsigned long long mem;
95 	char *p = cmdline;
96 	static unsigned int fake_nid;
97 	static unsigned long long curr_boundary;
98 
99 	/*
100 	 * Modify the node id only if we have already started creating NUMA
101 	 * nodes; we want to continue from where we left off last time.
102 	 */
103 	if (fake_nid)
104 		*nid = fake_nid;
105 	/*
106 	 * In case there are no more arguments to parse, the
107 	 * node_id should be the same as the last fake node id
108 	 * (we've handled this above).
109 	 */
110 	if (!p)
111 		return 0;
112 
113 	mem = memparse(p, &p);
114 	if (!mem)
115 		return 0;
116 
117 	if (mem < curr_boundary)
118 		return 0;
119 
120 	curr_boundary = mem;
121 
122 	if ((end_pfn << PAGE_SHIFT) > mem) {
123 		/*
124 		 * Skip commas and spaces
125 		 */
126 		while (*p == ',' || *p == ' ' || *p == '\t')
127 			p++;
128 
129 		cmdline = p;
130 		fake_nid++;
131 		*nid = fake_nid;
132 		dbg("created new fake_node with id %d\n", fake_nid);
133 		return 1;
134 	}
135 	return 0;
136 }
137 
138 static void reset_numa_cpu_lookup_table(void)
139 {
140 	unsigned int cpu;
141 
142 	for_each_possible_cpu(cpu)
143 		numa_cpu_lookup_table[cpu] = -1;
144 }
145 
146 static void map_cpu_to_node(int cpu, int node)
147 {
148 	update_numa_cpu_lookup_table(cpu, node);
149 
150 	dbg("adding cpu %d to node %d\n", cpu, node);
151 
152 	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
153 		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
154 }
155 
156 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
157 static void unmap_cpu_from_node(unsigned long cpu)
158 {
159 	int node = numa_cpu_lookup_table[cpu];
160 
161 	dbg("removing cpu %lu from node %d\n", cpu, node);
162 
163 	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
164 		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
165 	} else {
166 		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
167 		       cpu, node);
168 	}
169 }
170 #endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
171 
172 /* must hold reference to node during call */
173 static const __be32 *of_get_associativity(struct device_node *dev)
174 {
175 	return of_get_property(dev, "ibm,associativity", NULL);
176 }
177 
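/*
 * Worked example (illustrative): with form 1 affinity and a
 * distance_ref_points_depth of 4, two nodes that differ in the first two
 * reference-point domains and first match at the third leave the loop
 * after doubling twice, i.e. a distance of 4 * LOCAL_DISTANCE; nodes that
 * never match at any level end up at LOCAL_DISTANCE << 4.
 */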
178 int __node_distance(int a, int b)
179 {
180 	int i;
181 	int distance = LOCAL_DISTANCE;
182 
183 	if (!form1_affinity)
184 		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
185 
186 	for (i = 0; i < distance_ref_points_depth; i++) {
187 		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
188 			break;
189 
190 		/* Double the distance for each NUMA level */
191 		distance *= 2;
192 	}
193 
194 	return distance;
195 }
196 EXPORT_SYMBOL(__node_distance);
197 
198 static void initialize_distance_lookup_table(int nid,
199 		const __be32 *associativity)
200 {
201 	int i;
202 
203 	if (!form1_affinity)
204 		return;
205 
206 	for (i = 0; i < distance_ref_points_depth; i++) {
207 		const __be32 *entry;
208 
209 		entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
210 		distance_lookup_table[nid][i] = of_read_number(entry, 1);
211 	}
212 }
213 
214 /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
215  * info is found.
216  */
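/*
 * Illustrative example (hypothetical values): with min_common_depth = 2 and
 * an ibm,associativity property of <4 0 1 5 7>, the leading cell (4) is the
 * number of entries that follow, so associativity[min_common_depth], here
 * associativity[2] == 1, is used as the node id.
 */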
217 static int associativity_to_nid(const __be32 *associativity)
218 {
219 	int nid = -1;
220 
221 	if (min_common_depth == -1)
222 		goto out;
223 
224 	if (of_read_number(associativity, 1) >= min_common_depth)
225 		nid = of_read_number(&associativity[min_common_depth], 1);
226 
227 	/* POWER4 LPAR uses 0xffff as invalid node */
228 	if (nid == 0xffff || nid >= MAX_NUMNODES)
229 		nid = -1;
230 
231 	if (nid > 0 &&
232 		of_read_number(associativity, 1) >= distance_ref_points_depth) {
233 		/*
234 		 * Skip the length field and send start of associativity array
235 		 */
236 		initialize_distance_lookup_table(nid, associativity + 1);
237 	}
238 
239 out:
240 	return nid;
241 }
242 
243 /* Returns the nid associated with the given device tree node,
244  * or -1 if not found.
245  */
246 static int of_node_to_nid_single(struct device_node *device)
247 {
248 	int nid = -1;
249 	const __be32 *tmp;
250 
251 	tmp = of_get_associativity(device);
252 	if (tmp)
253 		nid = associativity_to_nid(tmp);
254 	return nid;
255 }
256 
257 /* Walk the device tree upwards, looking for an associativity id */
258 int of_node_to_nid(struct device_node *device)
259 {
260 	int nid = -1;
261 
262 	of_node_get(device);
263 	while (device) {
264 		nid = of_node_to_nid_single(device);
265 		if (nid != -1)
266 			break;
267 
268 		device = of_get_next_parent(device);
269 	}
270 	of_node_put(device);
271 
272 	return nid;
273 }
274 EXPORT_SYMBOL(of_node_to_nid);
275 
276 static int __init find_min_common_depth(void)
277 {
278 	int depth;
279 	struct device_node *root;
280 
281 	if (firmware_has_feature(FW_FEATURE_OPAL))
282 		root = of_find_node_by_path("/ibm,opal");
283 	else
284 		root = of_find_node_by_path("/rtas");
285 	if (!root)
286 		root = of_find_node_by_path("/");
287 
288 	/*
289 	 * This property is a set of 32-bit integers, each representing
290 	 * an index into the ibm,associativity nodes.
291 	 *
292 	 * With form 0 affinity the first integer is for an SMP configuration
293 	 * (should be all 0's) and the second is for a normal NUMA
294 	 * configuration. We have only one level of NUMA.
295 	 *
296 	 * With form 1 affinity the first integer is the most significant
297 	 * NUMA boundary and the following are progressively less significant
298 	 * boundaries. There can be more than one level of NUMA.
299 	 */
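	/*
	 * Hypothetical example: a form 1 reference-points property of
	 * <0x4 0x2> says the domain at associativity index 4 is the most
	 * significant NUMA boundary and index 2 the next one; in that case
	 * min_common_depth is taken from the first entry (4) below.
	 */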
300 	distance_ref_points = of_get_property(root,
301 					"ibm,associativity-reference-points",
302 					&distance_ref_points_depth);
303 
304 	if (!distance_ref_points) {
305 		dbg("NUMA: ibm,associativity-reference-points not found.\n");
306 		goto err;
307 	}
308 
309 	distance_ref_points_depth /= sizeof(int);
310 
311 	if (firmware_has_feature(FW_FEATURE_OPAL) ||
312 	    firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
313 		dbg("Using form 1 affinity\n");
314 		form1_affinity = 1;
315 	}
316 
317 	if (form1_affinity) {
318 		depth = of_read_number(distance_ref_points, 1);
319 	} else {
320 		if (distance_ref_points_depth < 2) {
321 			printk(KERN_WARNING "NUMA: "
322 				"short ibm,associativity-reference-points\n");
323 			goto err;
324 		}
325 
326 		depth = of_read_number(&distance_ref_points[1], 1);
327 	}
328 
329 	/*
330 	 * Warn and cap if the hardware supports more than
331 	 * MAX_DISTANCE_REF_POINTS domains.
332 	 */
333 	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
334 		printk(KERN_WARNING "NUMA: distance array capped at "
335 			"%d entries\n", MAX_DISTANCE_REF_POINTS);
336 		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
337 	}
338 
339 	of_node_put(root);
340 	return depth;
341 
342 err:
343 	of_node_put(root);
344 	return -1;
345 }
346 
347 static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
348 {
349 	struct device_node *memory = NULL;
350 
351 	memory = of_find_node_by_type(memory, "memory");
352 	if (!memory)
353 		panic("numa.c: No memory nodes found!");
354 
355 	*n_addr_cells = of_n_addr_cells(memory);
356 	*n_size_cells = of_n_size_cells(memory);
357 	of_node_put(memory);
358 }
359 
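/*
 * Worked example: with n = 2 and cells <0x00000001 0x00000000>, the loop
 * below concatenates the two 32-bit cells into the 64-bit value
 * 0x100000000 and advances *buf past both cells.
 */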
360 static unsigned long read_n_cells(int n, const __be32 **buf)
361 {
362 	unsigned long result = 0;
363 
364 	while (n--) {
365 		result = (result << 32) | of_read_number(*buf, 1);
366 		(*buf)++;
367 	}
368 	return result;
369 }
370 
371 struct assoc_arrays {
372 	u32	n_arrays;
373 	u32	array_sz;
374 	const __be32 *arrays;
375 };
376 
377 /*
378  * Retrieve and validate the list of associativity arrays for drconf
379  * memory from the ibm,associativity-lookup-arrays property of the
380  * device tree.
381  *
382  * The layout of the ibm,associativity-lookup-arrays property is a number N
383  * indicating the number of associativity arrays, followed by a number M
384  * indicating the size of each associativity array, followed by a list
385  * of N associativity arrays.
386  */
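/*
 * Hypothetical layout: a property of <2 3  4 5 1  4 5 2> describes
 * N = 2 arrays of M = 3 entries each, i.e. array 0 = {4, 5, 1} and
 * array 1 = {4, 5, 2}; an LMB's aa_index selects which of the N arrays
 * supplies its associativity domains.
 */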
387 static int of_get_assoc_arrays(struct assoc_arrays *aa)
388 {
389 	struct device_node *memory;
390 	const __be32 *prop;
391 	u32 len;
392 
393 	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
394 	if (!memory)
395 		return -1;
396 
397 	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
398 	if (!prop || len < 2 * sizeof(unsigned int)) {
399 		of_node_put(memory);
400 		return -1;
401 	}
402 
403 	aa->n_arrays = of_read_number(prop++, 1);
404 	aa->array_sz = of_read_number(prop++, 1);
405 
406 	of_node_put(memory);
407 
408 	/* Now that we know the number of arrays and size of each array,
409 	 * revalidate the size of the property read in.
410 	 */
411 	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
412 		return -1;
413 
414 	aa->arrays = prop;
415 	return 0;
416 }
417 
418 /*
419  * This is like of_node_to_nid_single() for memory represented in the
420  * ibm,dynamic-reconfiguration-memory node.
421  */
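/*
 * Continuing the hypothetical lookup-array example above: with
 * min_common_depth = 3 and array_sz = 3, an LMB whose aa_index is 1 reads
 * aa.arrays[1 * 3 + 3 - 1] = aa.arrays[5], the last entry of array 1,
 * as its node id.
 */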
422 static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
423 {
424 	struct assoc_arrays aa = { .arrays = NULL };
425 	int default_nid = 0;
426 	int nid = default_nid;
427 	int rc, index;
428 
429 	rc = of_get_assoc_arrays(&aa);
430 	if (rc)
431 		return default_nid;
432 
433 	if (min_common_depth > 0 && min_common_depth <= aa.array_sz &&
434 	    !(lmb->flags & DRCONF_MEM_AI_INVALID) &&
435 	    lmb->aa_index < aa.n_arrays) {
436 		index = lmb->aa_index * aa.array_sz + min_common_depth - 1;
437 		nid = of_read_number(&aa.arrays[index], 1);
438 
439 		if (nid == 0xffff || nid >= MAX_NUMNODES)
440 			nid = default_nid;
441 
442 		if (nid > 0) {
443 			index = lmb->aa_index * aa.array_sz;
444 			initialize_distance_lookup_table(nid,
445 							&aa.arrays[index]);
446 		}
447 	}
448 
449 	return nid;
450 }
451 
452 /*
453  * Figure out to which domain a cpu belongs and stick it there.
454  * Return the id of the domain used.
455  */
456 static int numa_setup_cpu(unsigned long lcpu)
457 {
458 	int nid = -1;
459 	struct device_node *cpu;
460 
461 	/*
462 	 * If a valid cpu-to-node mapping is already available, use it
463 	 * directly instead of querying the firmware, since it represents
464 	 * the most recent mapping notified to us by the platform (eg: VPHN).
465 	 */
466 	if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
467 		map_cpu_to_node(lcpu, nid);
468 		return nid;
469 	}
470 
471 	cpu = of_get_cpu_node(lcpu, NULL);
472 
473 	if (!cpu) {
474 		WARN_ON(1);
475 		if (cpu_present(lcpu))
476 			goto out_present;
477 		else
478 			goto out;
479 	}
480 
481 	nid = of_node_to_nid_single(cpu);
482 
483 out_present:
484 	if (nid < 0 || !node_possible(nid))
485 		nid = first_online_node;
486 
487 	map_cpu_to_node(lcpu, nid);
488 	of_node_put(cpu);
489 out:
490 	return nid;
491 }
492 
493 static void verify_cpu_node_mapping(int cpu, int node)
494 {
495 	int base, sibling, i;
496 
497 	/* Verify that all the threads in the core belong to the same node */
498 	base = cpu_first_thread_sibling(cpu);
499 
500 	for (i = 0; i < threads_per_core; i++) {
501 		sibling = base + i;
502 
503 		if (sibling == cpu || cpu_is_offline(sibling))
504 			continue;
505 
506 		if (cpu_to_node(sibling) != node) {
507 			WARN(1, "CPU thread siblings %d and %d don't belong"
508 				" to the same node!\n", cpu, sibling);
509 			break;
510 		}
511 	}
512 }
513 
514 /* Must run before sched domains notifier. */
515 static int ppc_numa_cpu_prepare(unsigned int cpu)
516 {
517 	int nid;
518 
519 	nid = numa_setup_cpu(cpu);
520 	verify_cpu_node_mapping(cpu, nid);
521 	return 0;
522 }
523 
524 static int ppc_numa_cpu_dead(unsigned int cpu)
525 {
526 #ifdef CONFIG_HOTPLUG_CPU
527 	unmap_cpu_from_node(cpu);
528 #endif
529 	return 0;
530 }
531 
532 /*
533  * Check and possibly modify a memory region to enforce the memory limit.
534  *
535  * Returns the size the region should have to enforce the memory limit.
536  * This will either be the original value of size, a truncated value,
537  * or zero. If the returned value of size is 0 the region should be
538  * discarded as it lies wholly above the memory limit.
539  */
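/*
 * Example with hypothetical numbers: if memblock_end_of_DRAM() is 4GB, a
 * region starting at 3.5GB with a size of 1GB is trimmed to 0.5GB, and a
 * region starting at 5GB is dropped entirely (0 is returned).
 */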
540 static unsigned long __init numa_enforce_memory_limit(unsigned long start,
541 						      unsigned long size)
542 {
543 	/*
544 	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
545 	 * we've already adjusted it for the limit and it takes care of
546 	 * having memory holes below the limit.  Also, in the case of
547 	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
548 	 */
549 
550 	if (start + size <= memblock_end_of_DRAM())
551 		return size;
552 
553 	if (start >= memblock_end_of_DRAM())
554 		return 0;
555 
556 	return memblock_end_of_DRAM() - start;
557 }
558 
559 /*
560  * Reads the counter for a given entry in
561  * linux,drconf-usable-memory property
562  */
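/*
 * Hypothetical example: an entry of <2  base0 size0  base1 size1> in
 * linux,drconf-usable-memory means this LMB contributes two usable
 * (base, size) ranges; the leading counter (2) is what gets returned here.
 */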
563 static inline int __init read_usm_ranges(const __be32 **usm)
564 {
565 	/*
566 	 * For each lmb in ibm,dynamic-memory, the corresponding entry
567 	 * in the linux,drconf-usable-memory property contains a counter
568 	 * followed by that many (base, size) pairs.
569 	 * Read the counter from linux,drconf-usable-memory.
570 	 */
571 	return read_n_cells(n_mem_size_cells, usm);
572 }
573 
574 /*
575  * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
576  * node.  This assumes n_mem_{addr,size}_cells have been set.
577  */
578 static void __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
579 					const __be32 **usm)
580 {
581 	unsigned int ranges, is_kexec_kdump = 0;
582 	unsigned long base, size, sz;
583 	int nid;
584 
585 	/*
586 	 * Skip this block if the reserved bit is set in flags (0x80)
587 	 * or if the block is not assigned to this partition (0x8)
588 	 */
589 	if ((lmb->flags & DRCONF_MEM_RESERVED)
590 	    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
591 		return;
592 
593 	if (*usm)
594 		is_kexec_kdump = 1;
595 
596 	base = lmb->base_addr;
597 	size = drmem_lmb_size();
598 	ranges = 1;
599 
600 	if (is_kexec_kdump) {
601 		ranges = read_usm_ranges(usm);
602 		if (!ranges) /* there are no (base, size) pairs */
603 			return;
604 	}
605 
606 	do {
607 		if (is_kexec_kdump) {
608 			base = read_n_cells(n_mem_addr_cells, usm);
609 			size = read_n_cells(n_mem_size_cells, usm);
610 		}
611 
612 		nid = of_drconf_to_nid_single(lmb);
613 		fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
614 					  &nid);
615 		node_set_online(nid);
616 		sz = numa_enforce_memory_limit(base, size);
617 		if (sz)
618 			memblock_set_node(base, sz, &memblock.memory, nid);
619 	} while (--ranges);
620 }
621 
622 static int __init parse_numa_properties(void)
623 {
624 	struct device_node *memory;
625 	int default_nid = 0;
626 	unsigned long i;
627 
628 	if (numa_enabled == 0) {
629 		printk(KERN_WARNING "NUMA disabled by user\n");
630 		return -1;
631 	}
632 
633 	min_common_depth = find_min_common_depth();
634 
635 	if (min_common_depth < 0)
636 		return min_common_depth;
637 
638 	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
639 
640 	/*
641 	 * Even though we connect cpus to numa domains later in SMP
642 	 * init, we need to know the node ids now. This is because
643 	 * each node to be onlined must have NODE_DATA etc backing it.
644 	 */
645 	for_each_present_cpu(i) {
646 		struct device_node *cpu;
647 		int nid;
648 
649 		cpu = of_get_cpu_node(i, NULL);
650 		BUG_ON(!cpu);
651 		nid = of_node_to_nid_single(cpu);
652 		of_node_put(cpu);
653 
654 		/*
655 		 * Don't fall back to default_nid yet -- we will plug
656 		 * cpus into nodes once the memory scan has discovered
657 		 * the topology.
658 		 */
659 		if (nid < 0)
660 			continue;
661 		node_set_online(nid);
662 	}
663 
664 	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
665 
666 	for_each_node_by_type(memory, "memory") {
667 		unsigned long start;
668 		unsigned long size;
669 		int nid;
670 		int ranges;
671 		const __be32 *memcell_buf;
672 		unsigned int len;
673 
674 		memcell_buf = of_get_property(memory,
675 			"linux,usable-memory", &len);
676 		if (!memcell_buf || len <= 0)
677 			memcell_buf = of_get_property(memory, "reg", &len);
678 		if (!memcell_buf || len <= 0)
679 			continue;
680 
681 		/* number of (addr, size) ranges encoded in the property */
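		/*
		 * Worked example (assuming 2 address cells and 2 size cells):
		 * a 32-byte "reg" property holds 8 cells, giving
		 * 8 / (2 + 2) = 2 (start, size) ranges.
		 */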
682 		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
683 new_range:
684 		/* these are order-sensitive, and modify the buffer pointer */
685 		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
686 		size = read_n_cells(n_mem_size_cells, &memcell_buf);
687 
688 		/*
689 		 * Assumption: either all memory nodes or none will
690 		 * have associativity properties.  If none, then
691 		 * everything goes to default_nid.
692 		 */
693 		nid = of_node_to_nid_single(memory);
694 		if (nid < 0)
695 			nid = default_nid;
696 
697 		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
698 		node_set_online(nid);
699 
700 		size = numa_enforce_memory_limit(start, size);
701 		if (size)
702 			memblock_set_node(start, size, &memblock.memory, nid);
703 
704 		if (--ranges)
705 			goto new_range;
706 	}
707 
708 	/*
709 	 * Now do the same thing for each MEMBLOCK listed in the
710 	 * ibm,dynamic-memory property in the
711 	 * ibm,dynamic-reconfiguration-memory node.
712 	 */
713 	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
714 	if (memory) {
715 		walk_drmem_lmbs(memory, numa_setup_drmem_lmb);
716 		of_node_put(memory);
717 	}
718 
719 	return 0;
720 }
721 
722 static void __init setup_nonnuma(void)
723 {
724 	unsigned long top_of_ram = memblock_end_of_DRAM();
725 	unsigned long total_ram = memblock_phys_mem_size();
726 	unsigned long start_pfn, end_pfn;
727 	unsigned int nid = 0;
728 	struct memblock_region *reg;
729 
730 	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
731 	       top_of_ram, total_ram);
732 	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
733 	       (top_of_ram - total_ram) >> 20);
734 
735 	for_each_memblock(memory, reg) {
736 		start_pfn = memblock_region_memory_base_pfn(reg);
737 		end_pfn = memblock_region_memory_end_pfn(reg);
738 
739 		fake_numa_create_new_node(end_pfn, &nid);
740 		memblock_set_node(PFN_PHYS(start_pfn),
741 				  PFN_PHYS(end_pfn - start_pfn),
742 				  &memblock.memory, nid);
743 		node_set_online(nid);
744 	}
745 }
746 
747 void __init dump_numa_cpu_topology(void)
748 {
749 	unsigned int node;
750 	unsigned int cpu, count;
751 
752 	if (min_common_depth == -1 || !numa_enabled)
753 		return;
754 
755 	for_each_online_node(node) {
756 		pr_info("Node %d CPUs:", node);
757 
758 		count = 0;
759 		/*
760 		 * If we used a CPU iterator here we would miss printing
761 		 * the holes in the cpumap.
762 		 */
763 		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
764 			if (cpumask_test_cpu(cpu,
765 					node_to_cpumask_map[node])) {
766 				if (count == 0)
767 					pr_cont(" %u", cpu);
768 				++count;
769 			} else {
770 				if (count > 1)
771 					pr_cont("-%u", cpu - 1);
772 				count = 0;
773 			}
774 		}
775 
776 		if (count > 1)
777 			pr_cont("-%u", nr_cpu_ids - 1);
778 		pr_cont("\n");
779 	}
780 }
781 
782 /* Initialize NODE_DATA for a node on the local memory */
783 static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
784 {
785 	u64 spanned_pages = end_pfn - start_pfn;
786 	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
787 	u64 nd_pa;
788 	void *nd;
789 	int tnid;
790 
791 	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
792 	nd = __va(nd_pa);
793 
794 	/* report and initialize */
795 	pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
796 		nd_pa, nd_pa + nd_size - 1);
797 	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
798 	if (tnid != nid)
799 		pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);
800 
801 	node_data[nid] = nd;
802 	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
803 	NODE_DATA(nid)->node_id = nid;
804 	NODE_DATA(nid)->node_start_pfn = start_pfn;
805 	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
806 }
807 
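/*
 * Illustrative note: "ibm,max-associativity-domains" lists the maximum
 * number of domains at each associativity level; the entry read below at
 * index min_common_depth bounds how many NUMA nodes the platform may ever
 * present, and each of those node ids is marked possible up front.
 */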
808 static void __init find_possible_nodes(void)
809 {
810 	struct device_node *rtas;
811 	u32 numnodes, i;
812 
813 	if (min_common_depth <= 0)
814 		return;
815 
816 	rtas = of_find_node_by_path("/rtas");
817 	if (!rtas)
818 		return;
819 
820 	if (of_property_read_u32_index(rtas,
821 				"ibm,max-associativity-domains",
822 				min_common_depth, &numnodes))
823 		goto out;
824 
825 	for (i = 0; i < numnodes; i++) {
826 		if (!node_possible(i))
827 			node_set(i, node_possible_map);
828 	}
829 
830 out:
831 	of_node_put(rtas);
832 }
833 
834 void __init mem_topology_setup(void)
835 {
836 	int cpu;
837 
838 	if (parse_numa_properties())
839 		setup_nonnuma();
840 
841 	/*
842 	 * Modify the set of possible NUMA nodes to reflect information
843 	 * available about the set of online nodes, and the set of nodes
844 	 * that we expect to make use of for this platform's affinity
845 	 * calculations.
846 	 */
847 	nodes_and(node_possible_map, node_possible_map, node_online_map);
848 
849 	find_possible_nodes();
850 
851 	setup_node_to_cpumask_map();
852 
853 	reset_numa_cpu_lookup_table();
854 
855 	for_each_present_cpu(cpu)
856 		numa_setup_cpu(cpu);
857 }
858 
859 void __init initmem_init(void)
860 {
861 	int nid;
862 
863 	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
864 	max_pfn = max_low_pfn;
865 
866 	memblock_dump_all();
867 
868 	for_each_online_node(nid) {
869 		unsigned long start_pfn, end_pfn;
870 
871 		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
872 		setup_node_data(nid, start_pfn, end_pfn);
873 		sparse_memory_present_with_active_regions(nid);
874 	}
875 
876 	sparse_init();
877 
878 	/*
879 	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
880 	 * even before we online them, so that we can use cpu_to_{node,mem}
881 	 * early in boot, cf. smp_prepare_cpus().
882 	 * _nocalls() + manual invocation is used because cpuhp is not yet
883 	 * initialized for the boot CPU.
884 	 */
885 	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
886 				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
887 }
888 
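/*
 * Example command lines (illustrative): "numa=off" disables NUMA setup,
 * "numa=debug" turns on the dbg() messages above, and "numa=fake=1G,3G,6G"
 * hands the boundary list following "fake=" to fake_numa_create_new_node()
 * via 'cmdline'.
 */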
889 static int __init early_numa(char *p)
890 {
891 	if (!p)
892 		return 0;
893 
894 	if (strstr(p, "off"))
895 		numa_enabled = 0;
896 
897 	if (strstr(p, "debug"))
898 		numa_debug = 1;
899 
900 	p = strstr(p, "fake=");
901 	if (p)
902 		cmdline = p + strlen("fake=");
903 
904 	return 0;
905 }
906 early_param("numa", early_numa);
907 
908 static bool topology_updates_enabled = true;
909 
910 static int __init early_topology_updates(char *p)
911 {
912 	if (!p)
913 		return 0;
914 
915 	if (!strcmp(p, "off")) {
916 		pr_info("Disabling topology updates\n");
917 		topology_updates_enabled = false;
918 	}
919 
920 	return 0;
921 }
922 early_param("topology_updates", early_topology_updates);
923 
924 #ifdef CONFIG_MEMORY_HOTPLUG
925 /*
926  * Find the node associated with a hot added memory section for
927  * memory represented in the device tree by the property
928  * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
929  */
930 static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
931 {
932 	struct drmem_lmb *lmb;
933 	unsigned long lmb_size;
934 	int nid = -1;
935 
936 	lmb_size = drmem_lmb_size();
937 
938 	for_each_drmem_lmb(lmb) {
939 		/* skip this block if it is reserved or not assigned to
940 		 * this partition */
941 		if ((lmb->flags & DRCONF_MEM_RESERVED)
942 		    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
943 			continue;
944 
945 		if ((scn_addr < lmb->base_addr)
946 		    || (scn_addr >= (lmb->base_addr + lmb_size)))
947 			continue;
948 
949 		nid = of_drconf_to_nid_single(lmb);
950 		break;
951 	}
952 
953 	return nid;
954 }
955 
956 /*
957  * Find the node associated with a hot added memory section for memory
958  * represented in the device tree as a node (i.e. memory@XXXX) for
959  * each memblock.
960  */
961 static int hot_add_node_scn_to_nid(unsigned long scn_addr)
962 {
963 	struct device_node *memory;
964 	int nid = -1;
965 
966 	for_each_node_by_type(memory, "memory") {
967 		unsigned long start, size;
968 		int ranges;
969 		const __be32 *memcell_buf;
970 		unsigned int len;
971 
972 		memcell_buf = of_get_property(memory, "reg", &len);
973 		if (!memcell_buf || len <= 0)
974 			continue;
975 
976 		/* ranges in cell */
977 		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
978 
979 		while (ranges--) {
980 			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
981 			size = read_n_cells(n_mem_size_cells, &memcell_buf);
982 
983 			if ((scn_addr < start) || (scn_addr >= (start + size)))
984 				continue;
985 
986 			nid = of_node_to_nid_single(memory);
987 			break;
988 		}
989 
990 		if (nid >= 0)
991 			break;
992 	}
993 
994 	of_node_put(memory);
995 
996 	return nid;
997 }
998 
999 /*
1000  * Find the node associated with a hot added memory section.  Section
1001  * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
1002  * sections are fully contained within a single MEMBLOCK.
1003  */
1004 int hot_add_scn_to_nid(unsigned long scn_addr)
1005 {
1006 	struct device_node *memory = NULL;
1007 	int nid;
1008 
1009 	if (!numa_enabled || (min_common_depth < 0))
1010 		return first_online_node;
1011 
1012 	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1013 	if (memory) {
1014 		nid = hot_add_drconf_scn_to_nid(scn_addr);
1015 		of_node_put(memory);
1016 	} else {
1017 		nid = hot_add_node_scn_to_nid(scn_addr);
1018 	}
1019 
1020 	if (nid < 0 || !node_possible(nid))
1021 		nid = first_online_node;
1022 
1023 	return nid;
1024 }
1025 
1026 static u64 hot_add_drconf_memory_max(void)
1027 {
1028 	struct device_node *memory = NULL;
1029 	struct device_node *dn = NULL;
1030 	const __be64 *lrdr = NULL;
1031 
1032 	dn = of_find_node_by_path("/rtas");
1033 	if (dn) {
1034 		lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
1035 		of_node_put(dn);
1036 		if (lrdr)
1037 			return be64_to_cpup(lrdr);
1038 	}
1039 
1040 	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1041 	if (memory) {
1042 		of_node_put(memory);
1043 		return drmem_lmb_memory_max();
1044 	}
1045 	return 0;
1046 }
1047 
1048 /*
1049  * memory_hotplug_max - return max address of memory that may be added
1050  *
1051  * This is currently only used on systems that support drconfig memory
1052  * hotplug.
1053  */
1054 u64 memory_hotplug_max(void)
1055 {
1056 	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
1057 }
1058 #endif /* CONFIG_MEMORY_HOTPLUG */
1059 
1060 /* Virtual Processor Home Node (VPHN) support */
1061 #ifdef CONFIG_PPC_SPLPAR
1062 
1063 #include "vphn.h"
1064 
1065 struct topology_update_data {
1066 	struct topology_update_data *next;
1067 	unsigned int cpu;
1068 	int old_nid;
1069 	int new_nid;
1070 };
1071 
1072 #define TOPOLOGY_DEF_TIMER_SECS	60
1073 
1074 static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
1075 static cpumask_t cpu_associativity_changes_mask;
1076 static int vphn_enabled;
1077 static int prrn_enabled;
1078 static void reset_topology_timer(void);
1079 static int topology_timer_secs = 1;
1080 static int topology_inited;
1081 static int topology_update_needed;
1082 
1083 /*
1084  * Change polling interval for associativity changes.
1085  */
1086 int timed_topology_update(int nsecs)
1087 {
1088 	if (vphn_enabled) {
1089 		if (nsecs > 0)
1090 			topology_timer_secs = nsecs;
1091 		else
1092 			topology_timer_secs = TOPOLOGY_DEF_TIMER_SECS;
1093 
1094 		reset_topology_timer();
1095 	}
1096 
1097 	return 0;
1098 }
1099 
1100 /*
1101  * Take a snapshot of the hypervisor's current associativity change
1102  * counters (held in each cpu's VPA).
1103  */
1104 static void setup_cpu_associativity_change_counters(void)
1105 {
1106 	int cpu;
1107 
1108 	/* The VPHN feature supports a maximum of 8 reference points */
1109 	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
1110 
1111 	for_each_possible_cpu(cpu) {
1112 		int i;
1113 		u8 *counts = vphn_cpu_change_counts[cpu];
1114 		volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts;
1115 
1116 		for (i = 0; i < distance_ref_points_depth; i++)
1117 			counts[i] = hypervisor_counts[i];
1118 	}
1119 }
1120 
1121 /*
1122  * The hypervisor maintains a set of 8 associativity change counters in
1123  * the VPA of each cpu that correspond to the associativity levels in the
1124  * ibm,associativity-reference-points property. When an associativity
1125  * level changes, the corresponding counter is incremented.
1126  *
1127  * Set a bit in cpu_associativity_changes_mask for each cpu whose home
1128  * node associativity levels have changed.
1129  *
1130  * Returns the number of cpus with unhandled associativity changes.
1131  */
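/*
 * For instance (hypothetical), if the counter for reference point 1 in a
 * cpu's VPA advances from 3 to 4 while the cached copy still holds 3, every
 * thread sibling of that cpu is flagged in the mask below.
 */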
1132 static int update_cpu_associativity_changes_mask(void)
1133 {
1134 	int cpu;
1135 	cpumask_t *changes = &cpu_associativity_changes_mask;
1136 
1137 	for_each_possible_cpu(cpu) {
1138 		int i, changed = 0;
1139 		u8 *counts = vphn_cpu_change_counts[cpu];
1140 		volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts;
1141 
1142 		for (i = 0; i < distance_ref_points_depth; i++) {
1143 			if (hypervisor_counts[i] != counts[i]) {
1144 				counts[i] = hypervisor_counts[i];
1145 				changed = 1;
1146 			}
1147 		}
1148 		if (changed) {
1149 			cpumask_or(changes, changes, cpu_sibling_mask(cpu));
1150 			cpu = cpu_last_thread_sibling(cpu);
1151 		}
1152 	}
1153 
1154 	return cpumask_weight(changes);
1155 }
1156 
1157 /*
1158  * Retrieve the new associativity information for a virtual processor's
1159  * home node.
1160  */
1161 static long hcall_vphn(unsigned long cpu, __be32 *associativity)
1162 {
1163 	long rc;
1164 	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
1165 	u64 flags = 1;
1166 	int hwcpu = get_hard_smp_processor_id(cpu);
1167 
1168 	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
1169 	vphn_unpack_associativity(retbuf, associativity);
1170 
1171 	return rc;
1172 }
1173 
1174 static long vphn_get_associativity(unsigned long cpu,
1175 					__be32 *associativity)
1176 {
1177 	long rc;
1178 
1179 	rc = hcall_vphn(cpu, associativity);
1180 
1181 	switch (rc) {
1182 	case H_FUNCTION:
1183 		printk(KERN_INFO
1184 			"VPHN is not supported. Disabling polling...\n");
1185 		stop_topology_update();
1186 		break;
1187 	case H_HARDWARE:
1188 		printk(KERN_ERR
1189 			"hcall_vphn() experienced a hardware fault "
1190 			"preventing VPHN. Disabling polling...\n");
1191 		stop_topology_update();
1192 		break;
1193 	case H_SUCCESS:
1194 		dbg("VPHN hcall succeeded. Reset polling...\n");
1195 		timed_topology_update(0);
1196 		break;
1197 	}
1198 
1199 	return rc;
1200 }
1201 
1202 int find_and_online_cpu_nid(int cpu)
1203 {
1204 	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
1205 	int new_nid;
1206 
1207 	/* Use associativity from first thread for all siblings */
1208 	vphn_get_associativity(cpu, associativity);
1209 	new_nid = associativity_to_nid(associativity);
1210 	if (new_nid < 0 || !node_possible(new_nid))
1211 		new_nid = first_online_node;
1212 
1213 	if (NODE_DATA(new_nid) == NULL) {
1214 #ifdef CONFIG_MEMORY_HOTPLUG
1215 		/*
1216 		 * Need to ensure that NODE_DATA is initialized for a node from
1217 		 * available memory (see memblock_alloc_try_nid). If unable to
1218 		 * init the node, then default to nearest node that has memory
1219 		 * installed.
1220 		 */
1221 		if (try_online_node(new_nid))
1222 			new_nid = first_online_node;
1223 #else
1224 		/*
1225 		 * Default to using the nearest node that has memory installed.
1226 		 * Otherwise, it would be necessary to patch the kernel MM code
1227 		 * to deal with more memoryless-node error conditions.
1228 		 */
1229 		new_nid = first_online_node;
1230 #endif
1231 	}
1232 
1233 	pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__,
1234 		cpu, new_nid);
1235 	return new_nid;
1236 }
1237 
1238 /*
1239  * Update the CPU maps and sysfs entries for a single CPU when its NUMA
1240  * characteristics change. This function doesn't perform any locking and is
1241  * only safe to call from stop_machine().
1242  */
1243 static int update_cpu_topology(void *data)
1244 {
1245 	struct topology_update_data *update;
1246 	unsigned long cpu;
1247 
1248 	if (!data)
1249 		return -EINVAL;
1250 
1251 	cpu = smp_processor_id();
1252 
1253 	for (update = data; update; update = update->next) {
1254 		int new_nid = update->new_nid;
1255 		if (cpu != update->cpu)
1256 			continue;
1257 
1258 		unmap_cpu_from_node(cpu);
1259 		map_cpu_to_node(cpu, new_nid);
1260 		set_cpu_numa_node(cpu, new_nid);
1261 		set_cpu_numa_mem(cpu, local_memory_node(new_nid));
1262 		vdso_getcpu_init();
1263 	}
1264 
1265 	return 0;
1266 }
1267 
1268 static int update_lookup_table(void *data)
1269 {
1270 	struct topology_update_data *update;
1271 
1272 	if (!data)
1273 		return -EINVAL;
1274 
1275 	/*
1276 	 * Upon topology update, the numa-cpu lookup table needs to be updated
1277 	 * for all threads in the core, including offline CPUs, to ensure that
1278 	 * future hotplug operations respect the cpu-to-node associativity
1279 	 * properly.
1280 	 */
1281 	for (update = data; update; update = update->next) {
1282 		int nid, base, j;
1283 
1284 		nid = update->new_nid;
1285 		base = cpu_first_thread_sibling(update->cpu);
1286 
1287 		for (j = 0; j < threads_per_core; j++) {
1288 			update_numa_cpu_lookup_table(base + j, nid);
1289 		}
1290 	}
1291 
1292 	return 0;
1293 }
1294 
1295 /*
1296  * Update the node maps and sysfs entries for each cpu whose home node
1297  * has changed. Returns 1 when the topology has changed, and 0 otherwise.
1298  *
1299  * cpus_locked says whether we already hold cpu_hotplug_lock.
1300  */
1301 int numa_update_cpu_topology(bool cpus_locked)
1302 {
1303 	unsigned int cpu, sibling, changed = 0;
1304 	struct topology_update_data *updates, *ud;
1305 	cpumask_t updated_cpus;
1306 	struct device *dev;
1307 	int weight, new_nid, i = 0;
1308 
1309 	if (!prrn_enabled && !vphn_enabled) {
1310 		if (!topology_inited)
1311 			topology_update_needed = 1;
1312 		return 0;
1313 	}
1314 
1315 	weight = cpumask_weight(&cpu_associativity_changes_mask);
1316 	if (!weight)
1317 		return 0;
1318 
1319 	updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
1320 	if (!updates)
1321 		return 0;
1322 
1323 	cpumask_clear(&updated_cpus);
1324 
1325 	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
1326 		/*
1327 		 * If siblings aren't flagged for changes, updates list
1328 		 * will be too short. Skip this update and flag the siblings
1329 		 * for the next one.
1330 		 */
1331 		if (!cpumask_subset(cpu_sibling_mask(cpu),
1332 					&cpu_associativity_changes_mask)) {
1333 			pr_info("Sibling bits not set for associativity "
1334 					"change, cpu%d\n", cpu);
1335 			cpumask_or(&cpu_associativity_changes_mask,
1336 					&cpu_associativity_changes_mask,
1337 					cpu_sibling_mask(cpu));
1338 			cpu = cpu_last_thread_sibling(cpu);
1339 			continue;
1340 		}
1341 
1342 		new_nid = find_and_online_cpu_nid(cpu);
1343 
1344 		if (new_nid == numa_cpu_lookup_table[cpu]) {
1345 			cpumask_andnot(&cpu_associativity_changes_mask,
1346 					&cpu_associativity_changes_mask,
1347 					cpu_sibling_mask(cpu));
1348 			dbg("Assoc chg gives same node %d for cpu%d\n",
1349 					new_nid, cpu);
1350 			cpu = cpu_last_thread_sibling(cpu);
1351 			continue;
1352 		}
1353 
1354 		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
1355 			ud = &updates[i++];
1356 			ud->next = &updates[i];
1357 			ud->cpu = sibling;
1358 			ud->new_nid = new_nid;
1359 			ud->old_nid = numa_cpu_lookup_table[sibling];
1360 			cpumask_set_cpu(sibling, &updated_cpus);
1361 		}
1362 		cpu = cpu_last_thread_sibling(cpu);
1363 	}
1364 
1365 	/*
1366 	 * Terminate the list at the last filled-in entry so that walking
1367 	 * 'updates' via the 'next' pointers cannot run past the array.
1368 	 */
1369 	if (i)
1370 		updates[i-1].next = NULL;
1371 
1372 	pr_debug("Topology update for the following CPUs:\n");
1373 	if (cpumask_weight(&updated_cpus)) {
1374 		for (ud = &updates[0]; ud; ud = ud->next) {
1375 			pr_debug("cpu %d moving from node %d "
1376 					  "to %d\n", ud->cpu,
1377 					  ud->old_nid, ud->new_nid);
1378 		}
1379 	}
1380 
1381 	/*
1382 	 * In cases where we have nothing to update (because the updates list
1383 	 * is too short or because the new topology is same as the old one),
1384 	 * skip invoking update_cpu_topology() via stop-machine(). This is
1385 	 * necessary (and not just a fast-path optimization) since stop-machine
1386 	 * can end up electing a random CPU to run update_cpu_topology(), and
1387 	 * thus trick us into setting up incorrect cpu-node mappings (since
1388 	 * 'updates' is kzalloc()'ed).
1389 	 *
1390 	 * For the same reason, all of the updating below is skipped as well.
1391 	 */
1392 	if (!cpumask_weight(&updated_cpus))
1393 		goto out;
1394 
1395 	if (cpus_locked)
1396 		stop_machine_cpuslocked(update_cpu_topology, &updates[0],
1397 					&updated_cpus);
1398 	else
1399 		stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
1400 
1401 	/*
1402 	 * Update the numa-cpu lookup table with the new mappings, even for
1403 	 * offline CPUs. It is best to perform this update from the stop-
1404 	 * machine context.
1405 	 */
1406 	if (cpus_locked)
1407 		stop_machine_cpuslocked(update_lookup_table, &updates[0],
1408 					cpumask_of(raw_smp_processor_id()));
1409 	else
1410 		stop_machine(update_lookup_table, &updates[0],
1411 			     cpumask_of(raw_smp_processor_id()));
1412 
1413 	for (ud = &updates[0]; ud; ud = ud->next) {
1414 		unregister_cpu_under_node(ud->cpu, ud->old_nid);
1415 		register_cpu_under_node(ud->cpu, ud->new_nid);
1416 
1417 		dev = get_cpu_device(ud->cpu);
1418 		if (dev)
1419 			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
1420 		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
1421 		changed = 1;
1422 	}
1423 
1424 out:
1425 	kfree(updates);
1426 	topology_update_needed = 0;
1427 	return changed;
1428 }
1429 
1430 int arch_update_cpu_topology(void)
1431 {
1432 	return numa_update_cpu_topology(true);
1433 }
1434 
1435 static void topology_work_fn(struct work_struct *work)
1436 {
1437 	rebuild_sched_domains();
1438 }
1439 static DECLARE_WORK(topology_work, topology_work_fn);
1440 
1441 static void topology_schedule_update(void)
1442 {
1443 	schedule_work(&topology_work);
1444 }
1445 
1446 static void topology_timer_fn(struct timer_list *unused)
1447 {
1448 	if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
1449 		topology_schedule_update();
1450 	else if (vphn_enabled) {
1451 		if (update_cpu_associativity_changes_mask() > 0)
1452 			topology_schedule_update();
1453 		reset_topology_timer();
1454 	}
1455 }
1456 static struct timer_list topology_timer;
1457 
1458 static void reset_topology_timer(void)
1459 {
1460 	mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
1461 }
1462 
1463 #ifdef CONFIG_SMP
1464 
1465 static void stage_topology_update(int core_id)
1466 {
1467 	cpumask_or(&cpu_associativity_changes_mask,
1468 		&cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
1469 	reset_topology_timer();
1470 }
1471 
1472 static int dt_update_callback(struct notifier_block *nb,
1473 				unsigned long action, void *data)
1474 {
1475 	struct of_reconfig_data *update = data;
1476 	int rc = NOTIFY_DONE;
1477 
1478 	switch (action) {
1479 	case OF_RECONFIG_UPDATE_PROPERTY:
1480 		if (!of_prop_cmp(update->dn->type, "cpu") &&
1481 		    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
1482 			u32 core_id;
1483 			of_property_read_u32(update->dn, "reg", &core_id);
1484 			stage_topology_update(core_id);
1485 			rc = NOTIFY_OK;
1486 		}
1487 		break;
1488 	}
1489 
1490 	return rc;
1491 }
1492 
1493 static struct notifier_block dt_update_nb = {
1494 	.notifier_call = dt_update_callback,
1495 };
1496 
1497 #endif
1498 
1499 /*
1500  * Start polling for associativity changes.
1501  */
1502 int start_topology_update(void)
1503 {
1504 	int rc = 0;
1505 
1506 	if (firmware_has_feature(FW_FEATURE_PRRN)) {
1507 		if (!prrn_enabled) {
1508 			prrn_enabled = 1;
1509 #ifdef CONFIG_SMP
1510 			rc = of_reconfig_notifier_register(&dt_update_nb);
1511 #endif
1512 		}
1513 	}
1514 	if (firmware_has_feature(FW_FEATURE_VPHN) &&
1515 		   lppaca_shared_proc(get_lppaca())) {
1516 		if (!vphn_enabled) {
1517 			vphn_enabled = 1;
1518 			setup_cpu_associativity_change_counters();
1519 			timer_setup(&topology_timer, topology_timer_fn,
1520 				    TIMER_DEFERRABLE);
1521 			reset_topology_timer();
1522 		}
1523 	}
1524 
1525 	return rc;
1526 }
1527 
1528 /*
1529  * Disable polling for VPHN associativity changes.
1530  */
1531 int stop_topology_update(void)
1532 {
1533 	int rc = 0;
1534 
1535 	if (prrn_enabled) {
1536 		prrn_enabled = 0;
1537 #ifdef CONFIG_SMP
1538 		rc = of_reconfig_notifier_unregister(&dt_update_nb);
1539 #endif
1540 	}
1541 	if (vphn_enabled) {
1542 		vphn_enabled = 0;
1543 		rc = del_timer_sync(&topology_timer);
1544 	}
1545 
1546 	return rc;
1547 }
1548 
1549 int prrn_is_enabled(void)
1550 {
1551 	return prrn_enabled;
1552 }
1553 
1554 static int topology_read(struct seq_file *file, void *v)
1555 {
1556 	if (vphn_enabled || prrn_enabled)
1557 		seq_puts(file, "on\n");
1558 	else
1559 		seq_puts(file, "off\n");
1560 
1561 	return 0;
1562 }
1563 
1564 static int topology_open(struct inode *inode, struct file *file)
1565 {
1566 	return single_open(file, topology_read, NULL);
1567 }
1568 
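/*
 * Usage sketch (illustrative): this file is registered as
 * "powerpc/topology_updates" in topology_update_init() below, so e.g.
 * "echo off > /proc/powerpc/topology_updates" stops VPHN/PRRN polling and
 * "echo on" restarts it.
 */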
1569 static ssize_t topology_write(struct file *file, const char __user *buf,
1570 			      size_t count, loff_t *off)
1571 {
1572 	char kbuf[4]; /* "on" or "off" plus null. */
1573 	int read_len;
1574 
1575 	read_len = count < 3 ? count : 3;
1576 	if (copy_from_user(kbuf, buf, read_len))
1577 		return -EFAULT;
1578 
1579 	kbuf[read_len] = '\0';
1580 
1581 	if (!strncmp(kbuf, "on", 2))
1582 		start_topology_update();
1583 	else if (!strncmp(kbuf, "off", 3))
1584 		stop_topology_update();
1585 	else
1586 		return -EINVAL;
1587 
1588 	return count;
1589 }
1590 
1591 static const struct file_operations topology_ops = {
1592 	.read = seq_read,
1593 	.write = topology_write,
1594 	.open = topology_open,
1595 	.release = single_release
1596 };
1597 
1598 static int topology_update_init(void)
1599 {
1600 	/* Do not poll for changes if disabled at boot */
1601 	if (topology_updates_enabled)
1602 		start_topology_update();
1603 
1604 	if (vphn_enabled)
1605 		topology_schedule_update();
1606 
1607 	if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
1608 		return -ENOMEM;
1609 
1610 	topology_inited = 1;
1611 	if (topology_update_needed)
1612 		bitmap_fill(cpumask_bits(&cpu_associativity_changes_mask),
1613 					nr_cpumask_bits);
1614 
1615 	return 0;
1616 }
1617 device_initcall(topology_update_init);
1618 #endif /* CONFIG_PPC_SPLPAR */
1619