xref: /openbmc/linux/arch/arm64/kernel/topology.c (revision 8730046c)
/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cpufreq.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
static DEFINE_MUTEX(cpu_scale_mutex);

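/*
 * Return the compute capacity of @cpu, normalized against
 * SCHED_CAPACITY_SCALE (1024).  This is the arch hook the scheduler
 * uses whenever it needs per-CPU capacity.
 */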
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);
}

static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

#ifdef CONFIG_PROC_SYSCTL
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%lu\n",
			arch_scale_cpu_capacity(NULL, cpu->dev.id));
}

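/*
 * Writing a CPU's cpu_capacity attribute updates the capacity of every
 * core sibling (all CPUs in the same cluster), not just the CPU whose
 * file was written.  Values above SCHED_CAPACITY_SCALE are rejected.
 */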
static ssize_t cpu_capacity_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int this_cpu = cpu->dev.id, i;
	unsigned long new_capacity;
	ssize_t ret;

	if (count) {
		ret = kstrtoul(buf, 0, &new_capacity);
		if (ret)
			return ret;
		if (new_capacity > SCHED_CAPACITY_SCALE)
			return -EINVAL;

		mutex_lock(&cpu_scale_mutex);
		for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
			set_capacity_scale(i, new_capacity);
		mutex_unlock(&cpu_scale_mutex);
	}

	return count;
}

static DEVICE_ATTR_RW(cpu_capacity);

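/*
 * Expose the cpu_capacity attribute under each CPU device, i.e. as
 * /sys/devices/system/cpu/cpuN/cpu_capacity.  Runs at subsys_initcall
 * time, by when the CPU devices are expected to have been registered.
 */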
static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);
#endif

static u32 capacity_scale;
static u32 *raw_capacity;
static bool cap_parsing_failed;

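/*
 * Read the optional "capacity-dmips-mhz" property of a cpu node.  Raw
 * per-CPU values are collected in raw_capacity[] and the running
 * maximum in capacity_scale; normalize_cpu_capacity() later rescales
 * them so the largest CPU ends up at SCHED_CAPACITY_SCALE.  If any CPU
 * lacks the property, parsing is abandoned and every CPU keeps the
 * default capacity.
 */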
static void __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return;

	ret = of_property_read_u32(cpu_node,
				   "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
				cap_parsing_failed = true;
				return;
			}
		}
		capacity_scale = max(cpu_capacity, capacity_scale);
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
			cpu_node->full_name, raw_capacity[cpu]);
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %s raw capacity\n",
				cpu_node->full_name);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		kfree(raw_capacity);
	}
}

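/*
 * Convert the raw capacities to the scheduler's scale:
 *
 *	cpu_scale[cpu] = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
 *			 / capacity_scale
 *
 * so the CPU with the largest raw value gets exactly
 * SCHED_CAPACITY_SCALE and the others are scaled relative to it.
 */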
static void normalize_cpu_capacity(void)
{
	u64 capacity;
	int cpu;

	if (!raw_capacity || cap_parsing_failed)
		return;

	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
	mutex_lock(&cpu_scale_mutex);
	for_each_possible_cpu(cpu) {
		pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
			 cpu, raw_capacity[cpu]);
		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
			/ capacity_scale;
		set_capacity_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			cpu, arch_scale_cpu_capacity(NULL, cpu));
	}
	mutex_unlock(&cpu_scale_mutex);
}

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static bool cap_parsing_done;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

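/*
 * The DT values are expressed in DMIPS/MHz, so once cpufreq publishes a
 * policy for a set of CPUs their raw capacities are rescaled by the
 * maximum frequency and normalized again.  Each CPUFREQ_NOTIFY event
 * clears the policy's CPUs from cpus_to_visit; when the mask is empty
 * every CPU has been seen and the notifier schedules its own
 * unregistration.
 */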
static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (cap_parsing_failed || cap_parsing_done)
		return 0;

	switch (val) {
	case CPUFREQ_NOTIFY:
		pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
				cpumask_pr_args(policy->related_cpus),
				cpumask_pr_args(cpus_to_visit));
		cpumask_andnot(cpus_to_visit,
			       cpus_to_visit,
			       policy->related_cpus);
		for_each_cpu(cpu, policy->related_cpus) {
			raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
					    policy->cpuinfo.max_freq / 1000UL;
			capacity_scale = max(raw_capacity[cpu], capacity_scale);
		}
		if (cpumask_empty(cpus_to_visit)) {
			normalize_cpu_capacity();
			kfree(raw_capacity);
			pr_debug("cpu_capacity: parsing done\n");
			cap_parsing_done = true;
			schedule_work(&parsing_done_work);
		}
	}
	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	if (cap_parsing_failed)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
		pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
		return -ENOMEM;
	}
	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	return cpufreq_register_notifier(&init_cpu_capacity_notifier,
					 CPUFREQ_POLICY_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
					 CPUFREQ_POLICY_NOTIFIER);
}

#else
static int __init free_raw_capacity(void)
{
	kfree(raw_capacity);

	return 0;
}
core_initcall(free_raw_capacity);
#endif

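/*
 * Resolve the "cpu" phandle of a cpu-map core/thread node to a logical
 * CPU number.  Returns -1 if the phandle is missing or matches no
 * possible CPU; on success the CPU's capacity is parsed as a side
 * effect.
 */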
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	for_each_possible_cpu(cpu) {
		if (of_get_cpu_node(cpu, NULL) == cpu_node) {
			parse_cpu_capacity(cpu_node, cpu);
			of_node_put(cpu_node);
			return cpu;
		}
	}

	pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);

	of_node_put(cpu_node);
	return -1;
}

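/*
 * A core node either contains threadN children (one per hardware
 * thread) or references a CPU directly through a "cpu" phandle, but
 * not both.  Fill in cluster/core/thread IDs for every CPU found.
 */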
static int __init parse_core(struct device_node *core, int cluster_id,
			     int core_id)
{
	char name[10];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].cluster_id = cluster_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else {
				pr_err("%s: Can't get CPU for thread\n",
				       t->full_name);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%s: Core has both threads and CPU\n",
			       core->full_name);
			return -EINVAL;
		}

		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf) {
		pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
		return -EINVAL;
	}

	return 0;
}

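/*
 * Clusters may nest, but the nesting is flattened: each leaf cluster
 * simply gets the next cluster_id.  As an illustration only (not taken
 * from any real board), a two-cluster cpu-map could look like:
 *
 *	cpu-map {
 *		cluster0 {
 *			core0 { cpu = <&cpu0>; };
 *			core1 { cpu = <&cpu1>; };
 *		};
 *		cluster1 {
 *			core0 { cpu = <&cpu2>; };
 *			core1 { cpu = <&cpu3>; };
 *		};
 *	};
 *
 * where &cpu0..&cpu3 are phandles of nodes under /cpus.
 */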
static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[10];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int cluster_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%s: cpu-map children should be clusters\n",
				       c->full_name);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, cluster_id, core_id++);
			} else {
				pr_err("%s: Non-leaf cluster with core %s\n",
				       cluster->full_name, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%s: empty cluster\n", cluster->full_name);

	if (leaf)
		cluster_id++;

	return 0;
}

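/*
 * Walk /cpus/cpu-map, treating it as the root "cluster".  Returns
 * -EINVAL if any possible CPU is left without a cluster_id so that the
 * caller can discard the partially parsed topology.
 */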
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map) {
		cap_parsing_failed = true;
		goto out;
	}

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	normalize_cpu_capacity();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].cluster_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}

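/*
 * Rebuild the sibling masks of @cpuid and of every CPU sharing topology
 * with it: core_sibling covers all CPUs in the same cluster,
 * thread_sibling all CPUs in the same core.
 */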
static void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

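/*
 * Called as each CPU is brought up.  If the DT did not describe this
 * CPU (cluster_id is still -1), fall back to decoding the MPIDR_EL1
 * affinity fields:
 *
 *	MT bit set:	Aff0 = thread, Aff1 = core, Aff2/Aff3 = cluster
 *	MT bit clear:	Aff0 = core,  Aff1/Aff2/Aff3 = cluster
 *
 * Either way the sibling masks are refreshed afterwards.
 */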
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	u64 mpidr;

	if (cpuid_topo->cluster_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* Uniprocessor systems can rely on default topology values */
	if (mpidr & MPIDR_UP_BITMASK)
		return;

	/* Create cpu topology mapping based on MPIDR. */
	if (mpidr & MPIDR_MT_BITMASK) {
		/* Multiprocessor system : Multi-threads per core */
		cpuid_topo->thread_id  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
	} else {
		/* Multiprocessor system : Single-thread per core */
		cpuid_topo->thread_id  = -1;
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
	}

	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
		 cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
}

static void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = 0;
		cpu_topo->cluster_id = -1;

		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
	}
}

void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}
527