/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/arch_topology.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

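/*
 * get_cpu_for_node - map a cpu-map node to a logical CPU id
 *
 * Resolve the "cpu" phandle of @node to a logical CPU number and, on
 * success, parse that CPU's capacity information from the device tree.
 * Returns the logical CPU id, or a negative value if the node has no
 * "cpu" phandle or the referenced CPU is not known to the kernel.
 */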
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_crit("Unable to find CPU id for %pOF\n", cpu_node);

	of_node_put(cpu_node);
	return cpu;
}

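/*
 * parse_core - parse a "coreN" child of a cpu-map cluster node
 *
 * Walk any "threadN" children of @core and record cluster/core/thread
 * ids for each CPU found.  A core node must either contain thread
 * nodes or reference a CPU directly, never both.
 */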
static int __init parse_core(struct device_node *core, int cluster_id,
			     int core_id)
{
	char name[10];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].cluster_id = cluster_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else {
				pr_err("%pOF: Can't get CPU for thread\n", t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n", core);
			return -EINVAL;
		}

		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}

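/*
 * parse_cluster - recursively parse a "clusterN" node of the cpu-map
 *
 * Descend into nested clusters first, then parse the "coreN" children
 * of leaf clusters.  Each leaf cluster consumes the next sequential
 * cluster id.
 */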
static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[10];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int cluster_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, cluster_id, core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	if (leaf)
		cluster_id++;

	return 0;
}

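/*
 * parse_dt_topology - build the CPU topology from the device tree
 *
 * Parse /cpus/cpu-map as a root cluster and normalize the parsed CPU
 * capacities.  Returns 0 on success or when no cpu-map is present, and
 * -EINVAL if the cpu-map is malformed or does not describe every
 * possible CPU.
 */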
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided, cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].cluster_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

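/*
 * The scheduler uses this mask when building its multi-core domain:
 * it contains every CPU that shares a cluster with @cpu.
 */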
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}

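/*
 * Fold the topology of @cpuid into the sibling masks: every CPU in the
 * same cluster becomes a core sibling, and every CPU in the same core
 * becomes a thread sibling.  The masks are kept symmetric by updating
 * both sides of each relationship.
 */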
static void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

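/*
 * Called on each CPU as it boots.  If the device tree did not describe
 * this CPU, fall back to deriving its cluster/core/thread ids from the
 * affinity fields of MPIDR_EL1.
 */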
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	u64 mpidr;

	if (cpuid_topo->cluster_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* Uniprocessor systems can rely on default topology values */
	if (mpidr & MPIDR_UP_BITMASK)
		return;

	/* Create cpu topology mapping based on MPIDR. */
	if (mpidr & MPIDR_MT_BITMASK) {
		/* Multiprocessor system: Multiple threads per core */
		cpuid_topo->thread_id  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
	} else {
		/* Multiprocessor system: Single thread per core */
		cpuid_topo->thread_id  = -1;
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
	}

	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
		 cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
}

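/*
 * Restore every CPU to the default topology: unknown cluster, core 0,
 * no threads, and sibling masks containing only the CPU itself.
 */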
static void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = 0;
		cpu_topo->cluster_id = -1;

		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
	}
}

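/*
 * Early boot topology setup: start from clean defaults, then apply the
 * device tree description if one is available.
 */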
void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}