/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>

#include <asm/cputype.h>
#include <asm/topology.h>

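/*
 * Map a topology node's "cpu" phandle to a logical CPU number by
 * matching it against each possible CPU's device tree node.  Returns
 * the logical CPU on success, or -1 if the phandle is missing or
 * matches no possible CPU.
 */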
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	for_each_possible_cpu(cpu) {
		if (of_get_cpu_node(cpu, NULL) == cpu_node) {
			of_node_put(cpu_node);
			return cpu;
		}
	}

	pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);

	of_node_put(cpu_node);
	return -1;
}

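/*
 * Parse a coreN node.  A core either contains threadN subnodes (SMT),
 * each carrying a "cpu" phandle, or carries a "cpu" phandle itself
 * (single-threaded core); a core that has both is rejected as
 * malformed.
 */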
static int __init parse_core(struct device_node *core, int cluster_id,
			     int core_id)
{
	char name[10];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].cluster_id = cluster_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else {
				pr_err("%s: Can't get CPU for thread\n",
				       t->full_name);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%s: Core has both threads and CPU\n",
			       core->full_name);
			return -EINVAL;
		}

		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf) {
		pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
		return -EINVAL;
	}

	return 0;
}

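/*
 * For reference, an illustrative cpu-map of the shape this parser
 * walks, following Documentation/devicetree/bindings/arm/topology.txt.
 * The CPU0/CPU1 labels are placeholders for real cpu-node phandles,
 * and clusters may nest (clusterN inside clusterN) to any depth:
 *
 *	cpus {
 *		...
 *		cpu-map {
 *			cluster0 {
 *				core0 {
 *					cpu = <&CPU0>;
 *				};
 *				core1 {
 *					cpu = <&CPU1>;
 *				};
 *			};
 *			cluster1 {
 *				...
 *			};
 *		};
 *	};
 */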
static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[10];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int cluster_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%s: cpu-map children should be clusters\n",
				       c->full_name);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, cluster_id, core_id++);
			} else {
				pr_err("%s: Non-leaf cluster with core %s\n",
				       cluster->full_name, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%s: empty cluster\n", cluster->full_name);

	if (leaf)
		cluster_id++;

	return 0;
}

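/*
 * Walk /cpus/cpu-map and fill cpu_topology[] from it.  Returns 0 when
 * no cpu-map is present (the MPIDR fallback in store_cpu_topology()
 * then applies) and -EINVAL when the map is malformed or leaves any
 * possible CPU undescribed.
 */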
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].cluster_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

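/*
 * core_sibling holds every CPU sharing a cluster with @cpu; this is
 * the mask the scheduler picks up when building the MC-level sched
 * domain.
 */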
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}

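/*
 * Fold the freshly stored IDs of @cpuid into the sibling masks of all
 * affected CPUs: CPUs in the same cluster become core siblings; CPUs
 * that also share a core become thread siblings.
 */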
static void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

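/*
 * Fallback for CPUs the DT did not describe: derive the IDs from the
 * MPIDR_EL1 affinity fields, which the architecture defines as Aff0 in
 * bits [7:0], Aff1 in [15:8], Aff2 in [23:16] and Aff3 in [39:32]; the
 * MT bit ([24]) indicates that Aff0 numbers threads within a core
 * rather than cores within a cluster.  For example, MPIDR 0x80000101
 * (MT clear, Aff1 = 1, Aff0 = 1) yields cluster 1, core 1, thread -1.
 */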
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	u64 mpidr;

	if (cpuid_topo->cluster_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* Uniprocessor systems can rely on default topology values */
	if (mpidr & MPIDR_UP_BITMASK)
		return;

	/* Create cpu topology mapping based on MPIDR. */
	if (mpidr & MPIDR_MT_BITMASK) {
		/* Multiprocessor system : Multi-threads per core */
		cpuid_topo->thread_id  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
	} else {
		/* Multiprocessor system : Single-thread per core */
		cpuid_topo->thread_id  = -1;
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
	}

	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
		 cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
}

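/*
 * Return every possible CPU to the "unknown" defaults: no thread ID,
 * core 0 of an undiscovered (-1) cluster, and each CPU its own sole
 * core and thread sibling.
 */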
static void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = 0;
		cpu_topo->cluster_id = -1;

		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
	}
}

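/*
 * Boot-time entry point; on arm64 this is expected to run from the SMP
 * bring-up path (smp_prepare_cpus()) before each CPU, boot CPU
 * included, records its own topology via store_cpu_topology().
 */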
void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}