/*
 * arch/arm/kernel/topology.c
 *
 * Copyright (C) 2011 Linaro Limited.
 * Written by: Vincent Guittot
 *
 * based on arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/cputype.h>
#include <asm/topology.h>

/*
 * cpu power scale management
 */

/*
 * cpu power table
 * This per cpu data structure describes the relative capacity of each core.
 * On a heterogeneous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_power field so the scheduler can
 * take this difference into account during load balancing. A per cpu
 * structure is preferred because each CPU updates its own cpu_power field
 * during load balancing, except for idle cores. One idle core is selected
 * to run rebalance_domains for all idle cores, and the cpu_power of those
 * cores can be updated during this sequence.
 */
static DEFINE_PER_CPU(unsigned long, cpu_scale);

unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);
}

static void set_power_scale(unsigned int cpu, unsigned long power)
{
	per_cpu(cpu_scale, cpu) = power;
}

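/*
 * Note (an illustrative remark, not from the original source): cpu_scale
 * is expressed relative to SCHED_POWER_SCALE (1 << SCHED_POWER_SHIFT,
 * i.e. 1024), so a value of 1024 means a "baseline" CPU and e.g. 1535
 * would mean roughly 1.5x that baseline.
 */
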
#ifdef CONFIG_OF
struct cpu_efficiency {
	const char *compatible;
	unsigned long efficiency;
};

/*
 * Table of the relative efficiency of each processor type.
 * The efficiency value must fit in 20 bits and the final
 * cpu_scale value must be in the range
 *   0 < cpu_scale < 3*SCHED_POWER_SCALE/2
 * in order to return at most 1 when DIV_ROUND_CLOSEST
 * is used to compute the capacity of a CPU.
 * Processors that are not defined in the table
 * use the default SCHED_POWER_SCALE value for cpu_scale.
 */
static const struct cpu_efficiency table_efficiency[] = {
	{"arm,cortex-a15", 3891},
	{"arm,cortex-a7",  2048},
	{NULL, },
};

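/*
 * Presumably (an assumption, not stated in this file) the figures above are
 * derived from relative DMIPS/MHz ratings: 3891/2048 is roughly the 1.9x
 * ratio commonly quoted for Cortex-A15 vs Cortex-A7. With
 * SCHED_POWER_SCALE = 1024, the 0 < cpu_scale < 3*SCHED_POWER_SCALE/2
 * constraint above means cpu_scale must stay below 1536, so that
 * DIV_ROUND_CLOSEST(cpu_scale, SCHED_POWER_SCALE) is at most 1.
 */
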
struct cpu_capacity {
	unsigned long hwid;
	unsigned long capacity;
};

static struct cpu_capacity *cpu_capacity;

static unsigned long middle_capacity = 1;

/*
 * Iterate over all CPU descriptors in the DT and compute each CPU's
 * capacity (as per table_efficiency). Also calculate a middle capacity, as
 * close as possible to (max{cap_i} + min{cap_i}) / 2, which is later used
 * to scale the cpu_power field such that an 'average' CPU has a cpu_power
 * of middle value. Also see the comments near table_efficiency[] and
 * update_cpu_power().
 */
static void __init parse_dt_topology(void)
{
	const struct cpu_efficiency *cpu_eff;
	struct device_node *cn = NULL;
	unsigned long min_capacity = (unsigned long)(-1);
	unsigned long max_capacity = 0;
	unsigned long capacity = 0;
	int alloc_size, cpu = 0;

	alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity);
	cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);

	while ((cn = of_find_node_by_type(cn, "cpu"))) {
		const u32 *rate, *reg;
		int len;

		if (cpu >= num_possible_cpus())
			break;

		for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
			if (of_device_is_compatible(cn, cpu_eff->compatible))
				break;

		if (cpu_eff->compatible == NULL)
			continue;

		rate = of_get_property(cn, "clock-frequency", &len);
		if (!rate || len != 4) {
			pr_err("%s missing clock-frequency property\n",
				cn->full_name);
			continue;
		}

		reg = of_get_property(cn, "reg", &len);
		if (!reg || len != 4) {
			pr_err("%s missing reg property\n", cn->full_name);
			continue;
		}

		capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
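
		/*
		 * Illustrative example (clock values assumed, not from a
		 * real DT): a 1 GHz Cortex-A15 gives
		 *   (1000000000 >> 20) * 3891 = 953 * 3891 = 3708123
		 * and a 1 GHz Cortex-A7 gives
		 *   (1000000000 >> 20) * 2048 = 953 * 2048 = 1951744.
		 */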

		/* Save min capacity of the system */
		if (capacity < min_capacity)
			min_capacity = capacity;

		/* Save max capacity of the system */
		if (capacity > max_capacity)
			max_capacity = capacity;

		cpu_capacity[cpu].capacity = capacity;
		cpu_capacity[cpu++].hwid = be32_to_cpup(reg);
	}

	if (cpu < num_possible_cpus())
		cpu_capacity[cpu].hwid = (unsigned long)(-1);

	/* If the min and max capacities are equal, we bypass the update of
	 * cpu_scale because all CPUs have the same capacity. Otherwise, we
	 * compute a middle_capacity factor that will ensure that the
	 * capacity of an 'average' CPU of the system is as close as possible
	 * to SCHED_POWER_SCALE, which is the default value, but with the
	 * constraint explained near table_efficiency[].
	 */
	if (min_capacity == max_capacity)
		cpu_capacity[0].hwid = (unsigned long)(-1);
	else if (4*max_capacity < (3*(max_capacity + min_capacity)))
		middle_capacity = (min_capacity + max_capacity)
				>> (SCHED_POWER_SHIFT+1);
	else
		middle_capacity = ((max_capacity / 3)
				>> (SCHED_POWER_SHIFT-1)) + 1;
}
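
/*
 * Worked example (assumed 1 GHz clocks, not from this file): with the
 * A15/A7 capacities computed above (3708123 and 1951744),
 * 4*max < 3*(max + min) holds, so
 *   middle_capacity = (1951744 + 3708123) >> 11 = 2763
 * which yields cpu_scale = 3708123/2763 = 1342 for an A15 and
 * 1951744/2763 = 706 for an A7; their average is exactly
 * SCHED_POWER_SCALE (1024).
 */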

/*
 * Look up a CPU's custom capacity in the cpu_capacity table during boot.
 * Updating all CPUs is O(n^2) on a heterogeneous system, but the function
 * returns immediately on a homogeneous (SMP) system.
 */
void update_cpu_power(unsigned int cpu, unsigned long hwid)
{
	unsigned int idx = 0;

	/* look for the cpu's hwid in the cpu capacity table */
	for (idx = 0; idx < num_possible_cpus(); idx++) {
		if (cpu_capacity[idx].hwid == hwid)
			break;

		if (cpu_capacity[idx].hwid == -1)
			return;
	}

	if (idx == num_possible_cpus())
		return;

	set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity);

	printk(KERN_INFO "CPU%u: update cpu_power %lu\n",
		cpu, arch_scale_freq_power(NULL, cpu));
}

#else
static inline void parse_dt_topology(void) {}
static inline void update_cpu_power(unsigned int cpu, unsigned long hwid) {}
#endif


/*
 * cpu topology management
 */

#define MPIDR_SMP_BITMASK (0x3 << 30)
#define MPIDR_SMP_VALUE (0x2 << 30)

#define MPIDR_MT_BITMASK (0x1 << 24)

/*
 * These masks reflect the current use of the affinity levels.
 * Each affinity level field can be up to 8 bits wide according to the
 * ARM ARM; only the bits actually used here are masked.
 */
#define MPIDR_HWID_BITMASK 0xFFFFFF

#define MPIDR_LEVEL0_MASK 0x3
#define MPIDR_LEVEL0_SHIFT 0

#define MPIDR_LEVEL1_MASK 0xF
#define MPIDR_LEVEL1_SHIFT 8

#define MPIDR_LEVEL2_MASK 0xFF
#define MPIDR_LEVEL2_SHIFT 16
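
/*
 * Sketch of the MPIDR layout as consumed by the masks above (derived from
 * this file, for illustration):
 *
 *   bits [31:30]  register format (0b10 = multiprocessor format)
 *   bit  [24]     MT (multithreading)
 *   bits [23:16]  affinity level 2 (8 bits used)
 *   bits [11:8]   affinity level 1 (4 bits used)
 *   bits [1:0]    affinity level 0 (2 bits used)
 */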

/*
 * cpu topology table
 */
struct cputopo_arm cpu_topology[NR_CPUS];

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}

void update_siblings_masks(unsigned int cpuid)
{
	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->socket_id != cpu_topo->socket_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
	smp_wmb();
}
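
/*
 * For illustration (a hypothetical big.LITTLE layout, not from this file):
 * with CPU0/CPU1 on socket 0 and CPU2/CPU3 on socket 1, each CPU's
 * core_sibling mask ends up containing the two CPUs of its own socket,
 * while thread_sibling contains only the CPU itself since the core_ids
 * differ when MT is not set.
 */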

/*
 * store_cpu_topology is called at boot when only one cpu is running, and
 * with the mutex cpu_hotplug.lock held when the other cpus are booted,
 * which prevents simultaneous write access to the cpu_topology array
 */
void store_cpu_topology(unsigned int cpuid)
{
	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
	unsigned int mpidr;

	/* If the cpu topology has already been set, just return */
	if (cpuid_topo->core_id != -1)
		return;

	mpidr = read_cpuid_mpidr();

	/* create cpu topology mapping */
	if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
		/*
		 * This is a multiprocessor system:
		 * the multiprocessor format & multiprocessor mode fields
		 * are set
		 */

		if (mpidr & MPIDR_MT_BITMASK) {
			/* core performance interdependency */
			cpuid_topo->thread_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
				& MPIDR_LEVEL0_MASK;
			cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
				& MPIDR_LEVEL1_MASK;
			cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL2_SHIFT)
				& MPIDR_LEVEL2_MASK;
		} else {
			/* largely independent cores */
			cpuid_topo->thread_id = -1;
			cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
				& MPIDR_LEVEL0_MASK;
			cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
				& MPIDR_LEVEL1_MASK;
		}
	} else {
		/*
		 * This is a uniprocessor system:
		 * either we are in multiprocessor format on a uniprocessor
		 * system, or in the old uniprocessor format
		 */
		cpuid_topo->thread_id = -1;
		cpuid_topo->core_id = 0;
		cpuid_topo->socket_id = -1;
	}

	update_siblings_masks(cpuid);

	update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK);

	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
		cpuid, cpu_topology[cpuid].thread_id,
		cpu_topology[cpuid].core_id,
		cpu_topology[cpuid].socket_id, mpidr);
}
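
/*
 * Worked example (a hypothetical MPIDR value, for illustration): for
 * mpidr = 0x80000101, bits[31:30] = 0b10 (multiprocessor) and MT = 0, so
 *   core_id   = (0x80000101 >> 0) & 0x3 = 1
 *   socket_id = (0x80000101 >> 8) & 0xF = 1
 * and, assuming cpuid is 1, the boot log above would read
 *   "CPU1: thread -1, cpu 1, socket 1, mpidr 80000101".
 */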

/*
 * init_cpu_topology is called at boot when only one cpu is running,
 * which prevents simultaneous write access to the cpu_topology array
 */
void __init init_cpu_topology(void)
{
	unsigned int cpu;

	/* init core mask and power */
	for_each_possible_cpu(cpu) {
		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);

		cpu_topo->thread_id = -1;
		cpu_topo->core_id =  -1;
		cpu_topo->socket_id = -1;
		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);

		set_power_scale(cpu, SCHED_POWER_SCALE);
	}
	smp_wmb();

	parse_dt_topology();
}