xref: /openbmc/linux/drivers/cpufreq/cpufreq-dt.c (revision 9cfc5c90)
/*
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 *
 * Copyright (C) 2014 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 *
 * The OPP code in function set_target() is reused from
 * drivers/cpufreq/omap-cpufreq.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

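/*
 * Illustrative sketch (not part of the original file) of the kind of CPU
 * node this driver consumes.  The clock and regulator names below are
 * hypothetical; the property set follows the cpufreq-dt/OPP bindings
 * (see Documentation/devicetree/bindings/ for the authoritative format):
 *
 *	cpu@0 {
 *		compatible = "arm,cortex-a9";
 *		device_type = "cpu";
 *		reg = <0>;
 *		clocks = <&soc_clks CPU_CLK>;		// hypothetical clock provider
 *		cpu-supply = <&vdd_cpu_reg>;		// hypothetical regulator
 *		clock-latency = <300000>;		// transition latency, in ns
 *		voltage-tolerance = <2>;		// in percent
 *		#cooling-cells = <2>;
 *		operating-points = <
 *			// kHz		uV
 *			996000		1250000
 *			792000		1175000
 *			396000		1075000
 *		>;
 *	};
 */
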
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpufreq-dt.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>

struct private_data {
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct thermal_cooling_device *cdev;
	unsigned int voltage_tolerance; /* in percentage */
};

static struct freq_attr *cpufreq_dt_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,   /* Extra space for boost-attr if required */
	NULL,
};

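/*
 * ->target_index() callback: switch policy->cpus to the OPP selected by the
 * cpufreq core.  When a regulator is available, the voltage is raised before
 * the clock when scaling up and lowered after the clock when scaling down,
 * so the CPU is never clocked faster than its current supply allows.
 */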
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
	struct dev_pm_opp *opp;
	struct cpufreq_frequency_table *freq_table = policy->freq_table;
	struct clk *cpu_clk = policy->clk;
	struct private_data *priv = policy->driver_data;
	struct device *cpu_dev = priv->cpu_dev;
	struct regulator *cpu_reg = priv->cpu_reg;
	unsigned long volt = 0, volt_old = 0, tol = 0;
	unsigned int old_freq, new_freq;
	long freq_Hz, freq_exact;
	int ret;

	freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
	if (freq_Hz <= 0)
		freq_Hz = freq_table[index].frequency * 1000;

	freq_exact = freq_Hz;
	new_freq = freq_Hz / 1000;
	old_freq = clk_get_rate(cpu_clk) / 1000;

	if (!IS_ERR(cpu_reg)) {
		unsigned long opp_freq;

		rcu_read_lock();
		opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
		if (IS_ERR(opp)) {
			rcu_read_unlock();
			dev_err(cpu_dev, "failed to find OPP for %ld\n",
				freq_Hz);
			return PTR_ERR(opp);
		}
		volt = dev_pm_opp_get_voltage(opp);
		opp_freq = dev_pm_opp_get_freq(opp);
		rcu_read_unlock();
		tol = volt * priv->voltage_tolerance / 100;
		volt_old = regulator_get_voltage(cpu_reg);
		dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
			opp_freq / 1000, volt);
	}

	dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
		old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
		new_freq / 1000, volt ? volt / 1000 : -1);

	/* scaling up?  scale voltage before frequency */
	if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
		if (ret) {
			dev_err(cpu_dev, "failed to scale voltage up: %d\n",
				ret);
			return ret;
		}
	}

	ret = clk_set_rate(cpu_clk, freq_exact);
	if (ret) {
		dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
		if (!IS_ERR(cpu_reg) && volt_old > 0)
			regulator_set_voltage_tol(cpu_reg, volt_old, tol);
		return ret;
	}

	/* scaling down?  scale voltage after frequency */
	if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
		if (ret) {
			dev_err(cpu_dev, "failed to scale voltage down: %d\n",
				ret);
			clk_set_rate(cpu_clk, old_freq * 1000);
		}
	}

	return ret;
}

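/*
 * Look up the struct device, (optional) supply regulator and clock for @cpu.
 * For CPU0 the regulator is first requested under the legacy "cpu0" supply
 * name and then under "cpu"; other CPUs use "cpu" directly.  A missing
 * regulator is tolerated, but a regulator or clock that is described in DT
 * yet not registered makes us return -EPROBE_DEFER so probing is retried.
 */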
static int allocate_resources(int cpu, struct device **cdev,
			      struct regulator **creg, struct clk **cclk)
{
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct clk *cpu_clk;
	int ret = 0;
	char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", cpu);
		return -ENODEV;
	}

	/* Try "cpu0" for older DTs */
	if (!cpu)
		reg = reg_cpu0;
	else
		reg = reg_cpu;

try_again:
	cpu_reg = regulator_get_optional(cpu_dev, reg);
	if (IS_ERR(cpu_reg)) {
		/*
		 * If cpu's regulator supply node is present, but regulator is
		 * not yet registered, we should try deferring probe.
		 */
		if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
			dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
				cpu);
			return -EPROBE_DEFER;
		}

		/* Try with "cpu-supply" */
		if (reg == reg_cpu0) {
			reg = reg_cpu;
			goto try_again;
		}

		dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
			cpu, PTR_ERR(cpu_reg));
	}

	cpu_clk = clk_get(cpu_dev, NULL);
	if (IS_ERR(cpu_clk)) {
		/* put regulator */
		if (!IS_ERR(cpu_reg))
			regulator_put(cpu_reg);

		ret = PTR_ERR(cpu_clk);

		/*
		 * If cpu's clk node is present, but clock is not yet
		 * registered, we should try deferring probe.
		 */
		if (ret == -EPROBE_DEFER)
			dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
		else
			dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
				ret);
	} else {
		*cdev = cpu_dev;
		*creg = cpu_reg;
		*cclk = cpu_clk;
	}

	return ret;
}

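/*
 * ->init() callback: runs once per policy.  It acquires the CPU's clock and
 * (optional) regulator, builds the OPP table from DT (operating-points-v2 or
 * the legacy bindings), disables OPPs the regulator cannot supply, derives
 * the transition latency and hands the resulting frequency table to the
 * cpufreq core.
 */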
static int cpufreq_init(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *freq_table;
	struct device_node *np;
	struct private_data *priv;
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct clk *cpu_clk;
	struct dev_pm_opp *suspend_opp;
	unsigned long min_uV = ~0, max_uV = 0;
	unsigned int transition_latency;
	bool need_update = false;
	int ret;

	ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
	if (ret) {
		pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
		return ret;
	}

	np = of_node_get(cpu_dev->of_node);
	if (!np) {
		dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu);
		ret = -ENOENT;
		goto out_put_reg_clk;
	}

	/* Get OPP-sharing information from "operating-points-v2" bindings */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		/*
		 * operating-points-v2 not supported, fall back to the old
		 * method of finding shared OPPs for backward compatibility.
		 */
		if (ret == -ENOENT)
			need_update = true;
		else
			goto out_node_put;
	}

	/*
	 * Initialize OPP tables for all policy->cpus. They will be shared by
	 * all CPUs that the OPP bindings mark as sharing their OPPs.
	 *
	 * For platforms not using operating-points-v2 bindings, we do this
	 * before updating policy->cpus. Otherwise, we will end up creating
	 * duplicate OPPs for policy->cpus.
	 *
	 * OPPs might be populated at runtime, so don't check for errors here.
	 */
	dev_pm_opp_of_cpumask_add_table(policy->cpus);

	/*
	 * But we need the OPP table to function, so if it is not there yet,
	 * give platform code a chance to provide it for us.
	 */
	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		pr_debug("OPP table is not ready, deferring probe\n");
		ret = -EPROBE_DEFER;
		goto out_free_opp;
	}

	if (need_update) {
		struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();

		if (!pd || !pd->independent_clocks)
			cpumask_setall(policy->cpus);

		/*
		 * OPP tables are initialized only for policy->cpu, do it for
		 * others as well.
		 */
		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
		if (ret)
			dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
				__func__, ret);

		of_property_read_u32(np, "clock-latency", &transition_latency);
	} else {
		transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto out_free_opp;
	}

	of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);

	if (!transition_latency)
		transition_latency = CPUFREQ_ETERNAL;

	if (!IS_ERR(cpu_reg)) {
		unsigned long opp_freq = 0;

		/*
		 * Disable any OPPs where the connected regulator isn't able to
		 * provide the specified voltage and record minimum and maximum
		 * voltage levels.
		 */
		while (1) {
			struct dev_pm_opp *opp;
			unsigned long opp_uV, tol_uV;

			rcu_read_lock();
			opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
			if (IS_ERR(opp)) {
				rcu_read_unlock();
				break;
			}
			opp_uV = dev_pm_opp_get_voltage(opp);
			rcu_read_unlock();

			tol_uV = opp_uV * priv->voltage_tolerance / 100;
			if (regulator_is_supported_voltage(cpu_reg,
							   opp_uV - tol_uV,
							   opp_uV + tol_uV)) {
				if (opp_uV < min_uV)
					min_uV = opp_uV;
				if (opp_uV > max_uV)
					max_uV = opp_uV;
			} else {
				dev_pm_opp_disable(cpu_dev, opp_freq);
			}

			opp_freq++;
		}

		ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
		if (ret > 0)
			transition_latency += ret * 1000;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		pr_err("failed to init cpufreq table: %d\n", ret);
		goto out_free_priv;
	}

	priv->cpu_dev = cpu_dev;
	priv->cpu_reg = cpu_reg;
	policy->driver_data = priv;

	policy->clk = cpu_clk;

	rcu_read_lock();
	suspend_opp = dev_pm_opp_get_suspend_opp(cpu_dev);
	if (suspend_opp)
		policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
	rcu_read_unlock();

	ret = cpufreq_table_validate_and_show(policy, freq_table);
	if (ret) {
		dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
			ret);
		goto out_free_cpufreq_table;
	}

	/* Support turbo/boost mode */
	if (policy_has_boost_freq(policy)) {
		/* This gets disabled by core on driver unregister */
		ret = cpufreq_enable_boost_support();
		if (ret)
			goto out_free_cpufreq_table;
		cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	of_node_put(np);

	return 0;

out_free_cpufreq_table:
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_priv:
	kfree(priv);
out_free_opp:
	dev_pm_opp_of_cpumask_remove_table(policy->cpus);
out_node_put:
	of_node_put(np);
out_put_reg_clk:
	clk_put(cpu_clk);
	if (!IS_ERR(cpu_reg))
		regulator_put(cpu_reg);

	return ret;
}

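/*
 * ->exit() callback: undo cpufreq_init() in reverse order: unregister the
 * cooling device, free the frequency table, drop the OPP table and release
 * the clock and regulator references.
 */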
static int cpufreq_exit(struct cpufreq_policy *policy)
{
	struct private_data *priv = policy->driver_data;

	cpufreq_cooling_unregister(priv->cdev);
	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
	clk_put(policy->clk);
	if (!IS_ERR(priv->cpu_reg))
		regulator_put(priv->cpu_reg);
	kfree(priv);

	return 0;
}

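/*
 * ->ready() callback: invoked by the cpufreq core once the policy is fully
 * initialized, which is the earliest point at which it is safe to expose the
 * policy as a thermal cooling device.
 */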
static void cpufreq_ready(struct cpufreq_policy *policy)
{
	struct private_data *priv = policy->driver_data;
	struct device_node *np = of_node_get(priv->cpu_dev->of_node);

	if (WARN_ON(!np))
		return;

	/*
	 * For now, just loading the cooling device;
	 * thermal DT code takes care of matching them.
	 */
	if (of_find_property(np, "#cooling-cells", NULL)) {
		priv->cdev = of_cpufreq_cooling_register(np,
							 policy->related_cpus);
		if (IS_ERR(priv->cdev)) {
			dev_err(priv->cpu_dev,
				"running cpufreq without cooling device: %ld\n",
				PTR_ERR(priv->cdev));

			priv->cdev = NULL;
		}
	}

	of_node_put(np);
}

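/*
 * Generic helpers from the cpufreq core handle frequency-table verification,
 * current-frequency reads and suspend; only init/exit, ready and the actual
 * target_index switch are driver specific.
 */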
static struct cpufreq_driver dt_cpufreq_driver = {
	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = set_target,
	.get = cpufreq_generic_get,
	.init = cpufreq_init,
	.exit = cpufreq_exit,
	.ready = cpufreq_ready,
	.name = "cpufreq-dt",
	.attr = cpufreq_dt_attr,
	.suspend = cpufreq_generic_suspend,
};

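/*
 * This driver is only bound when platform code registers a "cpufreq-dt"
 * platform device, typically along the lines of (illustrative sketch, not
 * taken from this file):
 *
 *	platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
 *
 * or via platform_device_register_data() when a struct
 * cpufreq_dt_platform_data (e.g. .independent_clocks) must be passed in.
 */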
static int dt_cpufreq_probe(struct platform_device *pdev)
{
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct clk *cpu_clk;
	int ret;

	/*
	 * All per-cluster (CPUs sharing clock/voltages) initialization is done
	 * from ->init(). In probe(), we just need to make sure that clk and
	 * regulators are available. Else defer probe and retry.
	 *
	 * FIXME: Is checking this only for CPU0 sufficient?
	 */
	ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
	if (ret)
		return ret;

	clk_put(cpu_clk);
	if (!IS_ERR(cpu_reg))
		regulator_put(cpu_reg);

	dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);

	ret = cpufreq_register_driver(&dt_cpufreq_driver);
	if (ret)
		dev_err(cpu_dev, "failed to register driver: %d\n", ret);

	return ret;
}

static int dt_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&dt_cpufreq_driver);
	return 0;
}

static struct platform_driver dt_cpufreq_platdrv = {
	.driver = {
		.name	= "cpufreq-dt",
	},
	.probe		= dt_cpufreq_probe,
	.remove		= dt_cpufreq_remove,
};
module_platform_driver(dt_cpufreq_platdrv);

MODULE_ALIAS("platform:cpufreq-dt");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic cpufreq driver");
MODULE_LICENSE("GPL");