/*
 * Copyright (c) 2015 Linaro Ltd.
 * Author: Pi-Cheng Chen <pi-cheng.chen@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>

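/*
 * All voltages below are in microvolts. VOLT_TOL is the slack added to the
 * upper bound of each regulator_set_voltage() request.
 */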
#define MIN_VOLT_SHIFT		(100000)
#define MAX_VOLT_SHIFT		(200000)
#define MAX_VOLT_LIMIT		(1150000)
#define VOLT_TOL		(10000)

/*
 * The struct mtk_cpu_dvfs_info holds the necessary information for doing CPU
 * DVFS on each CPU power/clock domain of MediaTek SoCs. Each CPU cluster in
 * MediaTek SoCs has two voltage inputs, Vproc and Vsram. In some cases the two
 * voltage inputs need to be controlled under a hardware limitation:
 * 100mV < Vsram - Vproc < 200mV
 *
 * When scaling the clock frequency of a CPU clock domain, the clock source
 * needs to be switched to another stable PLL clock temporarily, until
 * the original PLL becomes stable at the target frequency.
 */
struct mtk_cpu_dvfs_info {
	struct cpumask cpus;
	struct device *cpu_dev;
	struct regulator *proc_reg;
	struct regulator *sram_reg;
	struct clk *cpu_clk;
	struct clk *inter_clk;
	struct thermal_cooling_device *cdev;
	struct list_head list_head;
	int intermediate_voltage;
	bool need_voltage_tracking;
};

static LIST_HEAD(dvfs_info_list);

static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
{
	struct mtk_cpu_dvfs_info *info;

	list_for_each_entry(info, &dvfs_info_list, list_head) {
		if (cpumask_test_cpu(cpu, &info->cpus))
			return info;
	}

	return NULL;
}

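/*
 * Walk Vproc and Vsram towards new_vproc in alternating steps so that the
 * Vsram - Vproc window required by the hardware (see the comment above
 * struct mtk_cpu_dvfs_info) is preserved while both rails are moving.
 */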
static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
					int new_vproc)
{
	struct regulator *proc_reg = info->proc_reg;
	struct regulator *sram_reg = info->sram_reg;
	int old_vproc, old_vsram, new_vsram, vsram, vproc, ret;

	old_vproc = regulator_get_voltage(proc_reg);
	if (old_vproc < 0) {
		pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc);
		return old_vproc;
	}
	/* Vsram should not exceed the maximum allowed voltage of the SoC. */
	new_vsram = min(new_vproc + MIN_VOLT_SHIFT, MAX_VOLT_LIMIT);

	if (old_vproc < new_vproc) {
		/*
		 * When scaling up voltages, Vsram and Vproc scale up step
		 * by step. At each step, set Vsram to (Vproc + 200mV) first,
		 * then set Vproc to (Vsram - 100mV).
		 * Keep doing it until Vsram and Vproc hit the target voltages.
		 */
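		/*
		 * Illustrative trace (assumed starting voltages, not taken
		 * from a real OPP table): raising Vproc 800000 -> 1000000 uV
		 * with Vsram starting at 900000 uV takes two iterations:
		 *   1) Vsram -> 1000000 uV, then Vproc -> 900000 uV
		 *   2) Vsram -> 1100000 uV, then Vproc -> 1000000 uV
		 */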
		do {
			old_vsram = regulator_get_voltage(sram_reg);
			if (old_vsram < 0) {
				pr_err("%s: invalid Vsram value: %d\n",
				       __func__, old_vsram);
				return old_vsram;
			}
			old_vproc = regulator_get_voltage(proc_reg);
			if (old_vproc < 0) {
				pr_err("%s: invalid Vproc value: %d\n",
				       __func__, old_vproc);
				return old_vproc;
			}

			vsram = min(new_vsram, old_vproc + MAX_VOLT_SHIFT);

			if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
				vsram = MAX_VOLT_LIMIT;

				/*
				 * If the target Vsram hits the maximum voltage,
				 * try to set the exact voltage value first.
				 */
				ret = regulator_set_voltage(sram_reg, vsram,
							    vsram);
				if (ret)
					ret = regulator_set_voltage(sram_reg,
							vsram - VOLT_TOL,
							vsram);

				vproc = new_vproc;
			} else {
				ret = regulator_set_voltage(sram_reg, vsram,
							    vsram + VOLT_TOL);

				vproc = vsram - MIN_VOLT_SHIFT;
			}
			if (ret)
				return ret;

			ret = regulator_set_voltage(proc_reg, vproc,
						    vproc + VOLT_TOL);
			if (ret) {
				regulator_set_voltage(sram_reg, old_vsram,
						      old_vsram);
				return ret;
			}
		} while (vproc < new_vproc || vsram < new_vsram);
	} else if (old_vproc > new_vproc) {
		/*
		 * When scaling down voltages, Vsram and Vproc scale down step
		 * by step. At each step, set Vproc to (Vsram - 200mV) first,
		 * then set Vsram to (Vproc + 100mV).
		 * Keep doing it until Vsram and Vproc hit the target voltages.
		 */
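		/*
		 * Illustrative trace (assumed starting voltages): lowering
		 * Vproc 1000000 -> 800000 uV with Vsram starting at
		 * 1100000 uV takes two iterations:
		 *   1) Vproc -> 900000 uV, then Vsram -> 1000000 uV
		 *   2) Vproc -> 800000 uV, then Vsram -> 900000 uV
		 */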
		do {
			old_vproc = regulator_get_voltage(proc_reg);
			if (old_vproc < 0) {
				pr_err("%s: invalid Vproc value: %d\n",
				       __func__, old_vproc);
				return old_vproc;
			}
			old_vsram = regulator_get_voltage(sram_reg);
			if (old_vsram < 0) {
				pr_err("%s: invalid Vsram value: %d\n",
				       __func__, old_vsram);
				return old_vsram;
			}

			vproc = max(new_vproc, old_vsram - MAX_VOLT_SHIFT);
			ret = regulator_set_voltage(proc_reg, vproc,
						    vproc + VOLT_TOL);
			if (ret)
				return ret;

			if (vproc == new_vproc)
				vsram = new_vsram;
			else
				vsram = max(new_vsram, vproc + MIN_VOLT_SHIFT);

			if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
				vsram = MAX_VOLT_LIMIT;

				/*
				 * If the target Vsram hits the maximum voltage,
				 * try to set the exact voltage value first.
				 */
				ret = regulator_set_voltage(sram_reg, vsram,
							    vsram);
				if (ret)
					ret = regulator_set_voltage(sram_reg,
							vsram - VOLT_TOL,
							vsram);
			} else {
				ret = regulator_set_voltage(sram_reg, vsram,
							    vsram + VOLT_TOL);
			}

			if (ret) {
				regulator_set_voltage(proc_reg, old_vproc,
						      old_vproc);
				return ret;
			}
		} while (vproc > new_vproc + VOLT_TOL ||
			 vsram > new_vsram + VOLT_TOL);
	}

	return 0;
}

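/*
 * Clusters with a separate Vsram rail need the tracking dance above; the
 * others can have Vproc set directly.
 */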
static int mtk_cpufreq_set_voltage(struct mtk_cpu_dvfs_info *info, int vproc)
{
	if (info->need_voltage_tracking)
		return mtk_cpufreq_voltage_tracking(info, vproc);
	else
		return regulator_set_voltage(info->proc_reg, vproc,
					     vproc + VOLT_TOL);
}

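/*
 * Frequency transitions run in five steps: raise Vproc (and Vsram) if the
 * target or intermediate OPP needs more voltage, reparent the CPU clock to
 * the stable intermediate clock, re-program the original PLL, switch the CPU
 * clock back to the PLL, and finally lower the voltage if the target OPP
 * allows it.
 */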
static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
				  unsigned int index)
{
	struct cpufreq_frequency_table *freq_table = policy->freq_table;
	struct clk *cpu_clk = policy->clk;
	struct clk *armpll = clk_get_parent(cpu_clk);
	struct mtk_cpu_dvfs_info *info = policy->driver_data;
	struct device *cpu_dev = info->cpu_dev;
	struct dev_pm_opp *opp;
	long freq_hz, old_freq_hz;
	int vproc, old_vproc, inter_vproc, target_vproc, ret;

	inter_vproc = info->intermediate_voltage;

	old_freq_hz = clk_get_rate(cpu_clk);
	old_vproc = regulator_get_voltage(info->proc_reg);
	if (old_vproc < 0) {
		pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc);
		return old_vproc;
	}

	freq_hz = freq_table[index].frequency * 1000;

	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
	if (IS_ERR(opp)) {
		pr_err("cpu%d: failed to find OPP for %ld\n",
		       policy->cpu, freq_hz);
		return PTR_ERR(opp);
	}
	vproc = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);

	/*
	 * If the new voltage or the intermediate voltage is higher than the
	 * current voltage, scale up the voltage first.
	 */
	target_vproc = (inter_vproc > vproc) ? inter_vproc : vproc;
	if (old_vproc < target_vproc) {
		ret = mtk_cpufreq_set_voltage(info, target_vproc);
		if (ret) {
			pr_err("cpu%d: failed to scale up voltage!\n",
			       policy->cpu);
			mtk_cpufreq_set_voltage(info, old_vproc);
			return ret;
		}
	}

	/* Reparent the CPU clock to the intermediate clock. */
	ret = clk_set_parent(cpu_clk, info->inter_clk);
	if (ret) {
		pr_err("cpu%d: failed to re-parent cpu clock!\n",
		       policy->cpu);
		mtk_cpufreq_set_voltage(info, old_vproc);
		WARN_ON(1);
		return ret;
	}

	/* Set the original PLL to the target rate. */
	ret = clk_set_rate(armpll, freq_hz);
	if (ret) {
		pr_err("cpu%d: failed to scale cpu clock rate!\n",
		       policy->cpu);
		clk_set_parent(cpu_clk, armpll);
		mtk_cpufreq_set_voltage(info, old_vproc);
		return ret;
	}

	/* Set the parent of the CPU clock back to the original PLL. */
	ret = clk_set_parent(cpu_clk, armpll);
	if (ret) {
		pr_err("cpu%d: failed to re-parent cpu clock!\n",
		       policy->cpu);
		mtk_cpufreq_set_voltage(info, inter_vproc);
		WARN_ON(1);
		return ret;
	}

	/*
	 * If the new voltage is lower than the intermediate voltage or the
	 * original voltage, scale down to the new voltage.
	 */
	if (vproc < inter_vproc || vproc < old_vproc) {
		ret = mtk_cpufreq_set_voltage(info, vproc);
		if (ret) {
			pr_err("cpu%d: failed to scale down voltage!\n",
			       policy->cpu);
			clk_set_parent(cpu_clk, info->inter_clk);
			clk_set_rate(armpll, old_freq_hz);
			clk_set_parent(cpu_clk, armpll);
			return ret;
		}
	}

	return 0;
}

#define DYNAMIC_POWER "dynamic-power-coefficient"

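/* Register the policy's CPUs as a cooling device once cpufreq is ready. */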
static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct mtk_cpu_dvfs_info *info = policy->driver_data;

	info->cdev = of_cpufreq_cooling_register(policy);
}

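/*
 * Gather everything one cluster needs for DVFS: the "cpu" and "intermediate"
 * clocks, the "proc" (and optional "sram") regulators, the shared OPP table,
 * and a voltage that is safe for the intermediate clock rate.
 */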
static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
{
	struct device *cpu_dev;
	struct regulator *proc_reg = ERR_PTR(-ENODEV);
	struct regulator *sram_reg = ERR_PTR(-ENODEV);
	struct clk *cpu_clk = ERR_PTR(-ENODEV);
	struct clk *inter_clk = ERR_PTR(-ENODEV);
	struct dev_pm_opp *opp;
	unsigned long rate;
	int ret;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", cpu);
		return -ENODEV;
	}

	cpu_clk = clk_get(cpu_dev, "cpu");
	if (IS_ERR(cpu_clk)) {
		if (PTR_ERR(cpu_clk) == -EPROBE_DEFER)
			pr_warn("cpu clk for cpu%d not ready, retry.\n", cpu);
		else
			pr_err("failed to get cpu clk for cpu%d\n", cpu);

		ret = PTR_ERR(cpu_clk);
		return ret;
	}

	inter_clk = clk_get(cpu_dev, "intermediate");
	if (IS_ERR(inter_clk)) {
		if (PTR_ERR(inter_clk) == -EPROBE_DEFER)
			pr_warn("intermediate clk for cpu%d not ready, retry.\n",
				cpu);
		else
			pr_err("failed to get intermediate clk for cpu%d\n",
			       cpu);

		ret = PTR_ERR(inter_clk);
		goto out_free_resources;
	}

	proc_reg = regulator_get_exclusive(cpu_dev, "proc");
	if (IS_ERR(proc_reg)) {
		if (PTR_ERR(proc_reg) == -EPROBE_DEFER)
			pr_warn("proc regulator for cpu%d not ready, retry.\n",
				cpu);
		else
			pr_err("failed to get proc regulator for cpu%d\n",
			       cpu);

		ret = PTR_ERR(proc_reg);
		goto out_free_resources;
	}

	/* Both presence and absence of the sram regulator are valid cases. */
	sram_reg = regulator_get_exclusive(cpu_dev, "sram");

	/* Get OPP-sharing information from the "operating-points-v2" bindings. */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus);
	if (ret) {
		pr_err("failed to get OPP-sharing information for cpu%d\n",
		       cpu);
		goto out_free_resources;
	}

	ret = dev_pm_opp_of_cpumask_add_table(&info->cpus);
	if (ret) {
		pr_warn("no OPP table for cpu%d\n", cpu);
		goto out_free_resources;
	}

	/* Search for a safe voltage for the intermediate frequency. */
	rate = clk_get_rate(inter_clk);
	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
	if (IS_ERR(opp)) {
		pr_err("failed to get intermediate opp for cpu%d\n", cpu);
		ret = PTR_ERR(opp);
		goto out_free_opp_table;
	}
	info->intermediate_voltage = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);

	info->cpu_dev = cpu_dev;
	info->proc_reg = proc_reg;
	info->sram_reg = IS_ERR(sram_reg) ? NULL : sram_reg;
	info->cpu_clk = cpu_clk;
	info->inter_clk = inter_clk;

	/*
	 * If the SRAM regulator is present, software "voltage tracking" is
	 * needed for this CPU power domain.
	 */
	info->need_voltage_tracking = !IS_ERR(sram_reg);

	return 0;

out_free_opp_table:
	dev_pm_opp_of_cpumask_remove_table(&info->cpus);

out_free_resources:
	if (!IS_ERR(proc_reg))
		regulator_put(proc_reg);
	if (!IS_ERR(sram_reg))
		regulator_put(sram_reg);
	if (!IS_ERR(cpu_clk))
		clk_put(cpu_clk);
	if (!IS_ERR(inter_clk))
		clk_put(inter_clk);

	return ret;
}

static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
{
	if (!IS_ERR(info->proc_reg))
		regulator_put(info->proc_reg);
	if (!IS_ERR(info->sram_reg))
		regulator_put(info->sram_reg);
	if (!IS_ERR(info->cpu_clk))
		clk_put(info->cpu_clk);
	if (!IS_ERR(info->inter_clk))
		clk_put(info->inter_clk);

	dev_pm_opp_of_cpumask_remove_table(&info->cpus);
}

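/*
 * One cpufreq policy covers every CPU that shares the cluster's clock and
 * voltage domain, as recorded in info->cpus.
 */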
static int mtk_cpufreq_init(struct cpufreq_policy *policy)
{
	struct mtk_cpu_dvfs_info *info;
	struct cpufreq_frequency_table *freq_table;
	int ret;

	info = mtk_cpu_dvfs_info_lookup(policy->cpu);
	if (!info) {
		pr_err("dvfs info for cpu%d is not initialized.\n",
		       policy->cpu);
		return -EINVAL;
	}

	ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table);
	if (ret) {
		pr_err("failed to init cpufreq table for cpu%d: %d\n",
		       policy->cpu, ret);
		return ret;
	}

	cpumask_copy(policy->cpus, &info->cpus);
	policy->freq_table = freq_table;
	policy->driver_data = info;
	policy->clk = info->cpu_clk;

	return 0;
}

static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct mtk_cpu_dvfs_info *info = policy->driver_data;

	cpufreq_cooling_unregister(info->cdev);
	dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);

	return 0;
}

static struct cpufreq_driver mtk_cpufreq_driver = {
	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		 CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = mtk_cpufreq_set_target,
	.get = cpufreq_generic_get,
	.init = mtk_cpufreq_init,
	.exit = mtk_cpufreq_exit,
	.ready = mtk_cpufreq_ready,
	.name = "mtk-cpufreq",
	.attr = cpufreq_generic_attr,
};

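/*
 * Build one mtk_cpu_dvfs_info per CPU cluster before registering the cpufreq
 * driver; a failure for any cluster unwinds the whole list.
 */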
static int mtk_cpufreq_probe(struct platform_device *pdev)
{
	struct mtk_cpu_dvfs_info *info, *tmp;
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		info = mtk_cpu_dvfs_info_lookup(cpu);
		if (info)
			continue;

		info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
		if (!info) {
			ret = -ENOMEM;
			goto release_dvfs_info_list;
		}

		ret = mtk_cpu_dvfs_info_init(info, cpu);
		if (ret) {
			dev_err(&pdev->dev,
				"failed to initialize dvfs info for cpu%d\n",
				cpu);
			goto release_dvfs_info_list;
		}

		list_add(&info->list_head, &dvfs_info_list);
	}

	ret = cpufreq_register_driver(&mtk_cpufreq_driver);
	if (ret) {
		dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n");
		goto release_dvfs_info_list;
	}

	return 0;

release_dvfs_info_list:
	list_for_each_entry_safe(info, tmp, &dvfs_info_list, list_head) {
		mtk_cpu_dvfs_info_release(info);
		list_del(&info->list_head);
	}

	return ret;
}

static struct platform_driver mtk_cpufreq_platdrv = {
	.driver = {
		.name	= "mtk-cpufreq",
	},
	.probe		= mtk_cpufreq_probe,
};

/* List of machines supported by this driver */
static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
	{ .compatible = "mediatek,mt2701", },
	{ .compatible = "mediatek,mt2712", },
	{ .compatible = "mediatek,mt7622", },
	{ .compatible = "mediatek,mt7623", },
	{ .compatible = "mediatek,mt817x", },
	{ .compatible = "mediatek,mt8173", },
	{ .compatible = "mediatek,mt8176", },

	{ }
};

static int __init mtk_cpufreq_driver_init(void)
{
	struct device_node *np;
	const struct of_device_id *match;
	struct platform_device *pdev;
	int err;

	np = of_find_node_by_path("/");
	if (!np)
		return -ENODEV;

	match = of_match_node(mtk_cpufreq_machines, np);
	of_node_put(np);
	if (!match) {
		pr_debug("Machine is not compatible with mtk-cpufreq\n");
		return -ENODEV;
	}

	err = platform_driver_register(&mtk_cpufreq_platdrv);
	if (err)
		return err;

	/*
	 * Since there is no place to hold the device registration code and no
	 * device-tree-based way to match the cpufreq driver yet, both the
	 * driver registration and the device registration are done here to
	 * handle deferred probing.
	 */
	pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0);
	if (IS_ERR(pdev)) {
		pr_err("failed to register mtk-cpufreq platform device\n");
		return PTR_ERR(pdev);
	}

	return 0;
}
device_initcall(mtk_cpufreq_driver_init);

MODULE_DESCRIPTION("MediaTek CPUFreq driver");
MODULE_AUTHOR("Pi-Cheng Chen <pi-cheng.chen@linaro.org>");
MODULE_LICENSE("GPL v2");