/*
 * Copyright (c) 2015 Linaro Ltd.
 * Author: Pi-Cheng Chen <pi-cheng.chen@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>

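/*
 * Voltage values below are in microvolts: MIN_VOLT_SHIFT and MAX_VOLT_SHIFT
 * bound the Vsram - Vproc gap (100mV and 200mV), MAX_VOLT_LIMIT caps Vsram,
 * and VOLT_TOL is the tolerance window passed to the regulator framework.
 */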
#define MIN_VOLT_SHIFT		(100000)
#define MAX_VOLT_SHIFT		(200000)
#define MAX_VOLT_LIMIT		(1150000)
#define VOLT_TOL		(10000)

/*
 * The struct mtk_cpu_dvfs_info holds the necessary information for doing CPU
 * DVFS on each CPU power/clock domain of Mediatek SoCs. Each CPU cluster in
 * Mediatek SoCs has two voltage inputs, Vproc and Vsram. In some cases the two
 * voltage inputs need to be controlled under a hardware limitation:
 * 100mV < Vsram - Vproc < 200mV
 *
 * When scaling the clock frequency of a CPU clock domain, the clock source
 * needs to be switched to another stable PLL clock temporarily until
 * the original PLL becomes stable at the target frequency.
 */
struct mtk_cpu_dvfs_info {
	struct cpumask cpus;
	struct device *cpu_dev;
	struct regulator *proc_reg;
	struct regulator *sram_reg;
	struct clk *cpu_clk;
	struct clk *inter_clk;
	struct thermal_cooling_device *cdev;
	struct list_head list_head;
	int intermediate_voltage;
	bool need_voltage_tracking;
};

static LIST_HEAD(dvfs_info_list);

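/* Find the DVFS info whose CPU mask covers the given CPU, if any. */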
static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
{
	struct mtk_cpu_dvfs_info *info;

	list_for_each_entry(info, &dvfs_info_list, list_head) {
		if (cpumask_test_cpu(cpu, &info->cpus))
			return info;
	}

	return NULL;
}

static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
					int new_vproc)
{
	struct regulator *proc_reg = info->proc_reg;
	struct regulator *sram_reg = info->sram_reg;
	int old_vproc, old_vsram, new_vsram, vsram, vproc, ret;

	old_vproc = regulator_get_voltage(proc_reg);
	if (old_vproc < 0) {
		pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc);
		return old_vproc;
	}
	/* Vsram should not exceed the maximum allowed voltage of SoC. */
	new_vsram = min(new_vproc + MIN_VOLT_SHIFT, MAX_VOLT_LIMIT);

	if (old_vproc < new_vproc) {
		/*
		 * When scaling up voltages, Vsram and Vproc scale up step
		 * by step. At each step, set Vsram to (Vproc + 200mV) first,
		 * then set Vproc to (Vsram - 100mV).
		 * Keep doing it until Vsram and Vproc hit target voltages.
		 */
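		/*
		 * For example (values in uV, purely illustrative): raising
		 * Vproc from 850000 to 1000000 with Vsram at 950000 takes
		 * two iterations: Vsram -> 1050000, Vproc -> 950000, then
		 * Vsram -> 1100000, Vproc -> 1000000.
		 */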
		do {
			old_vsram = regulator_get_voltage(sram_reg);
			if (old_vsram < 0) {
				pr_err("%s: invalid Vsram value: %d\n",
				       __func__, old_vsram);
				return old_vsram;
			}
			old_vproc = regulator_get_voltage(proc_reg);
			if (old_vproc < 0) {
				pr_err("%s: invalid Vproc value: %d\n",
				       __func__, old_vproc);
				return old_vproc;
			}

			vsram = min(new_vsram, old_vproc + MAX_VOLT_SHIFT);

			if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
				vsram = MAX_VOLT_LIMIT;

				/*
				 * If the target Vsram hits the maximum voltage,
				 * try to set the exact voltage value first.
				 */
				ret = regulator_set_voltage(sram_reg, vsram,
							    vsram);
				if (ret)
					ret = regulator_set_voltage(sram_reg,
							vsram - VOLT_TOL,
							vsram);

				vproc = new_vproc;
			} else {
				ret = regulator_set_voltage(sram_reg, vsram,
							    vsram + VOLT_TOL);

				vproc = vsram - MIN_VOLT_SHIFT;
			}
			if (ret)
				return ret;

			ret = regulator_set_voltage(proc_reg, vproc,
						    vproc + VOLT_TOL);
			if (ret) {
				regulator_set_voltage(sram_reg, old_vsram,
						      old_vsram);
				return ret;
			}
		} while (vproc < new_vproc || vsram < new_vsram);
	} else if (old_vproc > new_vproc) {
		/*
		 * When scaling down voltages, Vsram and Vproc scale down step
		 * by step. At each step, set Vproc to (Vsram - 200mV) first,
		 * then set Vsram to (Vproc + 100mV).
		 * Keep doing it until Vsram and Vproc hit target voltages.
		 */
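		/*
		 * For example (values in uV, purely illustrative): lowering
		 * Vproc from 1000000 to 850000 with Vsram at 1100000 takes
		 * two iterations: Vproc -> 900000, Vsram -> 1000000, then
		 * Vproc -> 850000, Vsram -> 950000.
		 */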
		do {
			old_vproc = regulator_get_voltage(proc_reg);
			if (old_vproc < 0) {
				pr_err("%s: invalid Vproc value: %d\n",
				       __func__, old_vproc);
				return old_vproc;
			}
			old_vsram = regulator_get_voltage(sram_reg);
			if (old_vsram < 0) {
				pr_err("%s: invalid Vsram value: %d\n",
				       __func__, old_vsram);
				return old_vsram;
			}

			vproc = max(new_vproc, old_vsram - MAX_VOLT_SHIFT);
			ret = regulator_set_voltage(proc_reg, vproc,
						    vproc + VOLT_TOL);
			if (ret)
				return ret;

			if (vproc == new_vproc)
				vsram = new_vsram;
			else
				vsram = max(new_vsram, vproc + MIN_VOLT_SHIFT);

			if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
				vsram = MAX_VOLT_LIMIT;

				/*
				 * If the target Vsram hits the maximum voltage,
				 * try to set the exact voltage value first.
				 */
				ret = regulator_set_voltage(sram_reg, vsram,
							    vsram);
				if (ret)
					ret = regulator_set_voltage(sram_reg,
							vsram - VOLT_TOL,
							vsram);
			} else {
				ret = regulator_set_voltage(sram_reg, vsram,
							    vsram + VOLT_TOL);
			}

			if (ret) {
				regulator_set_voltage(proc_reg, old_vproc,
						      old_vproc);
				return ret;
			}
		} while (vproc > new_vproc + VOLT_TOL ||
			 vsram > new_vsram + VOLT_TOL);
	}

	return 0;
}

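/*
 * Program a new Vproc, using software voltage tracking when a separate
 * Vsram supply is present, or a plain regulator_set_voltage() otherwise.
 */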
static int mtk_cpufreq_set_voltage(struct mtk_cpu_dvfs_info *info, int vproc)
{
	if (info->need_voltage_tracking)
		return mtk_cpufreq_voltage_tracking(info, vproc);
	else
		return regulator_set_voltage(info->proc_reg, vproc,
					     vproc + VOLT_TOL);
}

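/*
 * DVFS sequence: raise the voltage first if the target (or intermediate)
 * voltage is higher than the current one, reparent the CPU clock to the
 * intermediate clock, set the original PLL to the target rate, switch the
 * CPU clock back to the PLL, and finally lower the voltage if the target
 * voltage is lower than the one currently set.
 */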
static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
				  unsigned int index)
{
	struct cpufreq_frequency_table *freq_table = policy->freq_table;
	struct clk *cpu_clk = policy->clk;
	struct clk *armpll = clk_get_parent(cpu_clk);
	struct mtk_cpu_dvfs_info *info = policy->driver_data;
	struct device *cpu_dev = info->cpu_dev;
	struct dev_pm_opp *opp;
	long freq_hz, old_freq_hz;
	int vproc, old_vproc, inter_vproc, target_vproc, ret;

	inter_vproc = info->intermediate_voltage;

	old_freq_hz = clk_get_rate(cpu_clk);
	old_vproc = regulator_get_voltage(info->proc_reg);
	if (old_vproc < 0) {
		pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc);
		return old_vproc;
	}

	freq_hz = freq_table[index].frequency * 1000;

	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
	if (IS_ERR(opp)) {
		pr_err("cpu%d: failed to find OPP for %ld\n",
		       policy->cpu, freq_hz);
		return PTR_ERR(opp);
	}
	vproc = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);

	/*
	 * If the new voltage or the intermediate voltage is higher than the
	 * current voltage, scale up voltage first.
	 */
	target_vproc = (inter_vproc > vproc) ? inter_vproc : vproc;
	if (old_vproc < target_vproc) {
		ret = mtk_cpufreq_set_voltage(info, target_vproc);
		if (ret) {
			pr_err("cpu%d: failed to scale up voltage!\n",
			       policy->cpu);
			mtk_cpufreq_set_voltage(info, old_vproc);
			return ret;
		}
	}

	/* Reparent the CPU clock to intermediate clock. */
	ret = clk_set_parent(cpu_clk, info->inter_clk);
	if (ret) {
		pr_err("cpu%d: failed to re-parent cpu clock!\n",
		       policy->cpu);
		mtk_cpufreq_set_voltage(info, old_vproc);
		WARN_ON(1);
		return ret;
	}

	/* Set the original PLL to target rate. */
	ret = clk_set_rate(armpll, freq_hz);
	if (ret) {
		pr_err("cpu%d: failed to scale cpu clock rate!\n",
		       policy->cpu);
		clk_set_parent(cpu_clk, armpll);
		mtk_cpufreq_set_voltage(info, old_vproc);
		return ret;
	}

	/* Set parent of CPU clock back to the original PLL. */
	ret = clk_set_parent(cpu_clk, armpll);
	if (ret) {
		pr_err("cpu%d: failed to re-parent cpu clock!\n",
		       policy->cpu);
		mtk_cpufreq_set_voltage(info, inter_vproc);
		WARN_ON(1);
		return ret;
	}

	/*
	 * If the new voltage is lower than the intermediate voltage or the
	 * original voltage, scale down to the new voltage.
	 */
	if (vproc < inter_vproc || vproc < old_vproc) {
		ret = mtk_cpufreq_set_voltage(info, vproc);
		if (ret) {
			pr_err("cpu%d: failed to scale down voltage!\n",
			       policy->cpu);
			clk_set_parent(cpu_clk, info->inter_clk);
			clk_set_rate(armpll, old_freq_hz);
			clk_set_parent(cpu_clk, armpll);
			return ret;
		}
	}

	return 0;
}

#define DYNAMIC_POWER "dynamic-power-coefficient"

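/*
 * Once the policy is fully initialized, register the CPUs as a cooling
 * device, provided the CPU node carries "#cooling-cells". The
 * dynamic-power-coefficient property, if present, feeds the power model.
 */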
static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct mtk_cpu_dvfs_info *info = policy->driver_data;
	struct device_node *np = of_node_get(info->cpu_dev->of_node);
	u32 capacitance = 0;

	if (WARN_ON(!np))
		return;

	if (of_find_property(np, "#cooling-cells", NULL)) {
		of_property_read_u32(np, DYNAMIC_POWER, &capacitance);

		info->cdev = of_cpufreq_power_cooling_register(np,
						policy, capacitance, NULL);

		if (IS_ERR(info->cdev)) {
			dev_err(info->cpu_dev,
				"running cpufreq without cooling device: %ld\n",
				PTR_ERR(info->cdev));

			info->cdev = NULL;
		}
	}

	of_node_put(np);
}

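/*
 * Gather the clocks, regulators and OPP table for the cluster containing
 * @cpu, and record the voltage needed while running from the intermediate
 * clock. -EPROBE_DEFER from any provider is propagated to the caller.
 */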
static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
{
	struct device *cpu_dev;
	struct regulator *proc_reg = ERR_PTR(-ENODEV);
	struct regulator *sram_reg = ERR_PTR(-ENODEV);
	struct clk *cpu_clk = ERR_PTR(-ENODEV);
	struct clk *inter_clk = ERR_PTR(-ENODEV);
	struct dev_pm_opp *opp;
	unsigned long rate;
	int ret;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", cpu);
		return -ENODEV;
	}

	cpu_clk = clk_get(cpu_dev, "cpu");
	if (IS_ERR(cpu_clk)) {
		if (PTR_ERR(cpu_clk) == -EPROBE_DEFER)
			pr_warn("cpu clk for cpu%d not ready, retry.\n", cpu);
		else
			pr_err("failed to get cpu clk for cpu%d\n", cpu);

		ret = PTR_ERR(cpu_clk);
		return ret;
	}

	inter_clk = clk_get(cpu_dev, "intermediate");
	if (IS_ERR(inter_clk)) {
		if (PTR_ERR(inter_clk) == -EPROBE_DEFER)
			pr_warn("intermediate clk for cpu%d not ready, retry.\n",
				cpu);
		else
			pr_err("failed to get intermediate clk for cpu%d\n",
			       cpu);

		ret = PTR_ERR(inter_clk);
		goto out_free_resources;
	}

	proc_reg = regulator_get_exclusive(cpu_dev, "proc");
	if (IS_ERR(proc_reg)) {
		if (PTR_ERR(proc_reg) == -EPROBE_DEFER)
			pr_warn("proc regulator for cpu%d not ready, retry.\n",
				cpu);
		else
			pr_err("failed to get proc regulator for cpu%d\n",
			       cpu);

		ret = PTR_ERR(proc_reg);
		goto out_free_resources;
	}

	/* Both presence and absence of sram regulator are valid cases. */
	sram_reg = regulator_get_exclusive(cpu_dev, "sram");

	/* Get OPP-sharing information from "operating-points-v2" bindings */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus);
	if (ret) {
		pr_err("failed to get OPP-sharing information for cpu%d\n",
		       cpu);
		goto out_free_resources;
	}

	ret = dev_pm_opp_of_cpumask_add_table(&info->cpus);
	if (ret) {
		pr_warn("no OPP table for cpu%d\n", cpu);
		goto out_free_resources;
	}

	/* Find a safe voltage for the intermediate frequency. */
	rate = clk_get_rate(inter_clk);
	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
	if (IS_ERR(opp)) {
		pr_err("failed to get intermediate opp for cpu%d\n", cpu);
		ret = PTR_ERR(opp);
		goto out_free_opp_table;
	}
	info->intermediate_voltage = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);

	info->cpu_dev = cpu_dev;
	info->proc_reg = proc_reg;
	info->sram_reg = IS_ERR(sram_reg) ? NULL : sram_reg;
	info->cpu_clk = cpu_clk;
	info->inter_clk = inter_clk;

	/*
	 * If the SRAM regulator is present, software "voltage tracking" is
	 * needed for this CPU power domain.
	 */
	info->need_voltage_tracking = !IS_ERR(sram_reg);

	return 0;

out_free_opp_table:
	dev_pm_opp_of_cpumask_remove_table(&info->cpus);

out_free_resources:
	if (!IS_ERR(proc_reg))
		regulator_put(proc_reg);
	if (!IS_ERR(sram_reg))
		regulator_put(sram_reg);
	if (!IS_ERR(cpu_clk))
		clk_put(cpu_clk);
	if (!IS_ERR(inter_clk))
		clk_put(inter_clk);

	return ret;
}

static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
{
	if (!IS_ERR(info->proc_reg))
		regulator_put(info->proc_reg);
	if (!IS_ERR(info->sram_reg))
		regulator_put(info->sram_reg);
	if (!IS_ERR(info->cpu_clk))
		clk_put(info->cpu_clk);
	if (!IS_ERR(info->inter_clk))
		clk_put(info->inter_clk);

	dev_pm_opp_of_cpumask_remove_table(&info->cpus);
}

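/* Per-policy init: build the cpufreq table from the OPPs of this cluster. */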
static int mtk_cpufreq_init(struct cpufreq_policy *policy)
{
	struct mtk_cpu_dvfs_info *info;
	struct cpufreq_frequency_table *freq_table;
	int ret;

	info = mtk_cpu_dvfs_info_lookup(policy->cpu);
	if (!info) {
		pr_err("dvfs info for cpu%d is not initialized.\n",
		       policy->cpu);
		return -EINVAL;
	}

	ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table);
	if (ret) {
		pr_err("failed to init cpufreq table for cpu%d: %d\n",
		       policy->cpu, ret);
		return ret;
	}

	ret = cpufreq_table_validate_and_show(policy, freq_table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		goto out_free_cpufreq_table;
	}

	cpumask_copy(policy->cpus, &info->cpus);
	policy->driver_data = info;
	policy->clk = info->cpu_clk;

	return 0;

out_free_cpufreq_table:
	dev_pm_opp_free_cpufreq_table(info->cpu_dev, &freq_table);
	return ret;
}

static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct mtk_cpu_dvfs_info *info = policy->driver_data;

	cpufreq_cooling_unregister(info->cdev);
	dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);

	return 0;
}

static struct cpufreq_driver mtk_cpufreq_driver = {
	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		 CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = mtk_cpufreq_set_target,
	.get = cpufreq_generic_get,
	.init = mtk_cpufreq_init,
	.exit = mtk_cpufreq_exit,
	.ready = mtk_cpufreq_ready,
	.name = "mtk-cpufreq",
	.attr = cpufreq_generic_attr,
};

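/*
 * Set up DVFS info for every CPU cluster before registering the cpufreq
 * driver, so that a deferred probe (e.g. a regulator that is not ready yet)
 * is retried by the driver core instead of failing policy initialization.
 */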
static int mtk_cpufreq_probe(struct platform_device *pdev)
{
	struct mtk_cpu_dvfs_info *info, *tmp;
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		info = mtk_cpu_dvfs_info_lookup(cpu);
		if (info)
			continue;

		info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
		if (!info) {
			ret = -ENOMEM;
			goto release_dvfs_info_list;
		}

		ret = mtk_cpu_dvfs_info_init(info, cpu);
		if (ret) {
			dev_err(&pdev->dev,
				"failed to initialize dvfs info for cpu%d\n",
				cpu);
			goto release_dvfs_info_list;
		}

		list_add(&info->list_head, &dvfs_info_list);
	}

	ret = cpufreq_register_driver(&mtk_cpufreq_driver);
	if (ret) {
		dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n");
		goto release_dvfs_info_list;
	}

	return 0;

release_dvfs_info_list:
	list_for_each_entry_safe(info, tmp, &dvfs_info_list, list_head) {
		mtk_cpu_dvfs_info_release(info);
		list_del(&info->list_head);
	}

	return ret;
}

static struct platform_driver mtk_cpufreq_platdrv = {
	.driver = {
		.name	= "mtk-cpufreq",
	},
	.probe		= mtk_cpufreq_probe,
};

/* List of machines supported by this driver */
static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
	{ .compatible = "mediatek,mt2701", },
	{ .compatible = "mediatek,mt7622", },
	{ .compatible = "mediatek,mt7623", },
	{ .compatible = "mediatek,mt817x", },
	{ .compatible = "mediatek,mt8173", },
	{ .compatible = "mediatek,mt8176", },

	{ }
};

static int __init mtk_cpufreq_driver_init(void)
{
	struct device_node *np;
	const struct of_device_id *match;
	struct platform_device *pdev;
	int err;

	np = of_find_node_by_path("/");
	if (!np)
		return -ENODEV;

	match = of_match_node(mtk_cpufreq_machines, np);
	of_node_put(np);
	if (!match) {
		pr_warn("Machine is not compatible with mtk-cpufreq\n");
		return -ENODEV;
	}

	err = platform_driver_register(&mtk_cpufreq_platdrv);
	if (err)
		return err;

	/*
	 * Since there is no place to hold the device registration code and no
	 * device-tree-based way to match the cpufreq driver yet, both the
	 * driver and the device registration are done here to handle deferred
	 * probing.
	 */
	pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0);
	if (IS_ERR(pdev)) {
		pr_err("failed to register mtk-cpufreq platform device\n");
		return PTR_ERR(pdev);
	}

	return 0;
}
device_initcall(mtk_cpufreq_driver_init);