// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Collabora ltd. */

#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/nvmem-consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"

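/*
 * Accumulate the time elapsed since the last update into either the busy
 * or the idle counter, depending on whether any jobs are currently active.
 * Must be called with pfdevfreq->lock held.
 */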
static void panfrost_devfreq_update_utilization(struct panfrost_devfreq *pfdevfreq)
{
	ktime_t now, last;

	now = ktime_get();
	last = pfdevfreq->time_last_update;

	if (pfdevfreq->busy_count > 0)
		pfdevfreq->busy_time += ktime_sub(now, last);
	else
		pfdevfreq->idle_time += ktime_sub(now, last);

	pfdevfreq->time_last_update = now;
}

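/*
 * devfreq ->target() callback: look up a supported OPP for the requested
 * frequency and program the device clock (and regulator, if any) through
 * the OPP core.
 */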
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
				   u32 flags)
{
	struct dev_pm_opp *opp;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);
	dev_pm_opp_put(opp);

	return dev_pm_opp_set_rate(dev, *freq);
}

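/* Clear the busy/idle counters and restart the measurement window. */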
static void panfrost_devfreq_reset(struct panfrost_devfreq *pfdevfreq)
{
	pfdevfreq->busy_time = 0;
	pfdevfreq->idle_time = 0;
	pfdevfreq->time_last_update = ktime_get();
}

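/*
 * devfreq ->get_dev_status() callback: report the busy and total time
 * accumulated since the previous poll, then reset the counters so the
 * next interval starts from zero.
 */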
static int panfrost_devfreq_get_dev_status(struct device *dev,
					   struct devfreq_dev_status *status)
{
	struct panfrost_device *pfdev = dev_get_drvdata(dev);
	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
	unsigned long irqflags;

	status->current_frequency = clk_get_rate(pfdev->clock);

	spin_lock_irqsave(&pfdevfreq->lock, irqflags);

	panfrost_devfreq_update_utilization(pfdevfreq);

	status->total_time = ktime_to_ns(ktime_add(pfdevfreq->busy_time,
						   pfdevfreq->idle_time));

	status->busy_time = ktime_to_ns(pfdevfreq->busy_time);

	panfrost_devfreq_reset(pfdevfreq);

	spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);

	dev_dbg(pfdev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n",
		status->busy_time, status->total_time,
		status->busy_time / (status->total_time / 100),
		status->current_frequency / 1000 / 1000);

	return 0;
}

static struct devfreq_dev_profile panfrost_devfreq_profile = {
	.timer = DEVFREQ_TIMER_DELAYED,
	.polling_ms = 50, /* ~3 frames */
	.target = panfrost_devfreq_target,
	.get_dev_status = panfrost_devfreq_get_dev_status,
};

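/*
 * Read the optional "speed-bin" nvmem cell and, if present, register its
 * value as the supported-hardware mask used to filter the OPP table.
 */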
static int panfrost_read_speedbin(struct device *dev)
{
	u32 val;
	int ret;

	ret = nvmem_cell_read_variable_le_u32(dev, "speed-bin", &val);
	if (ret) {
		/*
		 * -ENOENT means that this platform doesn't support speedbins
		 * as it didn't declare any speed-bin nvmem; -EOPNOTSUPP means
		 * that nvmem support is not available. In both cases we keep
		 * going without a speed bin; any other error means that we
		 * were supposed to read the bin value but failed to do so.
		 */
		if (ret != -ENOENT && ret != -EOPNOTSUPP) {
			DRM_DEV_ERROR(dev, "Cannot read speed-bin (%d).", ret);
			return ret;
		}

		return 0;
	}
	DRM_DEV_DEBUG(dev, "Using speed-bin = 0x%x\n", val);

	return devm_pm_opp_set_supported_hw(dev, &val, 1);
}

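/*
 * Set up devfreq for the GPU: register the optional regulator and the OPP
 * table with the OPP core, select the OPP matching the current clock rate,
 * create the devfreq device using the simple_ondemand governor and, if
 * possible, register it as a devfreq cooling device.
 */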
int panfrost_devfreq_init(struct panfrost_device *pfdev)
{
	int ret;
	struct dev_pm_opp *opp;
	unsigned long cur_freq;
	struct device *dev = &pfdev->pdev->dev;
	struct devfreq *devfreq;
	struct thermal_cooling_device *cooling;
	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;

	if (pfdev->comp->num_supplies > 1) {
		/*
		 * GPUs with more than 1 supply require platform-specific handling:
		 * continue without devfreq
		 */
		DRM_DEV_INFO(dev, "More than 1 supply is not supported yet\n");
		return 0;
	}

	ret = panfrost_read_speedbin(dev);
	if (ret)
		return ret;

	ret = devm_pm_opp_set_regulators(dev, pfdev->comp->supply_names);
	if (ret) {
		/* Continue if the optional regulator is missing */
		if (ret != -ENODEV) {
			if (ret != -EPROBE_DEFER)
				DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n");
			return ret;
		}
	}

	ret = devm_pm_opp_of_add_table(dev);
	if (ret) {
		/* Optional, continue without devfreq */
		if (ret == -ENODEV)
			ret = 0;
		return ret;
	}
	pfdevfreq->opp_of_table_added = true;

	spin_lock_init(&pfdevfreq->lock);

	panfrost_devfreq_reset(pfdevfreq);

	cur_freq = clk_get_rate(pfdev->clock);

	opp = devfreq_recommended_opp(dev, &cur_freq, 0);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	panfrost_devfreq_profile.initial_freq = cur_freq;

	/*
	 * Set the recommended OPP: this will enable and configure the
	 * regulator, if any, and will avoid a switch off by
	 * regulator_late_cleanup().
	 */
	ret = dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);
	if (ret) {
		DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
		return ret;
	}

	/*
	 * Set up default thresholds for the simple_ondemand governor.
	 * The values are chosen based on experiments.
	 */
	pfdevfreq->gov_data.upthreshold = 45;
	pfdevfreq->gov_data.downdifferential = 5;

	devfreq = devm_devfreq_add_device(dev, &panfrost_devfreq_profile,
					  DEVFREQ_GOV_SIMPLE_ONDEMAND,
					  &pfdevfreq->gov_data);
	if (IS_ERR(devfreq)) {
		DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
		return PTR_ERR(devfreq);
	}
	pfdevfreq->devfreq = devfreq;

	cooling = devfreq_cooling_em_register(devfreq, NULL);
	if (IS_ERR(cooling))
		DRM_DEV_INFO(dev, "Failed to register cooling device\n");
	else
		pfdevfreq->cooling = cooling;

	return 0;
}

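/*
 * Unregister the cooling device, if one was created. The devfreq device,
 * OPP table and regulator are devm-managed and released automatically.
 */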
void panfrost_devfreq_fini(struct panfrost_device *pfdev)
{
	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;

	if (pfdevfreq->cooling) {
		devfreq_cooling_unregister(pfdevfreq->cooling);
		pfdevfreq->cooling = NULL;
	}
}

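/*
 * Resume devfreq monitoring when the GPU is powered back up, starting
 * from a fresh measurement window.
 */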
void panfrost_devfreq_resume(struct panfrost_device *pfdev)
{
	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;

	if (!pfdevfreq->devfreq)
		return;

	panfrost_devfreq_reset(pfdevfreq);

	devfreq_resume_device(pfdevfreq->devfreq);
}

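/* Pause devfreq monitoring while the GPU is powered down. */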
void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
{
	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;

	if (!pfdevfreq->devfreq)
		return;

	devfreq_suspend_device(pfdevfreq->devfreq);
}

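/*
 * Account for a job becoming active: fold the elapsed time into the
 * utilization counters and bump the busy counter. Each call must be
 * balanced by a panfrost_devfreq_record_idle() call.
 */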
void panfrost_devfreq_record_busy(struct panfrost_devfreq *pfdevfreq)
{
	unsigned long irqflags;

	if (!pfdevfreq->devfreq)
		return;

	spin_lock_irqsave(&pfdevfreq->lock, irqflags);

	panfrost_devfreq_update_utilization(pfdevfreq);

	pfdevfreq->busy_count++;

	spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);
}

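/*
 * Account for a job completing: fold the elapsed time into the utilization
 * counters and drop the busy counter, warning if it would go negative.
 */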
void panfrost_devfreq_record_idle(struct panfrost_devfreq *pfdevfreq)
{
	unsigned long irqflags;

	if (!pfdevfreq->devfreq)
		return;

	spin_lock_irqsave(&pfdevfreq->lock, irqflags);

	panfrost_devfreq_update_utilization(pfdevfreq);

	WARN_ON(--pfdevfreq->busy_count < 0);

	spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);
}