// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_gpu.h"
#include "msm_gpu_trace.h"

#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/units.h>

/*
 * Power Management:
 *
 * Frequency scaling is driven by devfreq, with two dev_pm_qos requests
 * layered on top: "idle_freq" clamps the max frequency while the GPU is
 * idle, and "boost_freq" temporarily raises the min frequency to help
 * the governor ramp up again after an idle period.
 */

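/*
 * devfreq .target callback: the governor requests a new frequency.
 * Round the request to a supported OPP, then program it either via
 * the GPU-specific gpu_set_freq() hook or directly on the core clock:
 */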
static int msm_devfreq_target(struct device *dev, unsigned long *freq,
		u32 flags)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);
	struct dev_pm_opp *opp;

	/*
	 * Note that devfreq_recommended_opp() can modify the freq
	 * to something that actually is in the opp table:
	 */
	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));

	if (gpu->funcs->gpu_set_freq)
		gpu->funcs->gpu_set_freq(gpu, opp);
	else
		clk_set_rate(gpu->core_clk, *freq);

	dev_pm_opp_put(opp);

	return 0;
}

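/* Read back the current frequency, preferring the GPU-specific hook: */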
static unsigned long get_freq(struct msm_gpu *gpu)
{
	if (gpu->funcs->gpu_get_freq)
		return gpu->funcs->gpu_get_freq(gpu);

	return clk_get_rate(gpu->core_clk);
}

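/*
 * devfreq .get_dev_status callback: report how busy the GPU has been
 * (as accounted by the driver's gpu_busy() hook) vs. how much wall
 * clock time has elapsed since the last sample; the governor derives
 * load from the ratio of the two.
 */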
static int msm_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *status)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);
	ktime_t time;

	status->current_frequency = get_freq(gpu);
	status->busy_time = gpu->funcs->gpu_busy(gpu);

	time = ktime_get();
	status->total_time = ktime_us_delta(time, gpu->devfreq.time);
	gpu->devfreq.time = time;

	return 0;
}

static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
	*freq = get_freq(dev_to_gpu(dev));

	return 0;
}

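/*
 * Poll every 50ms using the delayed (rather than deferrable) timer, so
 * that sampling is not postponed while the CPUs are idle, which would
 * skew the busy/total accounting:
 */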
static struct devfreq_dev_profile msm_devfreq_profile = {
	.timer = DEVFREQ_TIMER_DELAYED,
	.polling_ms = 50,
	.target = msm_devfreq_target,
	.get_dev_status = msm_devfreq_get_dev_status,
	.get_cur_freq = msm_devfreq_get_cur_freq,
};

static void msm_devfreq_boost_work(struct kthread_work *work);
static void msm_devfreq_idle_work(struct kthread_work *work);

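/* devfreq setup may have been skipped (or failed), so check before use: */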
static bool has_devfreq(struct msm_gpu *gpu)
{
	struct msm_gpu_devfreq *df = &gpu->devfreq;
	return !!df->devfreq;
}

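/*
 * Set up devfreq for the GPU: register the PM QoS requests used for
 * boost (min freq) and idle clamping (max freq), create the devfreq
 * device (left suspended until the GPU is resumed), and register it
 * as a cooling device for thermal throttling.
 */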
void msm_devfreq_init(struct msm_gpu *gpu)
{
	struct msm_gpu_devfreq *df = &gpu->devfreq;

	/* We need gpu_busy() (for load accounting) to do devfreq */
	if (!gpu->funcs->gpu_busy)
		return;

	dev_pm_qos_add_request(&gpu->pdev->dev, &df->idle_freq,
			       DEV_PM_QOS_MAX_FREQUENCY,
			       PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
	dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
			       DEV_PM_QOS_MIN_FREQUENCY, 0);

	msm_devfreq_profile.initial_freq = gpu->fast_rate;

	/*
	 * Don't set freq_table or max_state; let devfreq build the table
	 * from the OPPs.  After a deferred probe these may have been left
	 * at non-zero values, so reset them to zero before creating the
	 * devfreq device:
	 */
	msm_devfreq_profile.freq_table = NULL;
	msm_devfreq_profile.max_state = 0;

	df->devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
			&msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
			NULL);

	if (IS_ERR(df->devfreq)) {
		DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
		df->devfreq = NULL;
		return;
	}

	devfreq_suspend_device(df->devfreq);

	gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, df->devfreq);
	if (IS_ERR(gpu->cooling)) {
		DRM_DEV_ERROR(&gpu->pdev->dev,
				"Couldn't register GPU cooling device\n");
		gpu->cooling = NULL;
	}

	msm_hrtimer_work_init(&df->boost_work, gpu->worker, msm_devfreq_boost_work,
			      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	msm_hrtimer_work_init(&df->idle_work, gpu->worker, msm_devfreq_idle_work,
			      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}

static void cancel_idle_work(struct msm_gpu_devfreq *df)
{
	hrtimer_cancel(&df->idle_work.timer);
	kthread_cancel_work_sync(&df->idle_work.work);
}

static void cancel_boost_work(struct msm_gpu_devfreq *df)
{
	hrtimer_cancel(&df->boost_work.timer);
	kthread_cancel_work_sync(&df->boost_work.work);
}

void msm_devfreq_cleanup(struct msm_gpu *gpu)
{
	struct msm_gpu_devfreq *df = &gpu->devfreq;

	if (!has_devfreq(gpu))
		return;

	devfreq_cooling_unregister(gpu->cooling);
	dev_pm_qos_remove_request(&df->boost_freq);
	dev_pm_qos_remove_request(&df->idle_freq);
}

void msm_devfreq_resume(struct msm_gpu *gpu)
{
	struct msm_gpu_devfreq *df = &gpu->devfreq;

	if (!has_devfreq(gpu))
		return;

	df->busy_cycles = 0;
	df->time = ktime_get();

	devfreq_resume_device(df->devfreq);
}

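/*
 * Suspend devfreq and cancel any in-flight idle/boost work, so their
 * timers don't fire while the GPU is suspended:
 */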
void msm_devfreq_suspend(struct msm_gpu *gpu)
{
	struct msm_gpu_devfreq *df = &gpu->devfreq;

	if (!has_devfreq(gpu))
		return;

	devfreq_suspend_device(df->devfreq);

	cancel_idle_work(df);
	cancel_boost_work(df);
}

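/* Boost expired: drop the min-frequency request back down to zero. */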
static void msm_devfreq_boost_work(struct kthread_work *work)
{
	struct msm_gpu_devfreq *df = container_of(work,
			struct msm_gpu_devfreq, boost_work.work);

	dev_pm_qos_update_request(&df->boost_freq, 0);
}

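/*
 * Raise the min-frequency floor to factor times the current frequency
 * for roughly one polling interval, after which boost_work drops the
 * floor again:
 */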
void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor)
{
	struct msm_gpu_devfreq *df = &gpu->devfreq;
	uint64_t freq;

	if (!has_devfreq(gpu))
		return;

	freq = get_freq(gpu);
	freq *= factor;

	/*
	 * A nice little trap is that PM QoS operates in terms of kHz,
	 * while devfreq operates in terms of Hz:
	 */
	do_div(freq, HZ_PER_KHZ);

	dev_pm_qos_update_request(&df->boost_freq, freq);

	msm_hrtimer_queue_work(&df->boost_work,
			       ms_to_ktime(msm_devfreq_profile.polling_ms),
			       HRTIMER_MODE_REL);
}

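/*
 * Called when the GPU transitions from idle to active: cancel any
 * pending idle clamp, boost if we were idle long enough to starve the
 * governor of busy samples, lift the idle max-frequency clamp, and
 * resample device status so busy/total accounting restarts from now.
 */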
void msm_devfreq_active(struct msm_gpu *gpu)
{
	struct msm_gpu_devfreq *df = &gpu->devfreq;
	struct devfreq_dev_status status;
	unsigned int idle_time;

	if (!has_devfreq(gpu))
		return;

	/*
	 * Cancel any pending transition to idle frequency:
	 */
	cancel_idle_work(df);

	idle_time = ktime_to_ms(ktime_sub(ktime_get(), df->idle_time));

	/*
	 * If we've been idle for a significant fraction of a polling
	 * interval, then we won't meet the threshold of busyness for
	 * the governor to ramp up the freq... so give some boost
	 */
	if (idle_time > msm_devfreq_profile.polling_ms)
		msm_devfreq_boost(gpu, 2);

	dev_pm_qos_update_request(&df->idle_freq,
				  PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);

	/*
	 * Reset the polling interval so we aren't inconsistent
	 * about freq vs busy/total cycles
	 */
	msm_devfreq_get_dev_status(&gpu->pdev->dev, &status);
}
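/*
 * The deferred half of msm_devfreq_idle(): record when we went idle
 * and, if clamping is enabled for this GPU, pull the max-frequency
 * request down to force the lowest OPP.
 */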
static void msm_devfreq_idle_work(struct kthread_work *work)
{
	struct msm_gpu_devfreq *df = container_of(work,
			struct msm_gpu_devfreq, idle_work.work);
	struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);

	df->idle_time = ktime_get();

	if (gpu->clamp_to_idle)
		dev_pm_qos_update_request(&df->idle_freq, 0);
}

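/*
 * Called when the GPU goes idle: defer the actual clamping briefly
 * (via hrtimer) so that quick idle/active transitions don't thrash
 * the frequency:
 */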
void msm_devfreq_idle(struct msm_gpu *gpu)
{
	struct msm_gpu_devfreq *df = &gpu->devfreq;

	if (!has_devfreq(gpu))
		return;

	msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
			       HRTIMER_MODE_REL);
}