/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include <asm/div64.h>
#include <linux/gcd.h>

/**
 * amdgpu_pll_reduce_ratio - fractional number reduction
 *
 * @nom: numerator
 * @den: denominator
 * @nom_min: minimum value for the numerator
 * @den_min: minimum value for the denominator
 *
 * Find the greatest common divisor and apply it to both numerator and
 * denominator, but make sure numerator and denominator are at least as
 * large as their minimum values.
 */
static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
				    unsigned nom_min, unsigned den_min)
{
	unsigned tmp;

	/* reduce the numbers to a simpler ratio */
	tmp = gcd(*nom, *den);
	*nom /= tmp;
	*den /= tmp;

	/* make sure the numerator is large enough */
	if (*nom < nom_min) {
		tmp = DIV_ROUND_UP(nom_min, *nom);
		*nom *= tmp;
		*den *= tmp;
	}

	/* make sure the denominator is large enough */
	if (*den < den_min) {
		tmp = DIV_ROUND_UP(den_min, *den);
		*nom *= tmp;
		*den *= tmp;
	}
}
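
/*
 * Worked example for amdgpu_pll_reduce_ratio() (illustrative numbers only,
 * not taken from any real clock setup):
 *   nom = 60000, den = 27000, nom_min = 4, den_min = 16
 *   gcd(60000, 27000) = 3000, so the ratio reduces to 20/9.
 *   20 >= nom_min, so the numerator is left alone; 9 < den_min, so both
 *   values are scaled by DIV_ROUND_UP(16, 9) = 2, giving 40/18.
 * The value of the ratio never changes, only its representation.
 */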

/**
 * amdgpu_pll_get_fb_ref_div - feedback and ref divider calculation
 *
 * @nom: numerator
 * @den: denominator
 * @post_div: post divider
 * @fb_div_max: feedback divider maximum
 * @ref_div_max: reference divider maximum
 * @fb_div: resulting feedback divider
 * @ref_div: resulting reference divider
 *
 * Calculate the feedback and reference divider for a given post divider.
 * Makes sure we stay within the limits.
 */
static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
				      unsigned fb_div_max, unsigned ref_div_max,
				      unsigned *fb_div, unsigned *ref_div)
{
	/* limit reference * post divider to a maximum */
	ref_div_max = min(128 / post_div, ref_div_max);

	/* get matching reference and feedback divider */
	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);

	/* limit fb divider to its maximum */
	if (*fb_div > fb_div_max) {
		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
		*fb_div = fb_div_max;
	}
}
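
/*
 * Worked example for amdgpu_pll_get_fb_ref_div() (again with made-up
 * numbers): nom = 20, den = 9, post_div = 2, fb_div_max = 1023,
 * ref_div_max = 128.
 *   ref_div_max is first clamped to min(128 / 2, 128) = 64.
 *   ref_div = min(max(DIV_ROUND_CLOSEST(9, 2), 1), 64) = 5
 *   fb_div  = DIV_ROUND_CLOSEST(20 * 5 * 2, 9) = 22
 * fb_div / (ref_div * post_div) = 22 / 10 = 2.2, close to the requested
 * ratio of 20 / 9 ~= 2.22, with both dividers inside their limits.
 */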

/**
 * amdgpu_pll_compute - compute PLL parameters
 *
 * @pll: information about the PLL
 * @freq: requested frequency
 * @dot_clock_p: resulting pixel clock
 * @fb_div_p: resulting feedback divider
 * @frac_fb_div_p: fractional part of the feedback divider
 * @ref_div_p: resulting reference divider
 * @post_div_p: resulting post divider
 *
 * Try to calculate the PLL parameters to generate the given frequency:
 * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
 */
void amdgpu_pll_compute(struct amdgpu_pll *pll,
			u32 freq,
			u32 *dot_clock_p,
			u32 *fb_div_p,
			u32 *frac_fb_div_p,
			u32 *ref_div_p,
			u32 *post_div_p)
{
	unsigned target_clock = pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV ?
		freq : freq / 10;
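
	/*
	 * When AMDGPU_PLL_USE_FRAC_FB_DIV is set the whole calculation is
	 * done with an extra factor of 10 of precision: target_clock keeps
	 * the last digit of freq (e.g. 270000 stays 270000 instead of being
	 * reduced to 27000), and the feedback divider and VCO limits below
	 * are scaled up by 10 to match.  The result is scaled back down when
	 * it is split into *fb_div_p and *frac_fb_div_p.
	 */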

	unsigned fb_div_min, fb_div_max, fb_div;
	unsigned post_div_min, post_div_max, post_div;
	unsigned ref_div_min, ref_div_max, ref_div;
	unsigned post_div_best, diff_best;
	unsigned nom, den;

	/* determine allowed feedback divider range */
	fb_div_min = pll->min_feedback_div;
	fb_div_max = pll->max_feedback_div;

	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		fb_div_min *= 10;
		fb_div_max *= 10;
	}

	/* determine allowed ref divider range */
	if (pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_min = pll->reference_div;
	else
		ref_div_min = pll->min_ref_div;

	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV &&
	    pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_max = pll->reference_div;
	else
		ref_div_max = pll->max_ref_div;

	/* determine allowed post divider range */
	if (pll->flags & AMDGPU_PLL_USE_POST_DIV) {
		post_div_min = pll->post_div;
		post_div_max = pll->post_div;
	} else {
		unsigned vco_min, vco_max;

		if (pll->flags & AMDGPU_PLL_IS_LCD) {
			vco_min = pll->lcd_pll_out_min;
			vco_max = pll->lcd_pll_out_max;
		} else {
			vco_min = pll->pll_out_min;
			vco_max = pll->pll_out_max;
		}

		if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
			vco_min *= 10;
			vco_max *= 10;
		}

		post_div_min = vco_min / target_clock;
		if ((target_clock * post_div_min) < vco_min)
			++post_div_min;
		if (post_div_min < pll->min_post_div)
			post_div_min = pll->min_post_div;

		post_div_max = vco_max / target_clock;
		if ((target_clock * post_div_max) > vco_max)
			--post_div_max;
		if (post_div_max > pll->max_post_div)
			post_div_max = pll->max_post_div;
	}
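
	/*
	 * Post divider range, by way of an illustrative example (values are
	 * invented, not from a real board): with vco_min = 600000,
	 * vco_max = 1200000 and target_clock = 270000,
	 *   post_div_min = 600000 / 270000 = 2, but 270000 * 2 < 600000,
	 *   so it is bumped to 3;
	 *   post_div_max = 1200000 / 270000 = 4, and 270000 * 4 <= 1200000,
	 *   so it stays 4.
	 * Both are then clamped against pll->min_post_div/max_post_div.
	 */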

	/* represent the searched ratio as fractional number */
	nom = target_clock;
	den = pll->reference_freq;

	/* reduce the numbers to a simpler ratio */
	amdgpu_pll_reduce_ratio(&nom, &den, fb_div_min, post_div_min);

	/* now search for a post divider */
	if (pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP)
		post_div_best = post_div_min;
	else
		post_div_best = post_div_max;
	diff_best = ~0;

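	/*
	 * Try every post divider in the allowed range and keep the one whose
	 * resulting clock is closest to target_clock.  On a tie the later
	 * (larger) post divider wins, unless AMDGPU_PLL_PREFER_MINM_OVER_MAXP
	 * is set, in which case the earlier (smaller) one is kept.
	 */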
	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
		unsigned diff;

		amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max,
					  ref_div_max, &fb_div, &ref_div);
		diff = abs(target_clock - (pll->reference_freq * fb_div) /
			(ref_div * post_div));

		if (diff < diff_best || (diff == diff_best &&
		    !(pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP))) {
			post_div_best = post_div;
			diff_best = diff;
		}
	}
	post_div = post_div_best;

	/* get the feedback and reference divider for the optimal value */
	amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
				  &fb_div, &ref_div);

	/*
	 * reduce the numbers to a simpler ratio once more;
	 * this also makes sure that the reference divider is large enough
	 */
	amdgpu_pll_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);

	/* avoid high jitter with small fractional dividers */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60);
		if (fb_div < fb_div_min) {
			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);

			fb_div *= tmp;
			ref_div *= tmp;
		}
	}
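
	/*
	 * Jitter rule above, illustrated with made-up values: a fractional
	 * feedback divider of 22.1 (fb_div = 221) has remainder 1, so
	 * fb_div_min is raised to at least (9 - 1) * 20 + 60 = 220; 221 is
	 * already above that, so nothing changes.  A divider of 6.1
	 * (fb_div = 61) would instead be scaled by DIV_ROUND_UP(220, 61) = 4
	 * to 24.4, with ref_div scaled by the same factor to keep the ratio.
	 */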

	/* and finally save the result */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		*fb_div_p = fb_div / 10;
		*frac_fb_div_p = fb_div % 10;
	} else {
		*fb_div_p = fb_div;
		*frac_fb_div_p = 0;
	}

	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
			(pll->reference_freq * *frac_fb_div_p)) /
		       (ref_div * post_div * 10);
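
	/*
	 * E.g. (values invented for illustration): reference_freq = 2700,
	 * fb_div = 100.5 (so *fb_div_p = 100, *frac_fb_div_p = 5),
	 * ref_div = 3, post_div = 2:
	 *   *dot_clock_p = (2700 * 100 * 10 + 2700 * 5) / (3 * 2 * 10)
	 *                = 2713500 / 60 = 45225
	 * which equals 2700 * 100.5 / 6, i.e. the formula from the
	 * kernel-doc with the fractional part folded in.
	 */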
	*ref_div_p = ref_div;
	*post_div_p = post_div;

	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
		      ref_div, post_div);
}

/**
 * amdgpu_pll_get_use_mask - look up a mask of which pplls are in use
 *
 * @crtc: drm crtc
 *
 * Returns the mask of which PPLLs (Pixel PLLs) are in use.
 */
u32 amdgpu_pll_get_use_mask(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;
	u32 pll_in_use = 0;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;

		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
			pll_in_use |= (1 << test_amdgpu_crtc->pll_id);
	}
	return pll_in_use;
}
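
/*
 * A caller would typically test a candidate PLL against the returned mask,
 * e.g. (sketch only, assuming ATOM_PPLL1 is a valid candidate on the ASIC):
 *
 *	u32 pll_in_use = amdgpu_pll_get_use_mask(crtc);
 *
 *	if (!(pll_in_use & (1 << ATOM_PPLL1)))
 *		return ATOM_PPLL1;
 */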

/**
 * amdgpu_pll_get_shared_dp_ppll - return the PPLL used by another crtc for DP
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
 * also in DP mode.  For DP, a single PPLL can be used for all DP
 * crtcs/encoders.
 */
int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;

		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->encoder &&
		    ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
			/* for DP use the same PLL for all */
			if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
				return test_amdgpu_crtc->pll_id;
		}
	}
	return ATOM_PPLL_INVALID;
}

/**
 * amdgpu_pll_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
 * be shared (i.e., same clock).
 */
int amdgpu_pll_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;
	u32 adjusted_clock, test_adjusted_clock;

	adjusted_clock = amdgpu_crtc->adjusted_clock;

	if (adjusted_clock == 0)
		return ATOM_PPLL_INVALID;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;

		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->encoder &&
		    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
			/* check if we are already driving this connector with another crtc */
			if (test_amdgpu_crtc->connector == amdgpu_crtc->connector) {
				/* if we are, return that pll */
				if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
					return test_amdgpu_crtc->pll_id;
			}
			/* for non-DP check the clock */
			test_adjusted_clock = test_amdgpu_crtc->adjusted_clock;
			if ((crtc->mode.clock == test_crtc->mode.clock) &&
			    (adjusted_clock == test_adjusted_clock) &&
			    (amdgpu_crtc->ss_enabled == test_amdgpu_crtc->ss_enabled) &&
			    (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID))
				return test_amdgpu_crtc->pll_id;
		}
	}
	return ATOM_PPLL_INVALID;
}
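
/*
 * The helpers above are meant to be combined by a display-controller
 * specific PLL picker.  A minimal sketch of such a caller (hypothetical;
 * the real per-ASIC DCE code is more involved):
 *
 *	u32 pll_in_use;
 *	int pll;
 *
 *	if (ENCODER_MODE_IS_DP(...)) {
 *		pll = amdgpu_pll_get_shared_dp_ppll(crtc);
 *		if (pll != ATOM_PPLL_INVALID)
 *			return pll;
 *	} else {
 *		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
 *		if (pll != ATOM_PPLL_INVALID)
 *			return pll;
 *	}
 *	pll_in_use = amdgpu_pll_get_use_mask(crtc);
 *	if (!(pll_in_use & (1 << ATOM_PPLL2)))
 *		return ATOM_PPLL2;
 *	if (!(pll_in_use & (1 << ATOM_PPLL1)))
 *		return ATOM_PPLL1;
 *	DRM_ERROR("unable to allocate a PPLL\n");
 *	return ATOM_PPLL_INVALID;
 */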