1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/string_helpers.h>
8 
9 #include "i915_reg.h"
10 #include "intel_crtc.h"
11 #include "intel_cx0_phy.h"
12 #include "intel_de.h"
13 #include "intel_display.h"
14 #include "intel_display_types.h"
15 #include "intel_dpio_phy.h"
16 #include "intel_dpll.h"
17 #include "intel_lvds.h"
18 #include "intel_panel.h"
19 #include "intel_pps.h"
20 #include "intel_snps_phy.h"
21 #include "vlv_sideband.h"
22 
23 struct intel_dpll_funcs {
24 	int (*crtc_compute_clock)(struct intel_atomic_state *state,
25 				  struct intel_crtc *crtc);
26 	int (*crtc_get_shared_dpll)(struct intel_atomic_state *state,
27 				    struct intel_crtc *crtc);
28 };
29 
30 struct intel_limit {
31 	struct {
32 		int min, max;
33 	} dot, vco, n, m, m1, m2, p, p1;
34 
35 	struct {
36 		int dot_limit;
37 		int p2_slow, p2_fast;
38 	} p2;
39 };

static const struct intel_limit intel_limits_i8xx_dac = {
41 	.dot = { .min = 25000, .max = 350000 },
42 	.vco = { .min = 908000, .max = 1512000 },
43 	.n = { .min = 2, .max = 16 },
44 	.m = { .min = 96, .max = 140 },
45 	.m1 = { .min = 18, .max = 26 },
46 	.m2 = { .min = 6, .max = 16 },
47 	.p = { .min = 4, .max = 128 },
48 	.p1 = { .min = 2, .max = 33 },
49 	.p2 = { .dot_limit = 165000,
50 		.p2_slow = 4, .p2_fast = 2 },
51 };
52 
53 static const struct intel_limit intel_limits_i8xx_dvo = {
54 	.dot = { .min = 25000, .max = 350000 },
55 	.vco = { .min = 908000, .max = 1512000 },
56 	.n = { .min = 2, .max = 16 },
57 	.m = { .min = 96, .max = 140 },
58 	.m1 = { .min = 18, .max = 26 },
59 	.m2 = { .min = 6, .max = 16 },
60 	.p = { .min = 4, .max = 128 },
61 	.p1 = { .min = 2, .max = 33 },
62 	.p2 = { .dot_limit = 165000,
63 		.p2_slow = 4, .p2_fast = 4 },
64 };
65 
66 static const struct intel_limit intel_limits_i8xx_lvds = {
67 	.dot = { .min = 25000, .max = 350000 },
68 	.vco = { .min = 908000, .max = 1512000 },
69 	.n = { .min = 2, .max = 16 },
70 	.m = { .min = 96, .max = 140 },
71 	.m1 = { .min = 18, .max = 26 },
72 	.m2 = { .min = 6, .max = 16 },
73 	.p = { .min = 4, .max = 128 },
74 	.p1 = { .min = 1, .max = 6 },
75 	.p2 = { .dot_limit = 165000,
76 		.p2_slow = 14, .p2_fast = 7 },
77 };
78 
79 static const struct intel_limit intel_limits_i9xx_sdvo = {
80 	.dot = { .min = 20000, .max = 400000 },
81 	.vco = { .min = 1400000, .max = 2800000 },
82 	.n = { .min = 1, .max = 6 },
83 	.m = { .min = 70, .max = 120 },
84 	.m1 = { .min = 8, .max = 18 },
85 	.m2 = { .min = 3, .max = 7 },
86 	.p = { .min = 5, .max = 80 },
87 	.p1 = { .min = 1, .max = 8 },
88 	.p2 = { .dot_limit = 200000,
89 		.p2_slow = 10, .p2_fast = 5 },
90 };
91 
92 static const struct intel_limit intel_limits_i9xx_lvds = {
93 	.dot = { .min = 20000, .max = 400000 },
94 	.vco = { .min = 1400000, .max = 2800000 },
95 	.n = { .min = 1, .max = 6 },
96 	.m = { .min = 70, .max = 120 },
97 	.m1 = { .min = 8, .max = 18 },
98 	.m2 = { .min = 3, .max = 7 },
99 	.p = { .min = 7, .max = 98 },
100 	.p1 = { .min = 1, .max = 8 },
101 	.p2 = { .dot_limit = 112000,
102 		.p2_slow = 14, .p2_fast = 7 },
103 };
104 
105 
106 static const struct intel_limit intel_limits_g4x_sdvo = {
107 	.dot = { .min = 25000, .max = 270000 },
108 	.vco = { .min = 1750000, .max = 3500000},
109 	.n = { .min = 1, .max = 4 },
110 	.m = { .min = 104, .max = 138 },
111 	.m1 = { .min = 17, .max = 23 },
112 	.m2 = { .min = 5, .max = 11 },
113 	.p = { .min = 10, .max = 30 },
114 	.p1 = { .min = 1, .max = 3},
115 	.p2 = { .dot_limit = 270000,
116 		.p2_slow = 10,
117 		.p2_fast = 10
118 	},
119 };
120 
121 static const struct intel_limit intel_limits_g4x_hdmi = {
122 	.dot = { .min = 22000, .max = 400000 },
123 	.vco = { .min = 1750000, .max = 3500000},
124 	.n = { .min = 1, .max = 4 },
125 	.m = { .min = 104, .max = 138 },
126 	.m1 = { .min = 16, .max = 23 },
127 	.m2 = { .min = 5, .max = 11 },
128 	.p = { .min = 5, .max = 80 },
129 	.p1 = { .min = 1, .max = 8},
130 	.p2 = { .dot_limit = 165000,
131 		.p2_slow = 10, .p2_fast = 5 },
132 };
133 
134 static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
135 	.dot = { .min = 20000, .max = 115000 },
136 	.vco = { .min = 1750000, .max = 3500000 },
137 	.n = { .min = 1, .max = 3 },
138 	.m = { .min = 104, .max = 138 },
139 	.m1 = { .min = 17, .max = 23 },
140 	.m2 = { .min = 5, .max = 11 },
141 	.p = { .min = 28, .max = 112 },
142 	.p1 = { .min = 2, .max = 8 },
143 	.p2 = { .dot_limit = 0,
144 		.p2_slow = 14, .p2_fast = 14
145 	},
146 };
147 
148 static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
149 	.dot = { .min = 80000, .max = 224000 },
150 	.vco = { .min = 1750000, .max = 3500000 },
151 	.n = { .min = 1, .max = 3 },
152 	.m = { .min = 104, .max = 138 },
153 	.m1 = { .min = 17, .max = 23 },
154 	.m2 = { .min = 5, .max = 11 },
155 	.p = { .min = 14, .max = 42 },
156 	.p1 = { .min = 2, .max = 6 },
157 	.p2 = { .dot_limit = 0,
158 		.p2_slow = 7, .p2_fast = 7
159 	},
160 };
161 
162 static const struct intel_limit pnv_limits_sdvo = {
163 	.dot = { .min = 20000, .max = 400000},
164 	.vco = { .min = 1700000, .max = 3500000 },
165 	/* Pineview's Ncounter is a ring counter */
166 	.n = { .min = 3, .max = 6 },
167 	.m = { .min = 2, .max = 256 },
168 	/* Pineview only has one combined m divider, which we treat as m2. */
169 	.m1 = { .min = 0, .max = 0 },
170 	.m2 = { .min = 0, .max = 254 },
171 	.p = { .min = 5, .max = 80 },
172 	.p1 = { .min = 1, .max = 8 },
173 	.p2 = { .dot_limit = 200000,
174 		.p2_slow = 10, .p2_fast = 5 },
175 };
176 
177 static const struct intel_limit pnv_limits_lvds = {
178 	.dot = { .min = 20000, .max = 400000 },
179 	.vco = { .min = 1700000, .max = 3500000 },
180 	.n = { .min = 3, .max = 6 },
181 	.m = { .min = 2, .max = 256 },
182 	.m1 = { .min = 0, .max = 0 },
183 	.m2 = { .min = 0, .max = 254 },
184 	.p = { .min = 7, .max = 112 },
185 	.p1 = { .min = 1, .max = 8 },
186 	.p2 = { .dot_limit = 112000,
187 		.p2_slow = 14, .p2_fast = 14 },
188 };
189 
/*
 * Ironlake / Sandybridge
191  *
192  * We calculate clock using (register_value + 2) for N/M1/M2, so here
193  * the range value for them is (actual_value - 2).
194  */
195 static const struct intel_limit ilk_limits_dac = {
196 	.dot = { .min = 25000, .max = 350000 },
197 	.vco = { .min = 1760000, .max = 3510000 },
198 	.n = { .min = 1, .max = 5 },
199 	.m = { .min = 79, .max = 127 },
200 	.m1 = { .min = 12, .max = 22 },
201 	.m2 = { .min = 5, .max = 9 },
202 	.p = { .min = 5, .max = 80 },
203 	.p1 = { .min = 1, .max = 8 },
204 	.p2 = { .dot_limit = 225000,
205 		.p2_slow = 10, .p2_fast = 5 },
206 };
207 
208 static const struct intel_limit ilk_limits_single_lvds = {
209 	.dot = { .min = 25000, .max = 350000 },
210 	.vco = { .min = 1760000, .max = 3510000 },
211 	.n = { .min = 1, .max = 3 },
212 	.m = { .min = 79, .max = 118 },
213 	.m1 = { .min = 12, .max = 22 },
214 	.m2 = { .min = 5, .max = 9 },
215 	.p = { .min = 28, .max = 112 },
216 	.p1 = { .min = 2, .max = 8 },
217 	.p2 = { .dot_limit = 225000,
218 		.p2_slow = 14, .p2_fast = 14 },
219 };
220 
221 static const struct intel_limit ilk_limits_dual_lvds = {
222 	.dot = { .min = 25000, .max = 350000 },
223 	.vco = { .min = 1760000, .max = 3510000 },
224 	.n = { .min = 1, .max = 3 },
225 	.m = { .min = 79, .max = 127 },
226 	.m1 = { .min = 12, .max = 22 },
227 	.m2 = { .min = 5, .max = 9 },
228 	.p = { .min = 14, .max = 56 },
229 	.p1 = { .min = 2, .max = 8 },
230 	.p2 = { .dot_limit = 225000,
231 		.p2_slow = 7, .p2_fast = 7 },
232 };
233 
/* LVDS 100 MHz refclk limits. */
235 static const struct intel_limit ilk_limits_single_lvds_100m = {
236 	.dot = { .min = 25000, .max = 350000 },
237 	.vco = { .min = 1760000, .max = 3510000 },
238 	.n = { .min = 1, .max = 2 },
239 	.m = { .min = 79, .max = 126 },
240 	.m1 = { .min = 12, .max = 22 },
241 	.m2 = { .min = 5, .max = 9 },
242 	.p = { .min = 28, .max = 112 },
243 	.p1 = { .min = 2, .max = 8 },
244 	.p2 = { .dot_limit = 225000,
245 		.p2_slow = 14, .p2_fast = 14 },
246 };
247 
248 static const struct intel_limit ilk_limits_dual_lvds_100m = {
249 	.dot = { .min = 25000, .max = 350000 },
250 	.vco = { .min = 1760000, .max = 3510000 },
251 	.n = { .min = 1, .max = 3 },
252 	.m = { .min = 79, .max = 126 },
253 	.m1 = { .min = 12, .max = 22 },
254 	.m2 = { .min = 5, .max = 9 },
255 	.p = { .min = 14, .max = 42 },
256 	.p1 = { .min = 2, .max = 6 },
257 	.p2 = { .dot_limit = 225000,
258 		.p2_slow = 7, .p2_fast = 7 },
259 };
260 
261 static const struct intel_limit intel_limits_vlv = {
262 	 /*
263 	  * These are based on the data rate limits (measured in fast clocks)
264 	  * since those are the strictest limits we have. The fast
265 	  * clock and actual rate limits are more relaxed, so checking
266 	  * them would make no difference.
267 	  */
268 	.dot = { .min = 25000, .max = 270000 },
269 	.vco = { .min = 4000000, .max = 6000000 },
270 	.n = { .min = 1, .max = 7 },
271 	.m1 = { .min = 2, .max = 3 },
272 	.m2 = { .min = 11, .max = 156 },
273 	.p1 = { .min = 2, .max = 3 },
274 	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
275 };
276 
277 static const struct intel_limit intel_limits_chv = {
278 	/*
279 	 * These are based on the data rate limits (measured in fast clocks)
280 	 * since those are the strictest limits we have.  The fast
281 	 * clock and actual rate limits are more relaxed, so checking
282 	 * them would make no difference.
283 	 */
284 	.dot = { .min = 25000, .max = 540000 },
285 	.vco = { .min = 4800000, .max = 6480000 },
286 	.n = { .min = 1, .max = 1 },
287 	.m1 = { .min = 2, .max = 2 },
288 	.m2 = { .min = 24 << 22, .max = 175 << 22 },
289 	.p1 = { .min = 2, .max = 4 },
290 	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
291 };
292 
293 static const struct intel_limit intel_limits_bxt = {
294 	.dot = { .min = 25000, .max = 594000 },
295 	.vco = { .min = 4800000, .max = 6700000 },
296 	.n = { .min = 1, .max = 1 },
297 	.m1 = { .min = 2, .max = 2 },
298 	/* FIXME: find real m2 limits */
299 	.m2 = { .min = 2 << 22, .max = 255 << 22 },
300 	.p1 = { .min = 2, .max = 4 },
301 	.p2 = { .p2_slow = 1, .p2_fast = 20 },
302 };
303 
304 /*
305  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
306  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
307  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
308  * The helpers' return value is the rate of the clock that is fed to the
309  * display engine's pipe which can be the above fast dot clock rate or a
310  * divided-down version of it.
311  */
312 /* m1 is reserved as 0 in Pineview, n is a ring counter */
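/*
 * Worked example (illustrative values, not from any spec): with a 96000 kHz
 * refclk, n = 3, m2 = 100, p1 = 2 and p2 = 10 this gives m = 102,
 * vco = 96000 * 102 / 3 = 3264000 kHz and dot = 3264000 / 20 = 163200 kHz,
 * which falls within the pnv_limits_* ranges above.
 */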
313 int pnv_calc_dpll_params(int refclk, struct dpll *clock)
314 {
315 	clock->m = clock->m2 + 2;
316 	clock->p = clock->p1 * clock->p2;
317 	if (WARN_ON(clock->n == 0 || clock->p == 0))
318 		return 0;
319 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
320 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
321 
322 	return clock->dot;
323 }
324 
325 static u32 i9xx_dpll_compute_m(const struct dpll *dpll)
326 {
327 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
328 }
329 
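/*
 * Worked example (illustrative values, not from any spec): with a 96000 kHz
 * refclk, n = 2, m1 = 12, m2 = 5, p1 = 4 and p2 = 10 this gives
 * m = 5 * 14 + 7 = 77, vco = 96000 * 77 / 4 = 1848000 kHz and
 * dot = 1848000 / 40 = 46200 kHz, satisfying the intel_limits_i9xx_sdvo
 * ranges above.
 */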
330 int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
331 {
332 	clock->m = i9xx_dpll_compute_m(clock);
333 	clock->p = clock->p1 * clock->p2;
334 	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
335 		return 0;
336 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
337 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
338 
339 	return clock->dot;
340 }
341 
342 int vlv_calc_dpll_params(int refclk, struct dpll *clock)
343 {
344 	clock->m = clock->m1 * clock->m2;
345 	clock->p = clock->p1 * clock->p2 * 5;
346 	if (WARN_ON(clock->n == 0 || clock->p == 0))
347 		return 0;
348 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
349 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
350 
351 	return clock->dot;
352 }
353 
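/*
 * Note that on CHV clock->m2 carries a 22-bit fractional part (see the .m2
 * limits above and chv_prepare_pll()), e.g. an effective m2 of 26.5 is
 * stored as (26 << 22) + (1 << 21); the "clock->n << 22" below cancels that
 * scaling out of the VCO calculation.
 */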
354 int chv_calc_dpll_params(int refclk, struct dpll *clock)
355 {
356 	clock->m = clock->m1 * clock->m2;
357 	clock->p = clock->p1 * clock->p2 * 5;
358 	if (WARN_ON(clock->n == 0 || clock->p == 0))
359 		return 0;
360 	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
361 					   clock->n << 22);
362 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
363 
364 	return clock->dot;
365 }
366 
367 /*
368  * Returns whether the given set of divisors are valid for a given refclk with
369  * the given connectors.
370  */
371 static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
372 			       const struct intel_limit *limit,
373 			       const struct dpll *clock)
374 {
375 	if (clock->n < limit->n.min || limit->n.max < clock->n)
376 		return false;
377 	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
378 		return false;
379 	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
380 		return false;
381 	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
382 		return false;
383 
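	/*
	 * The LP platform limit tables above (e.g. intel_limits_vlv/chv/bxt)
	 * don't define overall m/p limits, and Pineview has no separate m1
	 * divider, hence the platform checks below.
	 */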
384 	if (!IS_PINEVIEW(dev_priv) && !IS_LP(dev_priv))
385 		if (clock->m1 <= clock->m2)
386 			return false;
387 
388 	if (!IS_LP(dev_priv)) {
389 		if (clock->p < limit->p.min || limit->p.max < clock->p)
390 			return false;
391 		if (clock->m < limit->m.min || limit->m.max < clock->m)
392 			return false;
393 	}
394 
395 	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
396 		return false;
	/*
	 * XXX: We may need to be checking "Dot clock" depending on the
	 * multiplier, connector, etc., rather than just a single range.
	 */
400 	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
401 		return false;
402 
403 	return true;
404 }
405 
406 static int
407 i9xx_select_p2_div(const struct intel_limit *limit,
408 		   const struct intel_crtc_state *crtc_state,
409 		   int target)
410 {
411 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
412 
413 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
414 		/*
415 		 * For LVDS just rely on its current settings for dual-channel.
416 		 * We haven't figured out how to reliably set up different
417 		 * single/dual channel state, if we even can.
418 		 */
419 		if (intel_is_dual_link_lvds(dev_priv))
420 			return limit->p2.p2_fast;
421 		else
422 			return limit->p2.p2_slow;
423 	} else {
424 		if (target < limit->p2.dot_limit)
425 			return limit->p2.p2_slow;
426 		else
427 			return limit->p2.p2_fast;
428 	}
429 }
430 
431 /*
432  * Returns a set of divisors for the desired target clock with the given
433  * refclk, or FALSE.
434  *
435  * Target and reference clocks are specified in kHz.
436  *
437  * If match_clock is provided, then best_clock P divider must match the P
438  * divider from @match_clock used for LVDS downclocking.
439  */
440 static bool
441 i9xx_find_best_dpll(const struct intel_limit *limit,
442 		    struct intel_crtc_state *crtc_state,
443 		    int target, int refclk,
444 		    const struct dpll *match_clock,
445 		    struct dpll *best_clock)
446 {
447 	struct drm_device *dev = crtc_state->uapi.crtc->dev;
448 	struct dpll clock;
449 	int err = target;
450 
451 	memset(best_clock, 0, sizeof(*best_clock));
452 
453 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
454 
455 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
456 	     clock.m1++) {
457 		for (clock.m2 = limit->m2.min;
458 		     clock.m2 <= limit->m2.max; clock.m2++) {
459 			if (clock.m2 >= clock.m1)
460 				break;
461 			for (clock.n = limit->n.min;
462 			     clock.n <= limit->n.max; clock.n++) {
463 				for (clock.p1 = limit->p1.min;
464 					clock.p1 <= limit->p1.max; clock.p1++) {
465 					int this_err;
466 
467 					i9xx_calc_dpll_params(refclk, &clock);
468 					if (!intel_pll_is_valid(to_i915(dev),
469 								limit,
470 								&clock))
471 						continue;
472 					if (match_clock &&
473 					    clock.p != match_clock->p)
474 						continue;
475 
476 					this_err = abs(clock.dot - target);
477 					if (this_err < err) {
478 						*best_clock = clock;
479 						err = this_err;
480 					}
481 				}
482 			}
483 		}
484 	}
485 
486 	return (err != target);
487 }
488 
489 /*
490  * Returns a set of divisors for the desired target clock with the given
491  * refclk, or FALSE.
492  *
493  * Target and reference clocks are specified in kHz.
494  *
495  * If match_clock is provided, then best_clock P divider must match the P
496  * divider from @match_clock used for LVDS downclocking.
497  */
498 static bool
499 pnv_find_best_dpll(const struct intel_limit *limit,
500 		   struct intel_crtc_state *crtc_state,
501 		   int target, int refclk,
502 		   const struct dpll *match_clock,
503 		   struct dpll *best_clock)
504 {
505 	struct drm_device *dev = crtc_state->uapi.crtc->dev;
506 	struct dpll clock;
507 	int err = target;
508 
509 	memset(best_clock, 0, sizeof(*best_clock));
510 
511 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
512 
513 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
514 	     clock.m1++) {
515 		for (clock.m2 = limit->m2.min;
516 		     clock.m2 <= limit->m2.max; clock.m2++) {
517 			for (clock.n = limit->n.min;
518 			     clock.n <= limit->n.max; clock.n++) {
519 				for (clock.p1 = limit->p1.min;
520 					clock.p1 <= limit->p1.max; clock.p1++) {
521 					int this_err;
522 
523 					pnv_calc_dpll_params(refclk, &clock);
524 					if (!intel_pll_is_valid(to_i915(dev),
525 								limit,
526 								&clock))
527 						continue;
528 					if (match_clock &&
529 					    clock.p != match_clock->p)
530 						continue;
531 
532 					this_err = abs(clock.dot - target);
533 					if (this_err < err) {
534 						*best_clock = clock;
535 						err = this_err;
536 					}
537 				}
538 			}
539 		}
540 	}
541 
542 	return (err != target);
543 }
544 
545 /*
546  * Returns a set of divisors for the desired target clock with the given
547  * refclk, or FALSE.
548  *
549  * Target and reference clocks are specified in kHz.
550  *
551  * If match_clock is provided, then best_clock P divider must match the P
552  * divider from @match_clock used for LVDS downclocking.
553  */
554 static bool
555 g4x_find_best_dpll(const struct intel_limit *limit,
556 		   struct intel_crtc_state *crtc_state,
557 		   int target, int refclk,
558 		   const struct dpll *match_clock,
559 		   struct dpll *best_clock)
560 {
561 	struct drm_device *dev = crtc_state->uapi.crtc->dev;
562 	struct dpll clock;
563 	int max_n;
564 	bool found = false;
	/* approximately equals target * 0.00585: (1/256 + 1/512) is ~0.59% */
566 	int err_most = (target >> 8) + (target >> 9);
567 
568 	memset(best_clock, 0, sizeof(*best_clock));
569 
570 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
571 
572 	max_n = limit->n.max;
573 	/* based on hardware requirement, prefer smaller n to precision */
574 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
576 		for (clock.m1 = limit->m1.max;
577 		     clock.m1 >= limit->m1.min; clock.m1--) {
578 			for (clock.m2 = limit->m2.max;
579 			     clock.m2 >= limit->m2.min; clock.m2--) {
580 				for (clock.p1 = limit->p1.max;
581 				     clock.p1 >= limit->p1.min; clock.p1--) {
582 					int this_err;
583 
584 					i9xx_calc_dpll_params(refclk, &clock);
585 					if (!intel_pll_is_valid(to_i915(dev),
586 								limit,
587 								&clock))
588 						continue;
589 
590 					this_err = abs(clock.dot - target);
591 					if (this_err < err_most) {
592 						*best_clock = clock;
593 						err_most = this_err;
594 						max_n = clock.n;
595 						found = true;
596 					}
597 				}
598 			}
599 		}
600 	}
601 	return found;
602 }
603 
604 /*
605  * Check if the calculated PLL configuration is more optimal compared to the
606  * best configuration and error found so far. Return the calculated error.
607  */
608 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
609 			       const struct dpll *calculated_clock,
610 			       const struct dpll *best_clock,
611 			       unsigned int best_error_ppm,
612 			       unsigned int *error_ppm)
613 {
614 	/*
615 	 * For CHV ignore the error and consider only the P value.
616 	 * Prefer a bigger P value based on HW requirements.
617 	 */
618 	if (IS_CHERRYVIEW(to_i915(dev))) {
619 		*error_ppm = 0;
620 
621 		return calculated_clock->p > best_clock->p;
622 	}
623 
624 	if (drm_WARN_ON_ONCE(dev, !target_freq))
625 		return false;
626 
627 	*error_ppm = div_u64(1000000ULL *
628 				abs(target_freq - calculated_clock->dot),
629 			     target_freq);
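	/*
	 * E.g. (illustrative) a 270000 kHz target with a calculated dot
	 * clock of 270027 kHz works out to an error of 100 ppm.
	 */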
630 	/*
631 	 * Prefer a better P value over a better (smaller) error if the error
632 	 * is small. Ensure this preference for future configurations too by
633 	 * setting the error to 0.
634 	 */
635 	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
636 		*error_ppm = 0;
637 
638 		return true;
639 	}
640 
641 	return *error_ppm + 10 < best_error_ppm;
642 }
643 
644 /*
645  * Returns a set of divisors for the desired target clock with the given
646  * refclk, or FALSE.
647  */
648 static bool
649 vlv_find_best_dpll(const struct intel_limit *limit,
650 		   struct intel_crtc_state *crtc_state,
651 		   int target, int refclk,
652 		   const struct dpll *match_clock,
653 		   struct dpll *best_clock)
654 {
655 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
656 	struct drm_device *dev = crtc->base.dev;
657 	struct dpll clock;
658 	unsigned int bestppm = 1000000;
659 	/* min update 19.2 MHz */
660 	int max_n = min(limit->n.max, refclk / 19200);
661 	bool found = false;
662 
663 	memset(best_clock, 0, sizeof(*best_clock));
664 
665 	/* based on hardware requirement, prefer smaller n to precision */
666 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
667 		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
668 			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
669 			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
670 				clock.p = clock.p1 * clock.p2 * 5;
671 				/* based on hardware requirement, prefer bigger m1,m2 values */
672 				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
673 					unsigned int ppm;
674 
675 					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
676 								     refclk * clock.m1);
677 
678 					vlv_calc_dpll_params(refclk, &clock);
679 
680 					if (!intel_pll_is_valid(to_i915(dev),
681 								limit,
682 								&clock))
683 						continue;
684 
685 					if (!vlv_PLL_is_optimal(dev, target,
686 								&clock,
687 								best_clock,
688 								bestppm, &ppm))
689 						continue;
690 
691 					*best_clock = clock;
692 					bestppm = ppm;
693 					found = true;
694 				}
695 			}
696 		}
697 	}
698 
699 	return found;
700 }
701 
702 /*
703  * Returns a set of divisors for the desired target clock with the given
704  * refclk, or FALSE.
705  */
706 static bool
707 chv_find_best_dpll(const struct intel_limit *limit,
708 		   struct intel_crtc_state *crtc_state,
709 		   int target, int refclk,
710 		   const struct dpll *match_clock,
711 		   struct dpll *best_clock)
712 {
713 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
714 	struct drm_device *dev = crtc->base.dev;
715 	unsigned int best_error_ppm;
716 	struct dpll clock;
717 	u64 m2;
	bool found = false;
719 
720 	memset(best_clock, 0, sizeof(*best_clock));
721 	best_error_ppm = 1000000;
722 
723 	/*
724 	 * Based on hardware doc, the n always set to 1, and m1 always
725 	 * set to 2.  If requires to support 200Mhz refclk, we need to
726 	 * revisit this because n may not 1 anymore.
727 	 */
728 	clock.n = 1;
729 	clock.m1 = 2;
730 
731 	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
732 		for (clock.p2 = limit->p2.p2_fast;
733 				clock.p2 >= limit->p2.p2_slow;
734 				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
735 			unsigned int error_ppm;
736 
737 			clock.p = clock.p1 * clock.p2 * 5;
738 
739 			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
740 						   refclk * clock.m1);
741 
742 			if (m2 > INT_MAX/clock.m1)
743 				continue;
744 
745 			clock.m2 = m2;
746 
747 			chv_calc_dpll_params(refclk, &clock);
748 
749 			if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
750 				continue;
751 
752 			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
753 						best_error_ppm, &error_ppm))
754 				continue;
755 
756 			*best_clock = clock;
757 			best_error_ppm = error_ppm;
758 			found = true;
759 		}
760 	}
761 
762 	return found;
763 }
764 
765 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
766 			struct dpll *best_clock)
767 {
768 	const struct intel_limit *limit = &intel_limits_bxt;
769 	int refclk = 100000;
770 
771 	return chv_find_best_dpll(limit, crtc_state,
772 				  crtc_state->port_clock, refclk,
773 				  NULL, best_clock);
774 }
775 
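/*
 * Pack the divisors into the FP0/FP1 divisor register layout (consumed by
 * i9xx_update_pll_dividers() and ilk_update_pll_dividers()): N above bit 16,
 * M1 above bit 8 and M2 in the low bits. The Pineview variant below instead
 * stores 2^N for its ring-counter N and has only the single (wider) M2
 * divider.
 */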
776 u32 i9xx_dpll_compute_fp(const struct dpll *dpll)
777 {
778 	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
779 }
780 
781 static u32 pnv_dpll_compute_fp(const struct dpll *dpll)
782 {
783 	return (1 << dpll->n) << 16 | dpll->m2;
784 }
785 
786 static void i9xx_update_pll_dividers(struct intel_crtc_state *crtc_state,
787 				     const struct dpll *clock,
788 				     const struct dpll *reduced_clock)
789 {
790 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
791 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
792 	u32 fp, fp2;
793 
794 	if (IS_PINEVIEW(dev_priv)) {
795 		fp = pnv_dpll_compute_fp(clock);
796 		fp2 = pnv_dpll_compute_fp(reduced_clock);
797 	} else {
798 		fp = i9xx_dpll_compute_fp(clock);
799 		fp2 = i9xx_dpll_compute_fp(reduced_clock);
800 	}
801 
802 	crtc_state->dpll_hw_state.fp0 = fp;
803 	crtc_state->dpll_hw_state.fp1 = fp2;
804 }
805 
806 static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
807 			      const struct dpll *clock,
808 			      const struct dpll *reduced_clock)
809 {
810 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
811 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
812 	u32 dpll;
813 
814 	i9xx_update_pll_dividers(crtc_state, clock, reduced_clock);
815 
816 	dpll = DPLL_VGA_MODE_DIS;
817 
818 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
819 		dpll |= DPLLB_MODE_LVDS;
820 	else
821 		dpll |= DPLLB_MODE_DAC_SERIAL;
822 
823 	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
824 	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
825 		dpll |= (crtc_state->pixel_multiplier - 1)
826 			<< SDVO_MULTIPLIER_SHIFT_HIRES;
827 	}
828 
829 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
830 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
831 		dpll |= DPLL_SDVO_HIGH_SPEED;
832 
833 	if (intel_crtc_has_dp_encoder(crtc_state))
834 		dpll |= DPLL_SDVO_HIGH_SPEED;
835 
836 	/* compute bitmask from p1 value */
837 	if (IS_G4X(dev_priv)) {
838 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
839 		dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
840 	} else if (IS_PINEVIEW(dev_priv)) {
841 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
842 		WARN_ON(reduced_clock->p1 != clock->p1);
843 	} else {
844 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
845 		WARN_ON(reduced_clock->p1 != clock->p1);
846 	}
847 
848 	switch (clock->p2) {
849 	case 5:
850 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
851 		break;
852 	case 7:
853 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
854 		break;
855 	case 10:
856 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
857 		break;
858 	case 14:
859 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
860 		break;
861 	}
862 	WARN_ON(reduced_clock->p2 != clock->p2);
863 
864 	if (DISPLAY_VER(dev_priv) >= 4)
865 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
866 
867 	if (crtc_state->sdvo_tv_clock)
868 		dpll |= PLL_REF_INPUT_TVCLKINBC;
869 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
870 		 intel_panel_use_ssc(dev_priv))
871 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
872 	else
873 		dpll |= PLL_REF_INPUT_DREFCLK;
874 
875 	dpll |= DPLL_VCO_ENABLE;
876 	crtc_state->dpll_hw_state.dpll = dpll;
877 
878 	if (DISPLAY_VER(dev_priv) >= 4) {
879 		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
880 			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
881 		crtc_state->dpll_hw_state.dpll_md = dpll_md;
882 	}
883 }
884 
885 static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
886 			      const struct dpll *clock,
887 			      const struct dpll *reduced_clock)
888 {
889 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
890 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
891 	u32 dpll;
892 
893 	i9xx_update_pll_dividers(crtc_state, clock, reduced_clock);
894 
895 	dpll = DPLL_VGA_MODE_DIS;
896 
897 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
898 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
899 	} else {
900 		if (clock->p1 == 2)
901 			dpll |= PLL_P1_DIVIDE_BY_TWO;
902 		else
903 			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
904 		if (clock->p2 == 4)
905 			dpll |= PLL_P2_DIVIDE_BY_4;
906 	}
907 	WARN_ON(reduced_clock->p1 != clock->p1);
908 	WARN_ON(reduced_clock->p2 != clock->p2);
909 
910 	/*
911 	 * Bspec:
912 	 * "[Almador Errata}: For the correct operation of the muxed DVO pins
913 	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
914 	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
915 	 *  Enable) must be set to “1” in both the DPLL A Control Register
916 	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
917 	 *
	 * For simplicity we keep both bits always enabled in both DPLLs.
	 * The spec says we should disable the DVO 2X clock when not needed,
	 * but this seems to work fine in practice.
921 	 */
922 	if (IS_I830(dev_priv) ||
923 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
924 		dpll |= DPLL_DVO_2X_MODE;
925 
926 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
927 	    intel_panel_use_ssc(dev_priv))
928 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
929 	else
930 		dpll |= PLL_REF_INPUT_DREFCLK;
931 
932 	dpll |= DPLL_VCO_ENABLE;
933 	crtc_state->dpll_hw_state.dpll = dpll;
934 }
935 
936 static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
937 				  struct intel_crtc *crtc)
938 {
939 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
940 	struct intel_crtc_state *crtc_state =
941 		intel_atomic_get_new_crtc_state(state, crtc);
942 	struct intel_encoder *encoder =
943 		intel_get_crtc_new_encoder(state, crtc_state);
944 	int ret;
945 
946 	if (DISPLAY_VER(dev_priv) < 11 &&
947 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
948 		return 0;
949 
950 	ret = intel_compute_shared_dplls(state, crtc, encoder);
951 	if (ret)
952 		return ret;
953 
954 	/* FIXME this is a mess */
955 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
956 		return 0;
957 
958 	/* CRT dotclock is determined via other means */
959 	if (!crtc_state->has_pch_encoder)
960 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
961 
962 	return 0;
963 }
964 
965 static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
966 				    struct intel_crtc *crtc)
967 {
968 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
969 	struct intel_crtc_state *crtc_state =
970 		intel_atomic_get_new_crtc_state(state, crtc);
971 	struct intel_encoder *encoder =
972 		intel_get_crtc_new_encoder(state, crtc_state);
973 
974 	if (DISPLAY_VER(dev_priv) < 11 &&
975 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
976 		return 0;
977 
978 	return intel_reserve_shared_dplls(state, crtc, encoder);
979 }
980 
981 static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
982 				  struct intel_crtc *crtc)
983 {
984 	struct intel_crtc_state *crtc_state =
985 		intel_atomic_get_new_crtc_state(state, crtc);
986 	struct intel_encoder *encoder =
987 		intel_get_crtc_new_encoder(state, crtc_state);
988 	int ret;
989 
990 	ret = intel_mpllb_calc_state(crtc_state, encoder);
991 	if (ret)
992 		return ret;
993 
994 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
995 
996 	return 0;
997 }
998 
999 static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
1000 				  struct intel_crtc *crtc)
1001 {
1002 	struct drm_i915_private *i915 = to_i915(state->base.dev);
1003 	struct intel_crtc_state *crtc_state =
1004 		intel_atomic_get_new_crtc_state(state, crtc);
1005 	struct intel_encoder *encoder =
1006 		intel_get_crtc_new_encoder(state, crtc_state);
1007 	enum phy phy = intel_port_to_phy(i915, encoder->port);
1008 	int ret;
1009 
1010 	ret = intel_cx0pll_calc_state(crtc_state, encoder);
1011 	if (ret)
1012 		return ret;
1013 
1014 	/* TODO: Do the readback via intel_compute_shared_dplls() */
1015 	if (intel_is_c10phy(i915, phy))
1016 		crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10);
1017 	else
1018 		crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20);
1019 
1020 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1021 
1022 	return 0;
1023 }
1024 
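/*
 * FP_CB_TUNE is needed when the feedback divider ratio m/n falls below the
 * platform factor chosen in ilk_update_pll_dividers(). E.g. (illustrative)
 * with the default factor of 21, n = 4 and m = 79 gives 79 < 84 and so sets
 * the bit, whereas n = 2 and m = 79 does not.
 */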
1025 static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
1026 {
1027 	return dpll->m < factor * dpll->n;
1028 }
1029 
1030 static void ilk_update_pll_dividers(struct intel_crtc_state *crtc_state,
1031 				    const struct dpll *clock,
1032 				    const struct dpll *reduced_clock)
1033 {
1034 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1035 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1036 	u32 fp, fp2;
1037 	int factor;
1038 
1039 	/* Enable autotuning of the PLL clock (if permissible) */
1040 	factor = 21;
1041 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1042 		if ((intel_panel_use_ssc(dev_priv) &&
1043 		     dev_priv->display.vbt.lvds_ssc_freq == 100000) ||
1044 		    (HAS_PCH_IBX(dev_priv) &&
1045 		     intel_is_dual_link_lvds(dev_priv)))
1046 			factor = 25;
1047 	} else if (crtc_state->sdvo_tv_clock) {
1048 		factor = 20;
1049 	}
1050 
1051 	fp = i9xx_dpll_compute_fp(clock);
1052 	if (ilk_needs_fb_cb_tune(clock, factor))
1053 		fp |= FP_CB_TUNE;
1054 
1055 	fp2 = i9xx_dpll_compute_fp(reduced_clock);
1056 	if (ilk_needs_fb_cb_tune(reduced_clock, factor))
1057 		fp2 |= FP_CB_TUNE;
1058 
1059 	crtc_state->dpll_hw_state.fp0 = fp;
1060 	crtc_state->dpll_hw_state.fp1 = fp2;
1061 }
1062 
1063 static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
1064 			     const struct dpll *clock,
1065 			     const struct dpll *reduced_clock)
1066 {
1067 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1068 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1069 	u32 dpll;
1070 
1071 	ilk_update_pll_dividers(crtc_state, clock, reduced_clock);
1072 
1073 	dpll = 0;
1074 
1075 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
1076 		dpll |= DPLLB_MODE_LVDS;
1077 	else
1078 		dpll |= DPLLB_MODE_DAC_SERIAL;
1079 
1080 	dpll |= (crtc_state->pixel_multiplier - 1)
1081 		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
1082 
1083 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
1084 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1085 		dpll |= DPLL_SDVO_HIGH_SPEED;
1086 
1087 	if (intel_crtc_has_dp_encoder(crtc_state))
1088 		dpll |= DPLL_SDVO_HIGH_SPEED;
1089 
1090 	/*
1091 	 * The high speed IO clock is only really required for
1092 	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
1093 	 * possible to share the DPLL between CRT and HDMI. Enabling
1094 	 * the clock needlessly does no real harm, except use up a
1095 	 * bit of power potentially.
1096 	 *
1097 	 * We'll limit this to IVB with 3 pipes, since it has only two
1098 	 * DPLLs and so DPLL sharing is the only way to get three pipes
1099 	 * driving PCH ports at the same time. On SNB we could do this,
1100 	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or a loss power wise. No point in doing
1102 	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
1103 	 */
1104 	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
1105 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1106 		dpll |= DPLL_SDVO_HIGH_SPEED;
1107 
1108 	/* compute bitmask from p1 value */
1109 	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1110 	/* also FPA1 */
1111 	dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1112 
1113 	switch (clock->p2) {
1114 	case 5:
1115 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
1116 		break;
1117 	case 7:
1118 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
1119 		break;
1120 	case 10:
1121 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
1122 		break;
1123 	case 14:
1124 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1125 		break;
1126 	}
1127 	WARN_ON(reduced_clock->p2 != clock->p2);
1128 
1129 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
1130 	    intel_panel_use_ssc(dev_priv))
1131 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
1132 	else
1133 		dpll |= PLL_REF_INPUT_DREFCLK;
1134 
1135 	dpll |= DPLL_VCO_ENABLE;
1136 
1137 	crtc_state->dpll_hw_state.dpll = dpll;
1138 }
1139 
1140 static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
1141 				  struct intel_crtc *crtc)
1142 {
1143 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1144 	struct intel_crtc_state *crtc_state =
1145 		intel_atomic_get_new_crtc_state(state, crtc);
1146 	const struct intel_limit *limit;
1147 	int refclk = 120000;
1148 	int ret;
1149 
1150 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1151 	if (!crtc_state->has_pch_encoder)
1152 		return 0;
1153 
1154 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1155 		if (intel_panel_use_ssc(dev_priv)) {
1156 			drm_dbg_kms(&dev_priv->drm,
1157 				    "using SSC reference clock of %d kHz\n",
1158 				    dev_priv->display.vbt.lvds_ssc_freq);
1159 			refclk = dev_priv->display.vbt.lvds_ssc_freq;
1160 		}
1161 
1162 		if (intel_is_dual_link_lvds(dev_priv)) {
1163 			if (refclk == 100000)
1164 				limit = &ilk_limits_dual_lvds_100m;
1165 			else
1166 				limit = &ilk_limits_dual_lvds;
1167 		} else {
1168 			if (refclk == 100000)
1169 				limit = &ilk_limits_single_lvds_100m;
1170 			else
1171 				limit = &ilk_limits_single_lvds;
1172 		}
1173 	} else {
1174 		limit = &ilk_limits_dac;
1175 	}
1176 
1177 	if (!crtc_state->clock_set &&
1178 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1179 				refclk, NULL, &crtc_state->dpll))
1180 		return -EINVAL;
1181 
1182 	ilk_compute_dpll(crtc_state, &crtc_state->dpll,
1183 			 &crtc_state->dpll);
1184 
1185 	ret = intel_compute_shared_dplls(state, crtc, NULL);
1186 	if (ret)
1187 		return ret;
1188 
1189 	crtc_state->port_clock = crtc_state->dpll.dot;
1190 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1191 
1192 	return ret;
1193 }
1194 
1195 static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
1196 				    struct intel_crtc *crtc)
1197 {
1198 	struct intel_crtc_state *crtc_state =
1199 		intel_atomic_get_new_crtc_state(state, crtc);
1200 
1201 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
1202 	if (!crtc_state->has_pch_encoder)
1203 		return 0;
1204 
1205 	return intel_reserve_shared_dplls(state, crtc, NULL);
1206 }
1207 
1208 void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
1209 {
1210 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1211 
1212 	crtc_state->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
1213 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1214 	if (crtc->pipe != PIPE_A)
1215 		crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1216 
1217 	/* DPLL not used with DSI, but still need the rest set up */
1218 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1219 		crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
1220 			DPLL_EXT_BUFFER_ENABLE_VLV;
1221 
1222 	crtc_state->dpll_hw_state.dpll_md =
1223 		(crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
1224 }
1225 
1226 void chv_compute_dpll(struct intel_crtc_state *crtc_state)
1227 {
1228 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1229 
1230 	crtc_state->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
1231 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1232 	if (crtc->pipe != PIPE_A)
1233 		crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1234 
1235 	/* DPLL not used with DSI, but still need the rest set up */
1236 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1237 		crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
1238 
1239 	crtc_state->dpll_hw_state.dpll_md =
1240 		(crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
1241 }
1242 
1243 static int chv_crtc_compute_clock(struct intel_atomic_state *state,
1244 				  struct intel_crtc *crtc)
1245 {
1246 	struct intel_crtc_state *crtc_state =
1247 		intel_atomic_get_new_crtc_state(state, crtc);
1248 	const struct intel_limit *limit = &intel_limits_chv;
1249 	int refclk = 100000;
1250 
1251 	if (!crtc_state->clock_set &&
1252 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1253 				refclk, NULL, &crtc_state->dpll))
1254 		return -EINVAL;
1255 
1256 	chv_compute_dpll(crtc_state);
1257 
1258 	/* FIXME this is a mess */
1259 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1260 		return 0;
1261 
1262 	crtc_state->port_clock = crtc_state->dpll.dot;
1263 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1264 
1265 	return 0;
1266 }
1267 
1268 static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
1269 				  struct intel_crtc *crtc)
1270 {
1271 	struct intel_crtc_state *crtc_state =
1272 		intel_atomic_get_new_crtc_state(state, crtc);
1273 	const struct intel_limit *limit = &intel_limits_vlv;
1274 	int refclk = 100000;
1275 
1276 	if (!crtc_state->clock_set &&
1277 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1278 				refclk, NULL, &crtc_state->dpll)) {
1279 		return -EINVAL;
1280 	}
1281 
1282 	vlv_compute_dpll(crtc_state);
1283 
1284 	/* FIXME this is a mess */
1285 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
1286 		return 0;
1287 
1288 	crtc_state->port_clock = crtc_state->dpll.dot;
1289 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1290 
1291 	return 0;
1292 }
1293 
1294 static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
1295 				  struct intel_crtc *crtc)
1296 {
1297 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1298 	struct intel_crtc_state *crtc_state =
1299 		intel_atomic_get_new_crtc_state(state, crtc);
1300 	const struct intel_limit *limit;
1301 	int refclk = 96000;
1302 
1303 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1304 		if (intel_panel_use_ssc(dev_priv)) {
1305 			refclk = dev_priv->display.vbt.lvds_ssc_freq;
1306 			drm_dbg_kms(&dev_priv->drm,
1307 				    "using SSC reference clock of %d kHz\n",
1308 				    refclk);
1309 		}
1310 
1311 		if (intel_is_dual_link_lvds(dev_priv))
1312 			limit = &intel_limits_g4x_dual_channel_lvds;
1313 		else
1314 			limit = &intel_limits_g4x_single_channel_lvds;
1315 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
1316 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
1317 		limit = &intel_limits_g4x_hdmi;
1318 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
1319 		limit = &intel_limits_g4x_sdvo;
1320 	} else {
1321 		/* The option is for other outputs */
1322 		limit = &intel_limits_i9xx_sdvo;
1323 	}
1324 
1325 	if (!crtc_state->clock_set &&
1326 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1327 				refclk, NULL, &crtc_state->dpll))
1328 		return -EINVAL;
1329 
1330 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1331 			  &crtc_state->dpll);
1332 
1333 	crtc_state->port_clock = crtc_state->dpll.dot;
1334 	/* FIXME this is a mess */
1335 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1336 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1337 
1338 	return 0;
1339 }
1340 
1341 static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
1342 				  struct intel_crtc *crtc)
1343 {
1344 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1345 	struct intel_crtc_state *crtc_state =
1346 		intel_atomic_get_new_crtc_state(state, crtc);
1347 	const struct intel_limit *limit;
1348 	int refclk = 96000;
1349 
1350 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1351 		if (intel_panel_use_ssc(dev_priv)) {
1352 			refclk = dev_priv->display.vbt.lvds_ssc_freq;
1353 			drm_dbg_kms(&dev_priv->drm,
1354 				    "using SSC reference clock of %d kHz\n",
1355 				    refclk);
1356 		}
1357 
1358 		limit = &pnv_limits_lvds;
1359 	} else {
1360 		limit = &pnv_limits_sdvo;
1361 	}
1362 
1363 	if (!crtc_state->clock_set &&
1364 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1365 				refclk, NULL, &crtc_state->dpll))
1366 		return -EINVAL;
1367 
1368 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1369 			  &crtc_state->dpll);
1370 
1371 	crtc_state->port_clock = crtc_state->dpll.dot;
1372 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1373 
1374 	return 0;
1375 }
1376 
1377 static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
1378 				   struct intel_crtc *crtc)
1379 {
1380 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1381 	struct intel_crtc_state *crtc_state =
1382 		intel_atomic_get_new_crtc_state(state, crtc);
1383 	const struct intel_limit *limit;
1384 	int refclk = 96000;
1385 
1386 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1387 		if (intel_panel_use_ssc(dev_priv)) {
1388 			refclk = dev_priv->display.vbt.lvds_ssc_freq;
1389 			drm_dbg_kms(&dev_priv->drm,
1390 				    "using SSC reference clock of %d kHz\n",
1391 				    refclk);
1392 		}
1393 
1394 		limit = &intel_limits_i9xx_lvds;
1395 	} else {
1396 		limit = &intel_limits_i9xx_sdvo;
1397 	}
1398 
1399 	if (!crtc_state->clock_set &&
1400 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1401 				 refclk, NULL, &crtc_state->dpll))
1402 		return -EINVAL;
1403 
1404 	i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
1405 			  &crtc_state->dpll);
1406 
1407 	crtc_state->port_clock = crtc_state->dpll.dot;
1408 	/* FIXME this is a mess */
1409 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
1410 		crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1411 
1412 	return 0;
1413 }
1414 
1415 static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
1416 				   struct intel_crtc *crtc)
1417 {
1418 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1419 	struct intel_crtc_state *crtc_state =
1420 		intel_atomic_get_new_crtc_state(state, crtc);
1421 	const struct intel_limit *limit;
1422 	int refclk = 48000;
1423 
1424 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1425 		if (intel_panel_use_ssc(dev_priv)) {
1426 			refclk = dev_priv->display.vbt.lvds_ssc_freq;
1427 			drm_dbg_kms(&dev_priv->drm,
1428 				    "using SSC reference clock of %d kHz\n",
1429 				    refclk);
1430 		}
1431 
1432 		limit = &intel_limits_i8xx_lvds;
1433 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
1434 		limit = &intel_limits_i8xx_dvo;
1435 	} else {
1436 		limit = &intel_limits_i8xx_dac;
1437 	}
1438 
1439 	if (!crtc_state->clock_set &&
1440 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
1441 				 refclk, NULL, &crtc_state->dpll))
1442 		return -EINVAL;
1443 
1444 	i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
1445 			  &crtc_state->dpll);
1446 
1447 	crtc_state->port_clock = crtc_state->dpll.dot;
1448 	crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
1449 
1450 	return 0;
1451 }
1452 
1453 static const struct intel_dpll_funcs mtl_dpll_funcs = {
1454 	.crtc_compute_clock = mtl_crtc_compute_clock,
1455 };
1456 
1457 static const struct intel_dpll_funcs dg2_dpll_funcs = {
1458 	.crtc_compute_clock = dg2_crtc_compute_clock,
1459 };
1460 
1461 static const struct intel_dpll_funcs hsw_dpll_funcs = {
1462 	.crtc_compute_clock = hsw_crtc_compute_clock,
1463 	.crtc_get_shared_dpll = hsw_crtc_get_shared_dpll,
1464 };
1465 
1466 static const struct intel_dpll_funcs ilk_dpll_funcs = {
1467 	.crtc_compute_clock = ilk_crtc_compute_clock,
1468 	.crtc_get_shared_dpll = ilk_crtc_get_shared_dpll,
1469 };
1470 
1471 static const struct intel_dpll_funcs chv_dpll_funcs = {
1472 	.crtc_compute_clock = chv_crtc_compute_clock,
1473 };
1474 
1475 static const struct intel_dpll_funcs vlv_dpll_funcs = {
1476 	.crtc_compute_clock = vlv_crtc_compute_clock,
1477 };
1478 
1479 static const struct intel_dpll_funcs g4x_dpll_funcs = {
1480 	.crtc_compute_clock = g4x_crtc_compute_clock,
1481 };
1482 
1483 static const struct intel_dpll_funcs pnv_dpll_funcs = {
1484 	.crtc_compute_clock = pnv_crtc_compute_clock,
1485 };
1486 
1487 static const struct intel_dpll_funcs i9xx_dpll_funcs = {
1488 	.crtc_compute_clock = i9xx_crtc_compute_clock,
1489 };
1490 
1491 static const struct intel_dpll_funcs i8xx_dpll_funcs = {
1492 	.crtc_compute_clock = i8xx_crtc_compute_clock,
1493 };
1494 
1495 int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
1496 				  struct intel_crtc *crtc)
1497 {
1498 	struct drm_i915_private *i915 = to_i915(state->base.dev);
1499 	struct intel_crtc_state *crtc_state =
1500 		intel_atomic_get_new_crtc_state(state, crtc);
1501 	int ret;
1502 
1503 	drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
1504 
1505 	memset(&crtc_state->dpll_hw_state, 0,
1506 	       sizeof(crtc_state->dpll_hw_state));
1507 
1508 	if (!crtc_state->hw.enable)
1509 		return 0;
1510 
1511 	ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc);
1512 	if (ret) {
1513 		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
1514 			    crtc->base.base.id, crtc->base.name);
1515 		return ret;
1516 	}
1517 
1518 	return 0;
1519 }
1520 
1521 int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
1522 				    struct intel_crtc *crtc)
1523 {
1524 	struct drm_i915_private *i915 = to_i915(state->base.dev);
1525 	struct intel_crtc_state *crtc_state =
1526 		intel_atomic_get_new_crtc_state(state, crtc);
1527 	int ret;
1528 
1529 	drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
1530 	drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
1531 
1532 	if (!crtc_state->hw.enable || crtc_state->shared_dpll)
1533 		return 0;
1534 
1535 	if (!i915->display.funcs.dpll->crtc_get_shared_dpll)
1536 		return 0;
1537 
1538 	ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc);
1539 	if (ret) {
1540 		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
1541 			    crtc->base.base.id, crtc->base.name);
1542 		return ret;
1543 	}
1544 
1545 	return 0;
1546 }
1547 
1548 void
1549 intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
1550 {
1551 	if (DISPLAY_VER(dev_priv) >= 14)
1552 		dev_priv->display.funcs.dpll = &mtl_dpll_funcs;
1553 	else if (IS_DG2(dev_priv))
1554 		dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
1555 	else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
1556 		dev_priv->display.funcs.dpll = &hsw_dpll_funcs;
1557 	else if (HAS_PCH_SPLIT(dev_priv))
1558 		dev_priv->display.funcs.dpll = &ilk_dpll_funcs;
1559 	else if (IS_CHERRYVIEW(dev_priv))
1560 		dev_priv->display.funcs.dpll = &chv_dpll_funcs;
1561 	else if (IS_VALLEYVIEW(dev_priv))
1562 		dev_priv->display.funcs.dpll = &vlv_dpll_funcs;
1563 	else if (IS_G4X(dev_priv))
1564 		dev_priv->display.funcs.dpll = &g4x_dpll_funcs;
1565 	else if (IS_PINEVIEW(dev_priv))
1566 		dev_priv->display.funcs.dpll = &pnv_dpll_funcs;
1567 	else if (DISPLAY_VER(dev_priv) != 2)
1568 		dev_priv->display.funcs.dpll = &i9xx_dpll_funcs;
1569 	else
1570 		dev_priv->display.funcs.dpll = &i8xx_dpll_funcs;
1571 }
1572 
1573 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1574 {
1575 	if (IS_I830(dev_priv))
1576 		return false;
1577 
1578 	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1579 }
1580 
1581 void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
1582 {
1583 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1584 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1585 	u32 dpll = crtc_state->dpll_hw_state.dpll;
1586 	enum pipe pipe = crtc->pipe;
1587 	int i;
1588 
1589 	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
1590 
1591 	/* PLL is protected by panel, make sure we can write it */
1592 	if (i9xx_has_pps(dev_priv))
1593 		assert_pps_unlocked(dev_priv, pipe);
1594 
1595 	intel_de_write(dev_priv, FP0(pipe), crtc_state->dpll_hw_state.fp0);
1596 	intel_de_write(dev_priv, FP1(pipe), crtc_state->dpll_hw_state.fp1);
1597 
1598 	/*
1599 	 * Apparently we need to have VGA mode enabled prior to changing
1600 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1601 	 * dividers, even though the register value does change.
1602 	 */
1603 	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
1604 	intel_de_write(dev_priv, DPLL(pipe), dpll);
1605 
1606 	/* Wait for the clocks to stabilize. */
1607 	intel_de_posting_read(dev_priv, DPLL(pipe));
1608 	udelay(150);
1609 
1610 	if (DISPLAY_VER(dev_priv) >= 4) {
1611 		intel_de_write(dev_priv, DPLL_MD(pipe),
1612 			       crtc_state->dpll_hw_state.dpll_md);
1613 	} else {
1614 		/* The pixel multiplier can only be updated once the
1615 		 * DPLL is enabled and the clocks are stable.
1616 		 *
1617 		 * So write it again.
1618 		 */
1619 		intel_de_write(dev_priv, DPLL(pipe), dpll);
1620 	}
1621 
1622 	/* We do this three times for luck */
1623 	for (i = 0; i < 3; i++) {
1624 		intel_de_write(dev_priv, DPLL(pipe), dpll);
1625 		intel_de_posting_read(dev_priv, DPLL(pipe));
1626 		udelay(150); /* wait for warmup */
1627 	}
1628 }
1629 
1630 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
1631 				 enum pipe pipe)
1632 {
1633 	u32 reg_val;
1634 
1635 	/*
1636 	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
1637 	 * and set it to a reasonable value instead.
1638 	 */
1639 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
1640 	reg_val &= 0xffffff00;
1641 	reg_val |= 0x00000030;
1642 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
1643 
1644 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
1645 	reg_val &= 0x00ffffff;
1646 	reg_val |= 0x8c000000;
1647 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
1648 
1649 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
1650 	reg_val &= 0xffffff00;
1651 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
1652 
1653 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
1654 	reg_val &= 0x00ffffff;
1655 	reg_val |= 0xb0000000;
1656 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
1657 }
1658 
1659 static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
1660 {
1661 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1662 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1663 	enum pipe pipe = crtc->pipe;
1664 	u32 mdiv;
1665 	u32 bestn, bestm1, bestm2, bestp1, bestp2;
1666 	u32 coreclk, reg_val;
1667 
1668 	vlv_dpio_get(dev_priv);
1669 
1670 	bestn = crtc_state->dpll.n;
1671 	bestm1 = crtc_state->dpll.m1;
1672 	bestm2 = crtc_state->dpll.m2;
1673 	bestp1 = crtc_state->dpll.p1;
1674 	bestp2 = crtc_state->dpll.p2;
1675 
1676 	/* See eDP HDMI DPIO driver vbios notes doc */
1677 
1678 	/* PLL B needs special handling */
1679 	if (pipe == PIPE_B)
1680 		vlv_pllb_recal_opamp(dev_priv, pipe);
1681 
1682 	/* Set up Tx target for periodic Rcomp update */
1683 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
1684 
1685 	/* Disable target IRef on PLL */
1686 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
1687 	reg_val &= 0x00ffffff;
1688 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
1689 
1690 	/* Disable fast lock */
1691 	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
1692 
1693 	/* Set idtafcrecal before PLL is enabled */
1694 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
1695 	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
1696 	mdiv |= ((bestn << DPIO_N_SHIFT));
1697 	mdiv |= (1 << DPIO_K_SHIFT);
1698 
1699 	/*
1700 	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
1701 	 * but we don't support that).
1702 	 * Note: don't use the DAC post divider as it seems unstable.
1703 	 */
1704 	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
1705 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
1706 
1707 	mdiv |= DPIO_ENABLE_CALIBRATION;
1708 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
1709 
1710 	/* Set HBR and RBR LPF coefficients */
1711 	if (crtc_state->port_clock == 162000 ||
1712 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
1713 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1714 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
1715 				 0x009f0003);
1716 	else
1717 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
1718 				 0x00d0000f);
1719 
1720 	if (intel_crtc_has_dp_encoder(crtc_state)) {
1721 		/* Use SSC source */
1722 		if (pipe == PIPE_A)
1723 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
1724 					 0x0df40000);
1725 		else
1726 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
1727 					 0x0df70000);
1728 	} else { /* HDMI or VGA */
1729 		/* Use bend source */
1730 		if (pipe == PIPE_A)
1731 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
1732 					 0x0df70000);
1733 		else
1734 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
1735 					 0x0df40000);
1736 	}
1737 
1738 	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
1739 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
1740 	if (intel_crtc_has_dp_encoder(crtc_state))
1741 		coreclk |= 0x01000000;
1742 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
1743 
1744 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
1745 
1746 	vlv_dpio_put(dev_priv);
1747 }
1748 
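/* Write the DPLL control register and wait for the PLL to report lock. */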
1749 static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
1750 {
1751 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1752 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1753 	enum pipe pipe = crtc->pipe;
1754 
1755 	intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll);
1756 	intel_de_posting_read(dev_priv, DPLL(pipe));
1757 	udelay(150);
1758 
1759 	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
1760 		drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
1761 }
1762 
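/*
 * Enable the DPLL for a VLV pipe: enable the reference clock, program the
 * DPIO PHY and lock the PLL when the VCO is to be enabled, and finally
 * write DPLL_MD (pixel multiplier).
 */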
1763 void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
1764 {
1765 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1766 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1767 	enum pipe pipe = crtc->pipe;
1768 
1769 	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
1770 
	/* PLL is protected by the panel power sequencer, make sure we can write it */
1772 	assert_pps_unlocked(dev_priv, pipe);
1773 
1774 	/* Enable Refclk */
1775 	intel_de_write(dev_priv, DPLL(pipe),
1776 		       crtc_state->dpll_hw_state.dpll &
1777 		       ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
1778 
1779 	if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) {
1780 		vlv_prepare_pll(crtc_state);
1781 		_vlv_enable_pll(crtc_state);
1782 	}
1783 
1784 	intel_de_write(dev_priv, DPLL_MD(pipe),
1785 		       crtc_state->dpll_hw_state.dpll_md);
1786 	intel_de_posting_read(dev_priv, DPLL_MD(pipe));
1787 }
1788 
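/*
 * Program the CHV DPIO PHY for this pipe's PLL: P/N/M dividers (including
 * the fractional M2), lock detect threshold, VCO dependent loop filter
 * coefficients and AFC recalibration.
 */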
1789 static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
1790 {
1791 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1792 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1793 	enum pipe pipe = crtc->pipe;
1794 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1795 	u32 loopfilter, tribuf_calcntr;
1796 	u32 bestm2, bestp1, bestp2, bestm2_frac;
1797 	u32 dpio_val;
1798 	int vco;
1799 
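	/*
	 * m2 is a fixed point value: the integer part lives above bit 22,
	 * the fractional part in the low 22 bits.
	 */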
1800 	bestm2_frac = crtc_state->dpll.m2 & 0x3fffff;
1801 	bestm2 = crtc_state->dpll.m2 >> 22;
1802 	bestp1 = crtc_state->dpll.p1;
1803 	bestp2 = crtc_state->dpll.p2;
1804 	vco = crtc_state->dpll.vco;
1805 	dpio_val = 0;
1806 	loopfilter = 0;
1807 
1808 	vlv_dpio_get(dev_priv);
1809 
1810 	/* p1 and p2 divider */
1811 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
1812 			5 << DPIO_CHV_S1_DIV_SHIFT |
1813 			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
1814 			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
1815 			1 << DPIO_CHV_K_DIV_SHIFT);
1816 
1817 	/* Feedback post-divider - m2 */
1818 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
1819 
1820 	/* Feedback refclk divider - n and m1 */
1821 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
1822 			DPIO_CHV_M1_DIV_BY_2 |
1823 			1 << DPIO_CHV_N_DIV_SHIFT);
1824 
1825 	/* M2 fraction division */
1826 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
1827 
1828 	/* M2 fraction division enable */
1829 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
1830 	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
1831 	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
1832 	if (bestm2_frac)
1833 		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
1834 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
1835 
1836 	/* Program digital lock detect threshold */
1837 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
1838 	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
1839 					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
1840 	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
1841 	if (!bestm2_frac)
1842 		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
1843 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
1844 
1845 	/* Loop filter */
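	/* Coefficients and the tribuf target count depend on the VCO frequency */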
1846 	if (vco == 5400000) {
1847 		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
1848 		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
1849 		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
1850 		tribuf_calcntr = 0x9;
1851 	} else if (vco <= 6200000) {
1852 		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
1853 		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
1854 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
1855 		tribuf_calcntr = 0x9;
1856 	} else if (vco <= 6480000) {
1857 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
1858 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
1859 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
1860 		tribuf_calcntr = 0x8;
1861 	} else {
1862 		/* Not supported. Apply the same limits as in the max case */
1863 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
1864 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
1865 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
1866 		tribuf_calcntr = 0;
1867 	}
1868 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
1869 
1870 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
1871 	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
1872 	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
1873 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
1874 
1875 	/* AFC Recal */
1876 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
1877 			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
1878 			DPIO_AFC_RECAL);
1879 
1880 	vlv_dpio_put(dev_priv);
1881 }
1882 
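/*
 * Enable the 10bit clock to the display controller, then enable the PLL
 * itself and wait for it to lock.
 */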
1883 static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
1884 {
1885 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1886 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1887 	enum pipe pipe = crtc->pipe;
1888 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1889 	u32 tmp;
1890 
1891 	vlv_dpio_get(dev_priv);
1892 
	/* Re-enable the 10bit clock to the display controller */
1894 	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1895 	tmp |= DPIO_DCLKP_EN;
1896 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1897 
1898 	vlv_dpio_put(dev_priv);
1899 
	/*
	 * We need to wait at least 100ns between setting the dclkp clock
	 * enable bit and enabling the PLL; udelay(1) gives ample margin.
	 */
1903 	udelay(1);
1904 
1905 	/* Enable PLL */
1906 	intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll);
1907 
1908 	/* Check PLL is locked */
1909 	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
1910 		drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
1911 }
1912 
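/*
 * Enable the DPLL for a CHV pipe. For pipes B and C the DPLL_MD value is
 * propagated from DPLLBMD via the CBR4 chicken bits (see
 * WaPixelRepeatModeFixForC0 below), since DPLLCMD is not usable.
 */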
1913 void chv_enable_pll(const struct intel_crtc_state *crtc_state)
1914 {
1915 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1916 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1917 	enum pipe pipe = crtc->pipe;
1918 
1919 	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
1920 
	/* PLL is protected by the panel power sequencer, make sure we can write it */
1922 	assert_pps_unlocked(dev_priv, pipe);
1923 
1924 	/* Enable Refclk and SSC */
1925 	intel_de_write(dev_priv, DPLL(pipe),
1926 		       crtc_state->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
1927 
1928 	if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) {
1929 		chv_prepare_pll(crtc_state);
1930 		_chv_enable_pll(crtc_state);
1931 	}
1932 
1933 	if (pipe != PIPE_A) {
1934 		/*
1935 		 * WaPixelRepeatModeFixForC0:chv
1936 		 *
1937 		 * DPLLCMD is AWOL. Use chicken bits to propagate
1938 		 * the value from DPLLBMD to either pipe B or C.
1939 		 */
1940 		intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
1941 		intel_de_write(dev_priv, DPLL_MD(PIPE_B),
1942 			       crtc_state->dpll_hw_state.dpll_md);
1943 		intel_de_write(dev_priv, CBR4_VLV, 0);
1944 		dev_priv->display.state.chv_dpll_md[pipe] = crtc_state->dpll_hw_state.dpll_md;
1945 
1946 		/*
1947 		 * DPLLB VGA mode also seems to cause problems.
1948 		 * We should always have it disabled.
1949 		 */
1950 		drm_WARN_ON(&dev_priv->drm,
1951 			    (intel_de_read(dev_priv, DPLL(PIPE_B)) &
1952 			     DPLL_VGA_MODE_DIS) == 0);
1953 	} else {
1954 		intel_de_write(dev_priv, DPLL_MD(pipe),
1955 			       crtc_state->dpll_hw_state.dpll_md);
1956 		intel_de_posting_read(dev_priv, DPLL_MD(pipe));
1957 	}
1958 }
1959 
1960 /**
1961  * vlv_force_pll_on - forcibly enable just the PLL
1962  * @dev_priv: i915 private structure
1963  * @pipe: pipe PLL to enable
1964  * @dpll: PLL configuration
1965  *
1966  * Enable the PLL for @pipe using the supplied @dpll config. To be used
1967  * in cases where we need the PLL enabled even when @pipe is not going to
1968  * be enabled.
1969  */
1970 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
1971 		     const struct dpll *dpll)
1972 {
1973 	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
1974 	struct intel_crtc_state *crtc_state;
1975 
1976 	crtc_state = intel_crtc_state_alloc(crtc);
1977 	if (!crtc_state)
1978 		return -ENOMEM;
1979 
1980 	crtc_state->cpu_transcoder = (enum transcoder)pipe;
1981 	crtc_state->pixel_multiplier = 1;
1982 	crtc_state->dpll = *dpll;
1983 	crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);
1984 
1985 	if (IS_CHERRYVIEW(dev_priv)) {
1986 		chv_compute_dpll(crtc_state);
1987 		chv_enable_pll(crtc_state);
1988 	} else {
1989 		vlv_compute_dpll(crtc_state);
1990 		vlv_enable_pll(crtc_state);
1991 	}
1992 
1993 	kfree(crtc_state);
1994 
1995 	return 0;
1996 }
1997 
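/*
 * Disable the PLL for a VLV pipe, keeping the reference clock (and, for
 * pipes other than A, the CRI clock) running.
 */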
1998 void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1999 {
2000 	u32 val;
2001 
2002 	/* Make sure the pipe isn't still relying on us */
2003 	assert_transcoder_disabled(dev_priv, (enum transcoder)pipe);
2004 
2005 	val = DPLL_INTEGRATED_REF_CLK_VLV |
2006 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
2007 	if (pipe != PIPE_A)
2008 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
2009 
2010 	intel_de_write(dev_priv, DPLL(pipe), val);
2011 	intel_de_posting_read(dev_priv, DPLL(pipe));
2012 }
2013 
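/*
 * Disable the PLL for a CHV pipe: shut down the VCO while keeping the SSC
 * reference clock, then gate the 10bit clock to the display controller in
 * the DPIO PHY.
 */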
2014 void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
2015 {
2016 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
2017 	u32 val;
2018 
2019 	/* Make sure the pipe isn't still relying on us */
2020 	assert_transcoder_disabled(dev_priv, (enum transcoder)pipe);
2021 
2022 	val = DPLL_SSC_REF_CLK_CHV |
2023 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
2024 	if (pipe != PIPE_A)
2025 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
2026 
2027 	intel_de_write(dev_priv, DPLL(pipe), val);
2028 	intel_de_posting_read(dev_priv, DPLL(pipe));
2029 
2030 	vlv_dpio_get(dev_priv);
2031 
	/* Disable the 10bit clock to the display controller */
2033 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
2034 	val &= ~DPIO_DCLKP_EN;
2035 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
2036 
2037 	vlv_dpio_put(dev_priv);
2038 }
2039 
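/* Disable the DPLL for a pre-ILK pipe, leaving only DPLL_VGA_MODE_DIS set. */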
2040 void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
2041 {
2042 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2043 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2044 	enum pipe pipe = crtc->pipe;
2045 
	/*
	 * The i830 apparently needs its pipes and pipe PLLs left enabled,
	 * so never disable them.
	 */
2047 	if (IS_I830(dev_priv))
2048 		return;
2049 
2050 	/* Make sure the pipe isn't still relying on us */
2051 	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
2052 
2053 	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
2054 	intel_de_posting_read(dev_priv, DPLL(pipe));
2055 }
2056 
2058 /**
2059  * vlv_force_pll_off - forcibly disable just the PLL
2060  * @dev_priv: i915 private structure
2061  * @pipe: pipe PLL to disable
2062  *
 * Disable the PLL for @pipe. To be used in cases where the PLL was
 * forcibly enabled, e.g. via vlv_force_pll_on(), even though @pipe itself
 * was not going to be enabled.
2065  */
2066 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
2067 {
2068 	if (IS_CHERRYVIEW(dev_priv))
2069 		chv_disable_pll(dev_priv, pipe);
2070 	else
2071 		vlv_disable_pll(dev_priv, pipe);
2072 }
2073 
2074 /* Only for pre-ILK configs */
2075 static void assert_pll(struct drm_i915_private *dev_priv,
2076 		       enum pipe pipe, bool state)
2077 {
2078 	bool cur_state;
2079 
2080 	cur_state = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
2081 	I915_STATE_WARN(dev_priv, cur_state != state,
2082 			"PLL state assertion failure (expected %s, current %s)\n",
2083 			str_on_off(state), str_on_off(cur_state));
2084 }
2085 
2086 void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
2087 {
2088 	assert_pll(i915, pipe, true);
2089 }
2090 
2091 void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
2092 {
2093 	assert_pll(i915, pipe, false);
2094 }
2095