// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_panel.h"
#include "intel_pch_refclk.h"
#include "intel_sbi.h"

static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
{
	intel_de_rmw(dev_priv, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);

	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

	intel_de_rmw(dev_priv, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);

	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}

/* WaMPhyProgramming:hsw */
static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	lpt_fdi_reset_mphy(dev_priv);

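	/*
	 * The register offsets/values below are the mPHY tuning sequence
	 * from the WaMPhyProgramming:hsw workaround; each pair of writes
	 * (0x2xxx and the matching 0x21xx offset) presumably programs the
	 * same field on the two mPHY channels.
	 */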
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}

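/* Gate the pixel clock and disable the iCLKIP SSC modulator. */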
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

struct iclkip_params {
	u32 iclk_virtual_root_freq;
	u32 iclk_pi_range;
	u32 divsel, phaseinc, auxdiv, phasedir, desired_divisor;
};

static void iclkip_params_init(struct iclkip_params *p)
{
	memset(p, 0, sizeof(*p));

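	/*
	 * The virtual root clock is 172800 MHz, stored in kHz so it can be
	 * divided directly by crtc_clock (also in kHz); the phase
	 * interpolator provides 64 steps per integer divider step.
	 */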
	p->iclk_virtual_root_freq = 172800 * 1000;
	p->iclk_pi_range = 64;
}

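/* Resulting iCLKIP output frequency in kHz for the given divider settings. */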
static int lpt_iclkip_freq(struct iclkip_params *p)
{
	return DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq,
				 p->desired_divisor << p->auxdiv);
}

static void lpt_compute_iclkip(struct iclkip_params *p, int clock)
{
	iclkip_params_init(p);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but adjusted_mode->crtc_clock is in kHz. To get the
	 * divisors, one must be divided by the other, so we
	 * convert the virtual clock to kHz here for higher
	 * precision.
	 */
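	/*
	 * Try the auxiliary divider at /1, then /2, until the integer
	 * divisor fits in the 7-bit divsel field.
	 */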
	for (p->auxdiv = 0; p->auxdiv < 2; p->auxdiv++) {
		p->desired_divisor = DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq,
						       clock << p->auxdiv);
		p->divsel = (p->desired_divisor / p->iclk_pi_range) - 2;
		p->phaseinc = p->desired_divisor % p->iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (p->divsel <= 0x7f)
			break;
	}
}

int lpt_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct iclkip_params p;

	lpt_compute_iclkip(&p, crtc_state->hw.adjusted_mode.crtc_clock);

	return lpt_iclkip_freq(&p);
}

/* Program iCLKIP clock to the desired frequency */
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	struct iclkip_params p;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	lpt_compute_iclkip(&p, clock);
	drm_WARN_ON(&dev_priv->drm, lpt_iclkip_freq(&p) != clock);

	/* This should not happen with any sane values */
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(p.divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(p.phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(p.phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(p.auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}

int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	struct iclkip_params p;
	u32 temp;

	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	iclkip_params_init(&p);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	p.divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	p.phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	p.auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

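	/* Reconstruct the divisor programmed by lpt_compute_iclkip(). */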
	p.desired_divisor = (p.divsel + 2) * p.iclk_pi_range + p.phaseinc;

	return lpt_iclkip_freq(&p);
}

/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
		     with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

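	/*
	 * Enable the SSC block with the alternate (non-spread) clock path
	 * selected, then switch to the spread path if spread was requested.
	 */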
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi)
			lpt_fdi_program_mphy(dev_priv);
	}

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}

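/* Map a bend value in steps of 5, range [-50, 50], to an sscdivintphase[] index. */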
#define BEND_IDX(steps) ((50 + (steps)) / 5)

static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};

/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	u32 tmp;
	int idx = BEND_IDX(steps);

	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
		return;

	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

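	/*
	 * Consecutive table entries share a value; odd multiples of 5 steps
	 * presumably use the dither pattern to realize the intermediate
	 * half step.
	 */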
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX

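/* Check whether the SPLL is enabled and using the PCH SSC reference clock. */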
static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
{
	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);

	if ((ctl & SPLL_PLL_ENABLE) == 0)
		return false;

	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	if (IS_BROADWELL(dev_priv) &&
	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
		return true;

	return false;
}

static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
			       enum intel_dpll_id id)
{
	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));

	if ((ctl & WRPLL_PLL_ENABLE) == 0)
		return false;

	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
		return true;

	if ((IS_BROADWELL(dev_priv) || IS_HASWELL_ULT(dev_priv)) &&
	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	return false;
}

static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool has_fdi = false;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_fdi = true;
			break;
		default:
			break;
		}
	}

	/*
	 * The BIOS may have decided to use the PCH SSC
	 * reference so we must not disable it until the
	 * relevant PLLs have stopped relying on it. We'll
	 * just leave the PCH SSC reference enabled in case
	 * any active PLL is using it. It will get disabled
	 * after runtime suspend if we don't have FDI.
	 *
	 * TODO: Move the whole reference clock handling
	 * to the modeset sequence proper so that we can
	 * actually enable/disable/reconfigure these things
	 * safely. To do that we need to introduce a real
	 * clock hierarchy. That would also allow us to do
	 * clock bending finally.
	 */
	dev_priv->display.dpll.pch_ssc_use = 0;

	if (spll_uses_pch_ssc(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
		dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
		dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
		dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
	}

	if (dev_priv->display.dpll.pch_ssc_use)
		return;

	if (has_fdi) {
		lpt_bend_clkout_dp(dev_priv, 0);
		lpt_enable_clkout_dp(dev_priv, true, true);
	} else {
		lpt_disable_clkout_dp(dev_priv);
	}
}

static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->display.vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	drm_dbg_kms(&dev_priv->drm,
		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		    has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to set up the display reference clock before DPLL
	 * enabling. This is only under the driver's control after
	 * PCH B stepping; earlier chipset steppings should ignore
	 * this setting.
	 */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else {
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
		}
	} else if (using_ssc_source) {
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else {
			val &= ~DREF_SSC1_ENABLE;
		}

		/* Get SSC going before enabling the outputs */
		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				drm_dbg_kms(&dev_priv->drm,
					    "Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else {
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
			}
		} else {
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
		}

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	drm_WARN_ON(&dev_priv->drm, val != final);
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		ilk_init_pch_refclk(dev_priv);
	else if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}