// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"

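/*
 * FDI (Flexible Display Interface) is the link that carries pixel data from
 * the CPU display pipes to the PCH transcoders on ILK/SNB/IVB, and to the
 * PCH-attached analog (CRT) output via DDI E on HSW/BDW. The vtable below
 * holds the platform specific link training hook; see intel_fdi_init_hook()
 * at the end of this file for the platform to implementation mapping.
 */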
struct intel_fdi_funcs {
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
};

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		cur_state = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
	} else {
		cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
	}
	I915_STATE_WARN(dev_priv, cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, true);
}

void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, false);
}

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
	I915_STATE_WARN(dev_priv, cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, true);
}

void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, false);
}

void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
			       enum pipe pipe)
{
	bool cur_state;

	/* ILK FDI PLL is always enabled */
	if (IS_IRONLAKE(i915))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(i915))
		return;

	cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
	I915_STATE_WARN(i915, !cur_state,
			"FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll(struct drm_i915_private *i915,
			      enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
	I915_STATE_WARN(i915, cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, true);
}

void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, false);
}

void intel_fdi_link_train(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
}

/* Number of FDI lanes needed by the pipe; zero if it's disabled or not driving a PCH encoder */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

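/*
 * On IVB the three pipes share the FDI lanes to the PCH: pipe A has its own
 * link, while pipes B and C share one set of four lanes. Pipe B may use all
 * four only while pipe C is not using FDI, and pipe C is limited to two lanes
 * (and requires pipe B to use at most two), with the B/C lanes bifurcated via
 * cpt_set_fdi_bc_bifurcation(). HSW/BDW FDI (DDI E) is limited to two lanes
 * altogether.
 */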
static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		MISSING_CASE(pipe);
		return 0;
	}
}

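/*
 * Cache the FDI link symbol clock (in kHz): read back from the BIOS
 * programmed divider on ILK, a fixed 270000 (2.7 GT/s with 10 bit symbols,
 * i.e. a 270 MHz symbol clock) on SNB/IVB. Other platforms don't use this
 * value; HSW/BDW take the link rate from the SPLL via port_clock instead.
 */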
void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
{
	if (IS_IRONLAKE(i915)) {
		u32 fdi_pll_clk =
			intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
		i915->display.fdi.pll_freq = 270000;
	} else {
		return;
	}

	drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
}

int intel_fdi_link_freq(struct drm_i915_private *i915,
			const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(i915))
		return pipe_config->port_clock; /* SPLL */
	else
		return i915->display.fdi.pll_freq;
}

int ilk_fdi_compute_config(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/*
	 * FDI is a binary signal running at ~2.7 GHz, encoding each output
	 * octet as 10 bits. The actual frequency is stored as a divider into
	 * a 100 MHz clock, and the mode pixel clock is stored in units of
	 * 1 kHz. Hence the bandwidth of each lane in terms of the mode
	 * signal is:
	 */
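	/*
	 * Rough worked example (illustrative numbers only): with link_bw =
	 * 270000 kHz each lane carries 270000 * 8 kbit/s of pixel data, so a
	 * 148500 kHz (1080p60) mode at 24 bpp needs about
	 * 148500 * 24 / (270000 * 8) ~= 1.65, i.e. two FDI lanes.
	 * ilk_get_lanes_required() rounds this up and may add a small margin
	 * on top.
	 */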
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);

	ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config);
	if (ret == -EDEADLK)
		return ret;

	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		drm_dbg_kms(&i915->drm,
			    "fdi link bw constraint, reducing pipe bpp to %i\n",
			    pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return -EAGAIN;

	return ret;
}

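/*
 * FDI_BC_BIFURCATION_SELECT in SOUTH_CHICKEN1 splits the lanes shared by
 * FDI B and FDI C so that pipe C can get a two lane link of its own. The
 * bit may only be flipped while both the B and C FDI receivers are
 * disabled, which the WARNs below check.
 */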
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}

static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	switch (crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (crtc_state->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev_priv, false);
		else
			cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	default:
		MISSING_CASE(crtc->pipe);
	}
}

void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}

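/*
 * FDI link training is a two stage handshake: the transmitter sends training
 * pattern 1 until the receiver reports bit lock in FDI_RX_IIR, then pattern 2
 * until symbol lock is reported. Once both are locked the link is switched to
 * the normal (idle/pixel) pattern, see intel_fdi_normal_train().
 */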
/* The FDI link training functions for ILK/Ibexpeak. */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* FDI needs bits from pipe first */
	assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);

	/*
	 * Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
	 * for train result.
	 */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
}

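/*
 * Voltage swing / pre-emphasis settings tried, in order, during SNB/IVB
 * link training until the receiver achieves lock.
 */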
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/*
	 * Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
	 * for train result.
	 */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_SANDYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

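/*
 * The FIXME in intel_fdi_init_hook() suggests only IVB A0 parts need this:
 * instead of relying on the hardware auto training handshake
 * (FDI_LINK_TRAIN_AUTO), every vswing/pre-emphasis setting is programmed by
 * hand and the lock bits are polled from FDI_RX_IIR.
 */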
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	ivb_update_fdi_bc_bifurcation(crtc_state);

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/*
	 * Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
	 * for train result.
	 */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_NONE_IVB,
			     FDI_LINK_TRAIN_PATTERN_2_IVB);
		intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
			     FDI_LINK_TRAIN_PATTERN_MASK_CPT,
			     FDI_LINK_TRAIN_PATTERN_2_CPT);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/*
 * Starting with Haswell, different DDI ports can work in FDI mode for
 * connection to the PCH-located connectors. For this, it is necessary to train
 * both the DDI port and the PCH receiver for the desired DDI buffer settings.
 *
 * The recommended port to work in FDI mode is DDI E, which we use here. Note
 * that when FDI mode is active on DDI E, it shares 2 lanes with DDI A (which
 * is used for eDP).
 */
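/*
 * HSW/BDW hand the handshake to hardware: DP_TP_CTL is programmed with
 * DP_TP_CTL_FDI_AUTOTRAIN and each DDI buffer translation entry is tried
 * (twice each) until DP_TP_STATUS reports DP_TP_STATUS_AUTOTRAIN_DONE.
 */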
void hsw_fdi_link_train(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 temp, i, rx_ctl_val;
	int n_entries;

	encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

	/*
	 * Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed in the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 * - FDI delay to 90h
	 *
	 * WaFDIAutoLinkSetTimingOverrride:hsw
	 */
	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
		       FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_RX_PLL_ENABLE |
		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
	udelay(220);

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);

	/* Configure Port Clock Select */
	drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
	intel_ddi_enable_clock(encoder, crtc_state);

	/*
	 * Start the training, iterating through available voltages and
	 * emphasis, testing each value twice.
	 */
	for (i = 0; i < n_entries * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
			       DP_TP_CTL_FDI_AUTOTRAIN |
			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
			       DP_TP_CTL_LINK_TRAIN_PAT1 |
			       DP_TP_CTL_ENABLE);

		/*
		 * Configure and enable DDI_BUF_CTL for DDI E with the next
		 * voltage. DDI E does not support port reversal; the
		 * functionality is achieved on the PCH side in FDI_RX_CTL, so
		 * there is no need to set the port reversal bit.
		 */
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
			       DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2));
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		udelay(600);

		/* Program PCH FDI Receiver TU */
		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		/* Wait for FDI receiver lane calibration */
		udelay(30);

		/* Unset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));

		/* Wait for FDI auto training time */
		udelay(5);

		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI link training done on step %d\n", i);
			break;
		}

		/*
		 * Leave things enabled even if we failed to train FDI.
		 * Results in less fireworks from the state checker.
		 */
		if (i == n_entries * 2 - 1) {
			drm_err(&dev_priv->drm, "FDI link training failed!\n");
			break;
		}

		rx_ctl_val &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(dev_priv, PORT_E);

		/* Reset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
			     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
	}

	/* Enable normal pixel sending for FDI */
	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
		       DP_TP_CTL_FDI_AUTOTRAIN |
		       DP_TP_CTL_LINK_TRAIN_NORMAL |
		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
		       DP_TP_CTL_ENABLE);
}

void hsw_fdi_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/*
	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
	 * step 13 is the correct place for it. Step 18 is where it was
	 * originally before the BUN.
	 */
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
	intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
	intel_wait_ddi_buf_idle(dev_priv, PORT_E);
	intel_ddi_disable_clock(encoder);
	intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
		     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
		     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
}

void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}

void ilk_fdi_pll_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/* Switch from PCDclk to Rawclk */
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);

	/* Disable CPU FDI TX PLL */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
	udelay(100);

	/* Wait for the clocks to turn off. */
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
	udelay(100);
}

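/*
 * Disable the CPU FDI transmitter and PCH FDI receiver for the given crtc,
 * leaving training pattern 1 selected and the receiver BPC field in sync
 * with TRANSCONF.
 */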
void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in TRANSCONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}

static const struct intel_fdi_funcs ilk_funcs = {
	.fdi_link_train = ilk_fdi_link_train,
};

static const struct intel_fdi_funcs gen6_funcs = {
	.fdi_link_train = gen6_fdi_link_train,
};

static const struct intel_fdi_funcs ivb_funcs = {
	.fdi_link_train = ivb_manual_fdi_link_train,
};

void
intel_fdi_init_hook(struct drm_i915_private *dev_priv)
{
	if (IS_IRONLAKE(dev_priv)) {
		dev_priv->display.funcs.fdi = &ilk_funcs;
	} else if (IS_SANDYBRIDGE(dev_priv)) {
		dev_priv->display.funcs.fdi = &gen6_funcs;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.funcs.fdi = &ivb_funcs;
	}
}