// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"

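/*
 * Per-platform FDI link training vtable; selected by intel_fdi_init_hook()
 * at the bottom of this file and invoked through intel_fdi_link_train().
 */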
struct intel_fdi_funcs {
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
};

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		cur_state = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
	} else {
		cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, true);
}

void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, false);
}

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, true);
}

void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, false);
}

void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
			       enum pipe pipe)
{
	bool cur_state;

	/* ILK FDI PLL is always enabled */
	if (IS_IRONLAKE(i915))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(i915))
		return;

	cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
	I915_STATE_WARN(!cur_state, "FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll(struct drm_i915_private *i915,
			      enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, true);
}

void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, false);
}

void intel_fdi_link_train(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
}

/* units of 100MHz */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

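/*
 * Validate the FDI lane count for a pipe against the other pipes.
 * Pipe A has dedicated FDI lanes. On Ivybridge the FDI B and C links
 * share lanes: pipe B may use more than 2 lanes only while pipe C is not
 * using FDI at all, and pipe C is limited to 2 lanes and only while
 * pipe B uses 2 or fewer. Haswell/Broadwell are limited to 2 lanes.
 */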
static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		MISSING_CASE(pipe);
		return 0;
	}
}

void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
{
	if (IS_IRONLAKE(i915)) {
		u32 fdi_pll_clk =
			intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
		i915->display.fdi.pll_freq = 270000;
	} else {
		return;
	}

	drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
}

int intel_fdi_link_freq(struct drm_i915_private *i915,
			const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(i915))
		return pipe_config->port_clock; /* SPLL */
	else
		return i915->display.fdi.pll_freq;
}

int ilk_fdi_compute_config(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);
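	/*
	 * Rough worked example (illustrative only): a 148500 kHz dot clock
	 * at 24 bpp needs about 148500 * 24 = 3.56 Gbit/s of payload, while
	 * each FDI lane carries link_bw * 8 = 270000 * 8 = 2.16 Gbit/s of
	 * data after 8b/10b overhead, so two lanes are required. The helper
	 * additionally reserves a small margin, so marginal cases round up.
	 */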

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);

	ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config);
	if (ret == -EDEADLK)
		return ret;

	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		drm_dbg_kms(&i915->drm,
			    "fdi link bw constraint, reducing pipe bpp to %i\n",
			    pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return -EAGAIN;

	return ret;
}

static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}

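/*
 * On Ivybridge/CPT the FDI B and C links share one set of four lanes.
 * With bifurcation enabled the lanes are split 2/2 between B and C; with
 * it disabled, FDI B gets all four lanes and FDI C cannot be used. The
 * atomic check in ilk_check_fdi_lanes() guarantees the combination
 * requested here is achievable.
 */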
static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	switch (crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (crtc_state->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev_priv, false);
		else
			cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	default:
		MISSING_CASE(crtc->pipe);
	}
}

void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}

/* The FDI link training functions for ILK/Ibexpeak. */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* FDI needs bits from pipe first */
	assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_BIT_LOCK) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
}

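/*
 * Voltage swing / pre-emphasis settings tried in order during SNB/IVB
 * link training until the receiver reports bit and symbol lock.
 */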
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_SANDYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	ivb_update_fdi_bc_bifurcation(crtc_state);

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_NONE_IVB,
			     FDI_LINK_TRAIN_PATTERN_2_IVB);
		intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
			     FDI_LINK_TRAIN_PATTERN_MASK_CPT,
			     FDI_LINK_TRAIN_PATTERN_2_CPT);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Starting with Haswell, different DDI ports can work in FDI mode for
 * connection to the PCH-located connectors. For this, it is necessary to train
 * both the DDI port and PCH receiver for the desired DDI buffer settings.
 *
 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
 * please note that when FDI mode is active on DDI E, it shares 2 lines with
 * DDI A (which is used for eDP)
 */
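/*
 * In outline: enable the PCH FDI receiver PLL and switch it to PCDclk,
 * route the SPLL to the port, then step through the DDI buffer translation
 * entries (each tried twice) with DP_TP_CTL auto-training until
 * DP_TP_STATUS reports completion, and finally switch to the normal
 * (non-training) pattern.
 */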
void hsw_fdi_link_train(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 temp, i, rx_ctl_val;
	int n_entries;

	encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 * - FDI delay to 90h
	 *
	 * WaFDIAutoLinkSetTimingOverrride:hsw
	 */
	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
		       FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_RX_PLL_ENABLE |
		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
	udelay(220);

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);

	/* Configure Port Clock Select */
	drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
	intel_ddi_enable_clock(encoder, crtc_state);

	/* Start the training iterating through available voltages and emphasis,
	 * testing each value twice. */
	for (i = 0; i < n_entries * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
			       DP_TP_CTL_FDI_AUTOTRAIN |
			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
			       DP_TP_CTL_LINK_TRAIN_PAT1 |
			       DP_TP_CTL_ENABLE);

		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
		 * DDI E does not support port reversal, the functionality is
		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
		 * port reversal bit */
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
			       DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2));
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		udelay(600);

		/* Program PCH FDI Receiver TU */
		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		/* Wait for FDI receiver lane calibration */
		udelay(30);

		/* Unset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));

		/* Wait for FDI auto training time */
		udelay(5);

		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI link training done on step %d\n", i);
			break;
		}

		/*
		 * Leave things enabled even if we failed to train FDI.
		 * Results in less fireworks from the state checker.
		 */
		if (i == n_entries * 2 - 1) {
			drm_err(&dev_priv->drm, "FDI link training failed!\n");
			break;
		}

		rx_ctl_val &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(dev_priv, PORT_E);

		/* Reset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
			     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
	}

	/* Enable normal pixel sending for FDI */
	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
		       DP_TP_CTL_FDI_AUTOTRAIN |
		       DP_TP_CTL_LINK_TRAIN_NORMAL |
		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
		       DP_TP_CTL_ENABLE);
}

void hsw_fdi_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/*
	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
	 * step 13 is the correct place for it. Step 18 is where it was
	 * originally before the BUN.
	 */
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
	intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
	intel_wait_ddi_buf_idle(dev_priv, PORT_E);
	intel_ddi_disable_clock(encoder);
	intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
		     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
		     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
}

void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}

void ilk_fdi_pll_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/* Switch from PCDclk to Rawclk */
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);

	/* Disable CPU FDI TX PLL */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
	udelay(100);

	/* Wait for the clocks to turn off. */
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
	udelay(100);
}

void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in TRANSCONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}

static const struct intel_fdi_funcs ilk_funcs = {
	.fdi_link_train = ilk_fdi_link_train,
};

static const struct intel_fdi_funcs gen6_funcs = {
	.fdi_link_train = gen6_fdi_link_train,
};

static const struct intel_fdi_funcs ivb_funcs = {
	.fdi_link_train = ivb_manual_fdi_link_train,
};

void
intel_fdi_init_hook(struct drm_i915_private *dev_priv)
{
	if (IS_IRONLAKE(dev_priv)) {
		dev_priv->display.funcs.fdi = &ilk_funcs;
	} else if (IS_SANDYBRIDGE(dev_priv)) {
		dev_priv->display.funcs.fdi = &gen6_funcs;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.funcs.fdi = &ivb_funcs;
	}
}