xref: /openbmc/u-boot/drivers/video/tegra124/dp.c (revision 23ff8633)
1 /*
2  * Copyright (c) 2011-2013, NVIDIA Corporation.
3  * Copyright 2014 Google Inc.
4  *
5  * SPDX-License-Identifier:     GPL-2.0
6  */
7 
8 #include <common.h>
9 #include <displayport.h>
10 #include <dm.h>
11 #include <div64.h>
12 #include <errno.h>
13 #include <fdtdec.h>
14 #include <asm/io.h>
15 #include <asm/arch-tegra/dc.h>
16 #include "displayport.h"
17 #include "edid.h"
18 #include "sor.h"
19 
20 DECLARE_GLOBAL_DATA_PTR;
21 
22 #define DO_FAST_LINK_TRAINING		1
23 
24 struct tegra_dp_plat {
25 	ulong base;
26 };
27 
28 struct tegra_dp_priv {
29 	struct dpaux_ctlr *regs;
30 	struct tegra_dc_sor_data *sor;
31 	u8 revision;
32 	int enabled;
33 };
34 
35 struct tegra_dp_priv dp_data;
36 
37 static inline u32 tegra_dpaux_readl(struct tegra_dp_priv *dp, u32 reg)
38 {
39 	return readl((u32 *)dp->regs + reg);
40 }
41 
42 static inline void tegra_dpaux_writel(struct tegra_dp_priv *dp, u32 reg,
43 				      u32 val)
44 {
45 	writel(val, (u32 *)dp->regs + reg);
46 }
47 
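/*
 * Poll a DPAUX register until (value & mask) == exp_val or the timeout
 * expires. Returns 0 on success, or the original timeout value on timeout.
 */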
48 static inline u32 tegra_dc_dpaux_poll_register(struct tegra_dp_priv *dp,
49 					   u32 reg, u32 mask, u32 exp_val,
50 					   u32 poll_interval_us,
51 					   u32 timeout_us)
52 {
53 	u32 reg_val = 0;
54 	u32 temp = timeout_us;
55 
56 	do {
57 		udelay(poll_interval_us);
58 		reg_val = tegra_dpaux_readl(dp, reg);
59 		if (timeout_us > poll_interval_us)
60 			timeout_us -= poll_interval_us;
61 		else
62 			break;
63 	} while ((reg_val & mask) != exp_val);
64 
65 	if ((reg_val & mask) == exp_val)
66 		return 0;	/* success */
67 	debug("dpaux_poll_register 0x%x: timeout: (reg_val)0x%08x & (mask)0x%08x != (exp_val)0x%08x\n",
68 	      reg, reg_val, mask, exp_val);
69 	return temp;
70 }
71 
72 static inline int tegra_dpaux_wait_transaction(struct tegra_dp_priv *dp)
73 {
74 	/* According to the DP spec, each AUX transaction must finish
75 	   within 40ms. */
76 	if (tegra_dc_dpaux_poll_register(dp, DPAUX_DP_AUXCTL,
77 					 DPAUX_DP_AUXCTL_TRANSACTREQ_MASK,
78 					 DPAUX_DP_AUXCTL_TRANSACTREQ_DONE,
79 					 100, DP_AUX_TIMEOUT_MS * 1000) != 0) {
80 		debug("dp: DPAUX transaction timeout\n");
81 		return -1;
82 	}
83 	return 0;
84 }
85 
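/*
 * Write a single chunk of up to DP_AUX_MAX_BYTES over the AUX channel,
 * retrying on timeout/error and defer replies up to the configured limits.
 * On an ACK reply, *size is updated with the number of bytes accepted.
 */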
86 static int tegra_dc_dpaux_write_chunk(struct tegra_dp_priv *dp, u32 cmd,
87 					  u32 addr, u8 *data, u32 *size,
88 					  u32 *aux_stat)
89 {
90 	int i;
91 	u32 reg_val;
92 	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
93 	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
94 	u32 temp_data;
95 
96 	if (*size > DP_AUX_MAX_BYTES)
97 		return -1;	/* only write one chunk of data */
98 
99 	/* Make sure the command is a write command */
100 	switch (cmd) {
101 	case DPAUX_DP_AUXCTL_CMD_I2CWR:
102 	case DPAUX_DP_AUXCTL_CMD_MOTWR:
103 	case DPAUX_DP_AUXCTL_CMD_AUXWR:
104 		break;
105 	default:
106 		debug("dp: aux write cmd 0x%x is invalid\n", cmd);
107 		return -EINVAL;
108 	}
109 
110 	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
111 	for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i) {
112 		memcpy(&temp_data, data, 4);
113 		tegra_dpaux_writel(dp, DPAUX_DP_AUXDATA_WRITE_W(i), temp_data);
114 		data += 4;
115 	}
116 
117 	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
118 	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
119 	reg_val |= cmd;
120 	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
121 	reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);
122 
123 	while ((timeout_retries > 0) && (defer_retries > 0)) {
124 		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
125 		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
126 			udelay(1);
127 
128 		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
129 		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);
130 
131 		if (tegra_dpaux_wait_transaction(dp))
132 			debug("dp: aux write transaction timeout\n");
133 
134 		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
135 
136 		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
137 		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
138 		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
139 		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
140 			if (timeout_retries-- > 0) {
141 				debug("dp: aux write retry (0x%x) -- %d\n",
142 				      *aux_stat, timeout_retries);
143 				/* clear the error bits */
144 				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
145 						   *aux_stat);
146 				continue;
147 			} else {
148 				debug("dp: aux write got error (0x%x)\n",
149 				      *aux_stat);
150 				return -ETIMEDOUT;
151 			}
152 		}
153 
154 		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
155 		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
156 			if (defer_retries-- > 0) {
157 				debug("dp: aux write defer (0x%x) -- %d\n",
158 				      *aux_stat, defer_retries);
159 				/* clear the error bits */
160 				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
161 						   *aux_stat);
162 				continue;
163 			} else {
164 				debug("dp: aux write defer exceeds max retries (0x%x)\n",
165 				      *aux_stat);
166 				return -ETIMEDOUT;
167 			}
168 		}
169 
170 		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
171 			DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
172 			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
173 			return 0;
174 		} else {
175 			debug("dp: aux write failed (0x%x)\n", *aux_stat);
176 			return -EIO;
177 		}
178 	}
179 	/* Should never get here */
180 	return -EIO;
181 }
182 
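/*
 * Read a single chunk of up to DP_AUX_MAX_BYTES over the AUX channel.
 * Fails immediately if HPD is not asserted; otherwise retries on
 * timeout/error and defer replies up to the configured limits.
 */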
183 static int tegra_dc_dpaux_read_chunk(struct tegra_dp_priv *dp, u32 cmd,
184 					 u32 addr, u8 *data, u32 *size,
185 					 u32 *aux_stat)
186 {
187 	u32 reg_val;
188 	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
189 	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
190 
191 	if (*size > DP_AUX_MAX_BYTES) {
192 		debug("only read one chunk\n");
193 		return -EIO;	/* only read one chunk */
194 	}
195 
196 	/* Check to make sure the command is a read command */
197 	switch (cmd) {
198 	case DPAUX_DP_AUXCTL_CMD_I2CRD:
199 	case DPAUX_DP_AUXCTL_CMD_I2CREQWSTAT:
200 	case DPAUX_DP_AUXCTL_CMD_MOTRD:
201 	case DPAUX_DP_AUXCTL_CMD_AUXRD:
202 		break;
203 	default:
204 		debug("dp: aux read cmd 0x%x is invalid\n", cmd);
205 		return -EIO;
206 	}
207 
208 	*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
209 	if (!(*aux_stat & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
210 		debug("dp: HPD is not detected\n");
211 		return -EIO;
212 	}
213 
214 	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
215 
216 	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
217 	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
218 	reg_val |= cmd;
219 	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
220 	reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);
221 	while ((timeout_retries > 0) && (defer_retries > 0)) {
222 		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
223 		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
224 			udelay(DP_DPCP_RETRY_SLEEP_NS * 2);
225 
226 		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
227 		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);
228 
229 		if (tegra_dpaux_wait_transaction(dp))
230 			debug("dp: aux read transaction timeout\n");
231 
232 		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
233 
234 		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
235 		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
236 		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
237 		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
238 			if (timeout_retries-- > 0) {
239 				debug("dp: aux read retry (0x%x) -- %d\n",
240 				      *aux_stat, timeout_retries);
241 				/* clear the error bits */
242 				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
243 						   *aux_stat);
244 				continue;	/* retry */
245 			} else {
246 				debug("dp: aux read got error (0x%x)\n",
247 				      *aux_stat);
248 				return -ETIMEDOUT;
249 			}
250 		}
251 
252 		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
253 		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
254 			if (defer_retries-- > 0) {
255 				debug("dp: aux read defer (0x%x) -- %d\n",
256 				      *aux_stat, defer_retries);
257 				/* clear the error bits */
258 				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
259 						   *aux_stat);
260 				continue;
261 			} else {
262 				debug("dp: aux read defer exceeds max retries (0x%x)\n",
263 				      *aux_stat);
264 				return -ETIMEDOUT;
265 			}
266 		}
267 
268 		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
269 			DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
270 			int i;
271 			u32 temp_data[4];
272 
273 			for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i)
274 				temp_data[i] = tegra_dpaux_readl(dp,
275 						DPAUX_DP_AUXDATA_READ_W(i));
276 
277 			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
278 			memcpy(data, temp_data, *size);
279 
280 			return 0;
281 		} else {
282 			debug("dp: aux read failed (0x%x)\n", *aux_stat);
283 			return -EIO;
284 		}
285 	}
286 	/* Should never get here */
287 	debug("%s: unexpected exit from retry loop\n", __func__);
288 
289 	return -EIO;
290 }
291 
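/* Read an arbitrary number of bytes by issuing one AUX read per chunk */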
292 static int tegra_dc_dpaux_read(struct tegra_dp_priv *dp, u32 cmd, u32 addr,
293 			u8 *data, u32 *size, u32 *aux_stat)
294 {
295 	u32 finished = 0;
296 	u32 cur_size;
297 	int ret = 0;
298 
299 	do {
300 		cur_size = *size - finished;
301 		if (cur_size > DP_AUX_MAX_BYTES)
302 			cur_size = DP_AUX_MAX_BYTES;
303 
304 		ret = tegra_dc_dpaux_read_chunk(dp, cmd, addr,
305 						data, &cur_size, aux_stat);
306 		if (ret)
307 			break;
308 
309 		/* cur_size should be the real size returned */
310 		addr += cur_size;
311 		data += cur_size;
312 		finished += cur_size;
313 
314 	} while (*size > finished);
315 	*size = finished;
316 
317 	return ret;
318 }
319 
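/* Read a single byte from the sink DPCD at address 'cmd' */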
320 static int tegra_dc_dp_dpcd_read(struct tegra_dp_priv *dp, u32 cmd,
321 				 u8 *data_ptr)
322 {
323 	u32 size = 1;
324 	u32 status = 0;
325 	int ret;
326 
327 	ret = tegra_dc_dpaux_read_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
328 					cmd, data_ptr, &size, &status);
329 	if (ret) {
330 		debug("dp: Failed to read DPCD data. CMD 0x%x, Status 0x%x\n",
331 		      cmd, status);
332 	}
333 
334 	return ret;
335 }
336 
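/* Write a single byte to the sink DPCD at address 'cmd' */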
337 static int tegra_dc_dp_dpcd_write(struct tegra_dp_priv *dp, u32 cmd,
338 				u8 data)
339 {
340 	u32 size = 1;
341 	u32 status = 0;
342 	int ret;
343 
344 	ret = tegra_dc_dpaux_write_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXWR,
345 					cmd, &data, &size, &status);
346 	if (ret) {
347 		debug("dp: Failed to write DPCD data. CMD 0x%x, Status 0x%x\n",
348 		      cmd, status);
349 	}
350 
351 	return ret;
352 }
353 
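/*
 * Read from an I2C device (e.g. the EDID EEPROM) over the AUX channel:
 * write the register address with an I2C MOT transaction, then read the
 * data back in chunks. Returns the number of bytes read on success.
 */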
354 static int tegra_dc_i2c_aux_read(struct tegra_dp_priv *dp, u32 i2c_addr,
355 				 u8 addr, u8 *data, u32 size, u32 *aux_stat)
356 {
357 	u32 finished = 0;
358 	int ret = 0;
359 
360 	do {
361 		u32 cur_size = min((u32)DP_AUX_MAX_BYTES, size - finished);
362 
363 		u32 len = 1;
364 		ret = tegra_dc_dpaux_write_chunk(
365 				dp, DPAUX_DP_AUXCTL_CMD_MOTWR, i2c_addr,
366 				&addr, &len, aux_stat);
367 		if (ret) {
368 			debug("%s: error sending address to read.\n",
369 			      __func__);
370 			return ret;
371 		}
372 
373 		ret = tegra_dc_dpaux_read_chunk(
374 				dp, DPAUX_DP_AUXCTL_CMD_I2CRD, i2c_addr,
375 				data, &cur_size, aux_stat);
376 		if (ret) {
377 			debug("%s: error reading data.\n", __func__);
378 			return ret;
379 		}
380 
381 		/* cur_size should be the real size returned */
382 		addr += cur_size;
383 		data += cur_size;
384 		finished += cur_size;
385 	} while (size > finished);
386 
387 	return finished;
388 }
389 
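/* Clear any pending AUX interrupts and configure/power up the DPAUX pads */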
390 static void tegra_dc_dpaux_enable(struct tegra_dp_priv *dp)
391 {
392 	/* clear interrupt */
393 	tegra_dpaux_writel(dp, DPAUX_INTR_AUX, 0xffffffff);
394 	/* Do not enable interrupts for now; enable them once an ISR is in place */
395 	tegra_dpaux_writel(dp, DPAUX_INTR_EN_AUX, 0x0);
396 
397 	tegra_dpaux_writel(dp, DPAUX_HYBRID_PADCTL,
398 			   DPAUX_HYBRID_PADCTL_AUX_DRVZ_OHM_50 |
399 			   DPAUX_HYBRID_PADCTL_AUX_CMH_V0_70 |
400 			   0x18 << DPAUX_HYBRID_PADCTL_AUX_DRVI_SHIFT |
401 			   DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV_ENABLE);
402 
403 	tegra_dpaux_writel(dp, DPAUX_HYBRID_SPARE,
404 			   DPAUX_HYBRID_SPARE_PAD_PWR_POWERUP);
405 }
406 
407 #ifdef DEBUG
408 static void tegra_dc_dp_dump_link_cfg(struct tegra_dp_priv *dp,
409 	const struct tegra_dp_link_config *link_cfg)
410 {
411 	debug("DP config: cfg_name               cfg_value\n");
412 	debug("           Lane Count             %d\n",
413 	      link_cfg->max_lane_count);
414 	debug("           SupportEnhancedFraming %s\n",
415 	      link_cfg->support_enhanced_framing ? "Y" : "N");
416 	debug("           Bandwidth              %d\n",
417 	      link_cfg->max_link_bw);
418 	debug("           bpp                    %d\n",
419 	      link_cfg->bits_per_pixel);
420 	debug("           EnhancedFraming        %s\n",
421 	      link_cfg->enhanced_framing ? "Y" : "N");
422 	debug("           Scramble_enabled       %s\n",
423 	      link_cfg->scramble_ena ? "Y" : "N");
424 	debug("           LinkBW                 %d\n",
425 	      link_cfg->link_bw);
426 	debug("           lane_count             %d\n",
427 	      link_cfg->lane_count);
428 	debug("           activepolarity         %d\n",
429 	      link_cfg->activepolarity);
430 	debug("           active_count           %d\n",
431 	      link_cfg->active_count);
432 	debug("           tu_size                %d\n",
433 	      link_cfg->tu_size);
434 	debug("           active_frac            %d\n",
435 	      link_cfg->active_frac);
436 	debug("           watermark              %d\n",
437 	      link_cfg->watermark);
438 	debug("           hblank_sym             %d\n",
439 	      link_cfg->hblank_sym);
440 	debug("           vblank_sym             %d\n",
441 	      link_cfg->vblank_sym);
442 }
443 #endif
444 
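/*
 * Fall back to the next lower overall link configuration by adjusting the
 * link rate and lane count. Returns 0 if a usable configuration remains,
 * -ENOLINK otherwise.
 */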
445 static int _tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
446 				       struct tegra_dp_link_config *cfg)
447 {
448 	switch (cfg->link_bw) {
449 	case SOR_LINK_SPEED_G1_62:
450 		if (cfg->max_link_bw > SOR_LINK_SPEED_G1_62)
451 			cfg->link_bw = SOR_LINK_SPEED_G2_7;
452 		cfg->lane_count /= 2;
453 		break;
454 	case SOR_LINK_SPEED_G2_7:
455 		cfg->link_bw = SOR_LINK_SPEED_G1_62;
456 		break;
457 	case SOR_LINK_SPEED_G5_4:
458 		if (cfg->lane_count == 1) {
459 			cfg->link_bw = SOR_LINK_SPEED_G2_7;
460 			cfg->lane_count = cfg->max_lane_count;
461 		} else {
462 			cfg->lane_count /= 2;
463 		}
464 		break;
465 	default:
466 		debug("dp: Error link rate %d\n", cfg->link_bw);
467 		return -ENOLINK;
468 	}
469 
470 	return (cfg->lane_count > 0) ? 0 : -ENOLINK;
471 }
472 
473 /*
474  * Calculate whether the given cfg can meet the mode request.
475  * Return 0 if the mode is possible, -1 otherwise.
476  */
477 static int tegra_dc_dp_calc_config(struct tegra_dp_priv *dp,
478 				   const struct display_timing *timing,
479 				   struct tegra_dp_link_config *link_cfg)
480 {
481 	const u32	link_rate = 27 * link_cfg->link_bw * 1000 * 1000;
482 	const u64	f	  = 100000;	/* precision factor */
483 	u32	num_linkclk_line; /* Number of link clocks per line */
484 	u64	ratio_f; /* Ratio of incoming to outgoing data rate */
485 	u64	frac_f;
486 	u64	activesym_f;	/* Activesym per TU */
487 	u64	activecount_f;
488 	u32	activecount;
489 	u32	activepolarity;
490 	u64	approx_value_f;
491 	u32	activefrac		  = 0;
492 	u64	accumulated_error_f	  = 0;
493 	u32	lowest_neg_activecount	  = 0;
494 	u32	lowest_neg_activepolarity = 0;
495 	u32	lowest_neg_tusize	  = 64;
496 	u32	num_symbols_per_line;
497 	u64	lowest_neg_activefrac	  = 0;
498 	u64	lowest_neg_error_f	  = 64 * f;
499 	u64	watermark_f;
500 	int	i;
501 	int	neg;
502 
503 	if (!link_rate || !link_cfg->lane_count || !timing->pixelclock.typ ||
504 	    !link_cfg->bits_per_pixel)
505 		return -1;
506 
507 	if ((u64)timing->pixelclock.typ * link_cfg->bits_per_pixel >=
508 		(u64)link_rate * 8 * link_cfg->lane_count)
509 		return -1;
510 
511 	num_linkclk_line = (u32)(lldiv(link_rate * timing->hactive.typ,
512 				       timing->pixelclock.typ));
513 
514 	ratio_f = (u64)timing->pixelclock.typ * link_cfg->bits_per_pixel * f;
515 	ratio_f /= 8;
516 	do_div(ratio_f, link_rate * link_cfg->lane_count);
517 
518 	for (i = 64; i >= 32; --i) {
519 		activesym_f	= ratio_f * i;
520 		activecount_f	= lldiv(activesym_f, (u32)f) * f;
521 		frac_f		= activesym_f - activecount_f;
522 		activecount	= (u32)(lldiv(activecount_f, (u32)f));
523 
524 		if (frac_f < (lldiv(f, 2))) /* fraction < 0.5 */
525 			activepolarity = 0;
526 		else {
527 			activepolarity = 1;
528 			frac_f = f - frac_f;
529 		}
530 
531 		if (frac_f != 0) {
532 			/* warning: frac_f should be 64-bit */
533 			frac_f = lldiv(f * f, frac_f); /* 1 / fraction */
534 			if (frac_f > (15 * f))
535 				activefrac = activepolarity ? 1 : 15;
536 			else
537 				activefrac = activepolarity ?
538 					(u32)lldiv(frac_f, (u32)f) + 1 :
539 					(u32)lldiv(frac_f, (u32)f);
540 		}
541 
542 		if (activefrac == 1)
543 			activepolarity = 0;
544 
545 		if (activepolarity == 1)
546 			approx_value_f = activefrac ? lldiv(
547 				(activecount_f + (activefrac * f - f) * f),
548 				(activefrac * f)) :
549 				activecount_f + f;
550 		else
551 			approx_value_f = activefrac ?
552 				activecount_f + lldiv(f, activefrac) :
553 				activecount_f;
554 
555 		if (activesym_f < approx_value_f) {
556 			accumulated_error_f = num_linkclk_line *
557 				lldiv(approx_value_f - activesym_f, i);
558 			neg = 1;
559 		} else {
560 			accumulated_error_f = num_linkclk_line *
561 				lldiv(activesym_f - approx_value_f, i);
562 			neg = 0;
563 		}
564 
565 		if ((neg && (lowest_neg_error_f > accumulated_error_f)) ||
566 		    (accumulated_error_f == 0)) {
567 			lowest_neg_error_f = accumulated_error_f;
568 			lowest_neg_tusize = i;
569 			lowest_neg_activecount = activecount;
570 			lowest_neg_activepolarity = activepolarity;
571 			lowest_neg_activefrac = activefrac;
572 
573 			if (accumulated_error_f == 0)
574 				break;
575 		}
576 	}
577 
578 	if (lowest_neg_activefrac == 0) {
579 		link_cfg->activepolarity = 0;
580 		link_cfg->active_count   = lowest_neg_activepolarity ?
581 			lowest_neg_activecount : lowest_neg_activecount - 1;
582 		link_cfg->tu_size	      = lowest_neg_tusize;
583 		link_cfg->active_frac    = 1;
584 	} else {
585 		link_cfg->activepolarity = lowest_neg_activepolarity;
586 		link_cfg->active_count   = (u32)lowest_neg_activecount;
587 		link_cfg->tu_size	      = lowest_neg_tusize;
588 		link_cfg->active_frac    = (u32)lowest_neg_activefrac;
589 	}
590 
591 	watermark_f = lldiv(ratio_f * link_cfg->tu_size * (f - ratio_f), f);
592 	link_cfg->watermark = (u32)(lldiv(watermark_f + lowest_neg_error_f,
593 		f)) + link_cfg->bits_per_pixel / 4 - 1;
594 	num_symbols_per_line = (timing->hactive.typ *
595 				link_cfg->bits_per_pixel) /
596 			       (8 * link_cfg->lane_count);
597 
598 	if (link_cfg->watermark > 30) {
599 		debug("dp: sor setting: unable to get a good tusize, force watermark to 30\n");
600 		link_cfg->watermark = 30;
601 		return -1;
602 	} else if (link_cfg->watermark > num_symbols_per_line) {
603 		debug("dp: sor setting: force watermark to the number of symbols in the line\n");
604 		link_cfg->watermark = num_symbols_per_line;
605 		return -1;
606 	}
607 
608 	/*
609 	 * Refer to dev_disp.ref for more information.
610 	 * # symbols/hblank = ((SetRasterBlankEnd.X + SetRasterSize.Width -
611 	 *                      SetRasterBlankStart.X - 7) * link_clk / pclk)
612 	 *                      - 3 * enhanced_framing - Y
613 	 * where Y = (# lanes == 4) ? 3 : (# lanes == 2) ? 6 : 12
614 	 */
615 	link_cfg->hblank_sym = (int)lldiv(((uint64_t)timing->hback_porch.typ +
616 			timing->hfront_porch.typ + timing->hsync_len.typ - 7) *
617 			link_rate, timing->pixelclock.typ) -
618 			3 * link_cfg->enhanced_framing -
619 			(12 / link_cfg->lane_count);
620 
621 	if (link_cfg->hblank_sym < 0)
622 		link_cfg->hblank_sym = 0;
623 
624 
625 	/*
626 	 * Refer to dev_disp.ref for more information.
627 	 * # symbols/vblank = ((SetRasterBlankStart.X -
628 	 *                      SetRasterBlankEnd.X - 25) * link_clk / pclk)
629 	 *                      - Y - 1;
630 	 * where Y = (# lanes == 4) ? 12 : (# lanes == 2) ? 21 : 39
631 	 */
632 	link_cfg->vblank_sym = (int)lldiv(((uint64_t)timing->hactive.typ - 25)
633 			* link_rate, timing->pixelclock.typ) - (36 /
634 			link_cfg->lane_count) - 4;
635 
636 	if (link_cfg->vblank_sym < 0)
637 		link_cfg->vblank_sym = 0;
638 
639 	link_cfg->is_valid = 1;
640 #ifdef DEBUG
641 	tegra_dc_dp_dump_link_cfg(dp, link_cfg);
642 #endif
643 
644 	return 0;
645 }
646 
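/*
 * Read the sink capabilities from the DPCD and initialize link_cfg with
 * the maximum supported lane count and link bandwidth, then derive the
 * timing-dependent parameters.
 */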
647 static int tegra_dc_dp_init_max_link_cfg(
648 			const struct display_timing *timing,
649 			struct tegra_dp_priv *dp,
650 			struct tegra_dp_link_config *link_cfg)
651 {
652 	const int drive_current = 0x40404040;
653 	const int preemphasis = 0x0f0f0f0f;
654 	const int postcursor = 0;
655 	u8 dpcd_data;
656 	int ret;
657 
658 	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LANE_COUNT, &dpcd_data);
659 	if (ret)
660 		return ret;
661 	link_cfg->max_lane_count = dpcd_data & DP_MAX_LANE_COUNT_MASK;
662 	link_cfg->tps3_supported = (dpcd_data &
663 			DP_MAX_LANE_COUNT_TPS3_SUPPORTED_YES) ? 1 : 0;
664 
665 	link_cfg->support_enhanced_framing =
666 		(dpcd_data & DP_MAX_LANE_COUNT_ENHANCED_FRAMING_YES) ?
667 		1 : 0;
668 
669 	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_DOWNSPREAD, &dpcd_data);
670 	if (ret)
671 		return ret;
672 	link_cfg->downspread = (dpcd_data & DP_MAX_DOWNSPREAD_VAL_0_5_PCT) ?
673 				1 : 0;
674 
675 	ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_TRAINING_AUX_RD_INTERVAL,
676 				    &link_cfg->aux_rd_interval);
677 	if (ret)
678 		return ret;
679 	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LINK_RATE,
680 				    &link_cfg->max_link_bw);
681 	if (ret)
682 		return ret;
683 
684 	/*
685 	 * Set to a high value for link training and attach.
686 	 * Will be re-programmed when dp is enabled.
687 	 */
688 	link_cfg->drive_current = drive_current;
689 	link_cfg->preemphasis = preemphasis;
690 	link_cfg->postcursor = postcursor;
691 
692 	ret = tegra_dc_dp_dpcd_read(dp, DP_EDP_CONFIGURATION_CAP, &dpcd_data);
693 	if (ret)
694 		return ret;
695 
696 	link_cfg->alt_scramber_reset_cap =
697 		(dpcd_data & DP_EDP_CONFIGURATION_CAP_ASC_RESET_YES) ?
698 		1 : 0;
699 	link_cfg->only_enhanced_framing =
700 		(dpcd_data & DP_EDP_CONFIGURATION_CAP_FRAMING_CHANGE_YES) ?
701 		1 : 0;
702 
703 	link_cfg->lane_count = link_cfg->max_lane_count;
704 	link_cfg->link_bw = link_cfg->max_link_bw;
705 	link_cfg->enhanced_framing = link_cfg->support_enhanced_framing;
706 	link_cfg->frame_in_ms = (1000 / 60) + 1;
707 
708 	tegra_dc_dp_calc_config(dp, timing, link_cfg);
709 	return 0;
710 }
711 
712 static int tegra_dc_dp_set_assr(struct tegra_dp_priv *dp,
713 				struct tegra_dc_sor_data *sor, int ena)
714 {
715 	int ret;
716 
717 	u8 dpcd_data = ena ?
718 		DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_ENABLE :
719 		DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_DISABLE;
720 
721 	ret = tegra_dc_dp_dpcd_write(dp, DP_EDP_CONFIGURATION_SET,
722 				     dpcd_data);
723 	if (ret)
724 		return ret;
725 
726 	/* Also reset the scrambler to 0xfffe */
727 	tegra_dc_sor_set_internal_panel(sor, ena);
728 	return 0;
729 }
730 
731 static int tegra_dp_set_link_bandwidth(struct tegra_dp_priv *dp,
732 				       struct tegra_dc_sor_data *sor,
733 				       u8 link_bw)
734 {
735 	tegra_dc_sor_set_link_bandwidth(sor, link_bw);
736 
737 	/* Sink side */
738 	return tegra_dc_dp_dpcd_write(dp, DP_LINK_BW_SET, link_bw);
739 }
740 
741 static int tegra_dp_set_lane_count(struct tegra_dp_priv *dp,
742 		const struct tegra_dp_link_config *link_cfg,
743 		struct tegra_dc_sor_data *sor)
744 {
745 	u8	dpcd_data;
746 	int	ret;
747 
748 	/* Check if the panel supports enhanced framing */
749 	dpcd_data = link_cfg->lane_count;
750 	if (link_cfg->enhanced_framing)
751 		dpcd_data |= DP_LANE_COUNT_SET_ENHANCEDFRAMING_T;
752 	ret = tegra_dc_dp_dpcd_write(dp, DP_LANE_COUNT_SET, dpcd_data);
753 	if (ret)
754 		return ret;
755 
756 	tegra_dc_sor_set_lane_count(sor, link_cfg->lane_count);
757 
758 	/* Also power down lanes that will not be used */
759 	return 0;
760 }
761 
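/*
 * Check that every active lane reports clock recovery, channel
 * equalization and symbol lock in the DPCD lane status registers.
 */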
762 static int tegra_dc_dp_link_trained(struct tegra_dp_priv *dp,
763 				    const struct tegra_dp_link_config *cfg)
764 {
765 	u32 lane;
766 	u8 mask;
767 	u8 data;
768 	int ret;
769 
770 	for (lane = 0; lane < cfg->lane_count; ++lane) {
771 		ret = tegra_dc_dp_dpcd_read(dp, (lane / 2) ?
772 				DP_LANE2_3_STATUS : DP_LANE0_1_STATUS,
773 				&data);
774 		if (ret)
775 			return ret;
776 		mask = (lane & 1) ?
777 			NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES |
778 			NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_YES |
779 			NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_YES :
780 			DP_LANE_CR_DONE |
781 			DP_LANE_CHANNEL_EQ_DONE |
782 			DP_LANE_SYMBOL_LOCKED;
783 		if ((data & mask) != mask)
784 			return -1;
785 	}
786 	return 0;
787 }
788 
789 static int tegra_dp_channel_eq_status(struct tegra_dp_priv *dp,
790 				      const struct tegra_dp_link_config *cfg)
791 {
792 	u32 cnt;
793 	u32 n_lanes = cfg->lane_count;
794 	u8 data;
795 	u8 ce_done = 1;
796 	int ret;
797 
798 	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
799 		ret = tegra_dc_dp_dpcd_read(dp, DP_LANE0_1_STATUS + cnt, &data);
800 		if (ret)
801 			return ret;
802 
803 		if (n_lanes == 1) {
804 			ce_done = (data & (0x1 <<
805 			NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) &&
806 			(data & (0x1 <<
807 			NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT));
808 			break;
809 		} else if (!(data & (0x1 <<
810 				NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) ||
811 			   !(data & (0x1 <<
812 				NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT)) ||
813 			   !(data & (0x1 <<
814 				NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_SHIFT)) ||
815 			   !(data & (0x1 <<
816 				NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_SHIFT)))
817 			return -EIO;
818 	}
819 
820 	if (ce_done) {
821 		ret = tegra_dc_dp_dpcd_read(dp,
822 					    DP_LANE_ALIGN_STATUS_UPDATED,
823 					    &data);
824 		if (ret)
825 			return ret;
826 		if (!(data & NV_DPCD_LANE_ALIGN_STATUS_UPDATED_DONE_YES))
827 			ce_done = 0;
828 	}
829 
830 	return ce_done ? 0 : -EIO;
831 }
832 
833 static int tegra_dp_clock_recovery_status(struct tegra_dp_priv *dp,
834 					 const struct tegra_dp_link_config *cfg)
835 {
836 	u32 cnt;
837 	u32 n_lanes = cfg->lane_count;
838 	u8 data_ptr;
839 	int ret;
840 
841 	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
842 		ret = tegra_dc_dp_dpcd_read(dp, (DP_LANE0_1_STATUS + cnt),
843 					    &data_ptr);
844 		if (ret)
845 			return ret;
846 
847 		if (n_lanes == 1)
848 			return (data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ?
849 				1 : 0;
850 		else if (!(data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ||
851 			 !(data_ptr & (NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES)))
852 			return 0;
853 	}
854 
855 	return 1;
856 }
857 
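/*
 * Read the sink's requested pre-emphasis, voltage swing and (if supported)
 * post-cursor2 adjustments from the DPCD adjust-request registers.
 */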
858 static int tegra_dp_lt_adjust(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
859 			      u32 pc[4], u8 pc_supported,
860 			      const struct tegra_dp_link_config *cfg)
861 {
862 	size_t cnt;
863 	u8 data_ptr;
864 	u32 n_lanes = cfg->lane_count;
865 	int ret;
866 
867 	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
868 		ret = tegra_dc_dp_dpcd_read(dp, DP_ADJUST_REQUEST_LANE0_1 + cnt,
869 					    &data_ptr);
870 		if (ret)
871 			return ret;
872 		pe[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_PE_MASK) >>
873 					NV_DPCD_ADJUST_REQ_LANEX_PE_SHIFT;
874 		vs[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_DC_MASK) >>
875 					NV_DPCD_ADJUST_REQ_LANEX_DC_SHIFT;
876 		pe[1 + 2 * cnt] =
877 			(data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_MASK) >>
878 					NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_SHIFT;
879 		vs[1 + 2 * cnt] =
880 			(data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_MASK) >>
881 					NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_SHIFT;
882 	}
883 	if (pc_supported) {
884 		ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_ADJUST_REQ_POST_CURSOR2,
885 					    &data_ptr);
886 		if (ret)
887 			return ret;
888 		for (cnt = 0; cnt < n_lanes; cnt++) {
889 			pc[cnt] = (data_ptr >>
890 			NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_SHIFT(cnt)) &
891 			NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_MASK;
892 		}
893 	}
894 
895 	return 0;
896 }
897 
898 static void tegra_dp_wait_aux_training(struct tegra_dp_priv *dp,
899 					bool is_clk_recovery,
900 					const struct tegra_dp_link_config *cfg)
901 {
902 	if (!cfg->aux_rd_interval)
903 		udelay(is_clk_recovery ? 200 : 500);
904 	else
905 		mdelay(cfg->aux_rd_interval * 4);
906 }
907 
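/*
 * Select a training pattern on both the SOR and the sink (via the DPCD);
 * scrambling is disabled whenever a training pattern is active.
 */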
908 static void tegra_dp_tpg(struct tegra_dp_priv *dp, u32 tp, u32 n_lanes,
909 			 const struct tegra_dp_link_config *cfg)
910 {
911 	u8 data = (tp == training_pattern_disabled)
912 		? (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_F)
913 		: (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_T);
914 
915 	tegra_dc_sor_set_dp_linkctl(dp->sor, 1, tp, cfg);
916 	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, data);
917 }
918 
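/*
 * Program the link configuration: wake the sink from D3 if necessary,
 * enable ASSR when supported, and set the link bandwidth and lane count
 * on both the source and the sink.
 */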
919 static int tegra_dp_link_config(struct tegra_dp_priv *dp,
920 				const struct tegra_dp_link_config *link_cfg)
921 {
922 	u8 dpcd_data;
923 	u32 retry;
924 	int ret;
925 
926 	if (link_cfg->lane_count == 0) {
927 		debug("dp: error: lane count is 0. Can not set link config.\n");
928 		return -ENOLINK;
929 	}
930 
931 	/* Bring the sink to the normal (D0) power state if needed */
932 	ret = tegra_dc_dp_dpcd_read(dp, DP_SET_POWER, &dpcd_data);
933 	if (ret)
934 		return ret;
935 
936 	if (dpcd_data == DP_SET_POWER_D3) {
937 		dpcd_data = DP_SET_POWER_D0;
938 
939 		/* DP spec requires 3 retries */
940 		for (retry = 3; retry > 0; --retry) {
941 			ret = tegra_dc_dp_dpcd_write(dp, DP_SET_POWER,
942 						     dpcd_data);
943 			if (!ret)
944 				break;
945 			if (retry == 1) {
946 				debug("dp: Failed to set DP panel power\n");
947 				return ret;
948 			}
949 		}
950 	}
951 
952 	/* Enable ASSR if possible */
953 	if (link_cfg->alt_scramber_reset_cap) {
954 		ret = tegra_dc_dp_set_assr(dp, dp->sor, 1);
955 		if (ret)
956 			return ret;
957 	}
958 
959 	ret = tegra_dp_set_link_bandwidth(dp, dp->sor, link_cfg->link_bw);
960 	if (ret) {
961 		debug("dp: Failed to set link bandwidth\n");
962 		return ret;
963 	}
964 	ret = tegra_dp_set_lane_count(dp, link_cfg, dp->sor);
965 	if (ret) {
966 		debug("dp: Failed to set lane count\n");
967 		return ret;
968 	}
969 	tegra_dc_sor_set_dp_linkctl(dp->sor, 1, training_pattern_none,
970 				    link_cfg);
971 
972 	return 0;
973 }
974 
975 static int tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
976 				      const struct display_timing *timing,
977 				      struct tegra_dp_link_config *cfg)
978 {
979 	struct tegra_dp_link_config tmp_cfg;
980 	int ret;
981 
982 	tmp_cfg = *cfg;
983 	cfg->is_valid = 0;
984 
985 	ret = _tegra_dp_lower_link_config(dp, cfg);
986 	if (!ret)
987 		ret = tegra_dc_dp_calc_config(dp, timing, cfg);
988 	if (!ret)
989 		ret = tegra_dp_link_config(dp, cfg);
990 	if (ret)
991 		goto fail;
992 
993 	return 0;
994 
995 fail:
996 	*cfg = tmp_cfg;
997 	tegra_dp_link_config(dp, &tmp_cfg);
998 	return ret;
999 }
1000 
1001 static int tegra_dp_lt_config(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
1002 			      u32 pc[4], const struct tegra_dp_link_config *cfg)
1003 {
1004 	struct tegra_dc_sor_data *sor = dp->sor;
1005 	u32 n_lanes = cfg->lane_count;
1006 	u8 pc_supported = cfg->tps3_supported;
1007 	u32 cnt;
1008 	u32 val;
1009 
1010 	for (cnt = 0; cnt < n_lanes; cnt++) {
1011 		u32 mask = 0;
1012 		u32 pe_reg, vs_reg, pc_reg;
1013 		u32 shift = 0;
1014 
1015 		switch (cnt) {
1016 		case 0:
1017 			mask = PR_LANE2_DP_LANE0_MASK;
1018 			shift = PR_LANE2_DP_LANE0_SHIFT;
1019 			break;
1020 		case 1:
1021 			mask = PR_LANE1_DP_LANE1_MASK;
1022 			shift = PR_LANE1_DP_LANE1_SHIFT;
1023 			break;
1024 		case 2:
1025 			mask = PR_LANE0_DP_LANE2_MASK;
1026 			shift = PR_LANE0_DP_LANE2_SHIFT;
1027 			break;
1028 		case 3:
1029 			mask = PR_LANE3_DP_LANE3_MASK;
1030 			shift = PR_LANE3_DP_LANE3_SHIFT;
1031 			break;
1032 		default:
1033 			debug("dp: incorrect lane cnt\n");
1034 			return -EINVAL;
1035 		}
1036 
1037 		pe_reg = tegra_dp_pe_regs[pc[cnt]][vs[cnt]][pe[cnt]];
1038 		vs_reg = tegra_dp_vs_regs[pc[cnt]][vs[cnt]][pe[cnt]];
1039 		pc_reg = tegra_dp_pc_regs[pc[cnt]][vs[cnt]][pe[cnt]];
1040 
1041 		tegra_dp_set_pe_vs_pc(sor, mask, pe_reg << shift,
1042 				      vs_reg << shift, pc_reg << shift,
1043 				      pc_supported);
1044 	}
1045 
1046 	tegra_dp_disable_tx_pu(dp->sor);
1047 	udelay(20);
1048 
1049 	for (cnt = 0; cnt < n_lanes; cnt++) {
1050 		u32 max_vs_flag = tegra_dp_is_max_vs(pe[cnt], vs[cnt]);
1051 		u32 max_pe_flag = tegra_dp_is_max_pe(pe[cnt], vs[cnt]);
1052 
1053 		val = (vs[cnt] << NV_DPCD_TRAINING_LANEX_SET_DC_SHIFT) |
1054 			(max_vs_flag ?
1055 			NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_T :
1056 			NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_F) |
1057 			(pe[cnt] << NV_DPCD_TRAINING_LANEX_SET_PE_SHIFT) |
1058 			(max_pe_flag ?
1059 			NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_T :
1060 			NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_F);
1061 		tegra_dc_dp_dpcd_write(dp, (DP_TRAINING_LANE0_SET + cnt), val);
1062 	}
1063 
1064 	if (pc_supported) {
1065 		for (cnt = 0; cnt < n_lanes / 2; cnt++) {
1066 			u32 max_pc_flag0 = tegra_dp_is_max_pc(pc[cnt]);
1067 			u32 max_pc_flag1 = tegra_dp_is_max_pc(pc[cnt + 1]);
1068 			val = (pc[cnt] << NV_DPCD_LANEX_SET2_PC2_SHIFT) |
1069 				(max_pc_flag0 ?
1070 				NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_T :
1071 				NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_F) |
1072 				(pc[cnt + 1] <<
1073 				NV_DPCD_LANEXPLUS1_SET2_PC2_SHIFT) |
1074 				(max_pc_flag1 ?
1075 				NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_T :
1076 				NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_F);
1077 			tegra_dc_dp_dpcd_write(dp,
1078 					       NV_DPCD_TRAINING_LANE0_1_SET2 +
1079 					       cnt, val);
1080 		}
1081 	}
1082 
1083 	return 0;
1084 }
1085 
1086 static int _tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4],
1087 				u32 vs[4], u32 pc[4], u8 pc_supported,
1088 				u32 n_lanes,
1089 				const struct tegra_dp_link_config *cfg)
1090 {
1091 	u32 retry_cnt;
1092 
1093 	for (retry_cnt = 0; retry_cnt < 4; retry_cnt++) {
1094 		int ret;
1095 
1096 		if (retry_cnt) {
1097 			ret = tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported,
1098 						 cfg);
1099 			if (ret)
1100 				return ret;
1101 			tegra_dp_lt_config(dp, pe, vs, pc, cfg);
1102 		}
1103 
1104 		tegra_dp_wait_aux_training(dp, false, cfg);
1105 
1106 		if (!tegra_dp_clock_recovery_status(dp, cfg)) {
1107 			debug("dp: CR failed in channel EQ sequence!\n");
1108 			break;
1109 		}
1110 
1111 		if (!tegra_dp_channel_eq_status(dp, cfg))
1112 			return 0;
1113 	}
1114 
1115 	return -EIO;
1116 }
1117 
1118 static int tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
1119 			       u32 pc[4],
1120 			       const struct tegra_dp_link_config *cfg)
1121 {
1122 	u32 n_lanes = cfg->lane_count;
1123 	u8 pc_supported = cfg->tps3_supported;
1124 	int ret;
1125 	u32 tp_src = training_pattern_2;
1126 
1127 	if (pc_supported)
1128 		tp_src = training_pattern_3;
1129 
1130 	tegra_dp_tpg(dp, tp_src, n_lanes, cfg);
1131 
1132 	ret = _tegra_dp_channel_eq(dp, pe, vs, pc, pc_supported, n_lanes, cfg);
1133 
1134 	tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);
1135 
1136 	return ret;
1137 }
1138 
1139 static int _tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
1140 				  u32 vs[4], u32 pc[4], u8 pc_supported,
1141 				  u32 n_lanes,
1142 				  const struct tegra_dp_link_config *cfg)
1143 {
1144 	u32 vs_temp[4];
1145 	u32 retry_cnt = 0;
1146 
1147 	do {
1148 		tegra_dp_lt_config(dp, pe, vs, pc, cfg);
1149 		tegra_dp_wait_aux_training(dp, true, cfg);
1150 
1151 		if (tegra_dp_clock_recovery_status(dp, cfg))
1152 			return 0;
1153 
1154 		memcpy(vs_temp, vs, sizeof(vs_temp));
1155 		tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported, cfg);
1156 
1157 		if (memcmp(vs_temp, vs, sizeof(vs_temp)))
1158 			retry_cnt = 0;
1159 		else
1160 			++retry_cnt;
1161 	} while (retry_cnt < 5);
1162 
1163 	return -EIO;
1164 }
1165 
1166 static int tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
1167 				 u32 vs[4], u32 pc[4],
1168 				 const struct tegra_dp_link_config *cfg)
1169 {
1170 	u32 n_lanes = cfg->lane_count;
1171 	u8 pc_supported = cfg->tps3_supported;
1172 	int err;
1173 
1174 	tegra_dp_tpg(dp, training_pattern_1, n_lanes, cfg);
1175 
1176 	err = _tegra_dp_clk_recovery(dp, pe, vs, pc, pc_supported, n_lanes,
1177 				     cfg);
1178 	if (err < 0)
1179 		tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);
1180 
1181 	return err;
1182 }
1183 
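/*
 * Full link training: run clock recovery followed by channel equalization,
 * falling back to a lower link configuration and retrying when either
 * phase fails.
 */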
1184 static int tegra_dc_dp_full_link_training(struct tegra_dp_priv *dp,
1185 					  const struct display_timing *timing,
1186 					  struct tegra_dp_link_config *cfg)
1187 {
1188 	struct tegra_dc_sor_data *sor = dp->sor;
1189 	int err;
1190 	u32 pe[4], vs[4], pc[4];
1191 
1192 	tegra_sor_precharge_lanes(sor, cfg);
1193 
1194 retry_cr:
1195 	memset(pe, PREEMPHASIS_DISABLED, sizeof(pe));
1196 	memset(vs, DRIVECURRENT_LEVEL0, sizeof(vs));
1197 	memset(pc, POSTCURSOR2_LEVEL0, sizeof(pc));
1198 
1199 	err = tegra_dp_clk_recovery(dp, pe, vs, pc, cfg);
1200 	if (err) {
1201 		if (!tegra_dp_lower_link_config(dp, timing, cfg))
1202 			goto retry_cr;
1203 
1204 		debug("dp: clk recovery failed\n");
1205 		goto fail;
1206 	}
1207 
1208 	err = tegra_dp_channel_eq(dp, pe, vs, pc, cfg);
1209 	if (err) {
1210 		if (!tegra_dp_lower_link_config(dp, timing, cfg))
1211 			goto retry_cr;
1212 
1213 		debug("dp: channel equalization failed\n");
1214 		goto fail;
1215 	}
1216 #ifdef DEBUG
1217 	tegra_dc_dp_dump_link_cfg(dp, cfg);
1218 #endif
1219 	return 0;
1220 
1221 fail:
1222 	return err;
1223 }
1224 
1225 /*
1226  * All link training functions are ported from the kernel dc driver.
1227  * See drivers/video/tegra/dc/dp.c for more details.
1228  */
1229 static int tegra_dc_dp_fast_link_training(struct tegra_dp_priv *dp,
1230 		const struct tegra_dp_link_config *link_cfg,
1231 		struct tegra_dc_sor_data *sor)
1232 {
1233 	u8	link_bw;
1234 	u8	lane_count;
1235 	u16	data16;
1236 	u32	data32;
1237 	u32	size;
1238 	u32	status;
1239 	int	j;
1240 	u32	mask = 0xffff >> ((4 - link_cfg->lane_count) * 4);
1241 
1242 	tegra_dc_sor_set_lane_parm(sor, link_cfg);
1243 	tegra_dc_dp_dpcd_write(dp, DP_MAIN_LINK_CHANNEL_CODING_SET,
1244 			       DP_SET_ANSI_8B10B);
1245 
1246 	/* Send TP1 */
1247 	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_1, link_cfg);
1248 	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
1249 			       DP_TRAINING_PATTERN_1);
1250 
1251 	for (j = 0; j < link_cfg->lane_count; ++j)
1252 		tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
1253 	udelay(520);
1254 
1255 	size = sizeof(data16);
1256 	tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
1257 			    DP_LANE0_1_STATUS, (u8 *)&data16, &size, &status);
1258 	status = mask & 0x1111;
1259 	if ((data16 & status) != status) {
1260 		debug("dp: Link training error for TP1 (%#x, status %#x)\n",
1261 		      data16, status);
1262 		return -EFAULT;
1263 	}
1264 
1265 	/* enable ASSR */
1266 	tegra_dc_dp_set_assr(dp, sor, link_cfg->scramble_ena);
1267 	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_3, link_cfg);
1268 
1269 	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
1270 			       link_cfg->link_bw == 20 ? 0x23 : 0x22);
1271 	for (j = 0; j < link_cfg->lane_count; ++j)
1272 		tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
1273 	udelay(520);
1274 
1275 	size = sizeof(data32);
1276 	tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD, DP_LANE0_1_STATUS,
1277 			    (u8 *)&data32, &size, &status);
1278 	if ((data32 & mask) != (0x7777 & mask)) {
1279 		debug("dp: Link training error for TP2/3 (0x%x)\n", data32);
1280 		return -EFAULT;
1281 	}
1282 
1283 	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_disabled,
1284 				    link_cfg);
1285 	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, 0);
1286 
1287 	if (tegra_dc_dp_link_trained(dp, link_cfg)) {
1288 		tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);
1289 		debug("Fast link training failed, link bw %d, lane # %d\n",
1290 		      link_bw, lane_count);
1291 		return -EFAULT;
1292 	}
1293 
1294 	debug("Fast link training succeeded, link bw %d, lane %d\n",
1295 	      link_cfg->link_bw, link_cfg->lane_count);
1296 
1297 	return 0;
1298 }
1299 
1300 static int tegra_dp_do_link_training(struct tegra_dp_priv *dp,
1301 		struct tegra_dp_link_config *link_cfg,
1302 		const struct display_timing *timing,
1303 		struct tegra_dc_sor_data *sor)
1304 {
1305 	u8	link_bw;
1306 	u8	lane_count;
1307 	int	ret;
1308 
1309 	if (DO_FAST_LINK_TRAINING) {
1310 		ret = tegra_dc_dp_fast_link_training(dp, link_cfg, sor);
1311 		if (ret) {
1312 			debug("dp: fast link training failed\n");
1313 		} else {
1314 			/*
1315 			 * Set to a known-good drive setting if fast link
1316 			 * training succeeded. Ignore any error.
1317 			 */
1318 			ret = tegra_dc_sor_set_voltage_swing(dp->sor, link_cfg);
1319 			if (ret)
1320 				debug("Failed to set voltage swing\n");
1321 		}
1322 	} else {
1323 		ret = -ENOSYS;
1324 	}
1325 	if (ret) {
1326 		/* Try full link training then */
1327 		ret = tegra_dc_dp_full_link_training(dp, timing, link_cfg);
1328 		if (ret) {
1329 			debug("dp: full link training failed\n");
1330 			return ret;
1331 		}
1332 	}
1333 
1334 	/* Everything is good; double check the link config */
1335 	tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);
1336 
1337 	if ((link_cfg->link_bw == link_bw) &&
1338 	    (link_cfg->lane_count == lane_count))
1339 		return 0;
1340 	else
1341 		return -EFAULT;
1342 }
1343 
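/*
 * Start from the maximum link configuration and attempt link training;
 * the result is kept in *link_cfg only if the whole sequence succeeds.
 */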
1344 static int tegra_dc_dp_explore_link_cfg(struct tegra_dp_priv *dp,
1345 			struct tegra_dp_link_config *link_cfg,
1346 			struct tegra_dc_sor_data *sor,
1347 			const struct display_timing *timing)
1348 {
1349 	struct tegra_dp_link_config temp_cfg;
1350 
1351 	if (!timing->pixelclock.typ || !timing->hactive.typ ||
1352 	    !timing->vactive.typ) {
1353 		debug("dp: invalid mode configuration\n");
1354 		return -EINVAL;
1355 	}
1356 	if (!link_cfg->max_link_bw || !link_cfg->max_lane_count) {
1357 		debug("dp: error link configuration");
1358 		debug("dp: invalid link configuration\n");
1359 	}
1360 
1361 	link_cfg->is_valid = 0;
1362 
1363 	memcpy(&temp_cfg, link_cfg, sizeof(temp_cfg));
1364 
1365 	temp_cfg.link_bw = temp_cfg.max_link_bw;
1366 	temp_cfg.lane_count = temp_cfg.max_lane_count;
1367 
1368 	/*
1369 	 * set to max link config
1370 	 */
1371 	if ((!tegra_dc_dp_calc_config(dp, timing, &temp_cfg)) &&
1372 	    (!tegra_dp_link_config(dp, &temp_cfg)) &&
1373 		(!tegra_dp_do_link_training(dp, &temp_cfg, timing, sor)))
1374 		/* the max link cfg is doable */
1375 		memcpy(link_cfg, &temp_cfg, sizeof(temp_cfg));
1376 
1377 	return link_cfg->is_valid ? 0 : -EFAULT;
1378 }
1379 
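/* Wait up to 200ms for the sink to assert hot-plug detect (HPD) */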
1380 static int tegra_dp_hpd_plug(struct tegra_dp_priv *dp)
1381 {
1382 	const int vdd_to_hpd_delay_ms = 200;
1383 	u32 val;
1384 	ulong start;
1385 
1386 	start = get_timer(0);
1387 	do {
1388 		val = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
1389 		if (val & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)
1390 			return 0;
1391 		udelay(100);
1392 	} while (get_timer(start) < vdd_to_hpd_delay_ms);
1393 
1394 	return -EIO;
1395 }
1396 
1397 static int tegra_dc_dp_sink_out_of_sync(struct tegra_dp_priv *dp, u32 delay_ms)
1398 {
1399 	u8 dpcd_data;
1400 	int out_of_sync;
1401 	int ret;
1402 
1403 	debug("%s: delay=%d\n", __func__, delay_ms);
1404 	mdelay(delay_ms);
1405 	ret = tegra_dc_dp_dpcd_read(dp, DP_SINK_STATUS, &dpcd_data);
1406 	if (ret)
1407 		return ret;
1408 
1409 	out_of_sync = !(dpcd_data & DP_SINK_STATUS_PORT0_IN_SYNC);
1410 	if (out_of_sync)
1411 		debug("SINK receive port 0 out of sync, data=%x\n", dpcd_data);
1412 	else
1413 		debug("SINK is in synchronization\n");
1414 
1415 	return out_of_sync;
1416 }
1417 
1418 static int tegra_dc_dp_check_sink(struct tegra_dp_priv *dp,
1419 				  struct tegra_dp_link_config *link_cfg,
1420 				  const struct display_timing *timing)
1421 {
1422 	const int max_retry = 5;
1423 	int delay_frame;
1424 	int retries;
1425 
1426 	/*
1427 	 * The DP TCON may skip some main-stream frames, so wait a few frame
1428 	 * times before reading the DPCD SINK STATUS register, starting with
1429 	 * a delay of 5 frames.
1430 	 */
1431 	delay_frame = 5;
1432 
1433 	retries = max_retry;
1434 	do {
1435 		int ret;
1436 
1437 		if (!tegra_dc_dp_sink_out_of_sync(dp, link_cfg->frame_in_ms *
1438 						  delay_frame))
1439 			return 0;
1440 
1441 		debug("%s: retries left %d\n", __func__, retries);
1442 		if (!retries--) {
1443 			printf("DP: Out of sync after %d retries\n", max_retry);
1444 			return -EIO;
1445 		}
1446 		ret = tegra_dc_sor_detach(dp->sor);
1447 		if (ret)
1448 			return ret;
1449 		if (tegra_dc_dp_explore_link_cfg(dp, link_cfg, dp->sor,
1450 						 timing)) {
1451 			debug("dp: %s: failed to configure link\n", __func__);
1452 			continue;
1453 		}
1454 
1455 		tegra_dc_sor_set_power_state(dp->sor, 1);
1456 		tegra_dc_sor_attach(dp->sor, link_cfg, timing);
1457 
1458 		/* Increase delay_frame for the next try in case the sink is
1459 		   skipping more frames */
1460 		delay_frame += 10;
1461 	} while (1);
1462 }
1463 
1464 int tegra_dp_enable(struct udevice *dev, int panel_bpp,
1465 		    const struct display_timing *timing)
1466 {
1467 	struct tegra_dp_priv *priv = dev_get_priv(dev);
1468 	struct tegra_dp_link_config slink_cfg, *link_cfg = &slink_cfg;
1469 	struct tegra_dc_sor_data *sor;
1470 	int data;
1471 	int retry;
1472 	int ret;
1473 
1474 	memset(link_cfg, '\0', sizeof(*link_cfg));
1475 	link_cfg->is_valid = 0;
1476 	link_cfg->scramble_ena = 1;
1477 
1478 	tegra_dc_dpaux_enable(priv);
1479 
1480 	if (tegra_dp_hpd_plug(priv) < 0) {
1481 		debug("dp: hpd plug failed\n");
1482 		return -EIO;
1483 	}
1484 
1485 	link_cfg->bits_per_pixel = panel_bpp;
1486 	if (tegra_dc_dp_init_max_link_cfg(timing, priv, link_cfg)) {
1487 		debug("dp: failed to init link configuration\n");
1488 		return -ENOLINK;
1489 	}
1490 
1491 	ret = tegra_dc_sor_init(&sor);
1492 	if (ret)
1493 		return ret;
1494 	priv->sor = sor;
1495 	ret = tegra_dc_sor_enable_dp(sor, link_cfg);
1496 	if (ret)
1497 		return ret;
1498 
1499 	tegra_dc_sor_set_panel_power(sor, 1);
1500 
1501 	/* Write power on to DPCD */
1502 	data = DP_SET_POWER_D0;
1503 	retry = 0;
1504 	do {
1505 		ret = tegra_dc_dp_dpcd_write(priv, DP_SET_POWER, data);
1506 	} while ((retry++ < DP_POWER_ON_MAX_TRIES) && ret);
1507 
1508 	if (ret || retry >= DP_POWER_ON_MAX_TRIES) {
1509 		debug("dp: failed to power on panel (0x%x)\n", ret);
1510 		return -ENETUNREACH;
1512 	}
1513 
1514 	/* Confirm DP plugging status */
1515 	if (!(tegra_dpaux_readl(priv, DPAUX_DP_AUXSTAT) &
1516 			DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
1517 		debug("dp: could not detect HPD\n");
1518 		return -ENXIO;
1519 	}
1520 
1521 	/* Check DP version */
1522 	if (tegra_dc_dp_dpcd_read(priv, DP_DPCD_REV, &priv->revision)) {
1523 		debug("dp: failed to read the revision number from sink\n");
1524 		return -EIO;
1525 	}
1526 
1527 	if (tegra_dc_dp_explore_link_cfg(priv, link_cfg, sor, timing)) {
1528 		debug("dp: error configuring link\n");
1529 		return -ENOMEDIUM;
1530 	}
1531 
1532 	tegra_dc_sor_set_power_state(sor, 1);
1533 	ret = tegra_dc_sor_attach(sor, link_cfg, timing);
1534 	if (ret && ret != -EEXIST)
1535 		return ret;
1536 
1537 	/*
1538 	 * This takes a long time, but can apparently resolve a failure to
1539 	 * bring up the display correctly.
1540 	 */
1541 	if (0) {
1542 		ret = tegra_dc_dp_check_sink(priv, link_cfg, timing);
1543 		if (ret)
1544 			return ret;
1545 	}
1546 
1547 	/* Power down the unused lanes to save power - a few hundred mW */
1548 	tegra_dc_sor_power_down_unused_lanes(sor, link_cfg);
1549 
1550 	priv->enabled = true;
1552 	return 0;
1553 }
1554 
1555 static int tegra_dp_ofdata_to_platdata(struct udevice *dev)
1556 {
1557 	struct tegra_dp_plat *plat = dev_get_platdata(dev);
1558 
1559 	plat->base = dev_get_addr(dev);
1560 
1561 	return 0;
1562 }
1563 
1564 static int tegra_dp_read_edid(struct udevice *dev, u8 *buf, int buf_size)
1565 {
1566 	struct tegra_dp_priv *priv = dev_get_priv(dev);
1567 	const int tegra_edid_i2c_address = 0x50;
1568 	u32 aux_stat = 0;
1569 
1570 	tegra_dc_dpaux_enable(priv);
1571 
1572 	return tegra_dc_i2c_aux_read(priv, tegra_edid_i2c_address, 0, buf,
1573 				     buf_size, &aux_stat);
1574 }
1575 
1576 static const struct dm_display_port_ops dp_tegra_ops = {
1577 	.read_edid = tegra_dp_read_edid,
1578 	.enable = tegra_dp_enable,
1579 };
1580 
1581 static int dp_tegra_probe(struct udevice *dev)
1582 {
1583 	struct tegra_dp_plat *plat = dev_get_platdata(dev);
1584 	struct tegra_dp_priv *priv = dev_get_priv(dev);
1585 
1586 	priv->regs = (struct dpaux_ctlr *)plat->base;
1587 	priv->enabled = false;
1588 
1589 	return 0;
1590 }
1591 
1592 static const struct udevice_id tegra_dp_ids[] = {
1593 	{ .compatible = "nvidia,tegra124-dpaux" },
1594 	{ }
1595 };
1596 
1597 U_BOOT_DRIVER(dp_tegra) = {
1598 	.name	= "dpaux_tegra",
1599 	.id	= UCLASS_DISPLAY_PORT,
1600 	.of_match = tegra_dp_ids,
1601 	.ofdata_to_platdata = tegra_dp_ofdata_to_platdata,
1602 	.probe	= dp_tegra_probe,
1603 	.ops	= &dp_tegra_ops,
1604 	.priv_auto_alloc_size = sizeof(struct tegra_dp_priv),
1605 	.platdata_auto_alloc_size = sizeof(struct tegra_dp_plat),
1606 };
1607