xref: /openbmc/u-boot/drivers/video/tegra124/dp.c (revision 70b95ded)
1 /*
2  * Copyright (c) 2011-2013, NVIDIA Corporation.
3  * Copyright 2014 Google Inc.
4  *
5  * SPDX-License-Identifier:     GPL-2.0
6  */
7 
8 #include <common.h>
9 #include <display.h>
10 #include <dm.h>
11 #include <div64.h>
12 #include <errno.h>
13 #include <fdtdec.h>
14 #include <video_bridge.h>
15 #include <asm/io.h>
16 #include <asm/arch-tegra/dc.h>
17 #include "display.h"
18 #include "edid.h"
19 #include "sor.h"
20 #include "displayport.h"
21 
22 DECLARE_GLOBAL_DATA_PTR;
23 
24 #define DO_FAST_LINK_TRAINING		1
25 
26 struct tegra_dp_plat {
27 	ulong base;
28 };
29 
30 /**
31  * struct tegra_dp_priv - private displayport driver info
32  *
33  * @dc_dev:	Display controller device that is sending the video feed
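 * @sor:	SOR (video bridge) device that drives the panel
 * @regs:	DPAUX controller register block
 * @revision:	DPCD revision reported by the sink
 * @enabled:	true once the display has been enabled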
34  */
35 struct tegra_dp_priv {
36 	struct udevice *sor;
37 	struct udevice *dc_dev;
38 	struct dpaux_ctlr *regs;
39 	u8 revision;
40 	int enabled;
41 };
42 
43 struct tegra_dp_priv dp_data;
44 
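/*
 * Note: the DPAUX register "offsets" used below are indices into an array of
 * 32-bit registers, not byte offsets.
 */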
45 static inline u32 tegra_dpaux_readl(struct tegra_dp_priv *dp, u32 reg)
46 {
47 	return readl((u32 *)dp->regs + reg);
48 }
49 
50 static inline void tegra_dpaux_writel(struct tegra_dp_priv *dp, u32 reg,
51 				      u32 val)
52 {
53 	writel(val, (u32 *)dp->regs + reg);
54 }
55 
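/*
 * Poll @reg every @poll_interval_us until (value & @mask) == @exp_val or
 * @timeout_us expires. Returns 0 on success, non-zero on timeout.
 */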
56 static inline u32 tegra_dc_dpaux_poll_register(struct tegra_dp_priv *dp,
57 					   u32 reg, u32 mask, u32 exp_val,
58 					   u32 poll_interval_us,
59 					   u32 timeout_us)
60 {
61 	u32 reg_val = 0;
62 	u32 temp = timeout_us;
63 
64 	do {
65 		udelay(poll_interval_us);
66 		reg_val = tegra_dpaux_readl(dp, reg);
67 		if (timeout_us > poll_interval_us)
68 			timeout_us -= poll_interval_us;
69 		else
70 			break;
71 	} while ((reg_val & mask) != exp_val);
72 
73 	if ((reg_val & mask) == exp_val)
74 		return 0;	/* success */
75 	debug("dpaux_poll_register 0x%x: timeout: (reg_val)0x%08x & (mask)0x%08x != (exp_val)0x%08x\n",
76 	      reg, reg_val, mask, exp_val);
77 	return temp;
78 }
79 
80 static inline int tegra_dpaux_wait_transaction(struct tegra_dp_priv *dp)
81 {
82 	/* According to DP spec, each aux transaction needs to finish
83 	   within 40ms. */
84 	if (tegra_dc_dpaux_poll_register(dp, DPAUX_DP_AUXCTL,
85 					 DPAUX_DP_AUXCTL_TRANSACTREQ_MASK,
86 					 DPAUX_DP_AUXCTL_TRANSACTREQ_DONE,
87 					 100, DP_AUX_TIMEOUT_MS * 1000) != 0) {
88 		debug("dp: DPAUX transaction timeout\n");
89 		return -1;
90 	}
91 	return 0;
92 }
93 
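/*
 * Write a single chunk (at most DP_AUX_MAX_BYTES) over the AUX channel,
 * retrying on timeout and defer replies. On success *size is updated to the
 * number of bytes the sink acknowledged.
 */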
94 static int tegra_dc_dpaux_write_chunk(struct tegra_dp_priv *dp, u32 cmd,
95 					  u32 addr, u8 *data, u32 *size,
96 					  u32 *aux_stat)
97 {
98 	int i;
99 	u32 reg_val;
100 	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
101 	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
102 	u32 temp_data;
103 
104 	if (*size > DP_AUX_MAX_BYTES)
105 		return -1;	/* only write one chunk of data */
106 
107 	/* Make sure the command is a write command */
108 	switch (cmd) {
109 	case DPAUX_DP_AUXCTL_CMD_I2CWR:
110 	case DPAUX_DP_AUXCTL_CMD_MOTWR:
111 	case DPAUX_DP_AUXCTL_CMD_AUXWR:
112 		break;
113 	default:
114 		debug("dp: aux write cmd 0x%x is invalid\n", cmd);
115 		return -EINVAL;
116 	}
117 
118 	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
119 	for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i) {
120 		memcpy(&temp_data, data, 4);
121 		tegra_dpaux_writel(dp, DPAUX_DP_AUXDATA_WRITE_W(i), temp_data);
122 		data += 4;
123 	}
124 
125 	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
126 	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
127 	reg_val |= cmd;
128 	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
129 	reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);
130 
131 	while ((timeout_retries > 0) && (defer_retries > 0)) {
132 		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
133 		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
134 			udelay(1);
135 
136 		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
137 		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);
138 
139 		if (tegra_dpaux_wait_transaction(dp))
140 			debug("dp: aux write transaction timeout\n");
141 
142 		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
143 
144 		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
145 		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
146 		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
147 		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
148 			if (timeout_retries-- > 0) {
149 				debug("dp: aux write retry (0x%x) -- %d\n",
150 				      *aux_stat, timeout_retries);
151 				/* clear the error bits */
152 				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
153 						   *aux_stat);
154 				continue;
155 			} else {
156 				debug("dp: aux write got error (0x%x)\n",
157 				      *aux_stat);
158 				return -ETIMEDOUT;
159 			}
160 		}
161 
162 		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
163 		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
164 			if (defer_retries-- > 0) {
165 				debug("dp: aux write defer (0x%x) -- %d\n",
166 				      *aux_stat, defer_retries);
167 				/* clear the error bits */
168 				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
169 						   *aux_stat);
170 				continue;
171 			} else {
172 				debug("dp: aux write defer exceeds max retries (0x%x)\n",
173 				      *aux_stat);
174 				return -ETIMEDOUT;
175 			}
176 		}
177 
178 		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
179 			DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
180 			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
181 			return 0;
182 		} else {
183 			debug("dp: aux write failed (0x%x)\n", *aux_stat);
184 			return -EIO;
185 		}
186 	}
187 	/* Should never get here */
188 	return -EIO;
189 }
190 
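/*
 * Read a single chunk (at most DP_AUX_MAX_BYTES) over the AUX channel,
 * retrying on timeout and defer replies. On success *size is updated to the
 * number of bytes actually returned by the sink.
 */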
191 static int tegra_dc_dpaux_read_chunk(struct tegra_dp_priv *dp, u32 cmd,
192 					 u32 addr, u8 *data, u32 *size,
193 					 u32 *aux_stat)
194 {
195 	u32 reg_val;
196 	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
197 	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
198 
199 	if (*size > DP_AUX_MAX_BYTES) {
200 		debug("only read one chunk\n");
201 		return -EIO;	/* only read one chunk */
202 	}
203 
204 	/* Check to make sure the command is a read command */
205 	switch (cmd) {
206 	case DPAUX_DP_AUXCTL_CMD_I2CRD:
207 	case DPAUX_DP_AUXCTL_CMD_I2CREQWSTAT:
208 	case DPAUX_DP_AUXCTL_CMD_MOTRD:
209 	case DPAUX_DP_AUXCTL_CMD_AUXRD:
210 		break;
211 	default:
212 		debug("dp: aux read cmd 0x%x is invalid\n", cmd);
213 		return -EIO;
214 	}
215 
216 	*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
217 	if (!(*aux_stat & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
218 		debug("dp: HPD is not detected\n");
219 		return -EIO;
220 	}
221 
222 	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
223 
224 	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
225 	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
226 	reg_val |= cmd;
227 	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
228 	reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);
229 	while ((timeout_retries > 0) && (defer_retries > 0)) {
230 		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
231 		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
232 			udelay(DP_DPCP_RETRY_SLEEP_NS * 2);
233 
234 		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
235 		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);
236 
237 		if (tegra_dpaux_wait_transaction(dp))
238 			debug("dp: aux read transaction timeout\n");
239 
240 		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
241 
242 		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
243 		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
244 		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
245 		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
246 			if (timeout_retries-- > 0) {
247 				debug("dp: aux read retry (0x%x) -- %d\n",
248 				      *aux_stat, timeout_retries);
249 				/* clear the error bits */
250 				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
251 						   *aux_stat);
252 				continue;	/* retry */
253 			} else {
254 				debug("dp: aux read got error (0x%x)\n",
255 				      *aux_stat);
256 				return -ETIMEDOUT;
257 			}
258 		}
259 
260 		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
261 		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
262 			if (defer_retries-- > 0) {
263 				debug("dp: aux read defer (0x%x) -- %d\n",
264 				      *aux_stat, defer_retries);
265 				/* clear the error bits */
266 				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
267 						   *aux_stat);
268 				continue;
269 			} else {
270 				debug("dp: aux read defer exceeds max retries (0x%x)\n",
271 				      *aux_stat);
272 				return -ETIMEDOUT;
273 			}
274 		}
275 
276 		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
277 			DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
278 			int i;
279 			u32 temp_data[4];
280 
281 			for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i)
282 				temp_data[i] = tegra_dpaux_readl(dp,
283 						DPAUX_DP_AUXDATA_READ_W(i));
284 
285 			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
286 			memcpy(data, temp_data, *size);
287 
288 			return 0;
289 		} else {
290 			debug("dp: aux read failed (0x%x)\n", *aux_stat);
291 			return -EIO;
292 		}
293 	}
294 	/* Should never get here */
295 	debug("%s: should never get here\n", __func__);
296 
297 	return -EIO;
298 }
299 
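/*
 * Read an arbitrary number of bytes by issuing as many chunked AUX reads as
 * needed. On return *size holds the number of bytes actually read.
 */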
300 static int tegra_dc_dpaux_read(struct tegra_dp_priv *dp, u32 cmd, u32 addr,
301 			u8 *data, u32 *size, u32 *aux_stat)
302 {
303 	u32 finished = 0;
304 	u32 cur_size;
305 	int ret = 0;
306 
307 	do {
308 		cur_size = *size - finished;
309 		if (cur_size > DP_AUX_MAX_BYTES)
310 			cur_size = DP_AUX_MAX_BYTES;
311 
312 		ret = tegra_dc_dpaux_read_chunk(dp, cmd, addr,
313 						data, &cur_size, aux_stat);
314 		if (ret)
315 			break;
316 
317 		/* cur_size should be the real size returned */
318 		addr += cur_size;
319 		data += cur_size;
320 		finished += cur_size;
321 
322 	} while (*size > finished);
323 	*size = finished;
324 
325 	return ret;
326 }
327 
328 static int tegra_dc_dp_dpcd_read(struct tegra_dp_priv *dp, u32 cmd,
329 				 u8 *data_ptr)
330 {
331 	u32 size = 1;
332 	u32 status = 0;
333 	int ret;
334 
335 	ret = tegra_dc_dpaux_read_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
336 					cmd, data_ptr, &size, &status);
337 	if (ret) {
338 		debug("dp: Failed to read DPCD data. CMD 0x%x, Status 0x%x\n",
339 		      cmd, status);
340 	}
341 
342 	return ret;
343 }
344 
345 static int tegra_dc_dp_dpcd_write(struct tegra_dp_priv *dp, u32 cmd,
346 				u8 data)
347 {
348 	u32 size = 1;
349 	u32 status = 0;
350 	int ret;
351 
352 	ret = tegra_dc_dpaux_write_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXWR,
353 					cmd, &data, &size, &status);
354 	if (ret) {
355 		debug("dp: Failed to write DPCD data. CMD 0x%x, Status 0x%x\n",
356 		      cmd, status);
357 	}
358 
359 	return ret;
360 }
361 
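/*
 * Read @size bytes from an I2C device behind the AUX channel (used here for
 * EDID): write the register address with an I2C-over-AUX MOT transaction,
 * then read the data in chunks. Returns the number of bytes read on success
 * or a negative error code.
 */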
362 static int tegra_dc_i2c_aux_read(struct tegra_dp_priv *dp, u32 i2c_addr,
363 				 u8 addr, u8 *data, u32 size, u32 *aux_stat)
364 {
365 	u32 finished = 0;
366 	int ret = 0;
367 
368 	do {
369 		u32 cur_size = min((u32)DP_AUX_MAX_BYTES, size - finished);
370 
371 		u32 len = 1;
372 		ret = tegra_dc_dpaux_write_chunk(
373 				dp, DPAUX_DP_AUXCTL_CMD_MOTWR, i2c_addr,
374 				&addr, &len, aux_stat);
375 		if (ret) {
376 			debug("%s: error sending address to read.\n",
377 			      __func__);
378 			return ret;
379 		}
380 
381 		ret = tegra_dc_dpaux_read_chunk(
382 				dp, DPAUX_DP_AUXCTL_CMD_I2CRD, i2c_addr,
383 				data, &cur_size, aux_stat);
384 		if (ret) {
385 			debug("%s: error reading data.\n", __func__);
386 			return ret;
387 		}
388 
389 		/* cur_size should be the real size returned */
390 		addr += cur_size;
391 		data += cur_size;
392 		finished += cur_size;
393 	} while (size > finished);
394 
395 	return finished;
396 }
397 
398 static void tegra_dc_dpaux_enable(struct tegra_dp_priv *dp)
399 {
400 	/* clear interrupt */
401 	tegra_dpaux_writel(dp, DPAUX_INTR_AUX, 0xffffffff);
402 	/* Do not enable interrupts for now; enable them when the ISR is in place */
403 	tegra_dpaux_writel(dp, DPAUX_INTR_EN_AUX, 0x0);
404 
405 	tegra_dpaux_writel(dp, DPAUX_HYBRID_PADCTL,
406 			   DPAUX_HYBRID_PADCTL_AUX_DRVZ_OHM_50 |
407 			   DPAUX_HYBRID_PADCTL_AUX_CMH_V0_70 |
408 			   0x18 << DPAUX_HYBRID_PADCTL_AUX_DRVI_SHIFT |
409 			   DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV_ENABLE);
410 
411 	tegra_dpaux_writel(dp, DPAUX_HYBRID_SPARE,
412 			   DPAUX_HYBRID_SPARE_PAD_PWR_POWERUP);
413 }
414 
415 #ifdef DEBUG
416 static void tegra_dc_dp_dump_link_cfg(struct tegra_dp_priv *dp,
417 	const struct tegra_dp_link_config *link_cfg)
418 {
419 	debug("DP config: cfg_name               cfg_value\n");
420 	debug("           Lane Count             %d\n",
421 	      link_cfg->max_lane_count);
422 	debug("           SupportEnhancedFraming %s\n",
423 	      link_cfg->support_enhanced_framing ? "Y" : "N");
424 	debug("           Bandwidth              %d\n",
425 	      link_cfg->max_link_bw);
426 	debug("           bpp                    %d\n",
427 	      link_cfg->bits_per_pixel);
428 	debug("           EnhancedFraming        %s\n",
429 	      link_cfg->enhanced_framing ? "Y" : "N");
430 	debug("           Scramble_enabled       %s\n",
431 	      link_cfg->scramble_ena ? "Y" : "N");
432 	debug("           LinkBW                 %d\n",
433 	      link_cfg->link_bw);
434 	debug("           lane_count             %d\n",
435 	      link_cfg->lane_count);
436 	debug("           activespolarity        %d\n",
437 	      link_cfg->activepolarity);
438 	debug("           active_count           %d\n",
439 	      link_cfg->active_count);
440 	debug("           tu_size                %d\n",
441 	      link_cfg->tu_size);
442 	debug("           active_frac            %d\n",
443 	      link_cfg->active_frac);
444 	debug("           watermark              %d\n",
445 	      link_cfg->watermark);
446 	debug("           hblank_sym             %d\n",
447 	      link_cfg->hblank_sym);
448 	debug("           vblank_sym             %d\n",
449 	      link_cfg->vblank_sym);
450 }
451 #endif
452 
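/*
 * Step down to an alternative link configuration with lower total bandwidth
 * (adjusting the link rate and/or halving the lane count). Returns -ENOLINK
 * when no further fallback is possible.
 */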
453 static int _tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
454 				       struct tegra_dp_link_config *cfg)
455 {
456 	switch (cfg->link_bw) {
457 	case SOR_LINK_SPEED_G1_62:
458 		if (cfg->max_link_bw > SOR_LINK_SPEED_G1_62)
459 			cfg->link_bw = SOR_LINK_SPEED_G2_7;
460 		cfg->lane_count /= 2;
461 		break;
462 	case SOR_LINK_SPEED_G2_7:
463 		cfg->link_bw = SOR_LINK_SPEED_G1_62;
464 		break;
465 	case SOR_LINK_SPEED_G5_4:
466 		if (cfg->lane_count == 1) {
467 			cfg->link_bw = SOR_LINK_SPEED_G2_7;
468 			cfg->lane_count = cfg->max_lane_count;
469 		} else {
470 			cfg->lane_count /= 2;
471 		}
472 		break;
473 	default:
474 		debug("dp: Error link rate %d\n", cfg->link_bw);
475 		return -ENOLINK;
476 	}
477 
478 	return (cfg->lane_count > 0) ? 0 : -ENOLINK;
479 }
480 
481 /*
482  * Calculate whether the given cfg can meet the mode request.
483  * Return 0 if the mode is possible, -1 otherwise.
484  */
485 static int tegra_dc_dp_calc_config(struct tegra_dp_priv *dp,
486 				   const struct display_timing *timing,
487 				   struct tegra_dp_link_config *link_cfg)
488 {
489 	const u32	link_rate = 27 * link_cfg->link_bw * 1000 * 1000;
490 	const u64	f	  = 100000;	/* precision factor */
491 	u32	num_linkclk_line; /* Number of link clocks per line */
492 	u64	ratio_f; /* Ratio of incoming to outgoing data rate */
493 	u64	frac_f;
494 	u64	activesym_f;	/* Activesym per TU */
495 	u64	activecount_f;
496 	u32	activecount;
497 	u32	activepolarity;
498 	u64	approx_value_f;
499 	u32	activefrac		  = 0;
500 	u64	accumulated_error_f	  = 0;
501 	u32	lowest_neg_activecount	  = 0;
502 	u32	lowest_neg_activepolarity = 0;
503 	u32	lowest_neg_tusize	  = 64;
504 	u32	num_symbols_per_line;
505 	u64	lowest_neg_activefrac	  = 0;
506 	u64	lowest_neg_error_f	  = 64 * f;
507 	u64	watermark_f;
508 	int	i;
509 	int	neg;
510 
511 	if (!link_rate || !link_cfg->lane_count || !timing->pixelclock.typ ||
512 	    !link_cfg->bits_per_pixel)
513 		return -1;
514 
515 	if ((u64)timing->pixelclock.typ * link_cfg->bits_per_pixel >=
516 		(u64)link_rate * 8 * link_cfg->lane_count)
517 		return -1;
518 
519 	num_linkclk_line = (u32)(lldiv(link_rate * timing->hactive.typ,
520 				       timing->pixelclock.typ));
521 
522 	ratio_f = (u64)timing->pixelclock.typ * link_cfg->bits_per_pixel * f;
523 	ratio_f /= 8;
524 	do_div(ratio_f, link_rate * link_cfg->lane_count);
525 
526 	for (i = 64; i >= 32; --i) {
527 		activesym_f	= ratio_f * i;
528 		activecount_f	= lldiv(activesym_f, (u32)f) * f;
529 		frac_f		= activesym_f - activecount_f;
530 		activecount	= (u32)(lldiv(activecount_f, (u32)f));
531 
532 		if (frac_f < (lldiv(f, 2))) /* fraction < 0.5 */
533 			activepolarity = 0;
534 		else {
535 			activepolarity = 1;
536 			frac_f = f - frac_f;
537 		}
538 
539 		if (frac_f != 0) {
540 			/* warning: frac_f should be 64-bit */
541 			frac_f = lldiv(f * f, frac_f); /* 1 / fraction */
542 			if (frac_f > (15 * f))
543 				activefrac = activepolarity ? 1 : 15;
544 			else
545 				activefrac = activepolarity ?
546 					(u32)lldiv(frac_f, (u32)f) + 1 :
547 					(u32)lldiv(frac_f, (u32)f);
548 		}
549 
550 		if (activefrac == 1)
551 			activepolarity = 0;
552 
553 		if (activepolarity == 1)
554 			approx_value_f = activefrac ? lldiv(
555 				(activecount_f + (activefrac * f - f) * f),
556 				(activefrac * f)) :
557 				activecount_f + f;
558 		else
559 			approx_value_f = activefrac ?
560 				activecount_f + lldiv(f, activefrac) :
561 				activecount_f;
562 
563 		if (activesym_f < approx_value_f) {
564 			accumulated_error_f = num_linkclk_line *
565 				lldiv(approx_value_f - activesym_f, i);
566 			neg = 1;
567 		} else {
568 			accumulated_error_f = num_linkclk_line *
569 				lldiv(activesym_f - approx_value_f, i);
570 			neg = 0;
571 		}
572 
573 		if ((neg && (lowest_neg_error_f > accumulated_error_f)) ||
574 		    (accumulated_error_f == 0)) {
575 			lowest_neg_error_f = accumulated_error_f;
576 			lowest_neg_tusize = i;
577 			lowest_neg_activecount = activecount;
578 			lowest_neg_activepolarity = activepolarity;
579 			lowest_neg_activefrac = activefrac;
580 
581 			if (accumulated_error_f == 0)
582 				break;
583 		}
584 	}
585 
586 	if (lowest_neg_activefrac == 0) {
587 		link_cfg->activepolarity = 0;
588 		link_cfg->active_count   = lowest_neg_activepolarity ?
589 			lowest_neg_activecount : lowest_neg_activecount - 1;
590 		link_cfg->tu_size	      = lowest_neg_tusize;
591 		link_cfg->active_frac    = 1;
592 	} else {
593 		link_cfg->activepolarity = lowest_neg_activepolarity;
594 		link_cfg->active_count   = (u32)lowest_neg_activecount;
595 		link_cfg->tu_size	      = lowest_neg_tusize;
596 		link_cfg->active_frac    = (u32)lowest_neg_activefrac;
597 	}
598 
599 	watermark_f = lldiv(ratio_f * link_cfg->tu_size * (f - ratio_f), f);
600 	link_cfg->watermark = (u32)(lldiv(watermark_f + lowest_neg_error_f,
601 		f)) + link_cfg->bits_per_pixel / 4 - 1;
602 	num_symbols_per_line = (timing->hactive.typ *
603 				link_cfg->bits_per_pixel) /
604 			       (8 * link_cfg->lane_count);
605 
606 	if (link_cfg->watermark > 30) {
607 		debug("dp: sor setting: unable to get a good tusize, force watermark to 30\n");
608 		link_cfg->watermark = 30;
609 		return -1;
610 	} else if (link_cfg->watermark > num_symbols_per_line) {
611 		debug("dp: sor setting: force watermark to the number of symbols in the line\n");
612 		link_cfg->watermark = num_symbols_per_line;
613 		return -1;
614 	}
615 
616 	/*
617 	 * Refer to dev_disp.ref for more information.
618 	 * # symbols/hblank = ((SetRasterBlankEnd.X + SetRasterSize.Width -
619 	 *                      SetRasterBlankStart.X - 7) * link_clk / pclk)
620 	 *                      - 3 * enhanced_framing - Y
621  * where Y = (# lanes == 4) ? 3 : (# lanes == 2) ? 6 : 12
622 	 */
623 	link_cfg->hblank_sym = (int)lldiv(((uint64_t)timing->hback_porch.typ +
624 			timing->hfront_porch.typ + timing->hsync_len.typ - 7) *
625 			link_rate, timing->pixelclock.typ) -
626 			3 * link_cfg->enhanced_framing -
627 			(12 / link_cfg->lane_count);
628 
629 	if (link_cfg->hblank_sym < 0)
630 		link_cfg->hblank_sym = 0;
631 
632 
633 	/*
634 	 * Refer to dev_disp.ref for more information.
635 	 * # symbols/vblank = ((SetRasterBlankStart.X -
636  *                      SetRasterBlankEnd.X - 25) * link_clk / pclk)
637 	 *                      - Y - 1;
638  * where Y = (# lanes == 4) ? 12 : (# lanes == 2) ? 21 : 39
639 	 */
640 	link_cfg->vblank_sym = (int)lldiv(((uint64_t)timing->hactive.typ - 25)
641 			* link_rate, timing->pixelclock.typ) - (36 /
642 			link_cfg->lane_count) - 4;
643 
644 	if (link_cfg->vblank_sym < 0)
645 		link_cfg->vblank_sym = 0;
646 
647 	link_cfg->is_valid = 1;
648 #ifdef DEBUG
649 	tegra_dc_dp_dump_link_cfg(dp, link_cfg);
650 #endif
651 
652 	return 0;
653 }
654 
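/*
 * Read the sink capabilities from the DPCD and initialise @link_cfg with the
 * maximum supported lane count and link rate, plus drive settings suitable
 * for link training.
 */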
655 static int tegra_dc_dp_init_max_link_cfg(
656 			const struct display_timing *timing,
657 			struct tegra_dp_priv *dp,
658 			struct tegra_dp_link_config *link_cfg)
659 {
660 	const int drive_current = 0x40404040;
661 	const int preemphasis = 0x0f0f0f0f;
662 	const int postcursor = 0;
663 	u8 dpcd_data;
664 	int ret;
665 
666 	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LANE_COUNT, &dpcd_data);
667 	if (ret)
668 		return ret;
669 	link_cfg->max_lane_count = dpcd_data & DP_MAX_LANE_COUNT_MASK;
670 	link_cfg->tps3_supported = (dpcd_data &
671 			DP_MAX_LANE_COUNT_TPS3_SUPPORTED_YES) ? 1 : 0;
672 
673 	link_cfg->support_enhanced_framing =
674 		(dpcd_data & DP_MAX_LANE_COUNT_ENHANCED_FRAMING_YES) ?
675 		1 : 0;
676 
677 	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_DOWNSPREAD, &dpcd_data);
678 	if (ret)
679 		return ret;
680 	link_cfg->downspread = (dpcd_data & DP_MAX_DOWNSPREAD_VAL_0_5_PCT) ?
681 				1 : 0;
682 
683 	ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_TRAINING_AUX_RD_INTERVAL,
684 				    &link_cfg->aux_rd_interval);
685 	if (ret)
686 		return ret;
687 	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LINK_RATE,
688 				    &link_cfg->max_link_bw);
689 	if (ret)
690 		return ret;
691 
692 	/*
693 	 * Set to a high value for link training and attach.
694 	 * Will be re-programmed when dp is enabled.
695 	 */
696 	link_cfg->drive_current = drive_current;
697 	link_cfg->preemphasis = preemphasis;
698 	link_cfg->postcursor = postcursor;
699 
700 	ret = tegra_dc_dp_dpcd_read(dp, DP_EDP_CONFIGURATION_CAP, &dpcd_data);
701 	if (ret)
702 		return ret;
703 
704 	link_cfg->alt_scramber_reset_cap =
705 		(dpcd_data & DP_EDP_CONFIGURATION_CAP_ASC_RESET_YES) ?
706 		1 : 0;
707 	link_cfg->only_enhanced_framing =
708 		(dpcd_data & DP_EDP_CONFIGURATION_CAP_FRAMING_CHANGE_YES) ?
709 		1 : 0;
710 
711 	link_cfg->lane_count = link_cfg->max_lane_count;
712 	link_cfg->link_bw = link_cfg->max_link_bw;
713 	link_cfg->enhanced_framing = link_cfg->support_enhanced_framing;
714 	link_cfg->frame_in_ms = (1000 / 60) + 1;
715 
716 	tegra_dc_dp_calc_config(dp, timing, link_cfg);
717 	return 0;
718 }
719 
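/*
 * Enable or disable alternate scrambler reset (ASSR) on the sink via the
 * DPCD and apply the matching internal-panel setting in the SOR.
 */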
720 static int tegra_dc_dp_set_assr(struct tegra_dp_priv *priv,
721 				struct udevice *sor, int ena)
722 {
723 	int ret;
724 
725 	u8 dpcd_data = ena ?
726 		DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_ENABLE :
727 		DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_DISABLE;
728 
729 	ret = tegra_dc_dp_dpcd_write(priv, DP_EDP_CONFIGURATION_SET,
730 				     dpcd_data);
731 	if (ret)
732 		return ret;
733 
734 	/* Also reset the scrambler to 0xfffe */
735 	tegra_dc_sor_set_internal_panel(sor, ena);
736 	return 0;
737 }
738 
739 static int tegra_dp_set_link_bandwidth(struct tegra_dp_priv *dp,
740 				       struct udevice *sor,
741 				       u8 link_bw)
742 {
743 	tegra_dc_sor_set_link_bandwidth(sor, link_bw);
744 
745 	/* Sink side */
746 	return tegra_dc_dp_dpcd_write(dp, DP_LINK_BW_SET, link_bw);
747 }
748 
749 static int tegra_dp_set_lane_count(struct tegra_dp_priv *dp,
750 		const struct tegra_dp_link_config *link_cfg,
751 		struct udevice *sor)
752 {
753 	u8	dpcd_data;
754 	int	ret;
755 
756 	/* Check whether the panel supports enhanced framing */
757 	dpcd_data = link_cfg->lane_count;
758 	if (link_cfg->enhanced_framing)
759 		dpcd_data |= DP_LANE_COUNT_SET_ENHANCEDFRAMING_T;
760 	ret = tegra_dc_dp_dpcd_write(dp, DP_LANE_COUNT_SET, dpcd_data);
761 	if (ret)
762 		return ret;
763 
764 	tegra_dc_sor_set_lane_count(sor, link_cfg->lane_count);
765 
766 	/* Also power down lanes that will not be used */
767 	return 0;
768 }
769 
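/*
 * Check the per-lane DPCD status registers; returns 0 only if clock
 * recovery, channel equalization and symbol lock are complete on every
 * active lane.
 */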
770 static int tegra_dc_dp_link_trained(struct tegra_dp_priv *dp,
771 				    const struct tegra_dp_link_config *cfg)
772 {
773 	u32 lane;
774 	u8 mask;
775 	u8 data;
776 	int ret;
777 
778 	for (lane = 0; lane < cfg->lane_count; ++lane) {
779 		ret = tegra_dc_dp_dpcd_read(dp, (lane / 2) ?
780 				DP_LANE2_3_STATUS : DP_LANE0_1_STATUS,
781 				&data);
782 		if (ret)
783 			return ret;
784 		mask = (lane & 1) ?
785 			NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES |
786 			NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_YES |
787 			NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_YES :
788 			DP_LANE_CR_DONE |
789 			DP_LANE_CHANNEL_EQ_DONE |
790 			DP_LANE_SYMBOL_LOCKED;
791 		if ((data & mask) != mask)
792 			return -1;
793 	}
794 	return 0;
795 }
796 
797 static int tegra_dp_channel_eq_status(struct tegra_dp_priv *dp,
798 				      const struct tegra_dp_link_config *cfg)
799 {
800 	u32 cnt;
801 	u32 n_lanes = cfg->lane_count;
802 	u8 data;
803 	u8 ce_done = 1;
804 	int ret;
805 
806 	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
807 		ret = tegra_dc_dp_dpcd_read(dp, DP_LANE0_1_STATUS + cnt, &data);
808 		if (ret)
809 			return ret;
810 
811 		if (n_lanes == 1) {
812 			ce_done = (data & (0x1 <<
813 			NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) &&
814 			(data & (0x1 <<
815 			NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT));
816 			break;
817 		} else if (!(data & (0x1 <<
818 				NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) ||
819 			   !(data & (0x1 <<
820 				NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT)) ||
821 			   !(data & (0x1 <<
822 				NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_SHIFT)) ||
823 			   !(data & (0x1 <<
824 				NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_SHIFT)))
825 			return -EIO;
826 	}
827 
828 	if (ce_done) {
829 		ret = tegra_dc_dp_dpcd_read(dp,
830 					    DP_LANE_ALIGN_STATUS_UPDATED,
831 					    &data);
832 		if (ret)
833 			return ret;
834 		if (!(data & NV_DPCD_LANE_ALIGN_STATUS_UPDATED_DONE_YES))
835 			ce_done = 0;
836 	}
837 
838 	return ce_done ? 0 : -EIO;
839 }
840 
841 static int tegra_dp_clock_recovery_status(struct tegra_dp_priv *dp,
842 					 const struct tegra_dp_link_config *cfg)
843 {
844 	u32 cnt;
845 	u32 n_lanes = cfg->lane_count;
846 	u8 data_ptr;
847 	int ret;
848 
849 	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
850 		ret = tegra_dc_dp_dpcd_read(dp, (DP_LANE0_1_STATUS + cnt),
851 					    &data_ptr);
852 		if (ret)
853 			return ret;
854 
855 		if (n_lanes == 1)
856 			return (data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ?
857 				1 : 0;
858 		else if (!(data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ||
859 			 !(data_ptr & (NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES)))
860 			return 0;
861 	}
862 
863 	return 1;
864 }
865 
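/*
 * Read the sink's requested drive adjustments from the DPCD ADJUST_REQUEST
 * registers: pre-emphasis (@pe), voltage swing (@vs) and, when TPS3 is
 * supported, post-cursor2 (@pc) for each lane.
 */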
866 static int tegra_dp_lt_adjust(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
867 			      u32 pc[4], u8 pc_supported,
868 			      const struct tegra_dp_link_config *cfg)
869 {
870 	size_t cnt;
871 	u8 data_ptr;
872 	u32 n_lanes = cfg->lane_count;
873 	int ret;
874 
875 	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
876 		ret = tegra_dc_dp_dpcd_read(dp, DP_ADJUST_REQUEST_LANE0_1 + cnt,
877 					    &data_ptr);
878 		if (ret)
879 			return ret;
880 		pe[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_PE_MASK) >>
881 					NV_DPCD_ADJUST_REQ_LANEX_PE_SHIFT;
882 		vs[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_DC_MASK) >>
883 					NV_DPCD_ADJUST_REQ_LANEX_DC_SHIFT;
884 		pe[1 + 2 * cnt] =
885 			(data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_MASK) >>
886 					NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_SHIFT;
887 		vs[1 + 2 * cnt] =
888 			(data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_MASK) >>
889 					NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_SHIFT;
890 	}
891 	if (pc_supported) {
892 		ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_ADJUST_REQ_POST_CURSOR2,
893 					    &data_ptr);
894 		if (ret)
895 			return ret;
896 		for (cnt = 0; cnt < n_lanes; cnt++) {
897 			pc[cnt] = (data_ptr >>
898 			NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_SHIFT(cnt)) &
899 			NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_MASK;
900 		}
901 	}
902 
903 	return 0;
904 }
905 
906 static void tegra_dp_wait_aux_training(struct tegra_dp_priv *dp,
907 					bool is_clk_recovery,
908 					const struct tegra_dp_link_config *cfg)
909 {
910 	if (!cfg->aux_rd_interval)
911 		udelay(is_clk_recovery ? 200 : 500);
912 	else
913 		mdelay(cfg->aux_rd_interval * 4);
914 }
915 
916 static void tegra_dp_tpg(struct tegra_dp_priv *dp, u32 tp, u32 n_lanes,
917 			 const struct tegra_dp_link_config *cfg)
918 {
919 	u8 data = (tp == training_pattern_disabled)
920 		? (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_F)
921 		: (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_T);
922 
923 	tegra_dc_sor_set_dp_linkctl(dp->sor, 1, tp, cfg);
924 	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, data);
925 }
926 
927 static int tegra_dp_link_config(struct tegra_dp_priv *dp,
928 				const struct tegra_dp_link_config *link_cfg)
929 {
930 	u8 dpcd_data;
931 	u32 retry;
932 	int ret;
933 
934 	if (link_cfg->lane_count == 0) {
935 		debug("dp: error: lane count is 0. Cannot set link config.\n");
936 		return -ENOLINK;
937 	}
938 
939 	/* Set the power state if it is not at the normal level */
940 	ret = tegra_dc_dp_dpcd_read(dp, DP_SET_POWER, &dpcd_data);
941 	if (ret)
942 		return ret;
943 
944 	if (dpcd_data == DP_SET_POWER_D3) {
945 		dpcd_data = DP_SET_POWER_D0;
946 
947 		/* DP spec requires 3 retries */
948 		for (retry = 3; retry > 0; --retry) {
949 			ret = tegra_dc_dp_dpcd_write(dp, DP_SET_POWER,
950 						     dpcd_data);
951 			if (!ret)
952 				break;
953 			if (retry == 1) {
954 				debug("dp: Failed to set DP panel power\n");
955 				return ret;
956 			}
957 		}
958 	}
959 
960 	/* Enable ASSR if possible */
961 	if (link_cfg->alt_scramber_reset_cap) {
962 		ret = tegra_dc_dp_set_assr(dp, dp->sor, 1);
963 		if (ret)
964 			return ret;
965 	}
966 
967 	ret = tegra_dp_set_link_bandwidth(dp, dp->sor, link_cfg->link_bw);
968 	if (ret) {
969 		debug("dp: Failed to set link bandwidth\n");
970 		return ret;
971 	}
972 	ret = tegra_dp_set_lane_count(dp, link_cfg, dp->sor);
973 	if (ret) {
974 		debug("dp: Failed to set lane count\n");
975 		return ret;
976 	}
977 	tegra_dc_sor_set_dp_linkctl(dp->sor, 1, training_pattern_none,
978 				    link_cfg);
979 
980 	return 0;
981 }
982 
983 static int tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
984 				      const struct display_timing *timing,
985 				      struct tegra_dp_link_config *cfg)
986 {
987 	struct tegra_dp_link_config tmp_cfg;
988 	int ret;
989 
990 	tmp_cfg = *cfg;
991 	cfg->is_valid = 0;
992 
993 	ret = _tegra_dp_lower_link_config(dp, cfg);
994 	if (!ret)
995 		ret = tegra_dc_dp_calc_config(dp, timing, cfg);
996 	if (!ret)
997 		ret = tegra_dp_link_config(dp, cfg);
998 	if (ret)
999 		goto fail;
1000 
1001 	return 0;
1002 
1003 fail:
1004 	*cfg = tmp_cfg;
1005 	tegra_dp_link_config(dp, &tmp_cfg);
1006 	return ret;
1007 }
1008 
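/*
 * Apply the current drive settings: program pre-emphasis, voltage swing and
 * post-cursor2 into the SOR lane registers and mirror them to the sink's
 * TRAINING_LANEx_SET registers, flagging any maximum levels reached.
 */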
1009 static int tegra_dp_lt_config(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
1010 			      u32 pc[4], const struct tegra_dp_link_config *cfg)
1011 {
1012 	struct udevice *sor = dp->sor;
1013 	u32 n_lanes = cfg->lane_count;
1014 	u8 pc_supported = cfg->tps3_supported;
1015 	u32 cnt;
1016 	u32 val;
1017 
1018 	for (cnt = 0; cnt < n_lanes; cnt++) {
1019 		u32 mask = 0;
1020 		u32 pe_reg, vs_reg, pc_reg;
1021 		u32 shift = 0;
1022 
1023 		switch (cnt) {
1024 		case 0:
1025 			mask = PR_LANE2_DP_LANE0_MASK;
1026 			shift = PR_LANE2_DP_LANE0_SHIFT;
1027 			break;
1028 		case 1:
1029 			mask = PR_LANE1_DP_LANE1_MASK;
1030 			shift = PR_LANE1_DP_LANE1_SHIFT;
1031 			break;
1032 		case 2:
1033 			mask = PR_LANE0_DP_LANE2_MASK;
1034 			shift = PR_LANE0_DP_LANE2_SHIFT;
1035 			break;
1036 		case 3:
1037 			mask = PR_LANE3_DP_LANE3_MASK;
1038 			shift = PR_LANE3_DP_LANE3_SHIFT;
1039 			break;
1040 		default:
1041 			debug("dp: incorrect lane cnt\n");
1042 			return -EINVAL;
1043 		}
1044 
1045 		pe_reg = tegra_dp_pe_regs[pc[cnt]][vs[cnt]][pe[cnt]];
1046 		vs_reg = tegra_dp_vs_regs[pc[cnt]][vs[cnt]][pe[cnt]];
1047 		pc_reg = tegra_dp_pc_regs[pc[cnt]][vs[cnt]][pe[cnt]];
1048 
1049 		tegra_dp_set_pe_vs_pc(sor, mask, pe_reg << shift,
1050 				      vs_reg << shift, pc_reg << shift,
1051 				      pc_supported);
1052 	}
1053 
1054 	tegra_dp_disable_tx_pu(dp->sor);
1055 	udelay(20);
1056 
1057 	for (cnt = 0; cnt < n_lanes; cnt++) {
1058 		u32 max_vs_flag = tegra_dp_is_max_vs(pe[cnt], vs[cnt]);
1059 		u32 max_pe_flag = tegra_dp_is_max_pe(pe[cnt], vs[cnt]);
1060 
1061 		val = (vs[cnt] << NV_DPCD_TRAINING_LANEX_SET_DC_SHIFT) |
1062 			(max_vs_flag ?
1063 			NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_T :
1064 			NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_F) |
1065 			(pe[cnt] << NV_DPCD_TRAINING_LANEX_SET_PE_SHIFT) |
1066 			(max_pe_flag ?
1067 			NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_T :
1068 			NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_F);
1069 		tegra_dc_dp_dpcd_write(dp, (DP_TRAINING_LANE0_SET + cnt), val);
1070 	}
1071 
1072 	if (pc_supported) {
1073 		for (cnt = 0; cnt < n_lanes / 2; cnt++) {
1074 			u32 max_pc_flag0 = tegra_dp_is_max_pc(pc[cnt]);
1075 			u32 max_pc_flag1 = tegra_dp_is_max_pc(pc[cnt + 1]);
1076 			val = (pc[cnt] << NV_DPCD_LANEX_SET2_PC2_SHIFT) |
1077 				(max_pc_flag0 ?
1078 				NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_T :
1079 				NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_F) |
1080 				(pc[cnt + 1] <<
1081 				NV_DPCD_LANEXPLUS1_SET2_PC2_SHIFT) |
1082 				(max_pc_flag1 ?
1083 				NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_T :
1084 				NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_F);
1085 			tegra_dc_dp_dpcd_write(dp,
1086 					       NV_DPCD_TRAINING_LANE0_1_SET2 +
1087 					       cnt, val);
1088 		}
1089 	}
1090 
1091 	return 0;
1092 }
1093 
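/*
 * Channel-equalization loop: up to four iterations of adjusting the drive
 * settings (after the first pass), waiting, and checking status. Succeeds
 * once equalization completes; fails early if clock recovery is lost.
 */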
1094 static int _tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4],
1095 				u32 vs[4], u32 pc[4], u8 pc_supported,
1096 				u32 n_lanes,
1097 				const struct tegra_dp_link_config *cfg)
1098 {
1099 	u32 retry_cnt;
1100 
1101 	for (retry_cnt = 0; retry_cnt < 4; retry_cnt++) {
1102 		int ret;
1103 
1104 		if (retry_cnt) {
1105 			ret = tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported,
1106 						 cfg);
1107 			if (ret)
1108 				return ret;
1109 			tegra_dp_lt_config(dp, pe, vs, pc, cfg);
1110 		}
1111 
1112 		tegra_dp_wait_aux_training(dp, false, cfg);
1113 
1114 		if (!tegra_dp_clock_recovery_status(dp, cfg)) {
1115 			debug("dp: CR failed in channel EQ sequence!\n");
1116 			break;
1117 		}
1118 
1119 		if (!tegra_dp_channel_eq_status(dp, cfg))
1120 			return 0;
1121 	}
1122 
1123 	return -EIO;
1124 }
1125 
1126 static int tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
1127 			       u32 pc[4],
1128 			       const struct tegra_dp_link_config *cfg)
1129 {
1130 	u32 n_lanes = cfg->lane_count;
1131 	u8 pc_supported = cfg->tps3_supported;
1132 	int ret;
1133 	u32 tp_src = training_pattern_2;
1134 
1135 	if (pc_supported)
1136 		tp_src = training_pattern_3;
1137 
1138 	tegra_dp_tpg(dp, tp_src, n_lanes, cfg);
1139 
1140 	ret = _tegra_dp_channel_eq(dp, pe, vs, pc, pc_supported, n_lanes, cfg);
1141 
1142 	tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);
1143 
1144 	return ret;
1145 }
1146 
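/*
 * Clock-recovery loop: program the drive settings, wait, and check CR
 * status; on failure adopt the sink's requested adjustments and retry,
 * giving up after five tries with an unchanged voltage swing.
 */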
1147 static int _tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
1148 				  u32 vs[4], u32 pc[4], u8 pc_supported,
1149 				  u32 n_lanes,
1150 				  const struct tegra_dp_link_config *cfg)
1151 {
1152 	u32 vs_temp[4];
1153 	u32 retry_cnt = 0;
1154 
1155 	do {
1156 		tegra_dp_lt_config(dp, pe, vs, pc, cfg);
1157 		tegra_dp_wait_aux_training(dp, true, cfg);
1158 
1159 		if (tegra_dp_clock_recovery_status(dp, cfg))
1160 			return 0;
1161 
1162 		memcpy(vs_temp, vs, sizeof(vs_temp));
1163 		tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported, cfg);
1164 
1165 		if (memcmp(vs_temp, vs, sizeof(vs_temp)))
1166 			retry_cnt = 0;
1167 		else
1168 			++retry_cnt;
1169 	} while (retry_cnt < 5);
1170 
1171 	return -EIO;
1172 }
1173 
1174 static int tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
1175 				 u32 vs[4], u32 pc[4],
1176 				 const struct tegra_dp_link_config *cfg)
1177 {
1178 	u32 n_lanes = cfg->lane_count;
1179 	u8 pc_supported = cfg->tps3_supported;
1180 	int err;
1181 
1182 	tegra_dp_tpg(dp, training_pattern_1, n_lanes, cfg);
1183 
1184 	err = _tegra_dp_clk_recovery(dp, pe, vs, pc, pc_supported, n_lanes,
1185 				     cfg);
1186 	if (err < 0)
1187 		tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);
1188 
1189 	return err;
1190 }
1191 
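/*
 * Full link training: precharge the lanes, then run clock recovery followed
 * by channel equalization, dropping to a lower link configuration and
 * restarting whenever a stage fails.
 */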
1192 static int tegra_dc_dp_full_link_training(struct tegra_dp_priv *dp,
1193 					  const struct display_timing *timing,
1194 					  struct tegra_dp_link_config *cfg)
1195 {
1196 	struct udevice *sor = dp->sor;
1197 	int err;
1198 	u32 pe[4], vs[4], pc[4];
1199 
1200 	tegra_sor_precharge_lanes(sor, cfg);
1201 
1202 retry_cr:
1203 	memset(pe, PREEMPHASIS_DISABLED, sizeof(pe));
1204 	memset(vs, DRIVECURRENT_LEVEL0, sizeof(vs));
1205 	memset(pc, POSTCURSOR2_LEVEL0, sizeof(pc));
1206 
1207 	err = tegra_dp_clk_recovery(dp, pe, vs, pc, cfg);
1208 	if (err) {
1209 		if (!tegra_dp_lower_link_config(dp, timing, cfg))
1210 			goto retry_cr;
1211 
1212 		debug("dp: clk recovery failed\n");
1213 		goto fail;
1214 	}
1215 
1216 	err = tegra_dp_channel_eq(dp, pe, vs, pc, cfg);
1217 	if (err) {
1218 		if (!tegra_dp_lower_link_config(dp, timing, cfg))
1219 			goto retry_cr;
1220 
1221 		debug("dp: channel equalization failed\n");
1222 		goto fail;
1223 	}
1224 #ifdef DEBUG
1225 	tegra_dc_dp_dump_link_cfg(dp, cfg);
1226 #endif
1227 	return 0;
1228 
1229 fail:
1230 	return err;
1231 }
1232 
1233 /*
1234  * All link training functions are ported from the kernel dc driver.
1235  * See drivers/video/tegra/dc/dp.c for more details.
1236  */
1237 static int tegra_dc_dp_fast_link_training(struct tegra_dp_priv *dp,
1238 		const struct tegra_dp_link_config *link_cfg,
1239 		struct udevice *sor)
1240 {
1241 	u8	link_bw;
1242 	u8	lane_count;
1243 	u16	data16;
1244 	u32	data32;
1245 	u32	size;
1246 	u32	status;
1247 	int	j;
1248 	u32	mask = 0xffff >> ((4 - link_cfg->lane_count) * 4);
1249 
1250 	tegra_dc_sor_set_lane_parm(sor, link_cfg);
1251 	tegra_dc_dp_dpcd_write(dp, DP_MAIN_LINK_CHANNEL_CODING_SET,
1252 			       DP_SET_ANSI_8B10B);
1253 
1254 	/* Send TP1 */
1255 	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_1, link_cfg);
1256 	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
1257 			       DP_TRAINING_PATTERN_1);
1258 
1259 	for (j = 0; j < link_cfg->lane_count; ++j)
1260 		tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
1261 	udelay(520);
1262 
1263 	size = sizeof(data16);
1264 	tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
1265 			    DP_LANE0_1_STATUS, (u8 *)&data16, &size, &status);
1266 	status = mask & 0x1111;
1267 	if ((data16 & status) != status) {
1268 		debug("dp: Link training error for TP1 (%#x, status %#x)\n",
1269 		      data16, status);
1270 		return -EFAULT;
1271 	}
1272 
1273 	/* enable ASSR */
1274 	tegra_dc_dp_set_assr(dp, sor, link_cfg->scramble_ena);
1275 	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_3, link_cfg);
1276 
1277 	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
1278 			       link_cfg->link_bw == 20 ? 0x23 : 0x22);
1279 	for (j = 0; j < link_cfg->lane_count; ++j)
1280 		tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
1281 	udelay(520);
1282 
1283 	size = sizeof(data32);
1284 	tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD, DP_LANE0_1_STATUS,
1285 			    (u8 *)&data32, &size, &status);
1286 	if ((data32 & mask) != (0x7777 & mask)) {
1287 		debug("dp: Link training error for TP2/3 (0x%x)\n", data32);
1288 		return -EFAULT;
1289 	}
1290 
1291 	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_disabled,
1292 				    link_cfg);
1293 	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, 0);
1294 
1295 	if (tegra_dc_dp_link_trained(dp, link_cfg)) {
1296 		tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);
1297 		debug("Fast link training failed, link bw %d, lane # %d\n",
1298 		      link_bw, lane_count);
1299 		return -EFAULT;
1300 	}
1301 
1302 	debug("Fast link training succeeded, link bw %d, lane %d\n",
1303 	      link_cfg->link_bw, link_cfg->lane_count);
1304 
1305 	return 0;
1306 }
1307 
1308 static int tegra_dp_do_link_training(struct tegra_dp_priv *dp,
1309 		struct tegra_dp_link_config *link_cfg,
1310 		const struct display_timing *timing,
1311 		struct udevice *sor)
1312 {
1313 	u8	link_bw;
1314 	u8	lane_count;
1315 	int	ret;
1316 
1317 	if (DO_FAST_LINK_TRAINING) {
1318 		ret = tegra_dc_dp_fast_link_training(dp, link_cfg, sor);
1319 		if (ret) {
1320 			debug("dp: fast link training failed\n");
1321 		} else {
1322 			/*
1323 			 * Set a known-good drive setting if fast link
1324 			 * training succeeded. Ignore any error.
1325 			 */
1326 			ret = tegra_dc_sor_set_voltage_swing(dp->sor, link_cfg);
1327 			if (ret)
1328 				debug("Failed to set voltage swing\n");
1329 		}
1330 	} else {
1331 		ret = -ENOSYS;
1332 	}
1333 	if (ret) {
1334 		/* Try full link training then */
1335 		ret = tegra_dc_dp_full_link_training(dp, timing, link_cfg);
1336 		if (ret) {
1337 			debug("dp: full link training failed\n");
1338 			return ret;
1339 		}
1340 	}
1341 
1342 	/* Everything is good; double check the link config */
1343 	tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);
1344 
1345 	if ((link_cfg->link_bw == link_bw) &&
1346 	    (link_cfg->lane_count == lane_count))
1347 		return 0;
1348 	else
1349 		return -EFAULT;
1350 }
1351 
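/*
 * Try the maximum link configuration first; if calculation, configuration
 * and training all succeed, commit it to @link_cfg.
 */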
1352 static int tegra_dc_dp_explore_link_cfg(struct tegra_dp_priv *dp,
1353 			struct tegra_dp_link_config *link_cfg,
1354 			struct udevice *sor,
1355 			const struct display_timing *timing)
1356 {
1357 	struct tegra_dp_link_config temp_cfg;
1358 
1359 	if (!timing->pixelclock.typ || !timing->hactive.typ ||
1360 	    !timing->vactive.typ) {
1361 		debug("dp: invalid mode configuration\n");
1362 		return -EINVAL;
1363 	}
1364 	if (!link_cfg->max_link_bw || !link_cfg->max_lane_count) {
1365 		debug("dp: invalid link configuration\n");
1366 		return -EINVAL;
1367 	}
1368 
1369 	link_cfg->is_valid = 0;
1370 
1371 	memcpy(&temp_cfg, link_cfg, sizeof(temp_cfg));
1372 
1373 	temp_cfg.link_bw = temp_cfg.max_link_bw;
1374 	temp_cfg.lane_count = temp_cfg.max_lane_count;
1375 
1376 	/*
1377 	 * set to max link config
1378 	 */
1379 	if ((!tegra_dc_dp_calc_config(dp, timing, &temp_cfg)) &&
1380 	    (!tegra_dp_link_config(dp, &temp_cfg)) &&
1381 		(!tegra_dp_do_link_training(dp, &temp_cfg, timing, sor)))
1382 		/* the max link cfg is doable */
1383 		memcpy(link_cfg, &temp_cfg, sizeof(temp_cfg));
1384 
1385 	return link_cfg->is_valid ? 0 : -EFAULT;
1386 }
1387 
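/* Wait up to 200ms for the hot-plug-detect (HPD) status to show a sink */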
1388 static int tegra_dp_hpd_plug(struct tegra_dp_priv *dp)
1389 {
1390 	const int vdd_to_hpd_delay_ms = 200;
1391 	u32 val;
1392 	ulong start;
1393 
1394 	start = get_timer(0);
1395 	do {
1396 		val = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
1397 		if (val & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)
1398 			return 0;
1399 		udelay(100);
1400 	} while (get_timer(start) < vdd_to_hpd_delay_ms);
1401 
1402 	return -EIO;
1403 }
1404 
1405 static int tegra_dc_dp_sink_out_of_sync(struct tegra_dp_priv *dp, u32 delay_ms)
1406 {
1407 	u8 dpcd_data;
1408 	int out_of_sync;
1409 	int ret;
1410 
1411 	debug("%s: delay=%d\n", __func__, delay_ms);
1412 	mdelay(delay_ms);
1413 	ret = tegra_dc_dp_dpcd_read(dp, DP_SINK_STATUS, &dpcd_data);
1414 	if (ret)
1415 		return ret;
1416 
1417 	out_of_sync = !(dpcd_data & DP_SINK_STATUS_PORT0_IN_SYNC);
1418 	if (out_of_sync)
1419 		debug("SINK receive port 0 out of sync, data=%x\n", dpcd_data);
1420 	else
1421 		debug("SINK is in sync\n");
1422 
1423 	return out_of_sync;
1424 }
1425 
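/*
 * Check that the sink stays in sync after the SOR is attached; if it does
 * not, detach, retrain the link and re-attach, waiting longer each time,
 * for up to five retries.
 */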
1426 static int tegra_dc_dp_check_sink(struct tegra_dp_priv *dp,
1427 				  struct tegra_dp_link_config *link_cfg,
1428 				  const struct display_timing *timing)
1429 {
1430 	const int max_retry = 5;
1431 	int delay_frame;
1432 	int retries;
1433 
1434 	/*
1435 	 * The DP TCON may skip some main-stream frames, so wait a while
1436 	 * before reading the DPCD SINK STATUS register, starting with a
1437 	 * delay of 5 frames
1438 	 */
1439 	delay_frame = 5;
1440 
1441 	retries = max_retry;
1442 	do {
1443 		int ret;
1444 
1445 		if (!tegra_dc_dp_sink_out_of_sync(dp, link_cfg->frame_in_ms *
1446 						  delay_frame))
1447 			return 0;
1448 
1449 		debug("%s: retries left %d\n", __func__, retries);
1450 		if (!retries--) {
1451 			printf("DP: Out of sync after %d retries\n", max_retry);
1452 			return -EIO;
1453 		}
1454 		ret = tegra_dc_sor_detach(dp->dc_dev, dp->sor);
1455 		if (ret)
1456 			return ret;
1457 		if (tegra_dc_dp_explore_link_cfg(dp, link_cfg, dp->sor,
1458 						 timing)) {
1459 			debug("dp: %s: failed to configure link\n", __func__);
1460 			continue;
1461 		}
1462 
1463 		tegra_dc_sor_set_power_state(dp->sor, 1);
1464 		tegra_dc_sor_attach(dp->dc_dev, dp->sor, link_cfg, timing);
1465 
1466 		/* Increase delay_frame for next try in case the sink is
1467 		   skipping more frames */
1468 		delay_frame += 10;
1469 	} while (1);
1470 }
1471 
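/*
 * Display uclass .enable() op: bring up DPAUX, check HPD, read the sink
 * capabilities, enable the SOR, train the link, attach the display
 * controller and turn on the backlight.
 */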
1472 int tegra_dp_enable(struct udevice *dev, int panel_bpp,
1473 		    const struct display_timing *timing)
1474 {
1475 	struct tegra_dp_priv *priv = dev_get_priv(dev);
1476 	struct tegra_dp_link_config slink_cfg, *link_cfg = &slink_cfg;
1477 	struct udevice *sor;
1478 	int data;
1479 	int retry;
1480 	int ret;
1481 
1482 	memset(link_cfg, '\0', sizeof(*link_cfg));
1483 	link_cfg->is_valid = 0;
1484 	link_cfg->scramble_ena = 1;
1485 
1486 	tegra_dc_dpaux_enable(priv);
1487 
1488 	if (tegra_dp_hpd_plug(priv) < 0) {
1489 		debug("dp: hpd plug failed\n");
1490 		return -EIO;
1491 	}
1492 
1493 	link_cfg->bits_per_pixel = panel_bpp;
1494 	if (tegra_dc_dp_init_max_link_cfg(timing, priv, link_cfg)) {
1495 		debug("dp: failed to init link configuration\n");
1496 		return -ENOLINK;
1497 	}
1498 
1499 	ret = uclass_first_device(UCLASS_VIDEO_BRIDGE, &sor);
1500 	if (ret || !sor) {
1501 		debug("dp: failed to find SOR device: ret=%d\n", ret);
1502 		return ret;
1503 	}
1504 	priv->sor = sor;
1505 	ret = tegra_dc_sor_enable_dp(sor, link_cfg);
1506 	if (ret)
1507 		return ret;
1508 
1509 	tegra_dc_sor_set_panel_power(sor, 1);
1510 
1511 	/* Write power on to DPCD */
1512 	data = DP_SET_POWER_D0;
1513 	retry = 0;
1514 	do {
1515 		ret = tegra_dc_dp_dpcd_write(priv, DP_SET_POWER, data);
1516 	} while ((retry++ < DP_POWER_ON_MAX_TRIES) && ret);
1517 
1518 	if (ret || retry >= DP_POWER_ON_MAX_TRIES) {
1519 		debug("dp: failed to power on panel (0x%x)\n", ret);
1520 		return -ENETUNREACH;
1522 	}
1523 
1524 	/* Confirm DP plugging status */
1525 	if (!(tegra_dpaux_readl(priv, DPAUX_DP_AUXSTAT) &
1526 			DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
1527 		debug("dp: could not detect HPD\n");
1528 		return -ENXIO;
1529 	}
1530 
1531 	/* Check DP version */
1532 	if (tegra_dc_dp_dpcd_read(priv, DP_DPCD_REV, &priv->revision)) {
1533 		debug("dp: failed to read the revision number from sink\n");
1534 		return -EIO;
1535 	}
1536 
1537 	if (tegra_dc_dp_explore_link_cfg(priv, link_cfg, sor, timing)) {
1538 		debug("dp: error configuring link\n");
1539 		return -ENOMEDIUM;
1540 	}
1541 
1542 	tegra_dc_sor_set_power_state(sor, 1);
1543 	ret = tegra_dc_sor_attach(priv->dc_dev, sor, link_cfg, timing);
1544 	if (ret && ret != -EEXIST)
1545 		return ret;
1546 
1547 	/*
1548 	 * This takes a long time, but can apparently resolve a failure to
1549 	 * bring up the display correctly.
1550 	 */
1551 	if (0) {
1552 		ret = tegra_dc_dp_check_sink(priv, link_cfg, timing);
1553 		if (ret)
1554 			return ret;
1555 	}
1556 
1557 	/* Power down the unused lanes to save power - a few hundred mW */
1558 	tegra_dc_sor_power_down_unused_lanes(sor, link_cfg);
1559 
1560 	ret = video_bridge_set_backlight(sor, 80);
1561 	if (ret) {
1562 		debug("dp: failed to set backlight\n");
1563 		return ret;
1564 	}
1565 
1566 	priv->enabled = true;
1568 	return 0;
1569 }
1570 
1571 static int tegra_dp_ofdata_to_platdata(struct udevice *dev)
1572 {
1573 	struct tegra_dp_plat *plat = dev_get_platdata(dev);
1574 
1575 	plat->base = devfdt_get_addr(dev);
1576 
1577 	return 0;
1578 }
1579 
1580 static int tegra_dp_read_edid(struct udevice *dev, u8 *buf, int buf_size)
1581 {
1582 	struct tegra_dp_priv *priv = dev_get_priv(dev);
1583 	const int tegra_edid_i2c_address = 0x50;
1584 	u32 aux_stat = 0;
1585 
1586 	tegra_dc_dpaux_enable(priv);
1587 
1588 	return tegra_dc_i2c_aux_read(priv, tegra_edid_i2c_address, 0, buf,
1589 				     buf_size, &aux_stat);
1590 }
1591 
1592 static const struct dm_display_ops dp_tegra_ops = {
1593 	.read_edid = tegra_dp_read_edid,
1594 	.enable = tegra_dp_enable,
1595 };
1596 
1597 static int dp_tegra_probe(struct udevice *dev)
1598 {
1599 	struct tegra_dp_plat *plat = dev_get_platdata(dev);
1600 	struct tegra_dp_priv *priv = dev_get_priv(dev);
1601 	struct display_plat *disp_uc_plat = dev_get_uclass_platdata(dev);
1602 
1603 	priv->regs = (struct dpaux_ctlr *)plat->base;
1604 	priv->enabled = false;
1605 
1606 	/* Remember the display controller that is sending us video */
1607 	priv->dc_dev = disp_uc_plat->src_dev;
1608 
1609 	return 0;
1610 }
1611 
1612 static const struct udevice_id tegra_dp_ids[] = {
1613 	{ .compatible = "nvidia,tegra124-dpaux" },
1614 	{ }
1615 };
1616 
1617 U_BOOT_DRIVER(dp_tegra) = {
1618 	.name	= "dpaux_tegra",
1619 	.id	= UCLASS_DISPLAY,
1620 	.of_match = tegra_dp_ids,
1621 	.ofdata_to_platdata = tegra_dp_ofdata_to_platdata,
1622 	.probe	= dp_tegra_probe,
1623 	.ops	= &dp_tegra_ops,
1624 	.priv_auto_alloc_size = sizeof(struct tegra_dp_priv),
1625 	.platdata_auto_alloc_size = sizeof(struct tegra_dp_plat),
1626 };
1627