xref: /openbmc/u-boot/drivers/video/tegra124/dp.c (revision f4abbee3)
1 /*
2  * Copyright (c) 2011-2013, NVIDIA Corporation.
3  * Copyright 2014 Google Inc.
4  *
5  * SPDX-License-Identifier:     GPL-2.0
6  */
7 
8 #include <common.h>
9 #include <display.h>
10 #include <dm.h>
11 #include <div64.h>
12 #include <errno.h>
13 #include <fdtdec.h>
14 #include <asm/io.h>
15 #include <asm/arch-tegra/dc.h>
16 #include "display.h"
17 #include "edid.h"
18 #include "sor.h"
19 #include "displayport.h"
20 
21 DECLARE_GLOBAL_DATA_PTR;
22 
23 #define DO_FAST_LINK_TRAINING		1
24 
25 struct tegra_dp_plat {
26 	ulong base;
27 };
28 
29 struct tegra_dp_priv {
30 	struct dpaux_ctlr *regs;
31 	struct tegra_dc_sor_data *sor;
32 	u8 revision;
33 	int enabled;
34 };
35 
36 struct tegra_dp_priv dp_data;
37 
38 static inline u32 tegra_dpaux_readl(struct tegra_dp_priv *dp, u32 reg)
39 {
40 	return readl((u32 *)dp->regs + reg);
41 }
42 
43 static inline void tegra_dpaux_writel(struct tegra_dp_priv *dp, u32 reg,
44 				      u32 val)
45 {
46 	writel(val, (u32 *)dp->regs + reg);
47 }
48 
49 static inline u32 tegra_dc_dpaux_poll_register(struct tegra_dp_priv *dp,
50 					   u32 reg, u32 mask, u32 exp_val,
51 					   u32 poll_interval_us,
52 					   u32 timeout_us)
53 {
54 	u32 reg_val = 0;
55 	u32 temp = timeout_us;
56 
57 	do {
58 		udelay(poll_interval_us);
59 		reg_val = tegra_dpaux_readl(dp, reg);
60 		if (timeout_us > poll_interval_us)
61 			timeout_us -= poll_interval_us;
62 		else
63 			break;
64 	} while ((reg_val & mask) != exp_val);
65 
66 	if ((reg_val & mask) == exp_val)
67 		return 0;	/* success */
68 	debug("dpaux_poll_register 0x%x: timeout: (reg_val)0x%08x & (mask)0x%08x != (exp_val)0x%08x\n",
69 	      reg, reg_val, mask, exp_val);
70 	return temp;
71 }
72 
73 static inline int tegra_dpaux_wait_transaction(struct tegra_dp_priv *dp)
74 {
75 	/* According to the DP spec, each AUX transaction needs to finish
76 	   within 40ms. */
77 	if (tegra_dc_dpaux_poll_register(dp, DPAUX_DP_AUXCTL,
78 					 DPAUX_DP_AUXCTL_TRANSACTREQ_MASK,
79 					 DPAUX_DP_AUXCTL_TRANSACTREQ_DONE,
80 					 100, DP_AUX_TIMEOUT_MS * 1000) != 0) {
81 		debug("dp: DPAUX transaction timeout\n");
82 		return -1;
83 	}
84 	return 0;
85 }
86 
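/*
 * Write a single AUX chunk (at most DP_AUX_MAX_BYTES) to the sink.
 *
 * The data is loaded into the DPAUX write FIFO, then the transaction is
 * issued and retried on timeout/receive errors and on DEFER replies, up
 * to DP_AUX_TIMEOUT_MAX_TRIES and DP_AUX_DEFER_MAX_TRIES times
 * respectively. On an ACK reply, *size is updated with the byte count
 * reported in DPAUX_DP_AUXSTAT.
 */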
87 static int tegra_dc_dpaux_write_chunk(struct tegra_dp_priv *dp, u32 cmd,
88 					  u32 addr, u8 *data, u32 *size,
89 					  u32 *aux_stat)
90 {
91 	int i;
92 	u32 reg_val;
93 	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
94 	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
95 	u32 temp_data;
96 
97 	if (*size > DP_AUX_MAX_BYTES)
98 		return -1;	/* only write one chunk of data */
99 
100 	/* Make sure the command is a write command */
101 	switch (cmd) {
102 	case DPAUX_DP_AUXCTL_CMD_I2CWR:
103 	case DPAUX_DP_AUXCTL_CMD_MOTWR:
104 	case DPAUX_DP_AUXCTL_CMD_AUXWR:
105 		break;
106 	default:
107 		debug("dp: aux write cmd 0x%x is invalid\n", cmd);
108 		return -EINVAL;
109 	}
110 
111 	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
112 	for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i) {
113 		memcpy(&temp_data, data, 4);
114 		tegra_dpaux_writel(dp, DPAUX_DP_AUXDATA_WRITE_W(i), temp_data);
115 		data += 4;
116 	}
117 
118 	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
119 	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
120 	reg_val |= cmd;
121 	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
122 	reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);
123 
124 	while ((timeout_retries > 0) && (defer_retries > 0)) {
125 		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
126 		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
127 			udelay(1);
128 
129 		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
130 		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);
131 
132 		if (tegra_dpaux_wait_transaction(dp))
133 			debug("dp: aux write transaction timeout\n");
134 
135 		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
136 
137 		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
138 		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
139 		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
140 		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
141 			if (timeout_retries-- > 0) {
142 				debug("dp: aux write retry (0x%x) -- %d\n",
143 				      *aux_stat, timeout_retries);
144 				/* clear the error bits */
145 				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
146 						   *aux_stat);
147 				continue;
148 			} else {
149 				debug("dp: aux write got error (0x%x)\n",
150 				      *aux_stat);
151 				return -ETIMEDOUT;
152 			}
153 		}
154 
155 		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
156 		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
157 			if (defer_retries-- > 0) {
158 				debug("dp: aux write defer (0x%x) -- %d\n",
159 				      *aux_stat, defer_retries);
160 				/* clear the error bits */
161 				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
162 						   *aux_stat);
163 				continue;
164 			} else {
165 				debug("dp: aux write defer exceeds max retries (0x%x)\n",
166 				      *aux_stat);
167 				return -ETIMEDOUT;
168 			}
169 		}
170 
171 		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
172 			DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
173 			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
174 			return 0;
175 		} else {
176 			debug("dp: aux write failed (0x%x)\n", *aux_stat);
177 			return -EIO;
178 		}
179 	}
180 	/* Should never get here */
181 	return -EIO;
182 }
183 
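/*
 * Read a single AUX chunk (at most DP_AUX_MAX_BYTES) from the sink.
 *
 * This mirrors the write path: HPD must report "plugged", the transaction
 * is retried on timeout/receive errors and on DEFER replies, and on an
 * ACK reply the read FIFO is copied into @data with *size set to the
 * reply length.
 */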
184 static int tegra_dc_dpaux_read_chunk(struct tegra_dp_priv *dp, u32 cmd,
185 					 u32 addr, u8 *data, u32 *size,
186 					 u32 *aux_stat)
187 {
188 	u32 reg_val;
189 	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
190 	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
191 
192 	if (*size > DP_AUX_MAX_BYTES) {
193 		debug("only read one chunk\n");
194 		return -EIO;	/* only read one chunk */
195 	}
196 
197 	/* Check to make sure the command is a read command */
198 	switch (cmd) {
199 	case DPAUX_DP_AUXCTL_CMD_I2CRD:
200 	case DPAUX_DP_AUXCTL_CMD_I2CREQWSTAT:
201 	case DPAUX_DP_AUXCTL_CMD_MOTRD:
202 	case DPAUX_DP_AUXCTL_CMD_AUXRD:
203 		break;
204 	default:
205 		debug("dp: aux read cmd 0x%x is invalid\n", cmd);
206 		return -EIO;
207 	}
208 
209 	*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
210 	if (!(*aux_stat & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
211 		debug("dp: HPD is not detected\n");
212 		return -EIO;
213 	}
214 
215 	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
216 
217 	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
218 	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
219 	reg_val |= cmd;
220 	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
221 	reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);
222 	while ((timeout_retries > 0) && (defer_retries > 0)) {
223 		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
224 		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
225 			udelay(DP_DPCP_RETRY_SLEEP_NS * 2);
226 
227 		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
228 		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);
229 
230 		if (tegra_dpaux_wait_transaction(dp))
231 			debug("dp: aux read transaction timeout\n");
232 
233 		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
234 
235 		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
236 		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
237 		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
238 		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
239 			if (timeout_retries-- > 0) {
240 				debug("dp: aux read retry (0x%x) -- %d\n",
241 				      *aux_stat, timeout_retries);
242 				/* clear the error bits */
243 				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
244 						   *aux_stat);
245 				continue;	/* retry */
246 			} else {
247 				debug("dp: aux read got error (0x%x)\n",
248 				      *aux_stat);
249 				return -ETIMEDOUT;
250 			}
251 		}
252 
253 		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
254 		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
255 			if (defer_retries-- > 0) {
256 				debug("dp: aux read defer (0x%x) -- %d\n",
257 				      *aux_stat, defer_retries);
258 				/* clear the error bits */
259 				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
260 						   *aux_stat);
261 				continue;
262 			} else {
263 				debug("dp: aux read defer exceeds max retries (0x%x)\n",
264 				      *aux_stat);
265 				return -ETIMEDOUT;
266 			}
267 		}
268 
269 		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
270 			DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
271 			int i;
272 			u32 temp_data[4];
273 
274 			for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i)
275 				temp_data[i] = tegra_dpaux_readl(dp,
276 						DPAUX_DP_AUXDATA_READ_W(i));
277 
278 			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
279 			memcpy(data, temp_data, *size);
280 
281 			return 0;
282 		} else {
283 			debug("dp: aux read failed (0x%x)\n", *aux_stat);
284 			return -EIO;
285 		}
286 	}
287 	/* Should never get here */
288 	debug("%s: should not reach here\n", __func__);
289 
290 	return -EIO;
291 }
292 
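/*
 * Read an arbitrary-length buffer over AUX by splitting it into
 * DP_AUX_MAX_BYTES chunks; on return *size holds the number of bytes
 * actually read, which may be short if a chunk fails.
 */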
293 static int tegra_dc_dpaux_read(struct tegra_dp_priv *dp, u32 cmd, u32 addr,
294 			u8 *data, u32 *size, u32 *aux_stat)
295 {
296 	u32 finished = 0;
297 	u32 cur_size;
298 	int ret = 0;
299 
300 	do {
301 		cur_size = *size - finished;
302 		if (cur_size > DP_AUX_MAX_BYTES)
303 			cur_size = DP_AUX_MAX_BYTES;
304 
305 		ret = tegra_dc_dpaux_read_chunk(dp, cmd, addr,
306 						data, &cur_size, aux_stat);
307 		if (ret)
308 			break;
309 
310 		/* cur_size should be the real size returned */
311 		addr += cur_size;
312 		data += cur_size;
313 		finished += cur_size;
314 
315 	} while (*size > finished);
316 	*size = finished;
317 
318 	return ret;
319 }
320 
321 static int tegra_dc_dp_dpcd_read(struct tegra_dp_priv *dp, u32 cmd,
322 				 u8 *data_ptr)
323 {
324 	u32 size = 1;
325 	u32 status = 0;
326 	int ret;
327 
328 	ret = tegra_dc_dpaux_read_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
329 					cmd, data_ptr, &size, &status);
330 	if (ret) {
331 		debug("dp: Failed to read DPCD data. CMD 0x%x, Status 0x%x\n",
332 		      cmd, status);
333 	}
334 
335 	return ret;
336 }
337 
338 static int tegra_dc_dp_dpcd_write(struct tegra_dp_priv *dp, u32 cmd,
339 				u8 data)
340 {
341 	u32 size = 1;
342 	u32 status = 0;
343 	int ret;
344 
345 	ret = tegra_dc_dpaux_write_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXWR,
346 					cmd, &data, &size, &status);
347 	if (ret) {
348 		debug("dp: Failed to write DPCD data. CMD 0x%x, Status 0x%x\n",
349 		      cmd, status);
350 	}
351 
352 	return ret;
353 }
354 
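/*
 * Read from an I2C device (e.g. the EDID EEPROM at 0x50) over the AUX
 * channel: for each chunk the register address is sent with a MOT
 * (middle-of-transaction) I2C write, then the data is read back with an
 * I2C read. Returns the number of bytes read, or a negative error code.
 */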
355 static int tegra_dc_i2c_aux_read(struct tegra_dp_priv *dp, u32 i2c_addr,
356 				 u8 addr, u8 *data, u32 size, u32 *aux_stat)
357 {
358 	u32 finished = 0;
359 	int ret = 0;
360 
361 	do {
362 		u32 cur_size = min((u32)DP_AUX_MAX_BYTES, size - finished);
363 
364 		u32 len = 1;
365 		ret = tegra_dc_dpaux_write_chunk(
366 				dp, DPAUX_DP_AUXCTL_CMD_MOTWR, i2c_addr,
367 				&addr, &len, aux_stat);
368 		if (ret) {
369 			debug("%s: error sending address to read.\n",
370 			      __func__);
371 			return ret;
372 		}
373 
374 		ret = tegra_dc_dpaux_read_chunk(
375 				dp, DPAUX_DP_AUXCTL_CMD_I2CRD, i2c_addr,
376 				data, &cur_size, aux_stat);
377 		if (ret) {
378 			debug("%s: error reading data.\n", __func__);
379 			return ret;
380 		}
381 
382 		/* cur_size should be the real size returned */
383 		addr += cur_size;
384 		data += cur_size;
385 		finished += cur_size;
386 	} while (size > finished);
387 
388 	return finished;
389 }
390 
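/*
 * Power up the DPAUX pads: clear any stale interrupts (and leave them
 * disabled), program the hybrid pad drive strength and voltage levels,
 * and power up the pad. The PADCTL values used here look like fixed
 * defaults rather than board-specific settings.
 */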
391 static void tegra_dc_dpaux_enable(struct tegra_dp_priv *dp)
392 {
393 	/* clear interrupt */
394 	tegra_dpaux_writel(dp, DPAUX_INTR_AUX, 0xffffffff);
395 	/* Do not enable interrupts for now; enable them once an ISR is in place */
396 	tegra_dpaux_writel(dp, DPAUX_INTR_EN_AUX, 0x0);
397 
398 	tegra_dpaux_writel(dp, DPAUX_HYBRID_PADCTL,
399 			   DPAUX_HYBRID_PADCTL_AUX_DRVZ_OHM_50 |
400 			   DPAUX_HYBRID_PADCTL_AUX_CMH_V0_70 |
401 			   0x18 << DPAUX_HYBRID_PADCTL_AUX_DRVI_SHIFT |
402 			   DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV_ENABLE);
403 
404 	tegra_dpaux_writel(dp, DPAUX_HYBRID_SPARE,
405 			   DPAUX_HYBRID_SPARE_PAD_PWR_POWERUP);
406 }
407 
408 #ifdef DEBUG
409 static void tegra_dc_dp_dump_link_cfg(struct tegra_dp_priv *dp,
410 	const struct tegra_dp_link_config *link_cfg)
411 {
412 	debug("DP config: cfg_name               cfg_value\n");
413 	debug("           Lane Count             %d\n",
414 	      link_cfg->max_lane_count);
415 	debug("           SupportEnhancedFraming %s\n",
416 	      link_cfg->support_enhanced_framing ? "Y" : "N");
417 	debug("           Bandwidth              %d\n",
418 	      link_cfg->max_link_bw);
419 	debug("           bpp                    %d\n",
420 	      link_cfg->bits_per_pixel);
421 	debug("           EnhancedFraming        %s\n",
422 	      link_cfg->enhanced_framing ? "Y" : "N");
423 	debug("           Scramble_enabled       %s\n",
424 	      link_cfg->scramble_ena ? "Y" : "N");
425 	debug("           LinkBW                 %d\n",
426 	      link_cfg->link_bw);
427 	debug("           lane_count             %d\n",
428 	      link_cfg->lane_count);
429 	debug("           activepolarity         %d\n",
430 	      link_cfg->activepolarity);
431 	debug("           active_count           %d\n",
432 	      link_cfg->active_count);
433 	debug("           tu_size                %d\n",
434 	      link_cfg->tu_size);
435 	debug("           active_frac            %d\n",
436 	      link_cfg->active_frac);
437 	debug("           watermark              %d\n",
438 	      link_cfg->watermark);
439 	debug("           hblank_sym             %d\n",
440 	      link_cfg->hblank_sym);
441 	debug("           vblank_sym             %d\n",
442 	      link_cfg->vblank_sym);
443 }
444 #endif
445 
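/*
 * Step down to the next lower link configuration when training fails,
 * trading link rate against lane count; returns -ENOLINK once no usable
 * combination is left.
 */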
446 static int _tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
447 				       struct tegra_dp_link_config *cfg)
448 {
449 	switch (cfg->link_bw) {
450 	case SOR_LINK_SPEED_G1_62:
451 		if (cfg->max_link_bw > SOR_LINK_SPEED_G1_62)
452 			cfg->link_bw = SOR_LINK_SPEED_G2_7;
453 		cfg->lane_count /= 2;
454 		break;
455 	case SOR_LINK_SPEED_G2_7:
456 		cfg->link_bw = SOR_LINK_SPEED_G1_62;
457 		break;
458 	case SOR_LINK_SPEED_G5_4:
459 		if (cfg->lane_count == 1) {
460 			cfg->link_bw = SOR_LINK_SPEED_G2_7;
461 			cfg->lane_count = cfg->max_lane_count;
462 		} else {
463 			cfg->lane_count /= 2;
464 		}
465 		break;
466 	default:
467 		debug("dp: Error link rate %d\n", cfg->link_bw);
468 		return -ENOLINK;
469 	}
470 
471 	return (cfg->lane_count > 0) ? 0 : -ENOLINK;
472 }
473 
474 /*
475  * Calculate whether the given cfg can meet the mode request.
476  * Return 0 if the mode is possible, -1 otherwise.
477  */
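/*
 * The search below walks transfer unit (TU) sizes from 64 down to 32
 * symbols, looking for the active symbol count/fraction that minimises
 * the accumulated error per line, and then derives the SOR watermark and
 * the number of symbols that fit in the horizontal and vertical blanking
 * periods (see dev_disp.ref for the underlying formulas).
 */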
478 static int tegra_dc_dp_calc_config(struct tegra_dp_priv *dp,
479 				   const struct display_timing *timing,
480 				   struct tegra_dp_link_config *link_cfg)
481 {
482 	const u32	link_rate = 27 * link_cfg->link_bw * 1000 * 1000;
483 	const u64	f	  = 100000;	/* precision factor */
484 	u32	num_linkclk_line; /* Number of link clocks per line */
485 	u64	ratio_f; /* Ratio of incoming to outgoing data rate */
486 	u64	frac_f;
487 	u64	activesym_f;	/* Activesym per TU */
488 	u64	activecount_f;
489 	u32	activecount;
490 	u32	activepolarity;
491 	u64	approx_value_f;
492 	u32	activefrac		  = 0;
493 	u64	accumulated_error_f	  = 0;
494 	u32	lowest_neg_activecount	  = 0;
495 	u32	lowest_neg_activepolarity = 0;
496 	u32	lowest_neg_tusize	  = 64;
497 	u32	num_symbols_per_line;
498 	u64	lowest_neg_activefrac	  = 0;
499 	u64	lowest_neg_error_f	  = 64 * f;
500 	u64	watermark_f;
501 	int	i;
502 	int	neg;
503 
504 	if (!link_rate || !link_cfg->lane_count || !timing->pixelclock.typ ||
505 	    !link_cfg->bits_per_pixel)
506 		return -1;
507 
508 	if ((u64)timing->pixelclock.typ * link_cfg->bits_per_pixel >=
509 		(u64)link_rate * 8 * link_cfg->lane_count)
510 		return -1;
511 
512 	num_linkclk_line = (u32)(lldiv(link_rate * timing->hactive.typ,
513 				       timing->pixelclock.typ));
514 
515 	ratio_f = (u64)timing->pixelclock.typ * link_cfg->bits_per_pixel * f;
516 	ratio_f /= 8;
517 	do_div(ratio_f, link_rate * link_cfg->lane_count);
518 
519 	for (i = 64; i >= 32; --i) {
520 		activesym_f	= ratio_f * i;
521 		activecount_f	= lldiv(activesym_f, (u32)f) * f;
522 		frac_f		= activesym_f - activecount_f;
523 		activecount	= (u32)(lldiv(activecount_f, (u32)f));
524 
525 		if (frac_f < (lldiv(f, 2))) /* fraction < 0.5 */
526 			activepolarity = 0;
527 		else {
528 			activepolarity = 1;
529 			frac_f = f - frac_f;
530 		}
531 
532 		if (frac_f != 0) {
533 			/* warning: frac_f should be 64-bit */
534 			frac_f = lldiv(f * f, frac_f); /* 1 / fraction */
535 			if (frac_f > (15 * f))
536 				activefrac = activepolarity ? 1 : 15;
537 			else
538 				activefrac = activepolarity ?
539 					(u32)lldiv(frac_f, (u32)f) + 1 :
540 					(u32)lldiv(frac_f, (u32)f);
541 		}
542 
543 		if (activefrac == 1)
544 			activepolarity = 0;
545 
546 		if (activepolarity == 1)
547 			approx_value_f = activefrac ? lldiv(
548 				(activecount_f + (activefrac * f - f) * f),
549 				(activefrac * f)) :
550 				activecount_f + f;
551 		else
552 			approx_value_f = activefrac ?
553 				activecount_f + lldiv(f, activefrac) :
554 				activecount_f;
555 
556 		if (activesym_f < approx_value_f) {
557 			accumulated_error_f = num_linkclk_line *
558 				lldiv(approx_value_f - activesym_f, i);
559 			neg = 1;
560 		} else {
561 			accumulated_error_f = num_linkclk_line *
562 				lldiv(activesym_f - approx_value_f, i);
563 			neg = 0;
564 		}
565 
566 		if ((neg && (lowest_neg_error_f > accumulated_error_f)) ||
567 		    (accumulated_error_f == 0)) {
568 			lowest_neg_error_f = accumulated_error_f;
569 			lowest_neg_tusize = i;
570 			lowest_neg_activecount = activecount;
571 			lowest_neg_activepolarity = activepolarity;
572 			lowest_neg_activefrac = activefrac;
573 
574 			if (accumulated_error_f == 0)
575 				break;
576 		}
577 	}
578 
579 	if (lowest_neg_activefrac == 0) {
580 		link_cfg->activepolarity = 0;
581 		link_cfg->active_count   = lowest_neg_activepolarity ?
582 			lowest_neg_activecount : lowest_neg_activecount - 1;
583 		link_cfg->tu_size	      = lowest_neg_tusize;
584 		link_cfg->active_frac    = 1;
585 	} else {
586 		link_cfg->activepolarity = lowest_neg_activepolarity;
587 		link_cfg->active_count   = (u32)lowest_neg_activecount;
588 		link_cfg->tu_size	      = lowest_neg_tusize;
589 		link_cfg->active_frac    = (u32)lowest_neg_activefrac;
590 	}
591 
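	/*
	 * Derive the SOR watermark from the data/link-rate ratio and the
	 * chosen TU size, padded by the residual error found above; the
	 * checks below then reject configurations whose watermark exceeds 30
	 * or the number of symbols per line.
	 */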
592 	watermark_f = lldiv(ratio_f * link_cfg->tu_size * (f - ratio_f), f);
593 	link_cfg->watermark = (u32)(lldiv(watermark_f + lowest_neg_error_f,
594 		f)) + link_cfg->bits_per_pixel / 4 - 1;
595 	num_symbols_per_line = (timing->hactive.typ *
596 				link_cfg->bits_per_pixel) /
597 			       (8 * link_cfg->lane_count);
598 
599 	if (link_cfg->watermark > 30) {
600 		debug("dp: sor setting: unable to get a good tusize, force watermark to 30\n");
601 		link_cfg->watermark = 30;
602 		return -1;
603 	} else if (link_cfg->watermark > num_symbols_per_line) {
604 		debug("dp: sor setting: force watermark to the number of symbols in the line\n");
605 		link_cfg->watermark = num_symbols_per_line;
606 		return -1;
607 	}
608 
609 	/*
610 	 * Refer to dev_disp.ref for more information.
611 	 * # symbols/hblank = ((SetRasterBlankEnd.X + SetRasterSize.Width -
612 	 *                      SetRasterBlankStart.X - 7) * link_clk / pclk)
613 	 *                      - 3 * enhanced_framing - Y
614  * where Y = (# lanes == 4) ? 3 : (# lanes == 2) ? 6 : 12
615 	 */
616 	link_cfg->hblank_sym = (int)lldiv(((uint64_t)timing->hback_porch.typ +
617 			timing->hfront_porch.typ + timing->hsync_len.typ - 7) *
618 			link_rate, timing->pixelclock.typ) -
619 			3 * link_cfg->enhanced_framing -
620 			(12 / link_cfg->lane_count);
621 
622 	if (link_cfg->hblank_sym < 0)
623 		link_cfg->hblank_sym = 0;
624 
625 
626 	/*
627 	 * Refer to dev_disp.ref for more information.
628 	 * # symbols/vblank = ((SetRasterBlankStart.X -
629  *                      SetRasterBlankEnd.X - 25) * link_clk / pclk)
630  *                      - Y - 1;
631  * where Y = (# lanes == 4) ? 12 : (# lanes == 2) ? 21 : 39
632 	 */
633 	link_cfg->vblank_sym = (int)lldiv(((uint64_t)timing->hactive.typ - 25)
634 			* link_rate, timing->pixelclock.typ) - (36 /
635 			link_cfg->lane_count) - 4;
636 
637 	if (link_cfg->vblank_sym < 0)
638 		link_cfg->vblank_sym = 0;
639 
640 	link_cfg->is_valid = 1;
641 #ifdef DEBUG
642 	tegra_dc_dp_dump_link_cfg(dp, link_cfg);
643 #endif
644 
645 	return 0;
646 }
647 
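/*
 * Read the sink's capabilities (maximum lane count and link rate,
 * enhanced framing, downspread, TPS3 and eDP ASSR support) from the DPCD
 * and initialise link_cfg with the largest configuration the sink
 * advertises, then run a first feasibility check against the requested
 * timing.
 */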
648 static int tegra_dc_dp_init_max_link_cfg(
649 			const struct display_timing *timing,
650 			struct tegra_dp_priv *dp,
651 			struct tegra_dp_link_config *link_cfg)
652 {
653 	const int drive_current = 0x40404040;
654 	const int preemphasis = 0x0f0f0f0f;
655 	const int postcursor = 0;
656 	u8 dpcd_data;
657 	int ret;
658 
659 	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LANE_COUNT, &dpcd_data);
660 	if (ret)
661 		return ret;
662 	link_cfg->max_lane_count = dpcd_data & DP_MAX_LANE_COUNT_MASK;
663 	link_cfg->tps3_supported = (dpcd_data &
664 			DP_MAX_LANE_COUNT_TPS3_SUPPORTED_YES) ? 1 : 0;
665 
666 	link_cfg->support_enhanced_framing =
667 		(dpcd_data & DP_MAX_LANE_COUNT_ENHANCED_FRAMING_YES) ?
668 		1 : 0;
669 
670 	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_DOWNSPREAD, &dpcd_data);
671 	if (ret)
672 		return ret;
673 	link_cfg->downspread = (dpcd_data & DP_MAX_DOWNSPREAD_VAL_0_5_PCT) ?
674 				1 : 0;
675 
676 	ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_TRAINING_AUX_RD_INTERVAL,
677 				    &link_cfg->aux_rd_interval);
678 	if (ret)
679 		return ret;
680 	ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LINK_RATE,
681 				    &link_cfg->max_link_bw);
682 	if (ret)
683 		return ret;
684 
685 	/*
686 	 * Set to a high value for link training and attach.
687 	 * Will be re-programmed when dp is enabled.
688 	 */
689 	link_cfg->drive_current = drive_current;
690 	link_cfg->preemphasis = preemphasis;
691 	link_cfg->postcursor = postcursor;
692 
693 	ret = tegra_dc_dp_dpcd_read(dp, DP_EDP_CONFIGURATION_CAP, &dpcd_data);
694 	if (ret)
695 		return ret;
696 
697 	link_cfg->alt_scramber_reset_cap =
698 		(dpcd_data & DP_EDP_CONFIGURATION_CAP_ASC_RESET_YES) ?
699 		1 : 0;
700 	link_cfg->only_enhanced_framing =
701 		(dpcd_data & DP_EDP_CONFIGURATION_CAP_FRAMING_CHANGE_YES) ?
702 		1 : 0;
703 
704 	link_cfg->lane_count = link_cfg->max_lane_count;
705 	link_cfg->link_bw = link_cfg->max_link_bw;
706 	link_cfg->enhanced_framing = link_cfg->support_enhanced_framing;
707 	link_cfg->frame_in_ms = (1000 / 60) + 1;
708 
709 	tegra_dc_dp_calc_config(dp, timing, link_cfg);
710 	return 0;
711 }
712 
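/*
 * Enable or disable Alternate Scrambler Seed Reset (ASSR) on the sink via
 * DP_EDP_CONFIGURATION_SET and configure the SOR to match, so that both
 * ends use the eDP alternate scrambler seed.
 */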
713 static int tegra_dc_dp_set_assr(struct tegra_dp_priv *dp,
714 				struct tegra_dc_sor_data *sor, int ena)
715 {
716 	int ret;
717 
718 	u8 dpcd_data = ena ?
719 		DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_ENABLE :
720 		DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_DISABLE;
721 
722 	ret = tegra_dc_dp_dpcd_write(dp, DP_EDP_CONFIGURATION_SET,
723 				     dpcd_data);
724 	if (ret)
725 		return ret;
726 
727 	/* Also reset the scrambler to 0xfffe */
728 	tegra_dc_sor_set_internal_panel(sor, ena);
729 	return 0;
730 }
731 
732 static int tegra_dp_set_link_bandwidth(struct tegra_dp_priv *dp,
733 				       struct tegra_dc_sor_data *sor,
734 				       u8 link_bw)
735 {
736 	tegra_dc_sor_set_link_bandwidth(sor, link_bw);
737 
738 	/* Sink side */
739 	return tegra_dc_dp_dpcd_write(dp, DP_LINK_BW_SET, link_bw);
740 }
741 
742 static int tegra_dp_set_lane_count(struct tegra_dp_priv *dp,
743 		const struct tegra_dp_link_config *link_cfg,
744 		struct tegra_dc_sor_data *sor)
745 {
746 	u8	dpcd_data;
747 	int	ret;
748 
749 	/* Check whether the panel supports enhanced framing */
750 	dpcd_data = link_cfg->lane_count;
751 	if (link_cfg->enhanced_framing)
752 		dpcd_data |= DP_LANE_COUNT_SET_ENHANCEDFRAMING_T;
753 	ret = tegra_dc_dp_dpcd_write(dp, DP_LANE_COUNT_SET, dpcd_data);
754 	if (ret)
755 		return ret;
756 
757 	tegra_dc_sor_set_lane_count(sor, link_cfg->lane_count);
758 
759 	/* Also power down lanes that will not be used */
760 	return 0;
761 }
762 
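/*
 * Check DP_LANE0_1_STATUS / DP_LANE2_3_STATUS and report whether every
 * active lane has clock recovery, channel equalisation and symbol lock
 * done; returns 0 when the link is fully trained.
 */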
763 static int tegra_dc_dp_link_trained(struct tegra_dp_priv *dp,
764 				    const struct tegra_dp_link_config *cfg)
765 {
766 	u32 lane;
767 	u8 mask;
768 	u8 data;
769 	int ret;
770 
771 	for (lane = 0; lane < cfg->lane_count; ++lane) {
772 		ret = tegra_dc_dp_dpcd_read(dp, (lane / 2) ?
773 				DP_LANE2_3_STATUS : DP_LANE0_1_STATUS,
774 				&data);
775 		if (ret)
776 			return ret;
777 		mask = (lane & 1) ?
778 			NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES |
779 			NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_YES |
780 			NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_YES :
781 			DP_LANE_CR_DONE |
782 			DP_LANE_CHANNEL_EQ_DONE |
783 			DP_LANE_SYMBOL_LOCKED;
784 		if ((data & mask) != mask)
785 			return -1;
786 	}
787 	return 0;
788 }
789 
790 static int tegra_dp_channel_eq_status(struct tegra_dp_priv *dp,
791 				      const struct tegra_dp_link_config *cfg)
792 {
793 	u32 cnt;
794 	u32 n_lanes = cfg->lane_count;
795 	u8 data;
796 	u8 ce_done = 1;
797 	int ret;
798 
799 	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
800 		ret = tegra_dc_dp_dpcd_read(dp, DP_LANE0_1_STATUS + cnt, &data);
801 		if (ret)
802 			return ret;
803 
804 		if (n_lanes == 1) {
805 			ce_done = (data & (0x1 <<
806 			NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) &&
807 			(data & (0x1 <<
808 			NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT));
809 			break;
810 		} else if (!(data & (0x1 <<
811 				NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) ||
812 			   !(data & (0x1 <<
813 				NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT)) ||
814 			   !(data & (0x1 <<
815 				NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_SHIFT)) ||
816 			   !(data & (0x1 <<
817 				NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_SHIFT)))
818 			return -EIO;
819 	}
820 
821 	if (ce_done) {
822 		ret = tegra_dc_dp_dpcd_read(dp,
823 					    DP_LANE_ALIGN_STATUS_UPDATED,
824 					    &data);
825 		if (ret)
826 			return ret;
827 		if (!(data & NV_DPCD_LANE_ALIGN_STATUS_UPDATED_DONE_YES))
828 			ce_done = 0;
829 	}
830 
831 	return ce_done ? 0 : -EIO;
832 }
833 
834 static int tegra_dp_clock_recovery_status(struct tegra_dp_priv *dp,
835 					 const struct tegra_dp_link_config *cfg)
836 {
837 	u32 cnt;
838 	u32 n_lanes = cfg->lane_count;
839 	u8 data_ptr;
840 	int ret;
841 
842 	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
843 		ret = tegra_dc_dp_dpcd_read(dp, (DP_LANE0_1_STATUS + cnt),
844 					    &data_ptr);
845 		if (ret)
846 			return ret;
847 
848 		if (n_lanes == 1)
849 			return (data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ?
850 				1 : 0;
851 		else if (!(data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ||
852 			 !(data_ptr & (NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES)))
853 			return 0;
854 	}
855 
856 	return 1;
857 }
858 
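/*
 * Collect the drive settings requested by the sink from the
 * ADJUST_REQUEST registers: pre-emphasis (pe) and voltage swing (vs) per
 * lane, plus post-cursor2 (pc) when TPS3/post-cursor is supported.
 */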
859 static int tegra_dp_lt_adjust(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
860 			      u32 pc[4], u8 pc_supported,
861 			      const struct tegra_dp_link_config *cfg)
862 {
863 	size_t cnt;
864 	u8 data_ptr;
865 	u32 n_lanes = cfg->lane_count;
866 	int ret;
867 
868 	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
869 		ret = tegra_dc_dp_dpcd_read(dp, DP_ADJUST_REQUEST_LANE0_1 + cnt,
870 					    &data_ptr);
871 		if (ret)
872 			return ret;
873 		pe[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_PE_MASK) >>
874 					NV_DPCD_ADJUST_REQ_LANEX_PE_SHIFT;
875 		vs[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_DC_MASK) >>
876 					NV_DPCD_ADJUST_REQ_LANEX_DC_SHIFT;
877 		pe[1 + 2 * cnt] =
878 			(data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_MASK) >>
879 					NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_SHIFT;
880 		vs[1 + 2 * cnt] =
881 			(data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_MASK) >>
882 					NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_SHIFT;
883 	}
884 	if (pc_supported) {
885 		ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_ADJUST_REQ_POST_CURSOR2,
886 					    &data_ptr);
887 		if (ret)
888 			return ret;
889 		for (cnt = 0; cnt < n_lanes; cnt++) {
890 			pc[cnt] = (data_ptr >>
891 			NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_SHIFT(cnt)) &
892 			NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_MASK;
893 		}
894 	}
895 
896 	return 0;
897 }
898 
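/*
 * Wait between link-training iterations: either the interval advertised
 * in the sink's TRAINING_AUX_RD_INTERVAL register (treated here as units
 * of 4ms) or, when none is advertised, a default delay of 200us for
 * clock recovery and 500us for channel equalisation.
 */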
899 static void tegra_dp_wait_aux_training(struct tegra_dp_priv *dp,
900 					bool is_clk_recovery,
901 					const struct tegra_dp_link_config *cfg)
902 {
903 	if (!cfg->aux_rd_interval)
904 		udelay(is_clk_recovery ? 200 : 500);
905 	else
906 		mdelay(cfg->aux_rd_interval * 4);
907 }
908 
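/*
 * Select a training pattern on both the source (SOR) and the sink (DPCD).
 * Scrambling is disabled while a training pattern is active and
 * re-enabled when the pattern is cleared.
 */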
909 static void tegra_dp_tpg(struct tegra_dp_priv *dp, u32 tp, u32 n_lanes,
910 			 const struct tegra_dp_link_config *cfg)
911 {
912 	u8 data = (tp == training_pattern_disabled)
913 		? (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_F)
914 		: (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_T);
915 
916 	tegra_dc_sor_set_dp_linkctl(dp->sor, 1, tp, cfg);
917 	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, data);
918 }
919 
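/*
 * Program the chosen link configuration: wake the sink to D0 if needed,
 * enable ASSR when the panel supports it, and set the link bandwidth and
 * lane count on both the SOR and the sink.
 */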
920 static int tegra_dp_link_config(struct tegra_dp_priv *dp,
921 				const struct tegra_dp_link_config *link_cfg)
922 {
923 	u8 dpcd_data;
924 	u32 retry;
925 	int ret;
926 
927 	if (link_cfg->lane_count == 0) {
928 		debug("dp: error: lane count is 0. Cannot set link config.\n");
929 		return -ENOLINK;
930 	}
931 
932 	/* Set the power state if it is not at the normal level */
933 	ret = tegra_dc_dp_dpcd_read(dp, DP_SET_POWER, &dpcd_data);
934 	if (ret)
935 		return ret;
936 
937 	if (dpcd_data == DP_SET_POWER_D3) {
938 		dpcd_data = DP_SET_POWER_D0;
939 
940 		/* DP spec requires 3 retries */
941 		for (retry = 3; retry > 0; --retry) {
942 			ret = tegra_dc_dp_dpcd_write(dp, DP_SET_POWER,
943 						     dpcd_data);
944 			if (!ret)
945 				break;
946 			if (retry == 1) {
947 				debug("dp: Failed to set DP panel power\n");
948 				return ret;
949 			}
950 		}
951 	}
952 
953 	/* Enable ASSR if possible */
954 	if (link_cfg->alt_scramber_reset_cap) {
955 		ret = tegra_dc_dp_set_assr(dp, dp->sor, 1);
956 		if (ret)
957 			return ret;
958 	}
959 
960 	ret = tegra_dp_set_link_bandwidth(dp, dp->sor, link_cfg->link_bw);
961 	if (ret) {
962 		debug("dp: Failed to set link bandwidth\n");
963 		return ret;
964 	}
965 	ret = tegra_dp_set_lane_count(dp, link_cfg, dp->sor);
966 	if (ret) {
967 		debug("dp: Failed to set lane count\n");
968 		return ret;
969 	}
970 	tegra_dc_sor_set_dp_linkctl(dp->sor, 1, training_pattern_none,
971 				    link_cfg);
972 
973 	return 0;
974 }
975 
976 static int tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
977 				      const struct display_timing *timing,
978 				      struct tegra_dp_link_config *cfg)
979 {
980 	struct tegra_dp_link_config tmp_cfg;
981 	int ret;
982 
983 	tmp_cfg = *cfg;
984 	cfg->is_valid = 0;
985 
986 	ret = _tegra_dp_lower_link_config(dp, cfg);
987 	if (!ret)
988 		ret = tegra_dc_dp_calc_config(dp, timing, cfg);
989 	if (!ret)
990 		ret = tegra_dp_link_config(dp, cfg);
991 	if (ret)
992 		goto fail;
993 
994 	return 0;
995 
996 fail:
997 	*cfg = tmp_cfg;
998 	tegra_dp_link_config(dp, &tmp_cfg);
999 	return ret;
1000 }
1001 
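/*
 * Apply one set of training drive parameters: program pre-emphasis,
 * voltage swing and post-cursor into the SOR lane registers (note the
 * lane-to-register mapping in the switch below) and mirror the same
 * levels, with their MAX_REACHED flags, into the sink's TRAINING_LANEx
 * registers.
 */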
1002 static int tegra_dp_lt_config(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
1003 			      u32 pc[4], const struct tegra_dp_link_config *cfg)
1004 {
1005 	struct tegra_dc_sor_data *sor = dp->sor;
1006 	u32 n_lanes = cfg->lane_count;
1007 	u8 pc_supported = cfg->tps3_supported;
1008 	u32 cnt;
1009 	u32 val;
1010 
1011 	for (cnt = 0; cnt < n_lanes; cnt++) {
1012 		u32 mask = 0;
1013 		u32 pe_reg, vs_reg, pc_reg;
1014 		u32 shift = 0;
1015 
1016 		switch (cnt) {
1017 		case 0:
1018 			mask = PR_LANE2_DP_LANE0_MASK;
1019 			shift = PR_LANE2_DP_LANE0_SHIFT;
1020 			break;
1021 		case 1:
1022 			mask = PR_LANE1_DP_LANE1_MASK;
1023 			shift = PR_LANE1_DP_LANE1_SHIFT;
1024 			break;
1025 		case 2:
1026 			mask = PR_LANE0_DP_LANE2_MASK;
1027 			shift = PR_LANE0_DP_LANE2_SHIFT;
1028 			break;
1029 		case 3:
1030 			mask = PR_LANE3_DP_LANE3_MASK;
1031 			shift = PR_LANE3_DP_LANE3_SHIFT;
1032 			break;
1033 		default:
1034 			debug("dp: incorrect lane cnt\n");
1035 			return -EINVAL;
1036 		}
1037 
1038 		pe_reg = tegra_dp_pe_regs[pc[cnt]][vs[cnt]][pe[cnt]];
1039 		vs_reg = tegra_dp_vs_regs[pc[cnt]][vs[cnt]][pe[cnt]];
1040 		pc_reg = tegra_dp_pc_regs[pc[cnt]][vs[cnt]][pe[cnt]];
1041 
1042 		tegra_dp_set_pe_vs_pc(sor, mask, pe_reg << shift,
1043 				      vs_reg << shift, pc_reg << shift,
1044 				      pc_supported);
1045 	}
1046 
1047 	tegra_dp_disable_tx_pu(dp->sor);
1048 	udelay(20);
1049 
1050 	for (cnt = 0; cnt < n_lanes; cnt++) {
1051 		u32 max_vs_flag = tegra_dp_is_max_vs(pe[cnt], vs[cnt]);
1052 		u32 max_pe_flag = tegra_dp_is_max_pe(pe[cnt], vs[cnt]);
1053 
1054 		val = (vs[cnt] << NV_DPCD_TRAINING_LANEX_SET_DC_SHIFT) |
1055 			(max_vs_flag ?
1056 			NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_T :
1057 			NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_F) |
1058 			(pe[cnt] << NV_DPCD_TRAINING_LANEX_SET_PE_SHIFT) |
1059 			(max_pe_flag ?
1060 			NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_T :
1061 			NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_F);
1062 		tegra_dc_dp_dpcd_write(dp, (DP_TRAINING_LANE0_SET + cnt), val);
1063 	}
1064 
1065 	if (pc_supported) {
1066 		for (cnt = 0; cnt < n_lanes / 2; cnt++) {
1067 			u32 max_pc_flag0 = tegra_dp_is_max_pc(pc[cnt]);
1068 			u32 max_pc_flag1 = tegra_dp_is_max_pc(pc[cnt + 1]);
1069 			val = (pc[cnt] << NV_DPCD_LANEX_SET2_PC2_SHIFT) |
1070 				(max_pc_flag0 ?
1071 				NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_T :
1072 				NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_F) |
1073 				(pc[cnt + 1] <<
1074 				NV_DPCD_LANEXPLUS1_SET2_PC2_SHIFT) |
1075 				(max_pc_flag1 ?
1076 				NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_T :
1077 				NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_F);
1078 			tegra_dc_dp_dpcd_write(dp,
1079 					       NV_DPCD_TRAINING_LANE0_1_SET2 +
1080 					       cnt, val);
1081 		}
1082 	}
1083 
1084 	return 0;
1085 }
1086 
1087 static int _tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4],
1088 				u32 vs[4], u32 pc[4], u8 pc_supported,
1089 				u32 n_lanes,
1090 				const struct tegra_dp_link_config *cfg)
1091 {
1092 	u32 retry_cnt;
1093 
1094 	for (retry_cnt = 0; retry_cnt < 4; retry_cnt++) {
1095 		int ret;
1096 
1097 		if (retry_cnt) {
1098 			ret = tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported,
1099 						 cfg);
1100 			if (ret)
1101 				return ret;
1102 			tegra_dp_lt_config(dp, pe, vs, pc, cfg);
1103 		}
1104 
1105 		tegra_dp_wait_aux_training(dp, false, cfg);
1106 
1107 		if (!tegra_dp_clock_recovery_status(dp, cfg)) {
1108 			debug("dp: CR failed in channel EQ sequence!\n");
1109 			break;
1110 		}
1111 
1112 		if (!tegra_dp_channel_eq_status(dp, cfg))
1113 			return 0;
1114 	}
1115 
1116 	return -EIO;
1117 }
1118 
1119 static int tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
1120 			       u32 pc[4],
1121 			       const struct tegra_dp_link_config *cfg)
1122 {
1123 	u32 n_lanes = cfg->lane_count;
1124 	u8 pc_supported = cfg->tps3_supported;
1125 	int ret;
1126 	u32 tp_src = training_pattern_2;
1127 
1128 	if (pc_supported)
1129 		tp_src = training_pattern_3;
1130 
1131 	tegra_dp_tpg(dp, tp_src, n_lanes, cfg);
1132 
1133 	ret = _tegra_dp_channel_eq(dp, pe, vs, pc, pc_supported, n_lanes, cfg);
1134 
1135 	tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);
1136 
1137 	return ret;
1138 }
1139 
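/*
 * Clock-recovery inner loop: keep applying the sink's requested drive
 * settings until clock recovery is reported. The retry counter is reset
 * whenever the requested voltage swing changes, and the loop gives up
 * after five unchanged iterations.
 */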
1140 static int _tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
1141 				  u32 vs[4], u32 pc[4], u8 pc_supported,
1142 				  u32 n_lanes,
1143 				  const struct tegra_dp_link_config *cfg)
1144 {
1145 	u32 vs_temp[4];
1146 	u32 retry_cnt = 0;
1147 
1148 	do {
1149 		tegra_dp_lt_config(dp, pe, vs, pc, cfg);
1150 		tegra_dp_wait_aux_training(dp, true, cfg);
1151 
1152 		if (tegra_dp_clock_recovery_status(dp, cfg))
1153 			return 0;
1154 
1155 		memcpy(vs_temp, vs, sizeof(vs_temp));
1156 		tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported, cfg);
1157 
1158 		if (memcmp(vs_temp, vs, sizeof(vs_temp)))
1159 			retry_cnt = 0;
1160 		else
1161 			++retry_cnt;
1162 	} while (retry_cnt < 5);
1163 
1164 	return -EIO;
1165 }
1166 
1167 static int tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
1168 				 u32 vs[4], u32 pc[4],
1169 				 const struct tegra_dp_link_config *cfg)
1170 {
1171 	u32 n_lanes = cfg->lane_count;
1172 	u8 pc_supported = cfg->tps3_supported;
1173 	int err;
1174 
1175 	tegra_dp_tpg(dp, training_pattern_1, n_lanes, cfg);
1176 
1177 	err = _tegra_dp_clk_recovery(dp, pe, vs, pc, pc_supported, n_lanes,
1178 				     cfg);
1179 	if (err < 0)
1180 		tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);
1181 
1182 	return err;
1183 }
1184 
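/*
 * Full link training: start from the lowest drive settings, run clock
 * recovery and then channel equalisation, dropping to a lower link
 * configuration and restarting whenever either phase fails.
 */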
1185 static int tegra_dc_dp_full_link_training(struct tegra_dp_priv *dp,
1186 					  const struct display_timing *timing,
1187 					  struct tegra_dp_link_config *cfg)
1188 {
1189 	struct tegra_dc_sor_data *sor = dp->sor;
1190 	int err;
1191 	u32 pe[4], vs[4], pc[4];
1192 
1193 	tegra_sor_precharge_lanes(sor, cfg);
1194 
1195 retry_cr:
1196 	memset(pe, PREEMPHASIS_DISABLED, sizeof(pe));
1197 	memset(vs, DRIVECURRENT_LEVEL0, sizeof(vs));
1198 	memset(pc, POSTCURSOR2_LEVEL0, sizeof(pc));
1199 
1200 	err = tegra_dp_clk_recovery(dp, pe, vs, pc, cfg);
1201 	if (err) {
1202 		if (!tegra_dp_lower_link_config(dp, timing, cfg))
1203 			goto retry_cr;
1204 
1205 		debug("dp: clk recovery failed\n");
1206 		goto fail;
1207 	}
1208 
1209 	err = tegra_dp_channel_eq(dp, pe, vs, pc, cfg);
1210 	if (err) {
1211 		if (!tegra_dp_lower_link_config(dp, timing, cfg))
1212 			goto retry_cr;
1213 
1214 		debug("dp: channel equalization failed\n");
1215 		goto fail;
1216 	}
1217 #ifdef DEBUG
1218 	tegra_dc_dp_dump_link_cfg(dp, cfg);
1219 #endif
1220 	return 0;
1221 
1222 fail:
1223 	return err;
1224 }
1225 
1226 /*
1227  * All link training functions are ported from the kernel dc driver.
1228  * See drivers/video/tegra/dc/dp.c for more details.
1229  */
1230 static int tegra_dc_dp_fast_link_training(struct tegra_dp_priv *dp,
1231 		const struct tegra_dp_link_config *link_cfg,
1232 		struct tegra_dc_sor_data *sor)
1233 {
1234 	u8	link_bw;
1235 	u8	lane_count;
1236 	u16	data16;
1237 	u32	data32;
1238 	u32	size;
1239 	u32	status;
1240 	int	j;
1241 	u32	mask = 0xffff >> ((4 - link_cfg->lane_count) * 4);
1242 
1243 	tegra_dc_sor_set_lane_parm(sor, link_cfg);
1244 	tegra_dc_dp_dpcd_write(dp, DP_MAIN_LINK_CHANNEL_CODING_SET,
1245 			       DP_SET_ANSI_8B10B);
1246 
1247 	/* Send TP1 */
1248 	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_1, link_cfg);
1249 	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
1250 			       DP_TRAINING_PATTERN_1);
1251 
1252 	for (j = 0; j < link_cfg->lane_count; ++j)
1253 		tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
1254 	udelay(520);
1255 
1256 	size = sizeof(data16);
1257 	tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
1258 			    DP_LANE0_1_STATUS, (u8 *)&data16, &size, &status);
1259 	status = mask & 0x1111;
1260 	if ((data16 & status) != status) {
1261 		debug("dp: Link training error for TP1 (%#x, status %#x)\n",
1262 		      data16, status);
1263 		return -EFAULT;
1264 	}
1265 
1266 	/* enable ASSR */
1267 	tegra_dc_dp_set_assr(dp, sor, link_cfg->scramble_ena);
1268 	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_3, link_cfg);
1269 
1270 	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
1271 			       link_cfg->link_bw == 20 ? 0x23 : 0x22);
1272 	for (j = 0; j < link_cfg->lane_count; ++j)
1273 		tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
1274 	udelay(520);
1275 
1276 	size = sizeof(data32);
1277 	tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD, DP_LANE0_1_STATUS,
1278 			    (u8 *)&data32, &size, &status);
1279 	if ((data32 & mask) != (0x7777 & mask)) {
1280 		debug("dp: Link training error for TP2/3 (0x%x)\n", data32);
1281 		return -EFAULT;
1282 	}
1283 
1284 	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_disabled,
1285 				    link_cfg);
1286 	tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, 0);
1287 
1288 	if (tegra_dc_dp_link_trained(dp, link_cfg)) {
1289 		tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);
1290 		debug("Fast link training failed, link bw %d, lane # %d\n",
1291 		      link_bw, lane_count);
1292 		return -EFAULT;
1293 	}
1294 
1295 	debug("Fast link training succeeded, link bw %d, lane %d\n",
1296 	      link_cfg->link_bw, link_cfg->lane_count);
1297 
1298 	return 0;
1299 }
1300 
1301 static int tegra_dp_do_link_training(struct tegra_dp_priv *dp,
1302 		struct tegra_dp_link_config *link_cfg,
1303 		const struct display_timing *timing,
1304 		struct tegra_dc_sor_data *sor)
1305 {
1306 	u8	link_bw;
1307 	u8	lane_count;
1308 	int	ret;
1309 
1310 	if (DO_FAST_LINK_TRAINING) {
1311 		ret = tegra_dc_dp_fast_link_training(dp, link_cfg, sor);
1312 		if (ret) {
1313 			debug("dp: fast link training failed\n");
1314 		} else {
1315 			/*
1316 			 * Set to a known-good drive setting if fast link
1317 			 * training succeeded. Ignore any error.
1318 			 */
1319 			ret = tegra_dc_sor_set_voltage_swing(dp->sor, link_cfg);
1320 			if (ret)
1321 				debug("Failed to set voltage swing\n");
1322 		}
1323 	} else {
1324 		ret = -ENOSYS;
1325 	}
1326 	if (ret) {
1327 		/* Try full link training then */
1328 		ret = tegra_dc_dp_full_link_training(dp, timing, link_cfg);
1329 		if (ret) {
1330 			debug("dp: full link training failed\n");
1331 			return ret;
1332 		}
1333 	}
1334 
1335 	/* Everything is good; double check the link config */
1336 	tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);
1337 
1338 	if ((link_cfg->link_bw == link_bw) &&
1339 	    (link_cfg->lane_count == lane_count))
1340 		return 0;
1341 	else
1342 		return -EFAULT;
1343 }
1344 
1345 static int tegra_dc_dp_explore_link_cfg(struct tegra_dp_priv *dp,
1346 			struct tegra_dp_link_config *link_cfg,
1347 			struct tegra_dc_sor_data *sor,
1348 			const struct display_timing *timing)
1349 {
1350 	struct tegra_dp_link_config temp_cfg;
1351 
1352 	if (!timing->pixelclock.typ || !timing->hactive.typ ||
1353 	    !timing->vactive.typ) {
1354 		debug("dp: invalid mode configuration\n");
1355 		return -EINVAL;
1356 	}
1357 	if (!link_cfg->max_link_bw || !link_cfg->max_lane_count) {
1358 		debug("dp: invalid link configuration\n");
1359 		return -EINVAL;
1360 	}
1361 
1362 	link_cfg->is_valid = 0;
1363 
1364 	memcpy(&temp_cfg, link_cfg, sizeof(temp_cfg));
1365 
1366 	temp_cfg.link_bw = temp_cfg.max_link_bw;
1367 	temp_cfg.lane_count = temp_cfg.max_lane_count;
1368 
1369 	/*
1370 	 * set to max link config
1371 	 */
1372 	if ((!tegra_dc_dp_calc_config(dp, timing, &temp_cfg)) &&
1373 	    (!tegra_dp_link_config(dp, &temp_cfg)) &&
1374 		(!tegra_dp_do_link_training(dp, &temp_cfg, timing, sor)))
1375 		/* the max link cfg is doable */
1376 		memcpy(link_cfg, &temp_cfg, sizeof(temp_cfg));
1377 
1378 	return link_cfg->is_valid ? 0 : -EFAULT;
1379 }
1380 
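/*
 * Poll AUXSTAT for the hot-plug-detect "plugged" status, allowing the
 * panel up to 200ms to assert HPD after VDD is applied.
 */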
1381 static int tegra_dp_hpd_plug(struct tegra_dp_priv *dp)
1382 {
1383 	const int vdd_to_hpd_delay_ms = 200;
1384 	u32 val;
1385 	ulong start;
1386 
1387 	start = get_timer(0);
1388 	do {
1389 		val = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
1390 		if (val & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)
1391 			return 0;
1392 		udelay(100);
1393 	} while (get_timer(start) < vdd_to_hpd_delay_ms);
1394 
1395 	return -EIO;
1396 }
1397 
1398 static int tegra_dc_dp_sink_out_of_sync(struct tegra_dp_priv *dp, u32 delay_ms)
1399 {
1400 	u8 dpcd_data;
1401 	int out_of_sync;
1402 	int ret;
1403 
1404 	debug("%s: delay=%d\n", __func__, delay_ms);
1405 	mdelay(delay_ms);
1406 	ret = tegra_dc_dp_dpcd_read(dp, DP_SINK_STATUS, &dpcd_data);
1407 	if (ret)
1408 		return ret;
1409 
1410 	out_of_sync = !(dpcd_data & DP_SINK_STATUS_PORT0_IN_SYNC);
1411 	if (out_of_sync)
1412 		debug("SINK receive port 0 out of sync, data=%x\n", dpcd_data);
1413 	else
1414 		debug("SINK is in synchronization\n");
1415 
1416 	return out_of_sync;
1417 }
1418 
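/*
 * Verify that the sink's receive port stays in sync once the link is up;
 * if it does not, detach the SOR, retrain (possibly with a lower link
 * configuration) and re-attach, waiting progressively longer each time.
 */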
1419 static int tegra_dc_dp_check_sink(struct tegra_dp_priv *dp,
1420 				  struct tegra_dp_link_config *link_cfg,
1421 				  const struct display_timing *timing)
1422 {
1423 	const int max_retry = 5;
1424 	int delay_frame;
1425 	int retries;
1426 
1427 	/*
1428 	 * The DP TCON may skip some main-stream frames, so wait a while
1429 	 * before reading the DPCD SINK STATUS register; start with a delay
1430 	 * of 5 frames
1431 	 */
1432 	delay_frame = 5;
1433 
1434 	retries = max_retry;
1435 	do {
1436 		int ret;
1437 
1438 		if (!tegra_dc_dp_sink_out_of_sync(dp, link_cfg->frame_in_ms *
1439 						  delay_frame))
1440 			return 0;
1441 
1442 		debug("%s: retries left %d\n", __func__, retries);
1443 		if (!retries--) {
1444 			printf("DP: Out of sync after %d retries\n", max_retry);
1445 			return -EIO;
1446 		}
1447 		ret = tegra_dc_sor_detach(dp->sor);
1448 		if (ret)
1449 			return ret;
1450 		if (tegra_dc_dp_explore_link_cfg(dp, link_cfg, dp->sor,
1451 						 timing)) {
1452 			debug("dp: %s: failed to configure link\n", __func__);
1453 			continue;
1454 		}
1455 
1456 		tegra_dc_sor_set_power_state(dp->sor, 1);
1457 		tegra_dc_sor_attach(dp->sor, link_cfg, timing);
1458 
1459 		/* Increase delay_frame for next try in case the sink is
1460 		   skipping more frames */
1461 		delay_frame += 10;
1462 	} while (1);
1463 }
1464 
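/*
 * Enable the DisplayPort output: power up the AUX pads, wait for HPD,
 * read the sink capabilities, bring up the SOR, power the panel via the
 * DPCD, train the link and finally attach the SOR to the display
 * controller.
 */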
1465 int tegra_dp_enable(struct udevice *dev, int panel_bpp,
1466 		    const struct display_timing *timing)
1467 {
1468 	struct tegra_dp_priv *priv = dev_get_priv(dev);
1469 	struct tegra_dp_link_config slink_cfg, *link_cfg = &slink_cfg;
1470 	struct tegra_dc_sor_data *sor;
1471 	int data;
1472 	int retry;
1473 	int ret;
1474 
1475 	memset(link_cfg, '\0', sizeof(*link_cfg));
1476 	link_cfg->is_valid = 0;
1477 	link_cfg->scramble_ena = 1;
1478 
1479 	tegra_dc_dpaux_enable(priv);
1480 
1481 	if (tegra_dp_hpd_plug(priv) < 0) {
1482 		debug("dp: hpd plug failed\n");
1483 		return -EIO;
1484 	}
1485 
1486 	link_cfg->bits_per_pixel = panel_bpp;
1487 	if (tegra_dc_dp_init_max_link_cfg(timing, priv, link_cfg)) {
1488 		debug("dp: failed to init link configuration\n");
1489 		return -ENOLINK;
1490 	}
1491 
1492 	ret = tegra_dc_sor_init(&sor);
1493 	if (ret)
1494 		return ret;
1495 	priv->sor = sor;
1496 	ret = tegra_dc_sor_enable_dp(sor, link_cfg);
1497 	if (ret)
1498 		return ret;
1499 
1500 	tegra_dc_sor_set_panel_power(sor, 1);
1501 
1502 	/* Write power on to DPCD */
1503 	data = DP_SET_POWER_D0;
1504 	retry = 0;
1505 	do {
1506 		ret = tegra_dc_dp_dpcd_write(priv, DP_SET_POWER, data);
1507 	} while ((retry++ < DP_POWER_ON_MAX_TRIES) && ret);
1508 
1509 	if (ret || retry >= DP_POWER_ON_MAX_TRIES) {
1510 		debug("dp: failed to power on panel (0x%x)\n", ret);
1511 		return -ENETUNREACH;
1513 	}
1514 
1515 	/* Confirm DP plugging status */
1516 	if (!(tegra_dpaux_readl(priv, DPAUX_DP_AUXSTAT) &
1517 			DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
1518 		debug("dp: could not detect HPD\n");
1519 		return -ENXIO;
1520 	}
1521 
1522 	/* Check DP version */
1523 	if (tegra_dc_dp_dpcd_read(priv, DP_DPCD_REV, &priv->revision)) {
1524 		debug("dp: failed to read the revision number from sink\n");
1525 		return -EIO;
1526 	}
1527 
1528 	if (tegra_dc_dp_explore_link_cfg(priv, link_cfg, sor, timing)) {
1529 		debug("dp: error configuring link\n");
1530 		return -ENOMEDIUM;
1531 	}
1532 
1533 	tegra_dc_sor_set_power_state(sor, 1);
1534 	ret = tegra_dc_sor_attach(sor, link_cfg, timing);
1535 	if (ret && ret != -EEXIST)
1536 		return ret;
1537 
1538 	/*
1539 	 * This takes a long time, but can apparently resolve a failure to
1540 	 * bring up the display correctly.
1541 	 */
1542 	if (0) {
1543 		ret = tegra_dc_dp_check_sink(priv, link_cfg, timing);
1544 		if (ret)
1545 			return ret;
1546 	}
1547 
1548 	/* Power down the unused lanes to save power - a few hundred mW */
1549 	tegra_dc_sor_power_down_unused_lanes(sor, link_cfg);
1550 
1551 	priv->enabled = true;
1553 	return 0;
1554 }
1555 
1556 static int tegra_dp_ofdata_to_platdata(struct udevice *dev)
1557 {
1558 	struct tegra_dp_plat *plat = dev_get_platdata(dev);
1559 
1560 	plat->base = dev_get_addr(dev);
1561 
1562 	return 0;
1563 }
1564 
1565 static int tegra_dp_read_edid(struct udevice *dev, u8 *buf, int buf_size)
1566 {
1567 	struct tegra_dp_priv *priv = dev_get_priv(dev);
1568 	const int tegra_edid_i2c_address = 0x50;
1569 	u32 aux_stat = 0;
1570 
1571 	tegra_dc_dpaux_enable(priv);
1572 
1573 	return tegra_dc_i2c_aux_read(priv, tegra_edid_i2c_address, 0, buf,
1574 				     buf_size, &aux_stat);
1575 }
1576 
1577 static const struct dm_display_ops dp_tegra_ops = {
1578 	.read_edid = tegra_dp_read_edid,
1579 	.enable = tegra_dp_enable,
1580 };
1581 
1582 static int dp_tegra_probe(struct udevice *dev)
1583 {
1584 	struct tegra_dp_plat *plat = dev_get_platdata(dev);
1585 	struct tegra_dp_priv *priv = dev_get_priv(dev);
1586 
1587 	priv->regs = (struct dpaux_ctlr *)plat->base;
1588 	priv->enabled = false;
1589 
1590 	return 0;
1591 }
1592 
1593 static const struct udevice_id tegra_dp_ids[] = {
1594 	{ .compatible = "nvidia,tegra124-dpaux" },
1595 	{ }
1596 };
1597 
1598 U_BOOT_DRIVER(dp_tegra) = {
1599 	.name	= "dpaux_tegra",
1600 	.id	= UCLASS_DISPLAY,
1601 	.of_match = tegra_dp_ids,
1602 	.ofdata_to_platdata = tegra_dp_ofdata_to_platdata,
1603 	.probe	= dp_tegra_probe,
1604 	.ops	= &dp_tegra_ops,
1605 	.priv_auto_alloc_size = sizeof(struct tegra_dp_priv),
1606 	.platdata_auto_alloc_size = sizeof(struct tegra_dp_plat),
1607 };
1608