1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2011-2013, NVIDIA Corporation.
4 * Copyright 2014 Google Inc.
5 */
6
7 #include <common.h>
8 #include <display.h>
9 #include <dm.h>
10 #include <div64.h>
11 #include <errno.h>
12 #include <video_bridge.h>
13 #include <asm/io.h>
14 #include <asm/arch-tegra/dc.h>
15 #include "display.h"
16 #include "edid.h"
17 #include "sor.h"
18 #include "displayport.h"
19
20 #define DO_FAST_LINK_TRAINING 1
21
22 struct tegra_dp_plat {
23 ulong base;
24 };
25
26 /**
27 * struct tegra_dp_priv - private displayport driver info
28 *
29 * @dc_dev: Display controller device that is sending the video feed
30 */
31 struct tegra_dp_priv {
32 struct udevice *sor;
33 struct udevice *dc_dev;
34 struct dpaux_ctlr *regs;
35 u8 revision;
36 int enabled;
37 };
38
39 struct tegra_dp_priv dp_data;
40
41 static inline u32 tegra_dpaux_readl(struct tegra_dp_priv *dp, u32 reg)
42 {
43 return readl((u32 *)dp->regs + reg);
44 }
45
46 static inline void tegra_dpaux_writel(struct tegra_dp_priv *dp, u32 reg,
47 u32 val)
48 {
49 writel(val, (u32 *)dp->regs + reg);
50 }
51
52 static inline u32 tegra_dc_dpaux_poll_register(struct tegra_dp_priv *dp,
53 u32 reg, u32 mask, u32 exp_val,
54 u32 poll_interval_us,
55 u32 timeout_us)
56 {
57 u32 reg_val = 0;
58 u32 temp = timeout_us;
59
60 do {
61 udelay(poll_interval_us);
62 reg_val = tegra_dpaux_readl(dp, reg);
63 if (timeout_us > poll_interval_us)
64 timeout_us -= poll_interval_us;
65 else
66 break;
67 } while ((reg_val & mask) != exp_val);
68
69 if ((reg_val & mask) == exp_val)
70 return 0; /* success */
71 debug("dpaux_poll_register 0x%x: timeout: (reg_val)0x%08x & (mask)0x%08x != (exp_val)0x%08x\n",
72 reg, reg_val, mask, exp_val);
73 return temp;
74 }
75
76 static inline int tegra_dpaux_wait_transaction(struct tegra_dp_priv *dp)
77 {
78 /* According to DP spec, each aux transaction needs to finish
79 within 40ms. */
80 if (tegra_dc_dpaux_poll_register(dp, DPAUX_DP_AUXCTL,
81 DPAUX_DP_AUXCTL_TRANSACTREQ_MASK,
82 DPAUX_DP_AUXCTL_TRANSACTREQ_DONE,
83 100, DP_AUX_TIMEOUT_MS * 1000) != 0) {
84 debug("dp: DPAUX transaction timeout\n");
85 return -1;
86 }
87 return 0;
88 }
89
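/*
 * Write at most one AUX chunk: DP_AUX_MAX_BYTES (16 bytes) is the most
 * a single DisplayPort AUX transaction can carry. The transaction is
 * retried on receiver/timeout errors and on AUX/I2C DEFER replies, up
 * to the respective retry limits.
 */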
90 static int tegra_dc_dpaux_write_chunk(struct tegra_dp_priv *dp, u32 cmd,
91 u32 addr, u8 *data, u32 *size,
92 u32 *aux_stat)
93 {
94 int i;
95 u32 reg_val;
96 u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
97 u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
98 u32 temp_data;
99
100 if (*size > DP_AUX_MAX_BYTES)
101 return -1; /* only write one chunk of data */
102
103 /* Make sure the command is a write command */
104 switch (cmd) {
105 case DPAUX_DP_AUXCTL_CMD_I2CWR:
106 case DPAUX_DP_AUXCTL_CMD_MOTWR:
107 case DPAUX_DP_AUXCTL_CMD_AUXWR:
108 break;
109 default:
110 debug("dp: aux write cmd 0x%x is invalid\n", cmd);
111 return -EINVAL;
112 }
113
114 tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
115 for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i) {
116 memcpy(&temp_data, data, 4);
117 tegra_dpaux_writel(dp, DPAUX_DP_AUXDATA_WRITE_W(i), temp_data);
118 data += 4;
119 }
120
121 reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
122 reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
123 reg_val |= cmd;
124 reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
125 reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);
126
127 while ((timeout_retries > 0) && (defer_retries > 0)) {
128 if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
129 (defer_retries != DP_AUX_DEFER_MAX_TRIES))
130 udelay(1);
131
132 reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
133 tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);
134
135 if (tegra_dpaux_wait_transaction(dp))
136 debug("dp: aux write transaction timeout\n");
137
138 *aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
139
140 if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
141 (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
142 (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
143 (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
144 if (timeout_retries-- > 0) {
145 debug("dp: aux write retry (0x%x) -- %d\n",
146 *aux_stat, timeout_retries);
147 /* clear the error bits */
148 tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
149 *aux_stat);
150 continue;
151 } else {
152 debug("dp: aux write got error (0x%x)\n",
153 *aux_stat);
154 return -ETIMEDOUT;
155 }
156 }
157
158 if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
159 (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
160 if (defer_retries-- > 0) {
161 debug("dp: aux write defer (0x%x) -- %d\n",
162 *aux_stat, defer_retries);
163 /* clear the error bits */
164 tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
165 *aux_stat);
166 continue;
167 } else {
168 debug("dp: aux write defer exceeds max retries (0x%x)\n",
169 *aux_stat);
170 return -ETIMEDOUT;
171 }
172 }
173
174 if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
175 DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
176 *size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
177 return 0;
178 } else {
179 debug("dp: aux write failed (0x%x)\n", *aux_stat);
180 return -EIO;
181 }
182 }
183 /* Should never get here */
184 return -EIO;
185 }
186
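/*
 * Read at most one AUX chunk (16 bytes). HPD must be asserted; as in
 * the write path, the transaction is retried on errors and on DEFER
 * replies, and the data words are copied out only on an ACK reply.
 */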
187 static int tegra_dc_dpaux_read_chunk(struct tegra_dp_priv *dp, u32 cmd,
188 u32 addr, u8 *data, u32 *size,
189 u32 *aux_stat)
190 {
191 u32 reg_val;
192 u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
193 u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
194
195 if (*size > DP_AUX_MAX_BYTES) {
196 debug("only read one chunk\n");
197 return -EIO; /* only read one chunk */
198 }
199
200 /* Make sure the command is a read command */
201 switch (cmd) {
202 case DPAUX_DP_AUXCTL_CMD_I2CRD:
203 case DPAUX_DP_AUXCTL_CMD_I2CREQWSTAT:
204 case DPAUX_DP_AUXCTL_CMD_MOTRD:
205 case DPAUX_DP_AUXCTL_CMD_AUXRD:
206 break;
207 default:
208 debug("dp: aux read cmd 0x%x is invalid\n", cmd);
209 return -EIO;
210 }
211
212 *aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
213 if (!(*aux_stat & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
214 debug("dp: HPD is not detected\n");
215 return -EIO;
216 }
217
218 tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
219
220 reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
221 reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
222 reg_val |= cmd;
223 reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
224 reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);
225 while ((timeout_retries > 0) && (defer_retries > 0)) {
226 if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
227 (defer_retries != DP_AUX_DEFER_MAX_TRIES))
228 udelay(DP_DPCP_RETRY_SLEEP_NS * 2);
229
230 reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
231 tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);
232
233 if (tegra_dpaux_wait_transaction(dp))
234 debug("dp: aux read transaction timeout\n");
235
236 *aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
237
238 if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
239 (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
240 (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
241 (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
242 if (timeout_retries-- > 0) {
243 debug("dp: aux read retry (0x%x) -- %d\n",
244 *aux_stat, timeout_retries);
245 /* clear the error bits */
246 tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
247 *aux_stat);
248 continue; /* retry */
249 } else {
250 debug("dp: aux read got error (0x%x)\n",
251 *aux_stat);
252 return -ETIMEDOUT;
253 }
254 }
255
256 if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
257 (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
258 if (defer_retries-- > 0) {
259 debug("dp: aux read defer (0x%x) -- %d\n",
260 *aux_stat, defer_retries);
261 /* clear the error bits */
262 tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
263 *aux_stat);
264 continue;
265 } else {
266 debug("dp: aux read defer exceeds max retries (0x%x)\n",
267 *aux_stat);
268 return -ETIMEDOUT;
269 }
270 }
271
272 if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
273 DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
274 int i;
275 u32 temp_data[4];
276
277 for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i)
278 temp_data[i] = tegra_dpaux_readl(dp,
279 DPAUX_DP_AUXDATA_READ_W(i));
280
281 *size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
282 memcpy(data, temp_data, *size);
283
284 return 0;
285 } else {
286 debug("dp: aux read failed (0x%x)\n", *aux_stat);
287 return -EIO;
288 }
289 }
290 /* Should never get here */
291 debug("%s: can't\n", __func__);
292
293 return -EIO;
294 }
295
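/*
 * Read an arbitrary-length buffer by looping over 16-byte AUX chunks,
 * advancing the address and destination by the number of bytes the
 * sink actually returned for each chunk.
 */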
296 static int tegra_dc_dpaux_read(struct tegra_dp_priv *dp, u32 cmd, u32 addr,
297 u8 *data, u32 *size, u32 *aux_stat)
298 {
299 u32 finished = 0;
300 u32 cur_size;
301 int ret = 0;
302
303 do {
304 cur_size = *size - finished;
305 if (cur_size > DP_AUX_MAX_BYTES)
306 cur_size = DP_AUX_MAX_BYTES;
307
308 ret = tegra_dc_dpaux_read_chunk(dp, cmd, addr,
309 data, &cur_size, aux_stat);
310 if (ret)
311 break;
312
313 /* cur_size should be the real size returned */
314 addr += cur_size;
315 data += cur_size;
316 finished += cur_size;
317
318 } while (*size > finished);
319 *size = finished;
320
321 return ret;
322 }
323
324 static int tegra_dc_dp_dpcd_read(struct tegra_dp_priv *dp, u32 cmd,
325 u8 *data_ptr)
326 {
327 u32 size = 1;
328 u32 status = 0;
329 int ret;
330
331 ret = tegra_dc_dpaux_read_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
332 cmd, data_ptr, &size, &status);
333 if (ret) {
334 debug("dp: Failed to read DPCD data. CMD 0x%x, Status 0x%x\n",
335 cmd, status);
336 }
337
338 return ret;
339 }
340
341 static int tegra_dc_dp_dpcd_write(struct tegra_dp_priv *dp, u32 cmd,
342 u8 data)
343 {
344 u32 size = 1;
345 u32 status = 0;
346 int ret;
347
348 ret = tegra_dc_dpaux_write_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXWR,
349 cmd, &data, &size, &status);
350 if (ret) {
351 debug("dp: Failed to write DPCD data. CMD 0x%x, Status 0x%x\n",
352 cmd, status);
353 }
354
355 return ret;
356 }
357
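/*
 * I2C-over-AUX read, used for EDID: for each chunk, first send the
 * register offset with a Middle-Of-Transaction (MOT) write so the I2C
 * transaction stays open, then issue the I2C read for the data bytes.
 */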
358 static int tegra_dc_i2c_aux_read(struct tegra_dp_priv *dp, u32 i2c_addr,
359 u8 addr, u8 *data, u32 size, u32 *aux_stat)
360 {
361 u32 finished = 0;
362 int ret = 0;
363
364 do {
365 u32 cur_size = min((u32)DP_AUX_MAX_BYTES, size - finished);
366
367 u32 len = 1;
368 ret = tegra_dc_dpaux_write_chunk(
369 dp, DPAUX_DP_AUXCTL_CMD_MOTWR, i2c_addr,
370 &addr, &len, aux_stat);
371 if (ret) {
372 debug("%s: error sending address to read.\n",
373 __func__);
374 return ret;
375 }
376
377 ret = tegra_dc_dpaux_read_chunk(
378 dp, DPAUX_DP_AUXCTL_CMD_I2CRD, i2c_addr,
379 data, &cur_size, aux_stat);
380 if (ret) {
381 debug("%s: error reading data.\n", __func__);
382 return ret;
383 }
384
385 /* cur_size should be the real size returned */
386 addr += cur_size;
387 data += cur_size;
388 finished += cur_size;
389 } while (size > finished);
390
391 return finished;
392 }
393
394 static void tegra_dc_dpaux_enable(struct tegra_dp_priv *dp)
395 {
396 /* clear interrupt */
397 tegra_dpaux_writel(dp, DPAUX_INTR_AUX, 0xffffffff);
398 /* Do not enable interrupts for now; enable them once the ISR is in place */
399 tegra_dpaux_writel(dp, DPAUX_INTR_EN_AUX, 0x0);
400
401 tegra_dpaux_writel(dp, DPAUX_HYBRID_PADCTL,
402 DPAUX_HYBRID_PADCTL_AUX_DRVZ_OHM_50 |
403 DPAUX_HYBRID_PADCTL_AUX_CMH_V0_70 |
404 0x18 << DPAUX_HYBRID_PADCTL_AUX_DRVI_SHIFT |
405 DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV_ENABLE);
406
407 tegra_dpaux_writel(dp, DPAUX_HYBRID_SPARE,
408 DPAUX_HYBRID_SPARE_PAD_PWR_POWERUP);
409 }
410
411 #ifdef DEBUG
412 static void tegra_dc_dp_dump_link_cfg(struct tegra_dp_priv *dp,
413 const struct tegra_dp_link_config *link_cfg)
414 {
415 debug("DP config: cfg_name cfg_value\n");
416 debug(" Lane Count %d\n",
417 link_cfg->max_lane_count);
418 debug(" SupportEnhancedFraming %s\n",
419 link_cfg->support_enhanced_framing ? "Y" : "N");
420 debug(" Bandwidth %d\n",
421 link_cfg->max_link_bw);
422 debug(" bpp %d\n",
423 link_cfg->bits_per_pixel);
424 debug(" EnhancedFraming %s\n",
425 link_cfg->enhanced_framing ? "Y" : "N");
426 debug(" Scramble_enabled %s\n",
427 link_cfg->scramble_ena ? "Y" : "N");
428 debug(" LinkBW %d\n",
429 link_cfg->link_bw);
430 debug(" lane_count %d\n",
431 link_cfg->lane_count);
432 debug(" activespolarity %d\n",
433 link_cfg->activepolarity);
434 debug(" active_count %d\n",
435 link_cfg->active_count);
436 debug(" tu_size %d\n",
437 link_cfg->tu_size);
438 debug(" active_frac %d\n",
439 link_cfg->active_frac);
440 debug(" watermark %d\n",
441 link_cfg->watermark);
442 debug(" hblank_sym %d\n",
443 link_cfg->hblank_sym);
444 debug(" vblank_sym %d\n",
445 link_cfg->vblank_sym);
446 }
447 #endif
448
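/*
 * Step down to the next link configuration to try after a training
 * failure, walking through alternative link-rate/lane-count
 * combinations until no usable lane count remains (-ENOLINK).
 */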
449 static int _tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
450 struct tegra_dp_link_config *cfg)
451 {
452 switch (cfg->link_bw) {
453 case SOR_LINK_SPEED_G1_62:
454 if (cfg->max_link_bw > SOR_LINK_SPEED_G1_62)
455 cfg->link_bw = SOR_LINK_SPEED_G2_7;
456 cfg->lane_count /= 2;
457 break;
458 case SOR_LINK_SPEED_G2_7:
459 cfg->link_bw = SOR_LINK_SPEED_G1_62;
460 break;
461 case SOR_LINK_SPEED_G5_4:
462 if (cfg->lane_count == 1) {
463 cfg->link_bw = SOR_LINK_SPEED_G2_7;
464 cfg->lane_count = cfg->max_lane_count;
465 } else {
466 cfg->lane_count /= 2;
467 }
468 break;
469 default:
470 debug("dp: Error link rate %d\n", cfg->link_bw);
471 return -ENOLINK;
472 }
473
474 return (cfg->lane_count > 0) ? 0 : -ENOLINK;
475 }
476
477 /*
478 * Calculate whether the given cfg can meet the mode request.
479 * Return 0 if mode is possible, -1 otherwise
480 */
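/*
 * link_rate below is the link symbol clock in Hz: the DPCD link
 * bandwidth code is in units of 0.27 Gbps and, with 8b/10b coding,
 * each lane carries 8 data bits per symbol clock, hence the
 * pclk * bpp < link_rate * 8 * lane_count capacity check.
 *
 * Illustrative example: a 148.5 MHz pixel clock at 24 bpp over four
 * 2.7 Gbps lanes (270 MHz symbol clock) gives
 * ratio = 148.5e6 * 24 / (8 * 270e6 * 4) = 0.4125, i.e. about 41% of
 * link symbols carry active pixel data. The loop below then scans TU
 * sizes from 64 down to 32 for the active count/fraction pair with the
 * smallest accumulated per-line error.
 */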
481 static int tegra_dc_dp_calc_config(struct tegra_dp_priv *dp,
482 const struct display_timing *timing,
483 struct tegra_dp_link_config *link_cfg)
484 {
485 const u32 link_rate = 27 * link_cfg->link_bw * 1000 * 1000;
486 const u64 f = 100000; /* precision factor */
487 u32 num_linkclk_line; /* Number of link clocks per line */
488 u64 ratio_f; /* Ratio of incoming to outgoing data rate */
489 u64 frac_f;
490 u64 activesym_f; /* Activesym per TU */
491 u64 activecount_f;
492 u32 activecount;
493 u32 activepolarity;
494 u64 approx_value_f;
495 u32 activefrac = 0;
496 u64 accumulated_error_f = 0;
497 u32 lowest_neg_activecount = 0;
498 u32 lowest_neg_activepolarity = 0;
499 u32 lowest_neg_tusize = 64;
500 u32 num_symbols_per_line;
501 u64 lowest_neg_activefrac = 0;
502 u64 lowest_neg_error_f = 64 * f;
503 u64 watermark_f;
504 int i;
505 int neg;
506
507 if (!link_rate || !link_cfg->lane_count || !timing->pixelclock.typ ||
508 !link_cfg->bits_per_pixel)
509 return -1;
510
511 if ((u64)timing->pixelclock.typ * link_cfg->bits_per_pixel >=
512 (u64)link_rate * 8 * link_cfg->lane_count)
513 return -1;
514
515 num_linkclk_line = (u32)(lldiv(link_rate * timing->hactive.typ,
516 timing->pixelclock.typ));
517
518 ratio_f = (u64)timing->pixelclock.typ * link_cfg->bits_per_pixel * f;
519 ratio_f /= 8;
520 do_div(ratio_f, link_rate * link_cfg->lane_count);
521
522 for (i = 64; i >= 32; --i) {
523 activesym_f = ratio_f * i;
524 activecount_f = lldiv(activesym_f, (u32)f) * f;
525 frac_f = activesym_f - activecount_f;
526 activecount = (u32)(lldiv(activecount_f, (u32)f));
527
528 if (frac_f < (lldiv(f, 2))) /* fraction < 0.5 */
529 activepolarity = 0;
530 else {
531 activepolarity = 1;
532 frac_f = f - frac_f;
533 }
534
535 if (frac_f != 0) {
536 /* warning: frac_f should be 64-bit */
537 frac_f = lldiv(f * f, frac_f); /* 1 / fraction */
538 if (frac_f > (15 * f))
539 activefrac = activepolarity ? 1 : 15;
540 else
541 activefrac = activepolarity ?
542 (u32)lldiv(frac_f, (u32)f) + 1 :
543 (u32)lldiv(frac_f, (u32)f);
544 }
545
546 if (activefrac == 1)
547 activepolarity = 0;
548
549 if (activepolarity == 1)
550 approx_value_f = activefrac ? lldiv(
551 (activecount_f + (activefrac * f - f) * f),
552 (activefrac * f)) :
553 activecount_f + f;
554 else
555 approx_value_f = activefrac ?
556 activecount_f + lldiv(f, activefrac) :
557 activecount_f;
558
559 if (activesym_f < approx_value_f) {
560 accumulated_error_f = num_linkclk_line *
561 lldiv(approx_value_f - activesym_f, i);
562 neg = 1;
563 } else {
564 accumulated_error_f = num_linkclk_line *
565 lldiv(activesym_f - approx_value_f, i);
566 neg = 0;
567 }
568
569 if ((neg && (lowest_neg_error_f > accumulated_error_f)) ||
570 (accumulated_error_f == 0)) {
571 lowest_neg_error_f = accumulated_error_f;
572 lowest_neg_tusize = i;
573 lowest_neg_activecount = activecount;
574 lowest_neg_activepolarity = activepolarity;
575 lowest_neg_activefrac = activefrac;
576
577 if (accumulated_error_f == 0)
578 break;
579 }
580 }
581
582 if (lowest_neg_activefrac == 0) {
583 link_cfg->activepolarity = 0;
584 link_cfg->active_count = lowest_neg_activepolarity ?
585 lowest_neg_activecount : lowest_neg_activecount - 1;
586 link_cfg->tu_size = lowest_neg_tusize;
587 link_cfg->active_frac = 1;
588 } else {
589 link_cfg->activepolarity = lowest_neg_activepolarity;
590 link_cfg->active_count = (u32)lowest_neg_activecount;
591 link_cfg->tu_size = lowest_neg_tusize;
592 link_cfg->active_frac = (u32)lowest_neg_activefrac;
593 }
594
595 watermark_f = lldiv(ratio_f * link_cfg->tu_size * (f - ratio_f), f);
596 link_cfg->watermark = (u32)(lldiv(watermark_f + lowest_neg_error_f,
597 f)) + link_cfg->bits_per_pixel / 4 - 1;
598 num_symbols_per_line = (timing->hactive.typ *
599 link_cfg->bits_per_pixel) /
600 (8 * link_cfg->lane_count);
601
602 if (link_cfg->watermark > 30) {
603 debug("dp: sor setting: unable to get a good tusize, force watermark to 30\n");
604 link_cfg->watermark = 30;
605 return -1;
606 } else if (link_cfg->watermark > num_symbols_per_line) {
607 debug("dp: sor setting: force watermark to the number of symbols in the line\n");
608 link_cfg->watermark = num_symbols_per_line;
609 return -1;
610 }
611
612 /*
613 * Refer to dev_disp.ref for more information.
614 * # symbols/hblank = ((SetRasterBlankEnd.X + SetRasterSize.Width -
615 * SetRasterBlankStart.X - 7) * link_clk / pclk)
616 * - 3 * enhanced_framing - Y
617 * where Y = (# lanes == 4) 3 : (# lanes == 2) ? 6 : 12
618 */
619 link_cfg->hblank_sym = (int)lldiv(((uint64_t)timing->hback_porch.typ +
620 timing->hfront_porch.typ + timing->hsync_len.typ - 7) *
621 link_rate, timing->pixelclock.typ) -
622 3 * link_cfg->enhanced_framing -
623 (12 / link_cfg->lane_count);
624
625 if (link_cfg->hblank_sym < 0)
626 link_cfg->hblank_sym = 0;
627
628
629 /*
630 * Refer to dev_disp.ref for more information.
631 * # symbols/vblank = ((SetRasterBlankStart.X -
632 * SetRasterBlankEnd.X - 25) * link_clk / pclk)
633 * - Y - 1;
634 * where Y = (# lanes == 4) 12 : (# lanes == 2) ? 21 : 39
635 */
636 link_cfg->vblank_sym = (int)lldiv(((uint64_t)timing->hactive.typ - 25)
637 * link_rate, timing->pixelclock.typ) - (36 /
638 link_cfg->lane_count) - 4;
639
640 if (link_cfg->vblank_sym < 0)
641 link_cfg->vblank_sym = 0;
642
643 link_cfg->is_valid = 1;
644 #ifdef DEBUG
645 tegra_dc_dp_dump_link_cfg(dp, link_cfg);
646 #endif
647
648 return 0;
649 }
650
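/*
 * Read the sink's capabilities from the DPCD (max lane count, max link
 * rate, enhanced framing, downspread, TPS3 and eDP ASSR support) and
 * seed link_cfg with the maximum settings; link training may later
 * lower them.
 */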
651 static int tegra_dc_dp_init_max_link_cfg(
652 const struct display_timing *timing,
653 struct tegra_dp_priv *dp,
654 struct tegra_dp_link_config *link_cfg)
655 {
656 const int drive_current = 0x40404040;
657 const int preemphasis = 0x0f0f0f0f;
658 const int postcursor = 0;
659 u8 dpcd_data;
660 int ret;
661
662 ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LANE_COUNT, &dpcd_data);
663 if (ret)
664 return ret;
665 link_cfg->max_lane_count = dpcd_data & DP_MAX_LANE_COUNT_MASK;
666 link_cfg->tps3_supported = (dpcd_data &
667 DP_MAX_LANE_COUNT_TPS3_SUPPORTED_YES) ? 1 : 0;
668
669 link_cfg->support_enhanced_framing =
670 (dpcd_data & DP_MAX_LANE_COUNT_ENHANCED_FRAMING_YES) ?
671 1 : 0;
672
673 ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_DOWNSPREAD, &dpcd_data);
674 if (ret)
675 return ret;
676 link_cfg->downspread = (dpcd_data & DP_MAX_DOWNSPREAD_VAL_0_5_PCT) ?
677 1 : 0;
678
679 ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_TRAINING_AUX_RD_INTERVAL,
680 &link_cfg->aux_rd_interval);
681 if (ret)
682 return ret;
683 ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LINK_RATE,
684 &link_cfg->max_link_bw);
685 if (ret)
686 return ret;
687
688 /*
689 * Set to a high value for link training and attach.
690 * Will be re-programmed when dp is enabled.
691 */
692 link_cfg->drive_current = drive_current;
693 link_cfg->preemphasis = preemphasis;
694 link_cfg->postcursor = postcursor;
695
696 ret = tegra_dc_dp_dpcd_read(dp, DP_EDP_CONFIGURATION_CAP, &dpcd_data);
697 if (ret)
698 return ret;
699
700 link_cfg->alt_scramber_reset_cap =
701 (dpcd_data & DP_EDP_CONFIGURATION_CAP_ASC_RESET_YES) ?
702 1 : 0;
703 link_cfg->only_enhanced_framing =
704 (dpcd_data & DP_EDP_CONFIGURATION_CAP_FRAMING_CHANGE_YES) ?
705 1 : 0;
706
707 link_cfg->lane_count = link_cfg->max_lane_count;
708 link_cfg->link_bw = link_cfg->max_link_bw;
709 link_cfg->enhanced_framing = link_cfg->support_enhanced_framing;
710 link_cfg->frame_in_ms = (1000 / 60) + 1;
711
712 tegra_dc_dp_calc_config(dp, timing, link_cfg);
713 return 0;
714 }
715
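/*
 * Enable or disable Alternate Scrambler Seed Reset (ASSR), the eDP
 * scrambler variant seeded with 0xfffe, on both the sink (via
 * DP_EDP_CONFIGURATION_SET) and the SOR.
 */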
716 static int tegra_dc_dp_set_assr(struct tegra_dp_priv *priv,
717 struct udevice *sor, int ena)
718 {
719 int ret;
720
721 u8 dpcd_data = ena ?
722 DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_ENABLE :
723 DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_DISABLE;
724
725 ret = tegra_dc_dp_dpcd_write(priv, DP_EDP_CONFIGURATION_SET,
726 dpcd_data);
727 if (ret)
728 return ret;
729
730 /* Also reset the scrambler to 0xfffe */
731 tegra_dc_sor_set_internal_panel(sor, ena);
732 return 0;
733 }
734
735 static int tegra_dp_set_link_bandwidth(struct tegra_dp_priv *dp,
736 struct udevice *sor,
737 u8 link_bw)
738 {
739 tegra_dc_sor_set_link_bandwidth(sor, link_bw);
740
741 /* Sink side */
742 return tegra_dc_dp_dpcd_write(dp, DP_LINK_BW_SET, link_bw);
743 }
744
745 static int tegra_dp_set_lane_count(struct tegra_dp_priv *dp,
746 const struct tegra_dp_link_config *link_cfg,
747 struct udevice *sor)
748 {
749 u8 dpcd_data;
750 int ret;
751
752 /* Check whether the panel supports enhanced framing */
753 dpcd_data = link_cfg->lane_count;
754 if (link_cfg->enhanced_framing)
755 dpcd_data |= DP_LANE_COUNT_SET_ENHANCEDFRAMING_T;
756 ret = tegra_dc_dp_dpcd_write(dp, DP_LANE_COUNT_SET, dpcd_data);
757 if (ret)
758 return ret;
759
760 tegra_dc_sor_set_lane_count(sor, link_cfg->lane_count);
761
762 /* Also power down lanes that will not be used */
763 return 0;
764 }
765
766 static int tegra_dc_dp_link_trained(struct tegra_dp_priv *dp,
767 const struct tegra_dp_link_config *cfg)
768 {
769 u32 lane;
770 u8 mask;
771 u8 data;
772 int ret;
773
774 for (lane = 0; lane < cfg->lane_count; ++lane) {
775 ret = tegra_dc_dp_dpcd_read(dp, (lane / 2) ?
776 DP_LANE2_3_STATUS : DP_LANE0_1_STATUS,
777 &data);
778 if (ret)
779 return ret;
780 mask = (lane & 1) ?
781 NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES |
782 NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_YES |
783 NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_YES :
784 DP_LANE_CR_DONE |
785 DP_LANE_CHANNEL_EQ_DONE |
786 DP_LANE_SYMBOL_LOCKED;
787 if ((data & mask) != mask)
788 return -1;
789 }
790 return 0;
791 }
792
793 static int tegra_dp_channel_eq_status(struct tegra_dp_priv *dp,
794 const struct tegra_dp_link_config *cfg)
795 {
796 u32 cnt;
797 u32 n_lanes = cfg->lane_count;
798 u8 data;
799 u8 ce_done = 1;
800 int ret;
801
802 for (cnt = 0; cnt < n_lanes / 2; cnt++) {
803 ret = tegra_dc_dp_dpcd_read(dp, DP_LANE0_1_STATUS + cnt, &data);
804 if (ret)
805 return ret;
806
807 if (n_lanes == 1) {
808 ce_done = (data & (0x1 <<
809 NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) &&
810 (data & (0x1 <<
811 NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT));
812 break;
813 } else if (!(data & (0x1 <<
814 NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) ||
815 !(data & (0x1 <<
816 NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT)) ||
817 !(data & (0x1 <<
818 NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_SHIFT)) ||
819 !(data & (0x1 <<
820 NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_SHIFT)))
821 return -EIO;
822 }
823
824 if (ce_done) {
825 ret = tegra_dc_dp_dpcd_read(dp,
826 DP_LANE_ALIGN_STATUS_UPDATED,
827 &data);
828 if (ret)
829 return ret;
830 if (!(data & NV_DPCD_LANE_ALIGN_STATUS_UPDATED_DONE_YES))
831 ce_done = 0;
832 }
833
834 return ce_done ? 0 : -EIO;
835 }
836
837 static int tegra_dp_clock_recovery_status(struct tegra_dp_priv *dp,
838 const struct tegra_dp_link_config *cfg)
839 {
840 u32 cnt;
841 u32 n_lanes = cfg->lane_count;
842 u8 data_ptr;
843 int ret;
844
845 for (cnt = 0; cnt < n_lanes / 2; cnt++) {
846 ret = tegra_dc_dp_dpcd_read(dp, (DP_LANE0_1_STATUS + cnt),
847 &data_ptr);
848 if (ret)
849 return ret;
850
851 if (n_lanes == 1)
852 return (data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ?
853 1 : 0;
854 else if (!(data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ||
855 !(data_ptr & (NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES)))
856 return 0;
857 }
858
859 return 1;
860 }
861
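/*
 * Parse the sink's ADJUST_REQUEST registers: each byte carries the
 * requested voltage swing and pre-emphasis for a pair of lanes (two
 * bits per field per lane). When post-cursor2/TPS3 is supported, the
 * post-cursor2 requests are read as well.
 */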
862 static int tegra_dp_lt_adjust(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
863 u32 pc[4], u8 pc_supported,
864 const struct tegra_dp_link_config *cfg)
865 {
866 size_t cnt;
867 u8 data_ptr;
868 u32 n_lanes = cfg->lane_count;
869 int ret;
870
871 for (cnt = 0; cnt < n_lanes / 2; cnt++) {
872 ret = tegra_dc_dp_dpcd_read(dp, DP_ADJUST_REQUEST_LANE0_1 + cnt,
873 &data_ptr);
874 if (ret)
875 return ret;
876 pe[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_PE_MASK) >>
877 NV_DPCD_ADJUST_REQ_LANEX_PE_SHIFT;
878 vs[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_DC_MASK) >>
879 NV_DPCD_ADJUST_REQ_LANEX_DC_SHIFT;
880 pe[1 + 2 * cnt] =
881 (data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_MASK) >>
882 NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_SHIFT;
883 vs[1 + 2 * cnt] =
884 (data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_MASK) >>
885 NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_SHIFT;
886 }
887 if (pc_supported) {
888 ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_ADJUST_REQ_POST_CURSOR2,
889 &data_ptr);
890 if (ret)
891 return ret;
892 for (cnt = 0; cnt < n_lanes; cnt++) {
893 pc[cnt] = (data_ptr >>
894 NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_SHIFT(cnt)) &
895 NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_MASK;
896 }
897 }
898
899 return 0;
900 }
901
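/*
 * Wait for the interval the sink wants between transmitting a training
 * pattern and reading back its status/adjust registers. A zero
 * TRAINING_AUX_RD_INTERVAL means the spec default of a few hundred
 * microseconds; a non-zero value is in units of 4 ms.
 */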
902 static void tegra_dp_wait_aux_training(struct tegra_dp_priv *dp,
903 bool is_clk_recovery,
904 const struct tegra_dp_link_config *cfg)
905 {
906 if (!cfg->aux_rd_interval)
907 udelay(is_clk_recovery ? 200 : 500);
908 else
909 mdelay(cfg->aux_rd_interval * 4);
910 }
911
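/*
 * Select a training pattern on both ends of the link: program the SOR
 * to transmit it and write DP_TRAINING_PATTERN_SET on the sink.
 * Scrambling is disabled while a training pattern is active and
 * re-enabled when the pattern is switched off.
 */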
912 static void tegra_dp_tpg(struct tegra_dp_priv *dp, u32 tp, u32 n_lanes,
913 const struct tegra_dp_link_config *cfg)
914 {
915 u8 data = (tp == training_pattern_disabled)
916 ? (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_F)
917 : (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_T);
918
919 tegra_dc_sor_set_dp_linkctl(dp->sor, 1, tp, cfg);
920 tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, data);
921 }
922
923 static int tegra_dp_link_config(struct tegra_dp_priv *dp,
924 const struct tegra_dp_link_config *link_cfg)
925 {
926 u8 dpcd_data;
927 u32 retry;
928 int ret;
929
930 if (link_cfg->lane_count == 0) {
931 debug("dp: error: lane count is 0. Cannot set link config.\n");
932 return -ENOLINK;
933 }
934
935 /* Set power state if it is not in normal level */
936 ret = tegra_dc_dp_dpcd_read(dp, DP_SET_POWER, &dpcd_data);
937 if (ret)
938 return ret;
939
940 if (dpcd_data == DP_SET_POWER_D3) {
941 dpcd_data = DP_SET_POWER_D0;
942
943 /* DP spec requires 3 retries */
944 for (retry = 3; retry > 0; --retry) {
945 ret = tegra_dc_dp_dpcd_write(dp, DP_SET_POWER,
946 dpcd_data);
947 if (!ret)
948 break;
949 if (retry == 1) {
950 debug("dp: Failed to set DP panel power\n");
951 return ret;
952 }
953 }
954 }
955
956 /* Enable ASSR if possible */
957 if (link_cfg->alt_scramber_reset_cap) {
958 ret = tegra_dc_dp_set_assr(dp, dp->sor, 1);
959 if (ret)
960 return ret;
961 }
962
963 ret = tegra_dp_set_link_bandwidth(dp, dp->sor, link_cfg->link_bw);
964 if (ret) {
965 debug("dp: Failed to set link bandwidth\n");
966 return ret;
967 }
968 ret = tegra_dp_set_lane_count(dp, link_cfg, dp->sor);
969 if (ret) {
970 debug("dp: Failed to set lane count\n");
971 return ret;
972 }
973 tegra_dc_sor_set_dp_linkctl(dp->sor, 1, training_pattern_none,
974 link_cfg);
975
976 return 0;
977 }
978
979 static int tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
980 const struct display_timing *timing,
981 struct tegra_dp_link_config *cfg)
982 {
983 struct tegra_dp_link_config tmp_cfg;
984 int ret;
985
986 tmp_cfg = *cfg;
987 cfg->is_valid = 0;
988
989 ret = _tegra_dp_lower_link_config(dp, cfg);
990 if (!ret)
991 ret = tegra_dc_dp_calc_config(dp, timing, cfg);
992 if (!ret)
993 ret = tegra_dp_link_config(dp, cfg);
994 if (ret)
995 goto fail;
996
997 return 0;
998
999 fail:
1000 *cfg = tmp_cfg;
1001 tegra_dp_link_config(dp, &tmp_cfg);
1002 return ret;
1003 }
1004
1005 static int tegra_dp_lt_config(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
1006 u32 pc[4], const struct tegra_dp_link_config *cfg)
1007 {
1008 struct udevice *sor = dp->sor;
1009 u32 n_lanes = cfg->lane_count;
1010 u8 pc_supported = cfg->tps3_supported;
1011 u32 cnt;
1012 u32 val;
1013
1014 for (cnt = 0; cnt < n_lanes; cnt++) {
1015 u32 mask = 0;
1016 u32 pe_reg, vs_reg, pc_reg;
1017 u32 shift = 0;
1018
1019 switch (cnt) {
1020 case 0:
1021 mask = PR_LANE2_DP_LANE0_MASK;
1022 shift = PR_LANE2_DP_LANE0_SHIFT;
1023 break;
1024 case 1:
1025 mask = PR_LANE1_DP_LANE1_MASK;
1026 shift = PR_LANE1_DP_LANE1_SHIFT;
1027 break;
1028 case 2:
1029 mask = PR_LANE0_DP_LANE2_MASK;
1030 shift = PR_LANE0_DP_LANE2_SHIFT;
1031 break;
1032 case 3:
1033 mask = PR_LANE3_DP_LANE3_MASK;
1034 shift = PR_LANE3_DP_LANE3_SHIFT;
1035 break;
1036 default:
1037 debug("dp: incorrect lane cnt\n");
1038 return -EINVAL;
1039 }
1040
1041 pe_reg = tegra_dp_pe_regs[pc[cnt]][vs[cnt]][pe[cnt]];
1042 vs_reg = tegra_dp_vs_regs[pc[cnt]][vs[cnt]][pe[cnt]];
1043 pc_reg = tegra_dp_pc_regs[pc[cnt]][vs[cnt]][pe[cnt]];
1044
1045 tegra_dp_set_pe_vs_pc(sor, mask, pe_reg << shift,
1046 vs_reg << shift, pc_reg << shift,
1047 pc_supported);
1048 }
1049
1050 tegra_dp_disable_tx_pu(dp->sor);
1051 udelay(20);
1052
1053 for (cnt = 0; cnt < n_lanes; cnt++) {
1054 u32 max_vs_flag = tegra_dp_is_max_vs(pe[cnt], vs[cnt]);
1055 u32 max_pe_flag = tegra_dp_is_max_pe(pe[cnt], vs[cnt]);
1056
1057 val = (vs[cnt] << NV_DPCD_TRAINING_LANEX_SET_DC_SHIFT) |
1058 (max_vs_flag ?
1059 NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_T :
1060 NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_F) |
1061 (pe[cnt] << NV_DPCD_TRAINING_LANEX_SET_PE_SHIFT) |
1062 (max_pe_flag ?
1063 NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_T :
1064 NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_F);
1065 tegra_dc_dp_dpcd_write(dp, (DP_TRAINING_LANE0_SET + cnt), val);
1066 }
1067
1068 if (pc_supported) {
1069 for (cnt = 0; cnt < n_lanes / 2; cnt++) {
1070 u32 max_pc_flag0 = tegra_dp_is_max_pc(pc[cnt]);
1071 u32 max_pc_flag1 = tegra_dp_is_max_pc(pc[cnt + 1]);
1072 val = (pc[cnt] << NV_DPCD_LANEX_SET2_PC2_SHIFT) |
1073 (max_pc_flag0 ?
1074 NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_T :
1075 NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_F) |
1076 (pc[cnt + 1] <<
1077 NV_DPCD_LANEXPLUS1_SET2_PC2_SHIFT) |
1078 (max_pc_flag1 ?
1079 NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_T :
1080 NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_F);
1081 tegra_dc_dp_dpcd_write(dp,
1082 NV_DPCD_TRAINING_LANE0_1_SET2 +
1083 cnt, val);
1084 }
1085 }
1086
1087 return 0;
1088 }
1089
1090 static int _tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4],
1091 u32 vs[4], u32 pc[4], u8 pc_supported,
1092 u32 n_lanes,
1093 const struct tegra_dp_link_config *cfg)
1094 {
1095 u32 retry_cnt;
1096
1097 for (retry_cnt = 0; retry_cnt < 4; retry_cnt++) {
1098 int ret;
1099
1100 if (retry_cnt) {
1101 ret = tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported,
1102 cfg);
1103 if (ret)
1104 return ret;
1105 tegra_dp_lt_config(dp, pe, vs, pc, cfg);
1106 }
1107
1108 tegra_dp_wait_aux_training(dp, false, cfg);
1109
1110 if (!tegra_dp_clock_recovery_status(dp, cfg)) {
1111 debug("dp: CR failed in channel EQ sequence!\n");
1112 break;
1113 }
1114
1115 if (!tegra_dp_channel_eq_status(dp, cfg))
1116 return 0;
1117 }
1118
1119 return -EIO;
1120 }
1121
1122 static int tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
1123 u32 pc[4],
1124 const struct tegra_dp_link_config *cfg)
1125 {
1126 u32 n_lanes = cfg->lane_count;
1127 u8 pc_supported = cfg->tps3_supported;
1128 int ret;
1129 u32 tp_src = training_pattern_2;
1130
1131 if (pc_supported)
1132 tp_src = training_pattern_3;
1133
1134 tegra_dp_tpg(dp, tp_src, n_lanes, cfg);
1135
1136 ret = _tegra_dp_channel_eq(dp, pe, vs, pc, pc_supported, n_lanes, cfg);
1137
1138 tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);
1139
1140 return ret;
1141 }
1142
1143 static int _tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
1144 u32 vs[4], u32 pc[4], u8 pc_supported,
1145 u32 n_lanes,
1146 const struct tegra_dp_link_config *cfg)
1147 {
1148 u32 vs_temp[4];
1149 u32 retry_cnt = 0;
1150
1151 do {
1152 tegra_dp_lt_config(dp, pe, vs, pc, cfg);
1153 tegra_dp_wait_aux_training(dp, true, cfg);
1154
1155 if (tegra_dp_clock_recovery_status(dp, cfg))
1156 return 0;
1157
1158 memcpy(vs_temp, vs, sizeof(vs_temp));
1159 tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported, cfg);
1160
1161 if (memcmp(vs_temp, vs, sizeof(vs_temp)))
1162 retry_cnt = 0;
1163 else
1164 ++retry_cnt;
1165 } while (retry_cnt < 5);
1166
1167 return -EIO;
1168 }
1169
1170 static int tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
1171 u32 vs[4], u32 pc[4],
1172 const struct tegra_dp_link_config *cfg)
1173 {
1174 u32 n_lanes = cfg->lane_count;
1175 u8 pc_supported = cfg->tps3_supported;
1176 int err;
1177
1178 tegra_dp_tpg(dp, training_pattern_1, n_lanes, cfg);
1179
1180 err = _tegra_dp_clk_recovery(dp, pe, vs, pc, pc_supported, n_lanes,
1181 cfg);
1182 if (err < 0)
1183 tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);
1184
1185 return err;
1186 }
1187
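/*
 * Full link training: clock recovery with TPS1, then channel
 * equalization with TPS2 (or TPS3 when the sink supports it). If
 * either phase fails, the link configuration is lowered and training
 * restarts from clock recovery.
 */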
1188 static int tegra_dc_dp_full_link_training(struct tegra_dp_priv *dp,
1189 const struct display_timing *timing,
1190 struct tegra_dp_link_config *cfg)
1191 {
1192 struct udevice *sor = dp->sor;
1193 int err;
1194 u32 pe[4], vs[4], pc[4];
1195
1196 tegra_sor_precharge_lanes(sor, cfg);
1197
1198 retry_cr:
1199 memset(pe, PREEMPHASIS_DISABLED, sizeof(pe));
1200 memset(vs, DRIVECURRENT_LEVEL0, sizeof(vs));
1201 memset(pc, POSTCURSOR2_LEVEL0, sizeof(pc));
1202
1203 err = tegra_dp_clk_recovery(dp, pe, vs, pc, cfg);
1204 if (err) {
1205 if (!tegra_dp_lower_link_config(dp, timing, cfg))
1206 goto retry_cr;
1207
1208 debug("dp: clk recovery failed\n");
1209 goto fail;
1210 }
1211
1212 err = tegra_dp_channel_eq(dp, pe, vs, pc, cfg);
1213 if (err) {
1214 if (!tegra_dp_lower_link_config(dp, timing, cfg))
1215 goto retry_cr;
1216
1217 debug("dp: channel equalization failed\n");
1218 goto fail;
1219 }
1220 #ifdef DEBUG
1221 tegra_dc_dp_dump_link_cfg(dp, cfg);
1222 #endif
1223 return 0;
1224
1225 fail:
1226 return err;
1227 }
1228
1229 /*
1230 * All link training functions are ported from kernel dc driver.
1231 * See more details at drivers/video/tegra/dc/dp.c
1232 */
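/*
 * Fast link training skips the AUX adjustment handshake: the source
 * drives TP1 and then TP2/TP3 with fixed drive settings for roughly
 * 500 us each and only checks the resulting lane status; on failure
 * the caller falls back to full link training.
 */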
1233 static int tegra_dc_dp_fast_link_training(struct tegra_dp_priv *dp,
1234 const struct tegra_dp_link_config *link_cfg,
1235 struct udevice *sor)
1236 {
1237 u8 link_bw;
1238 u8 lane_count;
1239 u16 data16;
1240 u32 data32;
1241 u32 size;
1242 u32 status;
1243 int j;
1244 u32 mask = 0xffff >> ((4 - link_cfg->lane_count) * 4);
1245
1246 tegra_dc_sor_set_lane_parm(sor, link_cfg);
1247 tegra_dc_dp_dpcd_write(dp, DP_MAIN_LINK_CHANNEL_CODING_SET,
1248 DP_SET_ANSI_8B10B);
1249
1250 /* Send TP1 */
1251 tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_1, link_cfg);
1252 tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
1253 DP_TRAINING_PATTERN_1);
1254
1255 for (j = 0; j < link_cfg->lane_count; ++j)
1256 tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
1257 udelay(520);
1258
1259 size = sizeof(data16);
1260 tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
1261 DP_LANE0_1_STATUS, (u8 *)&data16, &size, &status);
1262 status = mask & 0x1111;
1263 if ((data16 & status) != status) {
1264 debug("dp: Link training error for TP1 (%#x, status %#x)\n",
1265 data16, status);
1266 return -EFAULT;
1267 }
1268
1269 /* enable ASSR */
1270 tegra_dc_dp_set_assr(dp, sor, link_cfg->scramble_ena);
1271 tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_3, link_cfg);
1272
1273 tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
1274 link_cfg->link_bw == 20 ? 0x23 : 0x22);
1275 for (j = 0; j < link_cfg->lane_count; ++j)
1276 tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
1277 udelay(520);
1278
1279 size = sizeof(data32);
1280 tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD, DP_LANE0_1_STATUS,
1281 (u8 *)&data32, &size, &status);
1282 if ((data32 & mask) != (0x7777 & mask)) {
1283 debug("dp: Link training error for TP2/3 (0x%x)\n", data32);
1284 return -EFAULT;
1285 }
1286
1287 tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_disabled,
1288 link_cfg);
1289 tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, 0);
1290
1291 if (tegra_dc_dp_link_trained(dp, link_cfg)) {
1292 tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);
1293 debug("Fast link training failed, link bw %d, lane # %d\n",
1294 link_bw, lane_count);
1295 return -EFAULT;
1296 }
1297
1298 debug("Fast link training succeeded, link bw %d, lane %d\n",
1299 link_cfg->link_bw, link_cfg->lane_count);
1300
1301 return 0;
1302 }
1303
1304 static int tegra_dp_do_link_training(struct tegra_dp_priv *dp,
1305 struct tegra_dp_link_config *link_cfg,
1306 const struct display_timing *timing,
1307 struct udevice *sor)
1308 {
1309 u8 link_bw;
1310 u8 lane_count;
1311 int ret;
1312
1313 if (DO_FAST_LINK_TRAINING) {
1314 ret = tegra_dc_dp_fast_link_training(dp, link_cfg, sor);
1315 if (ret) {
1316 debug("dp: fast link training failed\n");
1317 } else {
1318 /*
1319 * set to a known-good drive setting if fast link
1320 * succeeded. Ignore any error.
1321 */
1322 ret = tegra_dc_sor_set_voltage_swing(dp->sor, link_cfg);
1323 if (ret)
1324 debug("Failed to set voltage swing\n");
1325 }
1326 } else {
1327 ret = -ENOSYS;
1328 }
1329 if (ret) {
1330 /* Try full link training then */
1331 ret = tegra_dc_dp_full_link_training(dp, timing, link_cfg);
1332 if (ret) {
1333 debug("dp: full link training failed\n");
1334 return ret;
1335 }
1336 }
1337
1338 /* Everything is good; double check the link config */
1339 tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);
1340
1341 if ((link_cfg->link_bw == link_bw) &&
1342 (link_cfg->lane_count == lane_count))
1343 return 0;
1344 else
1345 return -EFAULT;
1346 }
1347
1348 static int tegra_dc_dp_explore_link_cfg(struct tegra_dp_priv *dp,
1349 struct tegra_dp_link_config *link_cfg,
1350 struct udevice *sor,
1351 const struct display_timing *timing)
1352 {
1353 struct tegra_dp_link_config temp_cfg;
1354
1355 if (!timing->pixelclock.typ || !timing->hactive.typ ||
1356 !timing->vactive.typ) {
1357 debug("dp: invalid mode configuration\n");
1358 return -EINVAL;
1359 }
1360 if (!link_cfg->max_link_bw || !link_cfg->max_lane_count) {
1361 debug("dp: invalid link configuration\n");
1362 return -EINVAL;
1363 }
1364
1365 link_cfg->is_valid = 0;
1366
1367 memcpy(&temp_cfg, link_cfg, sizeof(temp_cfg));
1368
1369 temp_cfg.link_bw = temp_cfg.max_link_bw;
1370 temp_cfg.lane_count = temp_cfg.max_lane_count;
1371
1372 /*
1373 * set to max link config
1374 */
1375 if ((!tegra_dc_dp_calc_config(dp, timing, &temp_cfg)) &&
1376 (!tegra_dp_link_config(dp, &temp_cfg)) &&
1377 (!tegra_dp_do_link_training(dp, &temp_cfg, timing, sor)))
1378 /* the max link cfg is doable */
1379 memcpy(link_cfg, &temp_cfg, sizeof(temp_cfg));
1380
1381 return link_cfg->is_valid ? 0 : -EFAULT;
1382 }
1383
1384 static int tegra_dp_hpd_plug(struct tegra_dp_priv *dp)
1385 {
1386 const int vdd_to_hpd_delay_ms = 200;
1387 u32 val;
1388 ulong start;
1389
1390 start = get_timer(0);
1391 do {
1392 val = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
1393 if (val & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)
1394 return 0;
1395 udelay(100);
1396 } while (get_timer(start) < vdd_to_hpd_delay_ms);
1397
1398 return -EIO;
1399 }
1400
1401 static int tegra_dc_dp_sink_out_of_sync(struct tegra_dp_priv *dp, u32 delay_ms)
1402 {
1403 u8 dpcd_data;
1404 int out_of_sync;
1405 int ret;
1406
1407 debug("%s: delay=%d\n", __func__, delay_ms);
1408 mdelay(delay_ms);
1409 ret = tegra_dc_dp_dpcd_read(dp, DP_SINK_STATUS, &dpcd_data);
1410 if (ret)
1411 return ret;
1412
1413 out_of_sync = !(dpcd_data & DP_SINK_STATUS_PORT0_IN_SYNC);
1414 if (out_of_sync)
1415 debug("SINK receive port 0 out of sync, data=%x\n", dpcd_data);
1416 else
1417 debug("SINK is in synchronization\n");
1418
1419 return out_of_sync;
1420 }
1421
1422 static int tegra_dc_dp_check_sink(struct tegra_dp_priv *dp,
1423 struct tegra_dp_link_config *link_cfg,
1424 const struct display_timing *timing)
1425 {
1426 const int max_retry = 5;
1427 int delay_frame;
1428 int retries;
1429
1430 /*
1431 * DP TCON may skip some main stream frames, thus we need to wait
1432 * some delay before reading the DPCD SINK STATUS register, starting
1433 * from 5
1434 */
1435 delay_frame = 5;
1436
1437 retries = max_retry;
1438 do {
1439 int ret;
1440
1441 if (!tegra_dc_dp_sink_out_of_sync(dp, link_cfg->frame_in_ms *
1442 delay_frame))
1443 return 0;
1444
1445 debug("%s: retries left %d\n", __func__, retries);
1446 if (!retries--) {
1447 printf("DP: Out of sync after %d retries\n", max_retry);
1448 return -EIO;
1449 }
1450 ret = tegra_dc_sor_detach(dp->dc_dev, dp->sor);
1451 if (ret)
1452 return ret;
1453 if (tegra_dc_dp_explore_link_cfg(dp, link_cfg, dp->sor,
1454 timing)) {
1455 debug("dp: %s: failed to configure link\n", __func__);
1456 continue;
1457 }
1458
1459 tegra_dc_sor_set_power_state(dp->sor, 1);
1460 tegra_dc_sor_attach(dp->dc_dev, dp->sor, link_cfg, timing);
1461
1462 /* Increase delay_frame for next try in case the sink is
1463 skipping more frames */
1464 delay_frame += 10;
1465 } while (1);
1466 }
1467
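/*
 * Bring up the DisplayPort link: enable the DPAUX pads, wait for HPD,
 * read the sink capabilities, enable the SOR and panel power, power up
 * the sink via DPCD SET_POWER, train the link at the best workable
 * configuration, attach the display controller and finally turn on the
 * backlight.
 */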
1468 int tegra_dp_enable(struct udevice *dev, int panel_bpp,
1469 const struct display_timing *timing)
1470 {
1471 struct tegra_dp_priv *priv = dev_get_priv(dev);
1472 struct tegra_dp_link_config slink_cfg, *link_cfg = &slink_cfg;
1473 struct udevice *sor;
1474 int data;
1475 int retry;
1476 int ret;
1477
1478 memset(link_cfg, '\0', sizeof(*link_cfg));
1479 link_cfg->is_valid = 0;
1480 link_cfg->scramble_ena = 1;
1481
1482 tegra_dc_dpaux_enable(priv);
1483
1484 if (tegra_dp_hpd_plug(priv) < 0) {
1485 debug("dp: hpd plug failed\n");
1486 return -EIO;
1487 }
1488
1489 link_cfg->bits_per_pixel = panel_bpp;
1490 if (tegra_dc_dp_init_max_link_cfg(timing, priv, link_cfg)) {
1491 debug("dp: failed to init link configuration\n");
1492 return -ENOLINK;
1493 }
1494
1495 ret = uclass_first_device(UCLASS_VIDEO_BRIDGE, &sor);
1496 if (ret || !sor) {
1497 debug("dp: failed to find SOR device: ret=%d\n", ret);
1498 return ret;
1499 }
1500 priv->sor = sor;
1501 ret = tegra_dc_sor_enable_dp(sor, link_cfg);
1502 if (ret)
1503 return ret;
1504
1505 tegra_dc_sor_set_panel_power(sor, 1);
1506
1507 /* Write power on to DPCD */
1508 data = DP_SET_POWER_D0;
1509 retry = 0;
1510 do {
1511 ret = tegra_dc_dp_dpcd_write(priv, DP_SET_POWER, data);
1512 } while ((retry++ < DP_POWER_ON_MAX_TRIES) && ret);
1513
1514 if (ret || retry >= DP_POWER_ON_MAX_TRIES) {
1515 debug("dp: failed to power on panel (0x%x)\n", ret);
1516 return -ENETUNREACH;
1518 }
1519
1520 /* Confirm DP plugging status */
1521 if (!(tegra_dpaux_readl(priv, DPAUX_DP_AUXSTAT) &
1522 DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
1523 debug("dp: could not detect HPD\n");
1524 return -ENXIO;
1525 }
1526
1527 /* Check DP version */
1528 if (tegra_dc_dp_dpcd_read(priv, DP_DPCD_REV, &priv->revision)) {
1529 debug("dp: failed to read the revision number from sink\n");
1530 return -EIO;
1531 }
1532
1533 if (tegra_dc_dp_explore_link_cfg(priv, link_cfg, sor, timing)) {
1534 debug("dp: error configuring link\n");
1535 return -ENOMEDIUM;
1536 }
1537
1538 tegra_dc_sor_set_power_state(sor, 1);
1539 ret = tegra_dc_sor_attach(priv->dc_dev, sor, link_cfg, timing);
1540 if (ret && ret != -EEXIST)
1541 return ret;
1542
1543 /*
1544 * This takes a long time, but can apparently resolve a failure to
1545 * bring up the display correctly.
1546 */
1547 if (0) {
1548 ret = tegra_dc_dp_check_sink(priv, link_cfg, timing);
1549 if (ret)
1550 return ret;
1551 }
1552
1553 /* Power down the unused lanes to save power - a few hundred mW */
1554 tegra_dc_sor_power_down_unused_lanes(sor, link_cfg);
1555
1556 ret = video_bridge_set_backlight(sor, 80);
1557 if (ret) {
1558 debug("dp: failed to set backlight\n");
1559 return ret;
1560 }
1561
1562 priv->enabled = true;
1564 return 0;
1565 }
1566
1567 static int tegra_dp_ofdata_to_platdata(struct udevice *dev)
1568 {
1569 struct tegra_dp_plat *plat = dev_get_platdata(dev);
1570
1571 plat->base = dev_read_addr(dev);
1572
1573 return 0;
1574 }
1575
1576 static int tegra_dp_read_edid(struct udevice *dev, u8 *buf, int buf_size)
1577 {
1578 struct tegra_dp_priv *priv = dev_get_priv(dev);
1579 const int tegra_edid_i2c_address = 0x50;
1580 u32 aux_stat = 0;
1581
1582 tegra_dc_dpaux_enable(priv);
1583
1584 return tegra_dc_i2c_aux_read(priv, tegra_edid_i2c_address, 0, buf,
1585 buf_size, &aux_stat);
1586 }
1587
1588 static const struct dm_display_ops dp_tegra_ops = {
1589 .read_edid = tegra_dp_read_edid,
1590 .enable = tegra_dp_enable,
1591 };
1592
1593 static int dp_tegra_probe(struct udevice *dev)
1594 {
1595 struct tegra_dp_plat *plat = dev_get_platdata(dev);
1596 struct tegra_dp_priv *priv = dev_get_priv(dev);
1597 struct display_plat *disp_uc_plat = dev_get_uclass_platdata(dev);
1598
1599 priv->regs = (struct dpaux_ctlr *)plat->base;
1600 priv->enabled = false;
1601
1602 /* Remember the display controller that is sending us video */
1603 priv->dc_dev = disp_uc_plat->src_dev;
1604
1605 return 0;
1606 }
1607
1608 static const struct udevice_id tegra_dp_ids[] = {
1609 { .compatible = "nvidia,tegra124-dpaux" },
1610 { }
1611 };
1612
1613 U_BOOT_DRIVER(dp_tegra) = {
1614 .name = "dpaux_tegra",
1615 .id = UCLASS_DISPLAY,
1616 .of_match = tegra_dp_ids,
1617 .ofdata_to_platdata = tegra_dp_ofdata_to_platdata,
1618 .probe = dp_tegra_probe,
1619 .ops = &dp_tegra_ops,
1620 .priv_auto_alloc_size = sizeof(struct tegra_dp_priv),
1621 .platdata_auto_alloc_size = sizeof(struct tegra_dp_plat),
1622 };
1623