/*
 * Copyright 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "dp.h"
#include "conn.h"
#include "head.h"
#include "ior.h"

#include <drm/display/drm_dp.h>

#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>

#include <nvif/event.h>

/* IED scripts are no longer used by UEFI/RM from Ampere, but have been updated for
 * the x86 option ROM.  However, the relevant VBIOS table versions weren't modified,
 * so we're unable to detect this in a nice way.
 */
#define AMPERE_IED_HACK(disp) ((disp)->engine.subdev.device->card_type >= GA100)

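/* State shared by the link-training helpers below.  'repeater' selects the
 * device currently being trained (0 for the sink itself, otherwise a 1-based
 * LTTPR index), while 'stat'/'conf' hold the most recent DPCD lane status and
 * the drive settings to be written back.
 */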
struct lt_state {
	struct nvkm_outp *outp;

	int repeaters;
	int repeater;

	u8  stat[6];
	u8  conf[4];
	bool pc2;
	u8  pc2stat;
	u8  pc2conf[2];
};

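/* Wait out the AUX read interval, then read lane status and adjust requests
 * (plus post-cursor2 status, if requested) from the device being trained.
 */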
static int
nvkm_dp_train_sense(struct lt_state *lt, bool pc, u32 delay)
{
	struct nvkm_outp *outp = lt->outp;
	u32 addr;
	int ret;

	usleep_range(delay, delay * 2);

	if (lt->repeater)
		addr = DPCD_LTTPR_LANE0_1_STATUS(lt->repeater);
	else
		addr = DPCD_LS02;

	ret = nvkm_rdaux(outp->dp.aux, addr, &lt->stat[0], 3);
	if (ret)
		return ret;

	if (lt->repeater)
		addr = DPCD_LTTPR_LANE0_1_ADJUST(lt->repeater);
	else
		addr = DPCD_LS06;

	ret = nvkm_rdaux(outp->dp.aux, addr, &lt->stat[4], 2);
	if (ret)
		return ret;

	if (pc) {
		ret = nvkm_rdaux(outp->dp.aux, DPCD_LS0C, &lt->pc2stat, 1);
		if (ret)
			lt->pc2stat = 0x00;

		OUTP_TRACE(outp, "status %6ph pc2 %02x", lt->stat, lt->pc2stat);
	} else {
		OUTP_TRACE(outp, "status %6ph", lt->stat);
	}

	return 0;
}

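/* Translate the adjust requests read by nvkm_dp_train_sense() into new
 * voltage swing/pre-emphasis (and post-cursor2) values, program the OR's
 * lane drive levels via the VBIOS DP config table (only while training the
 * first device in the chain), and write the new settings to the DPCD.
 */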
static int
nvkm_dp_train_drive(struct lt_state *lt, bool pc)
{
	struct nvkm_outp *outp = lt->outp;
	struct nvkm_ior *ior = outp->ior;
	struct nvkm_bios *bios = ior->disp->engine.subdev.device->bios;
	struct nvbios_dpout info;
	struct nvbios_dpcfg ocfg;
	u8  ver, hdr, cnt, len;
	u32 addr;
	u32 data;
	int ret, i;

	for (i = 0; i < ior->dp.nr; i++) {
		u8 lane = (lt->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
		u8 lpc2 = (lt->pc2stat >> (i * 2)) & 0x3;
		u8 lpre = (lane & 0x0c) >> 2;
		u8 lvsw = (lane & 0x03) >> 0;
		u8 hivs = 3 - lpre;
		u8 hipe = 3;
		u8 hipc = 3;

		if (lpc2 >= hipc)
			lpc2 = hipc | DPCD_LC0F_LANE0_MAX_POST_CURSOR2_REACHED;
		if (lpre >= hipe) {
			lpre = hipe | DPCD_LC03_MAX_SWING_REACHED; /* yes. */
			lvsw = hivs = 3 - (lpre & 3);
		} else
		if (lvsw >= hivs) {
			lvsw = hivs | DPCD_LC03_MAX_SWING_REACHED;
		}

		lt->conf[i] = (lpre << 3) | lvsw;
		lt->pc2conf[i >> 1] |= lpc2 << ((i & 1) * 4);

		OUTP_TRACE(outp, "config lane %d %02x %02x", i, lt->conf[i], lpc2);

		if (lt->repeater != lt->repeaters)
			continue;

		data = nvbios_dpout_match(bios, outp->info.hasht, outp->info.hashm,
					  &ver, &hdr, &cnt, &len, &info);
		if (!data)
			continue;

		data = nvbios_dpcfg_match(bios, data, lpc2 & 3, lvsw & 3, lpre & 3,
					  &ver, &hdr, &cnt, &len, &ocfg);
		if (!data)
			continue;

		ior->func->dp->drive(ior, i, ocfg.pc, ocfg.dc, ocfg.pe, ocfg.tx_pu);
	}

	if (lt->repeater)
		addr = DPCD_LTTPR_LANE0_SET(lt->repeater);
	else
		addr = DPCD_LC03(0);

	ret = nvkm_wraux(outp->dp.aux, addr, lt->conf, 4);
	if (ret)
		return ret;

	if (pc) {
		ret = nvkm_wraux(outp->dp.aux, DPCD_LC0F, lt->pc2conf, 2);
		if (ret)
			return ret;
	}

	return 0;
}

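/* Set the requested training pattern (0 = none) on both the OR and the DPCD
 * of the device being trained.  Scrambling is disabled while a pattern is
 * active, and TPS4 is written using its DPCD encoding of 7.
 */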
static void
nvkm_dp_train_pattern(struct lt_state *lt, u8 pattern)
{
	struct nvkm_outp *outp = lt->outp;
	u32 addr;
	u8 sink_tp;

	OUTP_TRACE(outp, "training pattern %d", pattern);
	outp->ior->func->dp->pattern(outp->ior, pattern);

	if (lt->repeater)
		addr = DPCD_LTTPR_PATTERN_SET(lt->repeater);
	else
		addr = DPCD_LC02;

	nvkm_rdaux(outp->dp.aux, addr, &sink_tp, 1);
	sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
	sink_tp |= (pattern != 4) ? pattern : 7;

	if (pattern != 0)
		sink_tp |=  DPCD_LC02_SCRAMBLING_DISABLE;
	else
		sink_tp &= ~DPCD_LC02_SCRAMBLING_DISABLE;
	nvkm_wraux(outp->dp.aux, addr, &sink_tp, 1);
}

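/* Channel equalisation phase: select the highest training pattern supported
 * (TPS4 for LTTPRs, otherwise TPS4/TPS3/TPS2 based on DPCD caps), then adjust
 * drive settings until all active lanes report EQ done and symbol lock, or
 * the retry limit is exceeded.
 */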
static int
nvkm_dp_train_eq(struct lt_state *lt)
{
	struct nvkm_i2c_aux *aux = lt->outp->dp.aux;
	bool eq_done = false, cr_done = true;
	int tries = 0, usec = 0, i;
	u8 data;

	if (lt->repeater) {
		if (!nvkm_rdaux(aux, DPCD_LTTPR_AUX_RD_INTERVAL(lt->repeater), &data, sizeof(data)))
			usec = (data & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;

		nvkm_dp_train_pattern(lt, 4);
	} else {
		if (lt->outp->dp.dpcd[DPCD_RC00_DPCD_REV] >= 0x14 &&
		    lt->outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED)
			nvkm_dp_train_pattern(lt, 4);
		else
		if (lt->outp->dp.dpcd[DPCD_RC00_DPCD_REV] >= 0x12 &&
		    lt->outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED)
			nvkm_dp_train_pattern(lt, 3);
		else
			nvkm_dp_train_pattern(lt, 2);

		usec = (lt->outp->dp.dpcd[DPCD_RC0E] & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;
	}

	do {
		if ((tries &&
		    nvkm_dp_train_drive(lt, lt->pc2)) ||
		    nvkm_dp_train_sense(lt, lt->pc2, usec ? usec : 400))
			break;

		eq_done = !!(lt->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
		for (i = 0; i < lt->outp->ior->dp.nr && eq_done; i++) {
			u8 lane = (lt->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
			if (!(lane & DPCD_LS02_LANE0_CR_DONE))
				cr_done = false;
			if (!(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
			    !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
				eq_done = false;
		}
	} while (!eq_done && cr_done && ++tries <= 5);

	return eq_done ? 0 : -1;
}

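/* Clock recovery phase: transmit TPS1 and adjust drive settings until every
 * active lane reports CR done, aborting early if a lane still fails at
 * maximum voltage swing.  The retry counter resets whenever the requested
 * voltage swing changes.
 */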
static int
nvkm_dp_train_cr(struct lt_state *lt)
{
	bool cr_done = false, abort = false;
	int voltage = lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
	int tries = 0, usec = 0, i;

	nvkm_dp_train_pattern(lt, 1);

	if (lt->outp->dp.dpcd[DPCD_RC00_DPCD_REV] < 0x14 && !lt->repeater)
		usec = (lt->outp->dp.dpcd[DPCD_RC0E] & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;

	do {
		if (nvkm_dp_train_drive(lt, false) ||
		    nvkm_dp_train_sense(lt, false, usec ? usec : 100))
			break;

		cr_done = true;
		for (i = 0; i < lt->outp->ior->dp.nr; i++) {
			u8 lane = (lt->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
			if (!(lane & DPCD_LS02_LANE0_CR_DONE)) {
				cr_done = false;
				if (lt->conf[i] & DPCD_LC03_MAX_SWING_REACHED)
					abort = true;
				break;
			}
		}

		if ((lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET) != voltage) {
			voltage = lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
			tries = 0;
		}
	} while (!cr_done && !abort && ++tries < 5);

	return cr_done ? 0 : -1;
}

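/* Train the currently selected link configuration: pick LTTPR transparent or
 * non-transparent mode, program link rate and lane count on the sink (and
 * LINK_RATE_SET, for sinks advertising a SUPPORTED_LINK_RATES table), then
 * run clock recovery and equalisation for each LTTPR and finally the sink.
 */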
static int
nvkm_dp_train_link(struct nvkm_outp *outp, int rate)
{
	struct nvkm_ior *ior = outp->ior;
	struct lt_state lt = {
		.outp = outp,
		.pc2 = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED,
	};
	u8 sink[2], data;
	int ret;

	OUTP_DBG(outp, "training %dx%02x", ior->dp.nr, ior->dp.bw);

	/* Select LTTPR non-transparent mode if we have a valid configuration,
	 * use transparent mode otherwise.
	 */
	if (outp->dp.lttpr[0] >= 0x14) {
		data = DPCD_LTTPR_MODE_TRANSPARENT;
		nvkm_wraux(outp->dp.aux, DPCD_LTTPR_MODE, &data, sizeof(data));

		if (outp->dp.lttprs) {
			data = DPCD_LTTPR_MODE_NON_TRANSPARENT;
			nvkm_wraux(outp->dp.aux, DPCD_LTTPR_MODE, &data, sizeof(data));
			lt.repeaters = outp->dp.lttprs;
		}
	}

	/* Set desired link configuration on the sink. */
	sink[0] = (outp->dp.rate[rate].dpcd < 0) ? ior->dp.bw : 0;
	sink[1] = ior->dp.nr;
	if (ior->dp.ef)
		sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;

	ret = nvkm_wraux(outp->dp.aux, DPCD_LC00_LINK_BW_SET, sink, 2);
	if (ret)
		return ret;

	if (outp->dp.rate[rate].dpcd >= 0) {
		ret = nvkm_rdaux(outp->dp.aux, DPCD_LC15_LINK_RATE_SET, &sink[0], sizeof(sink[0]));
		if (ret)
			return ret;

		sink[0] &= ~DPCD_LC15_LINK_RATE_SET_MASK;
		sink[0] |= outp->dp.rate[rate].dpcd;

		ret = nvkm_wraux(outp->dp.aux, DPCD_LC15_LINK_RATE_SET, &sink[0], sizeof(sink[0]));
		if (ret)
			return ret;
	}

	/* Attempt to train the link in this configuration. */
	for (lt.repeater = lt.repeaters; lt.repeater >= 0; lt.repeater--) {
		if (lt.repeater)
			OUTP_DBG(outp, "training LTTPR%d", lt.repeater);
		else
			OUTP_DBG(outp, "training sink");

		memset(lt.stat, 0x00, sizeof(lt.stat));
		ret = nvkm_dp_train_cr(&lt);
		if (ret == 0)
			ret = nvkm_dp_train_eq(&lt);
		nvkm_dp_train_pattern(&lt, 0);
	}

	return ret;
}

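/* Program the source side of the link for the requested configuration,
 * running the relevant VBIOS scripts and powering the required lanes, then
 * hand off to nvkm_dp_train_link() to train the sink.
 */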
static int
nvkm_dp_train_links(struct nvkm_outp *outp, int rate)
{
	struct nvkm_ior *ior = outp->ior;
	struct nvkm_disp *disp = outp->disp;
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	u32 lnkcmp;
	int ret;

	OUTP_DBG(outp, "programming link for %dx%02x", ior->dp.nr, ior->dp.bw);

	/* Intersect misc. capabilities of the OR and sink. */
	if (disp->engine.subdev.device->chipset < 0x110)
		outp->dp.dpcd[DPCD_RC03] &= ~DPCD_RC03_TPS4_SUPPORTED;
	if (disp->engine.subdev.device->chipset < 0xd0)
		outp->dp.dpcd[DPCD_RC02] &= ~DPCD_RC02_TPS3_SUPPORTED;

	if (AMPERE_IED_HACK(disp) && (lnkcmp = outp->dp.info.script[0])) {
		/* Execute BeforeLinkTraining script from DP Info table. */
		while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
			lnkcmp += 3;
		lnkcmp = nvbios_rd16(bios, lnkcmp + 1);

		nvbios_init(&outp->disp->engine.subdev, lnkcmp,
			init.outp = &outp->info;
			init.or   = ior->id;
			init.link = ior->asy.link;
		);
	}

	/* Set desired link configuration on the source. */
	if ((lnkcmp = outp->dp.info.lnkcmp)) {
		if (outp->dp.version < 0x30) {
			while ((ior->dp.bw * 2700) < nvbios_rd16(bios, lnkcmp))
				lnkcmp += 4;
			lnkcmp = nvbios_rd16(bios, lnkcmp + 2);
		} else {
			while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
				lnkcmp += 3;
			lnkcmp = nvbios_rd16(bios, lnkcmp + 1);
		}

		nvbios_init(subdev, lnkcmp,
			init.outp = &outp->info;
			init.or   = ior->id;
			init.link = ior->asy.link;
		);
	}

	ret = ior->func->dp->links(ior, outp->dp.aux);
	if (ret) {
		if (ret < 0) {
			OUTP_ERR(outp, "train failed with %d", ret);
			return ret;
		}
		return 0;
	}

	ior->func->dp->power(ior, ior->dp.nr);

	/* Attempt to train the link in this configuration. */
	return nvkm_dp_train_link(outp, rate);
}

static void
nvkm_dp_train_fini(struct nvkm_outp *outp)
{
	/* Execute AfterLinkTraining script from DP Info table. */
	nvbios_init(&outp->disp->engine.subdev, outp->dp.info.script[1],
		init.outp = &outp->info;
		init.or   = outp->ior->id;
		init.link = outp->ior->asy.link;
	);
}

static void
nvkm_dp_train_init(struct nvkm_outp *outp)
{
	/* Execute EnableSpread/DisableSpread script from DP Info table. */
	if (outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_MAX_DOWNSPREAD) {
		nvbios_init(&outp->disp->engine.subdev, outp->dp.info.script[2],
			init.outp = &outp->info;
			init.or   = outp->ior->id;
			init.link = outp->ior->asy.link;
		);
	} else {
		nvbios_init(&outp->disp->engine.subdev, outp->dp.info.script[3],
			init.outp = &outp->info;
			init.or   = outp->ior->id;
			init.link = outp->ior->asy.link;
		);
	}

	if (!AMPERE_IED_HACK(outp->disp)) {
		/* Execute BeforeLinkTraining script from DP Info table. */
		nvbios_init(&outp->disp->engine.subdev, outp->dp.info.script[0],
			init.outp = &outp->info;
			init.or   = outp->ior->id;
			init.link = outp->ior->asy.link;
		);
	}
}

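/* Top-level link training.  Retraining an already-trained link skips source
 * configuration and only reprograms the sink.  Otherwise the sink is woken
 * from any low-power state, the configuration requested at ACQUIRE time (if
 * any) is tried first, and then every remaining lane-count/link-rate
 * combination capable of carrying dataKBps until one trains successfully.
 */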
static int
nvkm_dp_train(struct nvkm_outp *outp, u32 dataKBps)
{
	struct nvkm_ior *ior = outp->ior;
	int ret = -EINVAL, nr, rate;
	u8  pwr;

	/* Retraining link?  Skip source configuration, it can mess up the active modeset. */
	if (atomic_read(&outp->dp.lt.done)) {
		for (rate = 0; rate < outp->dp.rates; rate++) {
			if (outp->dp.rate[rate].rate == ior->dp.bw * 27000)
				return nvkm_dp_train_link(outp, rate);
		}
		WARN_ON(1);
		return -EINVAL;
	}

	/* Ensure sink is not in a low-power state. */
	if (!nvkm_rdaux(outp->dp.aux, DPCD_SC00, &pwr, 1)) {
		if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
			pwr &= ~DPCD_SC00_SET_POWER;
			pwr |=  DPCD_SC00_SET_POWER_D0;
			nvkm_wraux(outp->dp.aux, DPCD_SC00, &pwr, 1);
		}
	}

	ior->dp.mst = outp->dp.lt.mst;
	ior->dp.ef = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP;
	ior->dp.nr = 0;

	/* Link training. */
	OUTP_DBG(outp, "training");
	nvkm_dp_train_init(outp);

	/* Validate and train at configuration requested (if any) on ACQUIRE. */
	if (outp->dp.lt.nr) {
		for (nr = outp->dp.links; ret < 0 && nr; nr >>= 1) {
			for (rate = 0; nr == outp->dp.lt.nr && rate < outp->dp.rates; rate++) {
				if (outp->dp.rate[rate].rate / 27000 == outp->dp.lt.bw) {
					ior->dp.bw = outp->dp.rate[rate].rate / 27000;
					ior->dp.nr = nr;
					ret = nvkm_dp_train_links(outp, rate);
				}
			}
		}
	}

	/* Otherwise, loop through all valid link configurations that support the data rate. */
	for (nr = outp->dp.links; ret < 0 && nr; nr >>= 1) {
		for (rate = 0; ret < 0 && rate < outp->dp.rates; rate++) {
			if (outp->dp.rate[rate].rate * nr >= dataKBps || WARN_ON(!ior->dp.nr)) {
				/* Program selected link configuration. */
				ior->dp.bw = outp->dp.rate[rate].rate / 27000;
				ior->dp.nr = nr;
				ret = nvkm_dp_train_links(outp, rate);
			}
		}
	}

	/* Finish up. */
	nvkm_dp_train_fini(outp);
	if (ret < 0)
		OUTP_ERR(outp, "training failed");
	else
		OUTP_DBG(outp, "training done");
	atomic_set(&outp->dp.lt.done, 1);
	return ret;
}

void
nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
{
	/* Execute DisableLT script from DP Info Table. */
	nvbios_init(&ior->disp->engine.subdev, outp->dp.info.script[4],
		init.outp = &outp->info;
		init.or   = ior->id;
		init.link = ior->arm.link;
	);
}

static void
nvkm_dp_release(struct nvkm_outp *outp)
{
	/* Prevent link from being retrained if sink sends an IRQ. */
	atomic_set(&outp->dp.lt.done, 0);
	outp->ior->dp.nr = 0;
}

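/* Check that the active link configuration still satisfies the bandwidth and
 * MST requirements of the attached head(s), and that the sink still reports
 * all lanes trained; retrain the link if either check fails.
 */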
static int
nvkm_dp_acquire(struct nvkm_outp *outp)
{
	struct nvkm_ior *ior = outp->ior;
	struct nvkm_head *head;
	bool retrain = true;
	u32 datakbps = 0;
	u32 dataKBps;
	u32 linkKBps;
	u8  stat[3];
	int ret, i;

	mutex_lock(&outp->dp.mutex);

	/* Check that link configuration meets current requirements. */
	list_for_each_entry(head, &outp->disp->heads, head) {
		if (ior->asy.head & (1 << head->id)) {
			u32 khz = (head->asy.hz >> ior->asy.rgdiv) / 1000;
			datakbps += khz * head->asy.or.depth;
		}
	}

	linkKBps = ior->dp.bw * 27000 * ior->dp.nr;
	dataKBps = DIV_ROUND_UP(datakbps, 8);
	OUTP_DBG(outp, "data %d KB/s link %d KB/s mst %d->%d",
		 dataKBps, linkKBps, ior->dp.mst, outp->dp.lt.mst);
	if (linkKBps < dataKBps || ior->dp.mst != outp->dp.lt.mst) {
		OUTP_DBG(outp, "link requirements changed");
		goto done;
	}

	/* Check that link is still trained. */
	ret = nvkm_rdaux(outp->dp.aux, DPCD_LS02, stat, 3);
	if (ret) {
		OUTP_DBG(outp, "failed to read link status, assuming no sink");
		goto done;
	}

	if (stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE) {
		for (i = 0; i < ior->dp.nr; i++) {
			u8 lane = (stat[i >> 1] >> ((i & 1) * 4)) & 0x0f;
			if (!(lane & DPCD_LS02_LANE0_CR_DONE) ||
			    !(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
			    !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) {
				OUTP_DBG(outp, "lane %d not equalised", lane);
				goto done;
			}
		}
		retrain = false;
	} else {
		OUTP_DBG(outp, "no inter-lane alignment");
	}

done:
	if (retrain || !atomic_read(&outp->dp.lt.done))
		ret = nvkm_dp_train(outp, dataKBps);
	mutex_unlock(&outp->dp.mutex);
	return ret;
}

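/* For eDP sinks with DPCD 1.3+, read the SUPPORTED_LINK_RATES table and
 * record (sorted highest first) each entry the OR can also drive.  Returns
 * true if at least one rate was added.
 */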
static bool
nvkm_dp_enable_supported_link_rates(struct nvkm_outp *outp)
{
	u8 sink_rates[DPCD_RC10_SUPPORTED_LINK_RATES__SIZE];
	int i, j, k;

	if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
	    outp->dp.dpcd[DPCD_RC00_DPCD_REV] < 0x13 ||
	    nvkm_rdaux(outp->dp.aux, DPCD_RC10_SUPPORTED_LINK_RATES(0),
		       sink_rates, sizeof(sink_rates)))
		return false;

	for (i = 0; i < ARRAY_SIZE(sink_rates); i += 2) {
		const u32 rate = ((sink_rates[i + 1] << 8) | sink_rates[i]) * 200 / 10;

		if (!rate || WARN_ON(outp->dp.rates == ARRAY_SIZE(outp->dp.rate)))
			break;

		if (rate > outp->info.dpconf.link_bw * 27000) {
			OUTP_DBG(outp, "rate %d !outp", rate);
			continue;
		}

		for (j = 0; j < outp->dp.rates; j++) {
			if (rate > outp->dp.rate[j].rate) {
				for (k = outp->dp.rates; k > j; k--)
					outp->dp.rate[k] = outp->dp.rate[k - 1];
				break;
			}
		}

		outp->dp.rate[j].dpcd = i / 2;
		outp->dp.rate[j].rate = rate;
		outp->dp.rates++;
	}

	for (i = 0; i < outp->dp.rates; i++)
		OUTP_DBG(outp, "link_rate[%d] = %d", outp->dp.rate[i].dpcd, outp->dp.rate[i].rate);

	return outp->dp.rates != 0;
}

/* XXX: This is a big fat hack, and this is just drm_dp_read_dpcd_caps()
 * converted to work inside nvkm. This is a temporary holdover until we start
 * passing the drm_dp_aux device through NVKM
 */
static int
nvkm_dp_read_dpcd_caps(struct nvkm_outp *outp)
{
	struct nvkm_i2c_aux *aux = outp->dp.aux;
	u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
	int ret;

	ret = nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, DP_RECEIVER_CAP_SIZE);
	if (ret < 0)
		return ret;

	/*
	 * Prior to DP1.3 the bit represented by
	 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
	 * If it is set DP_DPCD_REV at 0000h could be at a value less than
	 * the true capability of the panel. The only way to check is to
	 * then compare 0000h and 2200h.
	 */
	if (!(outp->dp.dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
	      DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
		return 0;

	ret = nvkm_rdaux(aux, DP_DP13_DPCD_REV, dpcd_ext, sizeof(dpcd_ext));
	if (ret < 0)
		return ret;

	if (outp->dp.dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
		OUTP_DBG(outp, "Extended DPCD rev less than base DPCD rev (%d > %d)\n",
			 outp->dp.dpcd[DP_DPCD_REV], dpcd_ext[DP_DPCD_REV]);
		return 0;
	}

	if (!memcmp(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext)))
		return 0;

	memcpy(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext));

	return 0;
}

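/* Enable or disable AUX channel power.  Enabling also powers up eDP panels
 * via GPIO where needed, probes for LTTPRs, reads the DPCD receiver caps and
 * builds the list of link rates/lane counts available for training.
 * Disabling restores the panel GPIO and invalidates any training state.
 */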
void
nvkm_dp_enable(struct nvkm_outp *outp, bool auxpwr)
{
	struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;
	struct nvkm_i2c_aux *aux = outp->dp.aux;

	if (auxpwr && !outp->dp.aux_pwr) {
		/* eDP panels need powering on by us (if the VBIOS doesn't default it
		 * to on) before doing any AUX channel transactions.  LVDS panel power
		 * is handled by the SOR itself, and not required for LVDS DDC.
		 */
		if (outp->conn->info.type == DCB_CONNECTOR_eDP) {
			int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
			if (power == 0) {
				nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
				outp->dp.aux_pwr_pu = true;
			}

			/* We delay here unconditionally, even if already powered,
			 * because some laptop panels have a significant resume
			 * delay before the panel begins responding.
			 *
			 * This is likely a bit of a hack, but no better idea for
			 * handling this at the moment.
			 */
			msleep(300);
		}

		OUTP_DBG(outp, "aux power -> always");
		nvkm_i2c_aux_monitor(aux, true);
		outp->dp.aux_pwr = true;

		/* Detect any LTTPRs before reading DPCD receiver caps. */
		if (!nvkm_rdaux(aux, DPCD_LTTPR_REV, outp->dp.lttpr, sizeof(outp->dp.lttpr)) &&
		    outp->dp.lttpr[0] >= 0x14 && outp->dp.lttpr[2]) {
			switch (outp->dp.lttpr[2]) {
			case 0x80: outp->dp.lttprs = 1; break;
			case 0x40: outp->dp.lttprs = 2; break;
			case 0x20: outp->dp.lttprs = 3; break;
			case 0x10: outp->dp.lttprs = 4; break;
			case 0x08: outp->dp.lttprs = 5; break;
			case 0x04: outp->dp.lttprs = 6; break;
			case 0x02: outp->dp.lttprs = 7; break;
			case 0x01: outp->dp.lttprs = 8; break;
			default:
				/* Unknown LTTPR count, we'll switch to transparent mode. */
				WARN_ON(1);
				outp->dp.lttprs = 0;
				break;
			}
		} else {
			/* No LTTPR support, or zero LTTPR count - don't touch it at all. */
			memset(outp->dp.lttpr, 0x00, sizeof(outp->dp.lttpr));
		}

		if (!nvkm_dp_read_dpcd_caps(outp)) {
			const u8 rates[] = { 0x1e, 0x14, 0x0a, 0x06, 0 };
			const u8 *rate;
			int rate_max;

			outp->dp.rates = 0;
			outp->dp.links = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_MAX_LANE_COUNT;
			outp->dp.links = min(outp->dp.links, outp->info.dpconf.link_nr);
			if (outp->dp.lttprs && outp->dp.lttpr[4])
				outp->dp.links = min_t(int, outp->dp.links, outp->dp.lttpr[4]);

			rate_max = outp->dp.dpcd[DPCD_RC01_MAX_LINK_RATE];
			rate_max = min(rate_max, outp->info.dpconf.link_bw);
			if (outp->dp.lttprs && outp->dp.lttpr[1])
				rate_max = min_t(int, rate_max, outp->dp.lttpr[1]);

			if (!nvkm_dp_enable_supported_link_rates(outp)) {
				for (rate = rates; *rate; rate++) {
					if (*rate > rate_max)
						continue;

					if (WARN_ON(outp->dp.rates == ARRAY_SIZE(outp->dp.rate)))
						break;

					outp->dp.rate[outp->dp.rates].dpcd = -1;
					outp->dp.rate[outp->dp.rates].rate = *rate * 27000;
					outp->dp.rates++;
				}
			}
		}
	} else
	if (!auxpwr && outp->dp.aux_pwr) {
		OUTP_DBG(outp, "aux power -> demand");
		nvkm_i2c_aux_monitor(aux, false);
		outp->dp.aux_pwr = false;
		atomic_set(&outp->dp.lt.done, 0);

		/* Restore eDP panel GPIO to its prior state if we changed it, as
		 * it could potentially interfere with other outputs.
		 */
		if (outp->conn->info.type == DCB_CONNECTOR_eDP) {
			if (outp->dp.aux_pwr_pu) {
				nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0);
				outp->dp.aux_pwr_pu = false;
			}
		}
	}
}

static void
nvkm_dp_fini(struct nvkm_outp *outp)
{
	nvkm_dp_enable(outp, false);
}

static void
nvkm_dp_init(struct nvkm_outp *outp)
{
	nvkm_dp_enable(outp, outp->dp.enabled);
}

static void *
nvkm_dp_dtor(struct nvkm_outp *outp)
{
	return outp;
}

static const struct nvkm_outp_func
nvkm_dp_func = {
	.dtor = nvkm_dp_dtor,
	.init = nvkm_dp_init,
	.fini = nvkm_dp_fini,
	.acquire = nvkm_dp_acquire,
	.release = nvkm_dp_release,
	.disable = nvkm_dp_disable,
};

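/* Create a DP output from its DCB entry, locating the AUX channel and the
 * VBIOS DP table entry it requires; both are mandatory.
 */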
int
nvkm_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE, struct nvkm_outp **poutp)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_i2c *i2c = device->i2c;
	struct nvkm_outp *outp;
	u8  hdr, cnt, len;
	u32 data;
	int ret;

	ret = nvkm_outp_new_(&nvkm_dp_func, disp, index, dcbE, poutp);
	outp = *poutp;
	if (ret)
		return ret;

	if (dcbE->location == 0)
		outp->dp.aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_CCB(dcbE->i2c_index));
	else
		outp->dp.aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbE->extdev));
	if (!outp->dp.aux) {
		OUTP_ERR(outp, "no aux");
		return -EINVAL;
	}

	/* bios data is not optional */
	data = nvbios_dpout_match(bios, outp->info.hasht, outp->info.hashm,
				  &outp->dp.version, &hdr, &cnt, &len, &outp->dp.info);
	if (!data) {
		OUTP_ERR(outp, "no bios dp data");
		return -EINVAL;
	}

	OUTP_DBG(outp, "bios dp %02x %02x %02x %02x", outp->dp.version, hdr, cnt, len);

	mutex_init(&outp->dp.mutex);
	atomic_set(&outp->dp.lt.done, 0);
	return 0;
}