/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"
#include "rootnv50.h"

#include <core/client.h>
#include <core/enum.h>
#include <core/gpuobj.h>
#include <subdev/bios.h>
#include <subdev/bios/disp.h>
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/devinit.h>
#include <subdev/timer.h>

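/* Thin wrappers adapting the common nvkm_disp interface to the
 * chipset-specific nv50_disp_func hooks.  Hooks that not every
 * implementation provides (internal DP, external TMDS/DP) return
 * -ENODEV when absent.
 */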
static const struct nvkm_disp_oclass *
nv50_disp_root_(struct nvkm_disp *base)
{
	return nv50_disp(base)->func->root;
}

static int
nv50_disp_outp_internal_crt_(struct nvkm_disp *base, int index,
			     struct dcb_output *dcb, struct nvkm_output **poutp)
{
	struct nv50_disp *disp = nv50_disp(base);
	return disp->func->outp.internal.crt(base, index, dcb, poutp);
}

static int
nv50_disp_outp_internal_tmds_(struct nvkm_disp *base, int index,
			      struct dcb_output *dcb,
			      struct nvkm_output **poutp)
{
	struct nv50_disp *disp = nv50_disp(base);
	return disp->func->outp.internal.tmds(base, index, dcb, poutp);
}

static int
nv50_disp_outp_internal_lvds_(struct nvkm_disp *base, int index,
			      struct dcb_output *dcb,
			      struct nvkm_output **poutp)
{
	struct nv50_disp *disp = nv50_disp(base);
	return disp->func->outp.internal.lvds(base, index, dcb, poutp);
}

static int
nv50_disp_outp_internal_dp_(struct nvkm_disp *base, int index,
			    struct dcb_output *dcb, struct nvkm_output **poutp)
{
	struct nv50_disp *disp = nv50_disp(base);
	if (disp->func->outp.internal.dp)
		return disp->func->outp.internal.dp(base, index, dcb, poutp);
	return -ENODEV;
}

static int
nv50_disp_outp_external_tmds_(struct nvkm_disp *base, int index,
			      struct dcb_output *dcb,
			      struct nvkm_output **poutp)
{
	struct nv50_disp *disp = nv50_disp(base);
	if (disp->func->outp.external.tmds)
		return disp->func->outp.external.tmds(base, index, dcb, poutp);
	return -ENODEV;
}

static int
nv50_disp_outp_external_dp_(struct nvkm_disp *base, int index,
			    struct dcb_output *dcb, struct nvkm_output **poutp)
{
	struct nv50_disp *disp = nv50_disp(base);
	if (disp->func->outp.external.dp)
		return disp->func->outp.external.dp(base, index, dcb, poutp);
	return -ENODEV;
}

static void
nv50_disp_vblank_fini_(struct nvkm_disp *base, int head)
{
	struct nv50_disp *disp = nv50_disp(base);
	disp->func->head.vblank_fini(disp, head);
}

static void
nv50_disp_vblank_init_(struct nvkm_disp *base, int head)
{
	struct nv50_disp *disp = nv50_disp(base);
	disp->func->head.vblank_init(disp, head);
}

static void
nv50_disp_intr_(struct nvkm_disp *base)
{
	struct nv50_disp *disp = nv50_disp(base);
	disp->func->intr(disp);
}

static void *
nv50_disp_dtor_(struct nvkm_disp *base)
{
	struct nv50_disp *disp = nv50_disp(base);
	nvkm_event_fini(&disp->uevent);
	return disp;
}

static const struct nvkm_disp_func
nv50_disp_ = {
	.dtor = nv50_disp_dtor_,
	.intr = nv50_disp_intr_,
	.root = nv50_disp_root_,
	.outp.internal.crt = nv50_disp_outp_internal_crt_,
	.outp.internal.tmds = nv50_disp_outp_internal_tmds_,
	.outp.internal.lvds = nv50_disp_outp_internal_lvds_,
	.outp.internal.dp = nv50_disp_outp_internal_dp_,
	.outp.external.tmds = nv50_disp_outp_external_tmds_,
	.outp.external.dp = nv50_disp_outp_external_dp_,
	.head.vblank_init = nv50_disp_vblank_init_,
	.head.vblank_fini = nv50_disp_vblank_fini_,
};

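/* Common constructor for the NV50-family display implementations:
 * allocates the nv50_disp container, wires up the chipset-specific
 * function table and supervisor worker, then registers the channel
 * uevent source with one index per display channel (1 + heads * 4).
 */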
int
nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
	       int index, int heads, struct nvkm_disp **pdisp)
{
	struct nv50_disp *disp;
	int ret;

	if (!(disp = kzalloc(sizeof(*disp), GFP_KERNEL)))
		return -ENOMEM;
	INIT_WORK(&disp->supervisor, func->super);
	disp->func = func;
	*pdisp = &disp->base;

	ret = nvkm_disp_ctor(&nv50_disp_, device, index, heads, &disp->base);
	if (ret)
		return ret;

	return nvkm_event_init(func->uevent, 1, 1 + (heads * 4), &disp->uevent);
}

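/* Per-head vblank interrupt enables; bit (2 + head) of 0x61002c appears
 * to gate the matching vblank status bit reported in 0x610024.
 */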
void
nv50_disp_vblank_fini(struct nv50_disp *disp, int head)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	nvkm_mask(device, 0x61002c, (4 << head), 0);
}

void
nv50_disp_vblank_init(struct nv50_disp *disp, int head)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	nvkm_mask(device, 0x61002c, (4 << head), (4 << head));
}

static const struct nvkm_enum
nv50_disp_intr_error_type[] = {
	{ 3, "ILLEGAL_MTHD" },
	{ 4, "INVALID_VALUE" },
	{ 5, "INVALID_STATE" },
	{ 7, "INVALID_HANDLE" },
	{}
};

static const struct nvkm_enum
nv50_disp_intr_error_code[] = {
	{ 0x00, "" },
	{}
};

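/* Decode and log a display channel exception.  0x610080/0x610084 hold
 * the faulting method and data for each channel; channel state is also
 * dumped when method 0x0080 faults.  The interrupt is acknowledged and
 * the error slot cleared afterwards.
 */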
static void
nv50_disp_intr_error(struct nv50_disp *disp, int chid)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
	u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
	u32 code = (addr & 0x00ff0000) >> 16;
	u32 type = (addr & 0x00007000) >> 12;
	u32 mthd = (addr & 0x00000ffc);
	const struct nvkm_enum *ec, *et;

	et = nvkm_enum_find(nv50_disp_intr_error_type, type);
	ec = nvkm_enum_find(nv50_disp_intr_error_code, code);

	nvkm_error(subdev,
		   "ERROR %d [%s] %02x [%s] chid %d mthd %04x data %08x\n",
		   type, et ? et->name : "", code, ec ? ec->name : "",
		   chid, mthd, data);

	if (chid < ARRAY_SIZE(disp->chan)) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
			break;
		default:
			break;
		}
	}

	nvkm_wr32(device, 0x610020, 0x00010000 << chid);
	nvkm_wr32(device, 0x610080 + (chid * 0x08), 0x90000000);
}

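/* Translate an OR index and its method-control value into a DCB output
 * type/mask (DAC: or 0-3, SOR: 4-7, PIOR: 8+), then locate the matching
 * nvkm_output and its VBIOS output table entry for script execution.
 */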
static struct nvkm_output *
exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
	    u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
	    struct nvbios_outp *info)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvkm_output *outp;
	u16 mask, type;

	if (or < 4) {
		type = DCB_OUTPUT_ANALOG;
		mask = 0;
	} else
	if (or < 8) {
		switch (ctrl & 0x00000f00) {
		case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
		case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
		case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
		case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
		default:
			nvkm_error(subdev, "unknown SOR mc %08x\n", ctrl);
			return NULL;
		}
		or  -= 4;
	} else {
		or  -= 8;
		type = 0x0010;
		mask = 0;
		switch (ctrl & 0x00000f00) {
		case 0x00000000: type |= disp->pior.type[or]; break;
		default:
			nvkm_error(subdev, "unknown PIOR mc %08x\n", ctrl);
			return NULL;
		}
	}

	mask  = 0x00c0 & (mask << 6);
	mask |= 0x0001 << or;
	mask |= 0x0100 << head;

	list_for_each_entry(outp, &disp->base.outp, head) {
		if ((outp->info.hasht & 0xff) == type &&
		    (outp->info.hashm & mask) == mask) {
			*data = nvbios_outp_match(bios, outp->info.hasht, mask,
						  ver, hdr, cnt, len, info);
			if (!*data)
				return NULL;
			return outp;
		}
	}

	return NULL;
}

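/* Determine which output owns the given head by scanning the armed DAC,
 * SOR and PIOR method-control values, then execute VBIOS output script
 * 'id' for that output.
 */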
static struct nvkm_output *
exec_script(struct nv50_disp *disp, int head, int id)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_output *outp;
	struct nvbios_outp info;
	u8  ver, hdr, cnt, len;
	u32 data, ctrl = 0;
	u32 reg;
	int i;

	/* DAC */
	for (i = 0; !(ctrl & (1 << head)) && i < disp->func->dac.nr; i++)
		ctrl = nvkm_rd32(device, 0x610b5c + (i * 8));

	/* SOR */
	if (!(ctrl & (1 << head))) {
		if (device->chipset  < 0x90 ||
		    device->chipset == 0x92 ||
		    device->chipset == 0xa0) {
			reg = 0x610b74;
		} else {
			reg = 0x610798;
		}
		for (i = 0; !(ctrl & (1 << head)) && i < disp->func->sor.nr; i++)
			ctrl = nvkm_rd32(device, reg + (i * 8));
		i += 4;
	}

	/* PIOR */
	if (!(ctrl & (1 << head))) {
		for (i = 0; !(ctrl & (1 << head)) && i < disp->func->pior.nr; i++)
			ctrl = nvkm_rd32(device, 0x610b84 + (i * 8));
		i += 8;
	}

	if (!(ctrl & (1 << head)))
		return NULL;
	i--;

	outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
	if (outp) {
		struct nvbios_init init = {
			.subdev = subdev,
			.bios = bios,
			.offset = info.script[id],
			.outp = &outp->info,
			.crtc = head,
			.execute = 1,
		};

		nvbios_exec(&init);
	}

	return outp;
}

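/* Like exec_script(), but additionally matches the output configuration
 * table for the current output mode and runs the clock-comparison script
 * selected by 'id' against the requested pixel clock.
 */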
static struct nvkm_output *
exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_output *outp;
	struct nvbios_outp info1;
	struct nvbios_ocfg info2;
	u8  ver, hdr, cnt, len;
	u32 data, ctrl = 0;
	u32 reg;
	int i;

	/* DAC */
	for (i = 0; !(ctrl & (1 << head)) && i < disp->func->dac.nr; i++)
		ctrl = nvkm_rd32(device, 0x610b58 + (i * 8));

	/* SOR */
	if (!(ctrl & (1 << head))) {
		if (device->chipset  < 0x90 ||
		    device->chipset == 0x92 ||
		    device->chipset == 0xa0) {
			reg = 0x610b70;
		} else {
			reg = 0x610794;
		}
		for (i = 0; !(ctrl & (1 << head)) && i < disp->func->sor.nr; i++)
			ctrl = nvkm_rd32(device, reg + (i * 8));
		i += 4;
	}

	/* PIOR */
	if (!(ctrl & (1 << head))) {
		for (i = 0; !(ctrl & (1 << head)) && i < disp->func->pior.nr; i++)
			ctrl = nvkm_rd32(device, 0x610b80 + (i * 8));
		i += 8;
	}

	if (!(ctrl & (1 << head)))
		return NULL;
	i--;

	outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
	if (!outp)
		return NULL;

	*conf = (ctrl & 0x00000f00) >> 8;
	if (outp->info.location == 0) {
		switch (outp->info.type) {
		case DCB_OUTPUT_TMDS:
			if (*conf == 5)
				*conf |= 0x0100;
			break;
		case DCB_OUTPUT_LVDS:
			*conf |= disp->sor.lvdsconf;
			break;
		default:
			break;
		}
	} else {
		*conf = (ctrl & 0x00000f00) >> 8;
		pclk = pclk / 2;
	}

	data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
				 &ver, &hdr, &cnt, &len, &info2);
	if (data && id < 0xff) {
		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
		if (data) {
			struct nvbios_init init = {
				.subdev = subdev,
				.bios = bios,
				.offset = data,
				.outp = &outp->info,
				.crtc = head,
				.execute = 1,
			};

			nvbios_exec(&init);
		}
	}

	return outp;
}

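/* Chipsets needing the DP<->TMDS workaround below: on these, switching
 * an SOR between DP and TMDS appears to require resetting part of the
 * link logic, and SPPLL1 (0x00e840) must stay powered while any SOR is
 * still using it.
 */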
static bool
nv50_disp_dptmds_war(struct nvkm_device *device)
{
	switch (device->chipset) {
	case 0x94:
	case 0x96:
	case 0x98:
	case 0xaa:
	case 0xac:
		return true;
	default:
		break;
	}
	return false;
}

static bool
nv50_disp_dptmds_war_needed(struct nv50_disp *disp, struct dcb_output *outp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	const u32 soff = __ffs(outp->or) * 0x800;
	if (nv50_disp_dptmds_war(device) && outp->type == DCB_OUTPUT_TMDS) {
		switch (nvkm_rd32(device, 0x614300 + soff) & 0x00030000) {
		case 0x00000000:
		case 0x00030000:
			return true;
		default:
			break;
		}
	}
	return false;
}

static void
nv50_disp_dptmds_war_2(struct nv50_disp *disp, struct dcb_output *outp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	const u32 soff = __ffs(outp->or) * 0x800;

	if (!nv50_disp_dptmds_war_needed(disp, outp))
		return;

	nvkm_mask(device, 0x00e840, 0x80000000, 0x80000000);
	nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x03000000);
	nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000001);

	nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x00000000);
	nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x14000000);
	nvkm_usec(device, 400, NVKM_DELAY);
	nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x00000000);
	nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x01000000);

	if (nvkm_rd32(device, 0x61c004 + soff) & 0x00000001) {
		u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
		u32  pu_pc = seqctl & 0x0000000f;
		nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f008000);
	}
}

static void
nv50_disp_dptmds_war_3(struct nv50_disp *disp, struct dcb_output *outp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	const u32 soff = __ffs(outp->or) * 0x800;
	u32 sorpwr;

	if (!nv50_disp_dptmds_war_needed(disp, outp))
		return;

	sorpwr = nvkm_rd32(device, 0x61c004 + soff);
	if (sorpwr & 0x00000001) {
		u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
		u32  pd_pc = (seqctl & 0x00000f00) >> 8;
		u32  pu_pc =  seqctl & 0x0000000f;

		nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x1f008000);

		nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
				break;
		);
		nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000);
		nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
				break;
		);

		nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x00002000);
		nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f000000);
	}

	nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000000);
	nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x00000000);

	if (sorpwr & 0x00000001)
		nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000001);
}

static void
nv50_disp_update_sppll1(struct nv50_disp *disp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	bool used = false;
	int sor;

	if (!nv50_disp_dptmds_war(device))
		return;

	for (sor = 0; sor < disp->func->sor.nr; sor++) {
		u32 clksor = nvkm_rd32(device, 0x614300 + (sor * 0x800));
		switch (clksor & 0x03000000) {
		case 0x02000000:
		case 0x03000000:
			used = true;
			break;
		default:
			break;
		}
	}

	if (used)
		return;

	nvkm_mask(device, 0x00e840, 0x80000000, 0x00000000);
}

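/* Supervisor stage handlers.  The "unkNN" names follow the supervisor
 * bits they service: roughly, 0x10 is the pre-modeset/detach phase,
 * 0x20 programs clocks and output scripts, and 0x40 completes the
 * attach.
 */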
static void
nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
{
	exec_script(disp, head, 1);
}

static void
nv50_disp_intr_unk20_0(struct nv50_disp *disp, int head)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_output *outp = exec_script(disp, head, 2);

	/* the binary driver does this outside of the supervisor handling
	 * (after the third supervisor from a detach).  we (currently?)
	 * allow both detach/attach to happen in the same set of
	 * supervisor interrupts, so it would make sense to execute this
	 * (full power down?) script after all the detach phases of the
	 * supervisor handling.  like with training if needed from the
	 * second supervisor, nvidia doesn't do this, so who knows if it's
	 * entirely safe, but it does appear to work..
	 *
	 * without this script being run, on some configurations i've
	 * seen, switching from DP to TMDS on a DP connector may result
	 * in a blank screen (SOR_PWR off/on can restore it)
	 */
	if (outp && outp->info.type == DCB_OUTPUT_DP) {
		struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
		struct nvbios_init init = {
			.subdev = subdev,
			.bios = subdev->device->bios,
			.outp = &outp->info,
			.crtc = head,
			.offset = outpdp->info.script[4],
			.execute = 1,
		};

		nvbios_exec(&init);
		atomic_set(&outpdp->lt.done, 0);
	}
}

static void
nv50_disp_intr_unk20_1(struct nv50_disp *disp, int head)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_devinit *devinit = device->devinit;
	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
	if (pclk)
		nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head, pclk);
}

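/* Program the DP transfer unit (TU) size and hblank/vblank symbol counts
 * for the requested pixel clock and the currently configured link width
 * and rate, searching for the TU size and fractional fill that best
 * match the packed-data-to-symbol-rate ratio.
 */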
static void
nv50_disp_intr_unk20_2_dp(struct nv50_disp *disp, int head,
			  struct dcb_output *outp, u32 pclk)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const int link = !(outp->sorconf.link & 1);
	const int   or = ffs(outp->or) - 1;
	const u32 soff = (  or * 0x800);
	const u32 loff = (link * 0x080) + soff;
	const u32 ctrl = nvkm_rd32(device, 0x610794 + (or * 8));
	const u32 symbol = 100000;
	const s32 vactive = nvkm_rd32(device, 0x610af8 + (head * 0x540)) & 0xffff;
	const s32 vblanke = nvkm_rd32(device, 0x610ae8 + (head * 0x540)) & 0xffff;
	const s32 vblanks = nvkm_rd32(device, 0x610af0 + (head * 0x540)) & 0xffff;
	u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff);
	u32 clksor = nvkm_rd32(device, 0x614300 + soff);
	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
	int TU, VTUi, VTUf, VTUa;
	u64 link_data_rate, link_ratio, unk;
	u32 best_diff = 64 * symbol;
	u32 link_nr, link_bw, bits;
	u64 value;

	link_bw = (clksor & 0x000c0000) ? 270000 : 162000;
	link_nr = hweight32(dpctrl & 0x000f0000);

	/* symbols/hblank - algorithm taken from comments in tegra driver */
	value = vblanke + vactive - vblanks - 7;
	value = value * link_bw;
	do_div(value, pclk);
	value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
	nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, value);

	/* symbols/vblank - algorithm taken from comments in tegra driver */
	value = vblanks - vblanke - 25;
	value = value * link_bw;
	do_div(value, pclk);
	value = value - ((36 / link_nr) + 3) - 1;
	nvkm_mask(device, 0x61c1ec + soff, 0x00ffffff, value);

	/* watermark / activesym */
	if      ((ctrl & 0xf0000) == 0x60000) bits = 30;
	else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
	else                                  bits = 18;

	link_data_rate = (pclk * bits / 8) / link_nr;

	/* calculate ratio of packed data rate to link symbol rate */
	link_ratio = link_data_rate * symbol;
	do_div(link_ratio, link_bw);

	for (TU = 64; TU >= 32; TU--) {
		/* calculate average number of valid symbols in each TU */
		u32 tu_valid = link_ratio * TU;
		u32 calc, diff;

		/* find a hw representation for the fraction.. */
		VTUi = tu_valid / symbol;
		calc = VTUi * symbol;
		diff = tu_valid - calc;
		if (diff) {
			if (diff >= (symbol / 2)) {
				VTUf = symbol / (symbol - diff);
				if (symbol - (VTUf * diff))
					VTUf++;

				if (VTUf <= 15) {
					VTUa  = 1;
					calc += symbol - (symbol / VTUf);
				} else {
					VTUa  = 0;
					VTUf  = 1;
					calc += symbol;
				}
			} else {
				VTUa  = 0;
				VTUf  = min((int)(symbol / diff), 15);
				calc += symbol / VTUf;
			}

			diff = calc - tu_valid;
		} else {
			/* no remainder, but the hw doesn't like the fractional
			 * part to be zero.  decrement the integer part and
			 * have the fraction add a whole symbol back
			 */
			VTUa = 0;
			VTUf = 1;
			VTUi--;
		}

		if (diff < best_diff) {
			best_diff = diff;
			bestTU = TU;
			bestVTUa = VTUa;
			bestVTUf = VTUf;
			bestVTUi = VTUi;
			if (diff == 0)
				break;
		}
	}

	if (!bestTU) {
		nvkm_error(subdev, "unable to find suitable dp config\n");
		return;
	}

	/* XXX close to vbios numbers, but not right */
	unk  = (symbol - link_ratio) * bestTU;
	unk *= link_ratio;
	do_div(unk, symbol);
	do_div(unk, symbol);
	unk += 6;

	nvkm_mask(device, 0x61c10c + loff, 0x000001fc, bestTU << 2);
	nvkm_mask(device, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
						       bestVTUf << 16 |
						       bestVTUi << 8 | unk);
}

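/* Second-stage head programming: re-run the clock-comparison scripts,
 * retrain DP links that the detach scripts powered down, program DP TU
 * parameters where needed, and update head/output clock routing.
 */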
static void
nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_output *outp;
	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
	u32 hval, hreg = 0x614200 + (head * 0x800);
	u32 oval, oreg;
	u32 mask, conf;

	outp = exec_clkcmp(disp, head, 0xff, pclk, &conf);
	if (!outp)
		return;

	/* we allow both encoder attach and detach operations to occur
	 * within a single supervisor (ie. modeset) sequence.  the
	 * encoder detach scripts quite often switch off power to the
	 * lanes, which requires the link to be re-trained.
	 *
	 * this is not generally an issue as the sink "must" (heh)
	 * signal an irq when it's lost sync so the driver can
	 * re-train.
	 *
	 * however, on some boards, if one does not configure at least
	 * the gpu side of the link *before* attaching, then various
	 * things can go horribly wrong (PDISP disappearing from mmio,
	 * third supervisor never happens, etc).
	 *
	 * the solution is simply to retrain here, if necessary.  last
	 * i checked, the binary driver userspace does not appear to
	 * trigger this situation (it forces an UPDATE between steps).
	 */
	if (outp->info.type == DCB_OUTPUT_DP) {
		u32 soff = (ffs(outp->info.or) - 1) * 0x08;
		u32 ctrl, datarate;

		if (outp->info.location == 0) {
			ctrl = nvkm_rd32(device, 0x610794 + soff);
			soff = 1;
		} else {
			ctrl = nvkm_rd32(device, 0x610b80 + soff);
			soff = 2;
		}

		switch ((ctrl & 0x000f0000) >> 16) {
		case 6: datarate = pclk * 30; break;
		case 5: datarate = pclk * 24; break;
		case 2:
		default:
			datarate = pclk * 18;
			break;
		}

		if (nvkm_output_dp_train(outp, datarate / soff, true))
			OUTP_ERR(outp, "link not trained before attach");
	}

	exec_clkcmp(disp, head, 0, pclk, &conf);

	if (!outp->info.location && outp->info.type == DCB_OUTPUT_ANALOG) {
		oreg = 0x614280 + (ffs(outp->info.or) - 1) * 0x800;
		oval = 0x00000000;
		hval = 0x00000000;
		mask = 0xffffffff;
	} else
	if (!outp->info.location) {
		if (outp->info.type == DCB_OUTPUT_DP)
			nv50_disp_intr_unk20_2_dp(disp, head, &outp->info, pclk);
		oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800;
		oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
		hval = 0x00000000;
		mask = 0x00000707;
	} else {
		oreg = 0x614380 + (ffs(outp->info.or) - 1) * 0x800;
		oval = 0x00000001;
		hval = 0x00000001;
		mask = 0x00000707;
	}

	nvkm_mask(device, hreg, 0x0000000f, hval);
	nvkm_mask(device, oreg, mask, oval);

	nv50_disp_dptmds_war_2(disp, &outp->info);
}

/* If programming a TMDS output on a SOR that can also be configured for
 * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
 *
 * It looks like the VBIOS TMDS scripts make an attempt at this, however,
 * the VBIOS scripts on at least one board I have only switch it off on
 * link 0, causing a blank display if the output has previously been
 * programmed for DisplayPort.
 */
static void
nv50_disp_intr_unk40_0_tmds(struct nv50_disp *disp,
			    struct dcb_output *outp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	const int link = !(outp->sorconf.link & 1);
	const int   or = ffs(outp->or) - 1;
	const u32 loff = (or * 0x800) + (link * 0x80);
	const u16 mask = (outp->sorconf.link << 6) | outp->or;
	struct dcb_output match;
	u8  ver, hdr;

	if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match))
		nvkm_mask(device, 0x61c10c + loff, 0x00000001, 0x00000000);
}

static void
nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_output *outp;
	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
	u32 conf;

	outp = exec_clkcmp(disp, head, 1, pclk, &conf);
	if (!outp)
		return;

	if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
		nv50_disp_intr_unk40_0_tmds(disp, &outp->info);
	nv50_disp_dptmds_war_3(disp, &outp->info);
}

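/* Workqueue handler for the supervisor interrupts.  The stage is latched
 * into disp->super by nv50_disp_intr(); the per-head bits read back from
 * 0x610030 select which heads each handler runs for, and 0x610030 is
 * written at the end, apparently to signal completion to PDISP.
 */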
void
nv50_disp_intr_supervisor(struct work_struct *work)
{
	struct nv50_disp *disp =
		container_of(work, struct nv50_disp, supervisor);
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 super = nvkm_rd32(device, 0x610030);
	int head;

	nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super, super);

	if (disp->super & 0x00000010) {
		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
		for (head = 0; head < disp->base.head.nr; head++) {
			if (!(super & (0x00000020 << head)))
				continue;
			if (!(super & (0x00000080 << head)))
				continue;
			nv50_disp_intr_unk10_0(disp, head);
		}
	} else
	if (disp->super & 0x00000020) {
		for (head = 0; head < disp->base.head.nr; head++) {
			if (!(super & (0x00000080 << head)))
				continue;
			nv50_disp_intr_unk20_0(disp, head);
		}
		for (head = 0; head < disp->base.head.nr; head++) {
			if (!(super & (0x00000200 << head)))
				continue;
			nv50_disp_intr_unk20_1(disp, head);
		}
		for (head = 0; head < disp->base.head.nr; head++) {
			if (!(super & (0x00000080 << head)))
				continue;
			nv50_disp_intr_unk20_2(disp, head);
		}
	} else
	if (disp->super & 0x00000040) {
		for (head = 0; head < disp->base.head.nr; head++) {
			if (!(super & (0x00000080 << head)))
				continue;
			nv50_disp_intr_unk40_0(disp, head);
		}
		nv50_disp_update_sppll1(disp);
	}

	nvkm_wr32(device, 0x610030, 0x80000000);
}

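/* Top-level PDISP interrupt handler: channel exceptions and completion
 * notifications come from 0x610020, vblank and supervisor interrupts
 * from 0x610024 (supervisor handling is deferred to the worker above).
 */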
void
nv50_disp_intr(struct nv50_disp *disp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	u32 intr0 = nvkm_rd32(device, 0x610020);
	u32 intr1 = nvkm_rd32(device, 0x610024);

	while (intr0 & 0x001f0000) {
		u32 chid = __ffs(intr0 & 0x001f0000) - 16;
		nv50_disp_intr_error(disp, chid);
		intr0 &= ~(0x00010000 << chid);
	}

	while (intr0 & 0x0000001f) {
		u32 chid = __ffs(intr0 & 0x0000001f);
		nv50_disp_chan_uevent_send(disp, chid);
		intr0 &= ~(0x00000001 << chid);
	}

	if (intr1 & 0x00000004) {
		nvkm_disp_vblank(&disp->base, 0);
		nvkm_wr32(device, 0x610024, 0x00000004);
	}

	if (intr1 & 0x00000008) {
		nvkm_disp_vblank(&disp->base, 1);
		nvkm_wr32(device, 0x610024, 0x00000008);
	}

	if (intr1 & 0x00000070) {
		disp->super = (intr1 & 0x00000070);
		schedule_work(&disp->supervisor);
		nvkm_wr32(device, 0x610024, disp->super);
	}
}

static const struct nv50_disp_func
nv50_disp = {
	.intr = nv50_disp_intr,
	.uevent = &nv50_disp_chan_uevent,
	.super = nv50_disp_intr_supervisor,
	.root = &nv50_disp_root_oclass,
	.head.vblank_init = nv50_disp_vblank_init,
	.head.vblank_fini = nv50_disp_vblank_fini,
	.head.scanoutpos = nv50_disp_root_scanoutpos,
	.outp.internal.crt = nv50_dac_output_new,
	.outp.internal.tmds = nv50_sor_output_new,
	.outp.internal.lvds = nv50_sor_output_new,
	.outp.external.tmds = nv50_pior_output_new,
	.outp.external.dp = nv50_pior_dp_new,
	.dac.nr = 3,
	.dac.power = nv50_dac_power,
	.dac.sense = nv50_dac_sense,
	.sor.nr = 2,
	.sor.power = nv50_sor_power,
	.pior.nr = 3,
	.pior.power = nv50_pior_power,
};

int
nv50_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
	return nv50_disp_new_(&nv50_disp, device, index, 2, pdisp);
}