/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"
#include "outp.h"
#include "outpdp.h"

#include <core/client.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <core/handle.h>
#include <core/ramht.h>
#include <engine/dmaobj.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/disp.h>
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/devinit.h>
#include <subdev/fb.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/event.h>
#include <nvif/unpack.h>

/*******************************************************************************
 * EVO channel base class
 ******************************************************************************/

static int
nv50_disp_chan_create_(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, int head,
		       int length, void **pobject)
{
	const struct nv50_disp_chan_impl *impl = (void *)oclass->ofuncs;
	struct nv50_disp_base *base = (void *)parent;
	struct nv50_disp_chan *chan;
	int chid = impl->chid + head;
	int ret;

	if (base->chan & (1 << chid))
		return -EBUSY;
	base->chan |= (1 << chid);

	ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
				  (1ULL << NVDEV_ENGINE_DMAOBJ),
				  length, pobject);
	chan = *pobject;
	if (ret)
		return ret;
	chan->chid = chid;

	nv_parent(chan)->object_attach = impl->attach;
	nv_parent(chan)->object_detach = impl->detach;
	return 0;
}

static void
nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
{
	struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
	base->chan &= ~(1 << chan->chid);
	nvkm_namedb_destroy(&chan->base);
}

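/* Channel "user event" (completion notification) support.  The low bits of
 * 0x610020 are per-channel interrupt pending flags and the matching bits of
 * 0x610028 are their enables; init/fini below arm and disarm notification
 * for a single channel, and the interrupt handler forwards pending bits to
 * nv50_disp_chan_uevent_send().
 */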
static void
nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
	struct nvkm_device *device = disp->base.engine.subdev.device;
	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000000 << index);
	nvkm_wr32(device, 0x610020, 0x00000001 << index);
}

static void
nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
{
	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
	struct nvkm_device *device = disp->base.engine.subdev.device;
	nvkm_wr32(device, 0x610020, 0x00000001 << index);
	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000001 << index);
}

void
nv50_disp_chan_uevent_send(struct nv50_disp *disp, int chid)
{
	struct nvif_notify_uevent_rep {
	} rep;

	nvkm_event_send(&disp->uevent, 1, chid, &rep, sizeof(rep));
}

int
nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
			   struct nvkm_notify *notify)
{
	struct nv50_disp_dmac *dmac = (void *)object;
	union {
		struct nvif_notify_uevent_req none;
	} *args = data;
	int ret;

	if (nvif_unvers(args->none)) {
		notify->size = sizeof(struct nvif_notify_uevent_rep);
		notify->types = 1;
		notify->index = dmac->base.chid;
		return 0;
	}

	return ret;
}

const struct nvkm_event_func
nv50_disp_chan_uevent = {
	.ctor = nv50_disp_chan_uevent_ctor,
	.init = nv50_disp_chan_uevent_init,
	.fini = nv50_disp_chan_uevent_fini,
};

int
nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
		    struct nvkm_event **pevent)
{
	struct nv50_disp *disp = (void *)object->engine;
	switch (type) {
	case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
		*pevent = &disp->uevent;
		return 0;
	default:
		break;
	}
	return -EINVAL;
}

int
nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
{
	struct nv50_disp_chan *chan = (void *)object;
	*addr = nv_device_resource_start(nv_device(object), 0) +
		0x640000 + (chan->chid * 0x1000);
	*size = 0x001000;
	return 0;
}

u32
nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr)
{
	struct nv50_disp_chan *chan = (void *)object;
	struct nvkm_device *device = object->engine->subdev.device;
	return nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
}

void
nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
	struct nv50_disp_chan *chan = (void *)object;
	struct nvkm_device *device = object->engine->subdev.device;
	nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
}

/*******************************************************************************
 * EVO DMA channel base class
 ******************************************************************************/

static int
nv50_disp_dmac_object_attach(struct nvkm_object *parent,
			     struct nvkm_object *object, u32 name)
{
	struct nv50_disp_base *base = (void *)parent->parent;
	struct nv50_disp_chan *chan = (void *)parent;
	u32 addr = nv_gpuobj(object)->node->offset;
	u32 chid = chan->chid;
	u32 data = (chid << 28) | (addr << 10) | chid;
	return nvkm_ramht_insert(base->ramht, chid, name, data);
}

static void
nv50_disp_dmac_object_detach(struct nvkm_object *parent, int cookie)
{
	struct nv50_disp_base *base = (void *)parent->parent;
	nvkm_ramht_remove(base->ramht, cookie);
}

static int
nv50_disp_dmac_create_(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, u32 pushbuf, int head,
		       int length, void **pobject)
{
	struct nv50_disp_dmac *dmac;
	int ret;

	ret = nv50_disp_chan_create_(parent, engine, oclass, head,
				     length, pobject);
	dmac = *pobject;
	if (ret)
		return ret;

	dmac->pushdma = (void *)nvkm_handle_ref(parent, pushbuf);
	if (!dmac->pushdma)
		return -ENOENT;

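	/* The client-supplied push buffer must be a DMA object covering
	 * exactly 4KiB; its target (VRAM or non-snooped PCI) and base
	 * address (in 256-byte units) are packed into the value that is
	 * later written to the channel's 0x610204 push-buffer register
	 * at init time.
	 */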
	switch (nv_mclass(dmac->pushdma)) {
	case 0x0002:
	case 0x003d:
		if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff)
			return -EINVAL;

		switch (dmac->pushdma->target) {
		case NV_MEM_TARGET_VRAM:
			dmac->push = 0x00000001 | dmac->pushdma->start >> 8;
			break;
		case NV_MEM_TARGET_PCI_NOSNOOP:
			dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

void
nv50_disp_dmac_dtor(struct nvkm_object *object)
{
	struct nv50_disp_dmac *dmac = (void *)object;
	nvkm_object_ref(NULL, (struct nvkm_object **)&dmac->pushdma);
	nv50_disp_chan_destroy(&dmac->base);
}

static int
nv50_disp_dmac_init(struct nvkm_object *object)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_dmac *dmac = (void *)object;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int chid = dmac->base.chid;
	int ret;

	ret = nv50_disp_chan_init(&dmac->base);
	if (ret)
		return ret;

	/* enable error reporting */
	nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);

	/* initialise channel for dma command submission */
	nvkm_wr32(device, 0x610204 + (chid * 0x0010), dmac->push);
	nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
	nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
	nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);

	/* wait for it to go inactive */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
		return -EBUSY;
	}

	return 0;
}

static int
nv50_disp_dmac_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_dmac *dmac = (void *)object;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int chid = dmac->base.chid;

	/* deactivate channel */
	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
		if (suspend)
			return -EBUSY;
	}

	/* disable error reporting and completion notifications */
	nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);

	return nv50_disp_chan_fini(&dmac->base, suspend);
}

/*******************************************************************************
 * EVO master channel object
 ******************************************************************************/

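/* Debug helpers: walk a channel's method list and print each method's
 * hardware state, marking entries whose two register copies (offset +0
 * and offset +impl->mthd.prev) disagree.  Called from the channel-error
 * and supervisor interrupt paths below.
 */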
static void
nv50_disp_mthd_list(struct nv50_disp *disp, int debug, u32 base, int c,
		    const struct nv50_disp_mthd_list *list, int inst)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int i;

	for (i = 0; list->data[i].mthd; i++) {
		if (list->data[i].addr) {
			u32 next = nvkm_rd32(device, list->data[i].addr + base + 0);
			u32 prev = nvkm_rd32(device, list->data[i].addr + base + c);
			u32 mthd = list->data[i].mthd + (list->mthd * inst);
			const char *name = list->data[i].name;
			char mods[16];

			if (prev != next)
				snprintf(mods, sizeof(mods), "-> %08x", next);
			else
				snprintf(mods, sizeof(mods), "%13c", ' ');

			nvkm_printk_(subdev, debug, info,
				     "\t%04x: %08x %s%s%s\n",
				     mthd, prev, mods, name ? " // " : "",
				     name ? name : "");
		}
	}
}

void
nv50_disp_mthd_chan(struct nv50_disp *disp, int debug, int head,
		    const struct nv50_disp_mthd_chan *chan)
{
	struct nvkm_object *object = nv_object(disp);
	const struct nv50_disp_impl *impl = (void *)object->oclass;
	const struct nv50_disp_mthd_list *list;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	int i, j;

	if (debug > nv_subdev(disp)->debug)
		return;

	for (i = 0; (list = chan->data[i].mthd) != NULL; i++) {
		u32 base = head * chan->addr;
		for (j = 0; j < chan->data[i].nr; j++, base += list->addr) {
			const char *cname = chan->name;
			const char *sname = "";
			char cname_[16], sname_[16];

			if (chan->addr) {
				snprintf(cname_, sizeof(cname_), "%s %d",
					 chan->name, head);
				cname = cname_;
			}

			if (chan->data[i].nr > 1) {
				snprintf(sname_, sizeof(sname_), " - %s %d",
					 chan->data[i].name, j);
				sname = sname_;
			}

			nvkm_printk_(subdev, debug, info, "%s%s:\n", cname, sname);
			nv50_disp_mthd_list(disp, debug, base, impl->mthd.prev,
					    list, j);
		}
	}
}

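/* Method tables for the debug dump above: each entry pairs an EVO method
 * with the PDISP register where its state can be read back; a zero register
 * address means the method has no readable state.  Per-instance blocks
 * (DACs, SORs, heads, ...) advance the register by .addr and the printed
 * method by .mthd for each instance.
 */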
const struct nv50_disp_mthd_list
nv50_disp_core_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x000000 },
		{ 0x0084, 0x610bb8 },
		{ 0x0088, 0x610b9c },
		{ 0x008c, 0x000000 },
		{}
	}
};

static const struct nv50_disp_mthd_list
nv50_disp_core_mthd_dac = {
	.mthd = 0x0080,
	.addr = 0x000008,
	.data = {
		{ 0x0400, 0x610b58 },
		{ 0x0404, 0x610bdc },
		{ 0x0420, 0x610828 },
		{}
	}
};

const struct nv50_disp_mthd_list
nv50_disp_core_mthd_sor = {
	.mthd = 0x0040,
	.addr = 0x000008,
	.data = {
		{ 0x0600, 0x610b70 },
		{}
	}
};

const struct nv50_disp_mthd_list
nv50_disp_core_mthd_pior = {
	.mthd = 0x0040,
	.addr = 0x000008,
	.data = {
		{ 0x0700, 0x610b80 },
		{}
	}
};

static const struct nv50_disp_mthd_list
nv50_disp_core_mthd_head = {
	.mthd = 0x0400,
	.addr = 0x000540,
	.data = {
		{ 0x0800, 0x610ad8 },
		{ 0x0804, 0x610ad0 },
		{ 0x0808, 0x610a48 },
		{ 0x080c, 0x610a78 },
		{ 0x0810, 0x610ac0 },
		{ 0x0814, 0x610af8 },
		{ 0x0818, 0x610b00 },
		{ 0x081c, 0x610ae8 },
		{ 0x0820, 0x610af0 },
		{ 0x0824, 0x610b08 },
		{ 0x0828, 0x610b10 },
		{ 0x082c, 0x610a68 },
		{ 0x0830, 0x610a60 },
		{ 0x0834, 0x000000 },
		{ 0x0838, 0x610a40 },
		{ 0x0840, 0x610a24 },
		{ 0x0844, 0x610a2c },
		{ 0x0848, 0x610aa8 },
		{ 0x084c, 0x610ab0 },
		{ 0x0860, 0x610a84 },
		{ 0x0864, 0x610a90 },
		{ 0x0868, 0x610b18 },
		{ 0x086c, 0x610b20 },
		{ 0x0870, 0x610ac8 },
		{ 0x0874, 0x610a38 },
		{ 0x0880, 0x610a58 },
		{ 0x0884, 0x610a9c },
		{ 0x08a0, 0x610a70 },
		{ 0x08a4, 0x610a50 },
		{ 0x08a8, 0x610ae0 },
		{ 0x08c0, 0x610b28 },
		{ 0x08c4, 0x610b30 },
		{ 0x08c8, 0x610b40 },
		{ 0x08d4, 0x610b38 },
		{ 0x08d8, 0x610b48 },
		{ 0x08dc, 0x610b50 },
		{ 0x0900, 0x610a18 },
		{ 0x0904, 0x610ab8 },
		{}
	}
};

static const struct nv50_disp_mthd_chan
nv50_disp_core_mthd_chan = {
	.name = "Core",
	.addr = 0x000000,
	.data = {
		{ "Global", 1, &nv50_disp_core_mthd_base },
		{ "DAC", 3, &nv50_disp_core_mthd_dac },
		{ "SOR", 2, &nv50_disp_core_mthd_sor },
		{ "PIOR", 3, &nv50_disp_core_mthd_pior },
		{ "HEAD", 2, &nv50_disp_core_mthd_head },
		{}
	}
};

int
nv50_disp_core_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv50_disp_core_channel_dma_v0 v0;
	} *args = data;
	struct nv50_disp_dmac *mast;
	int ret;

	nvif_ioctl(parent, "create disp core channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create disp core channel dma vers %d "
				   "pushbuf %08x\n",
			   args->v0.version, args->v0.pushbuf);
	} else
		return ret;

	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
				     0, sizeof(*mast), (void **)&mast);
	*pobject = nv_object(mast);
	if (ret)
		return ret;

	return 0;
}

static int
nv50_disp_core_init(struct nvkm_object *object)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_dmac *mast = (void *)object;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret;

	ret = nv50_disp_chan_init(&mast->base);
	if (ret)
		return ret;

	/* enable error reporting */
	nvkm_mask(device, 0x610028, 0x00010000, 0x00010000);

	/* attempt to unstick channel from some unknown state */
	if ((nvkm_rd32(device, 0x610200) & 0x009f0000) == 0x00020000)
		nvkm_mask(device, 0x610200, 0x00800000, 0x00800000);
	if ((nvkm_rd32(device, 0x610200) & 0x003f0000) == 0x00030000)
		nvkm_mask(device, 0x610200, 0x00600000, 0x00600000);

	/* initialise channel for dma command submission */
	nvkm_wr32(device, 0x610204, mast->push);
	nvkm_wr32(device, 0x610208, 0x00010000);
	nvkm_wr32(device, 0x61020c, 0x00000000);
	nvkm_mask(device, 0x610200, 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x640000, 0x00000000);
	nvkm_wr32(device, 0x610200, 0x01000013);

	/* wait for it to go inactive */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200) & 0x80000000))
			break;
	) < 0) {
		nvkm_error(subdev, "core init: %08x\n",
			   nvkm_rd32(device, 0x610200));
		return -EBUSY;
	}

	return 0;
}

static int
nv50_disp_core_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_dmac *mast = (void *)object;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;

	/* deactivate channel */
	nvkm_mask(device, 0x610200, 0x00000010, 0x00000000);
	nvkm_mask(device, 0x610200, 0x00000003, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200) & 0x001e0000))
			break;
	) < 0) {
		nvkm_error(subdev, "core fini: %08x\n",
			   nvkm_rd32(device, 0x610200));
		if (suspend)
			return -EBUSY;
	}

	/* disable error reporting and completion notifications */
	nvkm_mask(device, 0x610028, 0x00010001, 0x00000000);

	return nv50_disp_chan_fini(&mast->base, suspend);
}

struct nv50_disp_chan_impl
nv50_disp_core_ofuncs = {
	.base.ctor = nv50_disp_core_ctor,
	.base.dtor = nv50_disp_dmac_dtor,
	.base.init = nv50_disp_core_init,
	.base.fini = nv50_disp_core_fini,
	.base.map = nv50_disp_chan_map,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 0,
	.attach = nv50_disp_dmac_object_attach,
	.detach = nv50_disp_dmac_object_detach,
};

/*******************************************************************************
 * EVO sync channel objects
 ******************************************************************************/

static const struct nv50_disp_mthd_list
nv50_disp_base_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x000000 },
		{ 0x0084, 0x0008c4 },
		{ 0x0088, 0x0008d0 },
		{ 0x008c, 0x0008dc },
		{ 0x0090, 0x0008e4 },
		{ 0x0094, 0x610884 },
		{ 0x00a0, 0x6108a0 },
		{ 0x00a4, 0x610878 },
		{ 0x00c0, 0x61086c },
		{ 0x00e0, 0x610858 },
		{ 0x00e4, 0x610860 },
		{ 0x00e8, 0x6108ac },
		{ 0x00ec, 0x6108b4 },
		{ 0x0100, 0x610894 },
		{ 0x0110, 0x6108bc },
		{ 0x0114, 0x61088c },
		{}
	}
};

const struct nv50_disp_mthd_list
nv50_disp_base_mthd_image = {
	.mthd = 0x0400,
	.addr = 0x000000,
	.data = {
		{ 0x0800, 0x6108f0 },
		{ 0x0804, 0x6108fc },
		{ 0x0808, 0x61090c },
		{ 0x080c, 0x610914 },
		{ 0x0810, 0x610904 },
		{}
	}
};

static const struct nv50_disp_mthd_chan
nv50_disp_base_mthd_chan = {
	.name = "Base",
	.addr = 0x000540,
	.data = {
		{ "Global", 1, &nv50_disp_base_mthd_base },
		{ "Image", 2, &nv50_disp_base_mthd_image },
		{}
	}
};

int
nv50_disp_base_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv50_disp_base_channel_dma_v0 v0;
	} *args = data;
	struct nv50_disp *disp = (void *)engine;
	struct nv50_disp_dmac *dmac;
	int ret;

	nvif_ioctl(parent, "create disp base channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create disp base channel dma vers %d "
				   "pushbuf %08x head %d\n",
			   args->v0.version, args->v0.pushbuf, args->v0.head);
		if (args->v0.head > disp->head.nr)
			return -EINVAL;
	} else
		return ret;

	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
				     args->v0.head, sizeof(*dmac),
				     (void **)&dmac);
	*pobject = nv_object(dmac);
	if (ret)
		return ret;

	return 0;
}

struct nv50_disp_chan_impl
nv50_disp_base_ofuncs = {
	.base.ctor = nv50_disp_base_ctor,
	.base.dtor = nv50_disp_dmac_dtor,
	.base.init = nv50_disp_dmac_init,
	.base.fini = nv50_disp_dmac_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 1,
	.attach = nv50_disp_dmac_object_attach,
	.detach = nv50_disp_dmac_object_detach,
};

/*******************************************************************************
 * EVO overlay channel objects
 ******************************************************************************/

const struct nv50_disp_mthd_list
nv50_disp_ovly_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x000000 },
		{ 0x0084, 0x0009a0 },
		{ 0x0088, 0x0009c0 },
		{ 0x008c, 0x0009c8 },
		{ 0x0090, 0x6109b4 },
		{ 0x0094, 0x610970 },
		{ 0x00a0, 0x610998 },
		{ 0x00a4, 0x610964 },
		{ 0x00c0, 0x610958 },
		{ 0x00e0, 0x6109a8 },
		{ 0x00e4, 0x6109d0 },
		{ 0x00e8, 0x6109d8 },
		{ 0x0100, 0x61094c },
		{ 0x0104, 0x610984 },
		{ 0x0108, 0x61098c },
		{ 0x0800, 0x6109f8 },
		{ 0x0808, 0x610a08 },
		{ 0x080c, 0x610a10 },
		{ 0x0810, 0x610a00 },
		{}
	}
};

static const struct nv50_disp_mthd_chan
nv50_disp_ovly_mthd_chan = {
	.name = "Overlay",
	.addr = 0x000540,
	.data = {
		{ "Global", 1, &nv50_disp_ovly_mthd_base },
		{}
	}
};

int
nv50_disp_ovly_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv50_disp_overlay_channel_dma_v0 v0;
	} *args = data;
	struct nv50_disp *disp = (void *)engine;
	struct nv50_disp_dmac *dmac;
	int ret;

	nvif_ioctl(parent, "create disp overlay channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create disp overlay channel dma vers %d "
				   "pushbuf %08x head %d\n",
			   args->v0.version, args->v0.pushbuf, args->v0.head);
		if (args->v0.head > disp->head.nr)
			return -EINVAL;
	} else
		return ret;

	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
				     args->v0.head, sizeof(*dmac),
				     (void **)&dmac);
	*pobject = nv_object(dmac);
	if (ret)
		return ret;

	return 0;
}

struct nv50_disp_chan_impl
nv50_disp_ovly_ofuncs = {
	.base.ctor = nv50_disp_ovly_ctor,
	.base.dtor = nv50_disp_dmac_dtor,
	.base.init = nv50_disp_dmac_init,
	.base.fini = nv50_disp_dmac_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 3,
	.attach = nv50_disp_dmac_object_attach,
	.detach = nv50_disp_dmac_object_detach,
};

/*******************************************************************************
 * EVO PIO channel base class
 ******************************************************************************/

static int
nv50_disp_pioc_create_(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, int head,
		       int length, void **pobject)
{
	return nv50_disp_chan_create_(parent, engine, oclass, head,
				      length, pobject);
}

void
nv50_disp_pioc_dtor(struct nvkm_object *object)
{
	struct nv50_disp_pioc *pioc = (void *)object;
	nv50_disp_chan_destroy(&pioc->base);
}

static int
nv50_disp_pioc_init(struct nvkm_object *object)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_pioc *pioc = (void *)object;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int chid = pioc->base.chid;
	int ret;

	ret = nv50_disp_chan_init(&pioc->base);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d timeout0: %08x\n", chid,
			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
		return -EBUSY;
	}

	nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001);
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10));
		if ((tmp & 0x00030000) == 0x00010000)
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d timeout1: %08x\n", chid,
			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
		return -EBUSY;
	}

	return 0;
}

static int
nv50_disp_pioc_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_pioc *pioc = (void *)object;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int chid = pioc->base.chid;

	nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d timeout: %08x\n", chid,
			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
		if (suspend)
			return -EBUSY;
	}

	return nv50_disp_chan_fini(&pioc->base, suspend);
}

/*******************************************************************************
 * EVO immediate overlay channel objects
 ******************************************************************************/

int
nv50_disp_oimm_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv50_disp_overlay_v0 v0;
	} *args = data;
	struct nv50_disp *disp = (void *)engine;
	struct nv50_disp_pioc *pioc;
	int ret;

	nvif_ioctl(parent, "create disp overlay size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create disp overlay vers %d head %d\n",
			   args->v0.version, args->v0.head);
		if (args->v0.head > disp->head.nr)
			return -EINVAL;
	} else
		return ret;

	ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
				     sizeof(*pioc), (void **)&pioc);
	*pobject = nv_object(pioc);
	if (ret)
		return ret;

	return 0;
}

struct nv50_disp_chan_impl
nv50_disp_oimm_ofuncs = {
	.base.ctor = nv50_disp_oimm_ctor,
	.base.dtor = nv50_disp_pioc_dtor,
	.base.init = nv50_disp_pioc_init,
	.base.fini = nv50_disp_pioc_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 5,
};

/*******************************************************************************
 * EVO cursor channel objects
 ******************************************************************************/

int
nv50_disp_curs_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv50_disp_cursor_v0 v0;
	} *args = data;
	struct nv50_disp *disp = (void *)engine;
	struct nv50_disp_pioc *pioc;
	int ret;

	nvif_ioctl(parent, "create disp cursor size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create disp cursor vers %d head %d\n",
			   args->v0.version, args->v0.head);
		if (args->v0.head > disp->head.nr)
			return -EINVAL;
	} else
		return ret;

	ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
				     sizeof(*pioc), (void **)&pioc);
	*pobject = nv_object(pioc);
	if (ret)
		return ret;

	return 0;
}

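/* Like the overlay-immediate channels, cursor channels are simple PIO
 * channels; the channel index is .chid plus the head number passed to
 * nv50_disp_chan_create_() (core 0, base 1+head, overlay 3+head,
 * oimm 5+head, cursor 7+head).
 */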
struct nv50_disp_chan_impl
nv50_disp_curs_ofuncs = {
	.base.ctor = nv50_disp_curs_ctor,
	.base.dtor = nv50_disp_pioc_dtor,
	.base.init = nv50_disp_pioc_init,
	.base.fini = nv50_disp_pioc_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 7,
};

/*******************************************************************************
 * Base display object
 ******************************************************************************/

int
nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	const u32 blanke = nvkm_rd32(device, 0x610aec + (head * 0x540));
	const u32 blanks = nvkm_rd32(device, 0x610af4 + (head * 0x540));
	const u32 total = nvkm_rd32(device, 0x610afc + (head * 0x540));
	union {
		struct nv04_disp_scanoutpos_v0 v0;
	} *args = data;
	int ret;

	nvif_ioctl(object, "disp scanoutpos size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object, "disp scanoutpos vers %d\n",
			   args->v0.version);
		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
		args->v0.hblanke = (blanke & 0x0000ffff);
		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
		args->v0.hblanks = (blanks & 0x0000ffff);
		args->v0.vtotal = ( total & 0xffff0000) >> 16;
		args->v0.htotal = ( total & 0x0000ffff);
		args->v0.time[0] = ktime_to_ns(ktime_get());
		args->v0.vline = /* vline read locks hline */
			nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff;
		args->v0.time[1] = ktime_to_ns(ktime_get());
		args->v0.hline =
			nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff;
	} else
		return ret;

	return 0;
}

int
nv50_disp_main_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine);
	union {
		struct nv50_disp_mthd_v0 v0;
		struct nv50_disp_mthd_v1 v1;
	} *args = data;
	struct nv50_disp *disp = (void *)object->engine;
	struct nvkm_output *outp = NULL;
	struct nvkm_output *temp;
	u16 type, mask = 0;
	int head, ret;

	if (mthd != NV50_DISP_MTHD)
		return -EINVAL;

	nvif_ioctl(object, "disp mthd size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, true)) {
		nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
			   args->v0.version, args->v0.method, args->v0.head);
		mthd = args->v0.method;
		head = args->v0.head;
	} else
	if (nvif_unpack(args->v1, 1, 1, true)) {
		nvif_ioctl(object, "disp mthd vers %d mthd %02x "
				   "type %04x mask %04x\n",
			   args->v1.version, args->v1.method,
			   args->v1.hasht, args->v1.hashm);
		mthd = args->v1.method;
		type = args->v1.hasht;
		mask = args->v1.hashm;
		head = ffs((mask >> 8) & 0x0f) - 1;
	} else
		return ret;

	if (head < 0 || head >= disp->head.nr)
		return -ENXIO;

	if (mask) {
		list_for_each_entry(temp, &disp->base.outp, head) {
			if ((temp->info.hasht == type) &&
			    (temp->info.hashm & mask) == mask) {
				outp = temp;
				break;
			}
		}
		if (outp == NULL)
			return -ENXIO;
	}

	switch (mthd) {
	case NV50_DISP_SCANOUTPOS:
		return impl->head.scanoutpos(object, disp, data, size, head);
	default:
		break;
	}

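	/* the remaining methods all target a specific output; requests that
	 * did not name an output (outp == NULL) make "mthd * !!outp" zero,
	 * which matches no case and falls through to -EINVAL.
	 */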
	switch (mthd * !!outp) {
	case NV50_DISP_MTHD_V1_DAC_PWR:
		return disp->dac.power(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_DAC_LOAD:
		return disp->dac.sense(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_SOR_PWR:
		return disp->sor.power(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_SOR_HDA_ELD:
		if (!disp->sor.hda_eld)
			return -ENODEV;
		return disp->sor.hda_eld(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_SOR_HDMI_PWR:
		if (!disp->sor.hdmi)
			return -ENODEV;
		return disp->sor.hdmi(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: {
		union {
			struct nv50_disp_sor_lvds_script_v0 v0;
		} *args = data;
		nvif_ioctl(object, "disp sor lvds script size %d\n", size);
		if (nvif_unpack(args->v0, 0, 0, false)) {
			nvif_ioctl(object, "disp sor lvds script "
					   "vers %d name %04x\n",
				   args->v0.version, args->v0.script);
			disp->sor.lvdsconf = args->v0.script;
			return 0;
		} else
			return ret;
	}
		break;
	case NV50_DISP_MTHD_V1_SOR_DP_PWR: {
		struct nvkm_output_dp *outpdp = (void *)outp;
		union {
			struct nv50_disp_sor_dp_pwr_v0 v0;
		} *args = data;
		nvif_ioctl(object, "disp sor dp pwr size %d\n", size);
		if (nvif_unpack(args->v0, 0, 0, false)) {
			nvif_ioctl(object, "disp sor dp pwr vers %d state %d\n",
				   args->v0.version, args->v0.state);
			if (args->v0.state == 0) {
				nvkm_notify_put(&outpdp->irq);
				((struct nvkm_output_dp_impl *)nv_oclass(outp))
					->lnk_pwr(outpdp, 0);
				atomic_set(&outpdp->lt.done, 0);
				return 0;
			} else
			if (args->v0.state != 0) {
				nvkm_output_dp_train(&outpdp->base, 0, true);
				return 0;
			}
		} else
			return ret;
	}
		break;
	case NV50_DISP_MTHD_V1_PIOR_PWR:
		if (!disp->pior.power)
			return -ENODEV;
		return disp->pior.power(object, disp, data, size, head, outp);
	default:
		break;
	}

	return -EINVAL;
}

int
nv50_disp_main_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	struct nv50_disp *disp = (void *)engine;
	struct nv50_disp_base *base;
	int ret;

	ret = nvkm_parent_create(parent, engine, oclass, 0,
				 disp->sclass, 0, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	return nvkm_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
			      &base->ramht);
}

void
nv50_disp_main_dtor(struct nvkm_object *object)
{
	struct nv50_disp_base *base = (void *)object;
	nvkm_ramht_ref(NULL, &base->ramht);
	nvkm_parent_destroy(&base->base);
}

static int
nv50_disp_main_init(struct nvkm_object *object)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_base *base = (void *)object;
	struct nvkm_device *device = disp->base.engine.subdev.device;
	int ret, i;
	u32 tmp;

	ret = nvkm_parent_init(&base->base);
	if (ret)
		return ret;

	/* The below segments of code copying values from one register to
	 * another appear to inform EVO of the display capabilities or
	 * something similar. NFI what the 0x614004 caps are for..
	 */
	tmp = nvkm_rd32(device, 0x614004);
	nvkm_wr32(device, 0x610184, tmp);

	/* ... CRTC caps */
	for (i = 0; i < disp->head.nr; i++) {
		tmp = nvkm_rd32(device, 0x616100 + (i * 0x800));
		nvkm_wr32(device, 0x610190 + (i * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616104 + (i * 0x800));
		nvkm_wr32(device, 0x610194 + (i * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616108 + (i * 0x800));
		nvkm_wr32(device, 0x610198 + (i * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x61610c + (i * 0x800));
		nvkm_wr32(device, 0x61019c + (i * 0x10), tmp);
	}

	/* ... DAC caps */
	for (i = 0; i < disp->dac.nr; i++) {
		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
		nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
	}

	/* ... SOR caps */
	for (i = 0; i < disp->sor.nr; i++) {
		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
		nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
	}

	/* ... PIOR caps */
	for (i = 0; i < disp->pior.nr; i++) {
		tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
		nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
	}

	/* steal display away from vbios, or something like that */
	if (nvkm_rd32(device, 0x610024) & 0x00000100) {
		nvkm_wr32(device, 0x610024, 0x00000100);
		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
				break;
		) < 0)
			return -EBUSY;
	}

	/* point at display engine memory area (hash table, objects) */
	nvkm_wr32(device, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);

	/* enable supervisor interrupts, disable everything else */
	nvkm_wr32(device, 0x61002c, 0x00000370);
	nvkm_wr32(device, 0x610028, 0x00000000);
	return 0;
}

static int
nv50_disp_main_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_base *base = (void *)object;
	struct nvkm_device *device = disp->base.engine.subdev.device;

	/* disable all interrupts */
	nvkm_wr32(device, 0x610024, 0x00000000);
	nvkm_wr32(device, 0x610020, 0x00000000);

	return nvkm_parent_fini(&base->base, suspend);
}

struct nvkm_ofuncs
nv50_disp_main_ofuncs = {
	.ctor = nv50_disp_main_ctor,
	.dtor = nv50_disp_main_dtor,
	.init = nv50_disp_main_init,
	.fini = nv50_disp_main_fini,
	.mthd = nv50_disp_main_mthd,
	.ntfy = nvkm_disp_ntfy,
};

static struct nvkm_oclass
nv50_disp_main_oclass[] = {
	{ NV50_DISP, &nv50_disp_main_ofuncs },
	{}
};

static struct nvkm_oclass
nv50_disp_sclass[] = {
	{ NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
	{ NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
	{ NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
	{ NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
	{ NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
	{}
};

/*******************************************************************************
 * Display context, tracks instmem allocation and prevents more than one
 * client using the display hardware at any time.
 ******************************************************************************/

static int
nv50_disp_data_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	struct nv50_disp *disp = (void *)engine;
	struct nvkm_engctx *ectx;
	int ret = -EBUSY;

	/* no context needed for channel objects... */
	if (nv_mclass(parent) != NV_DEVICE) {
		atomic_inc(&parent->refcount);
		*pobject = parent;
		return 1;
	}

	/* allocate display hardware to client */
	mutex_lock(&nv_subdev(disp)->mutex);
	if (list_empty(&nv_engine(disp)->contexts)) {
		ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0x10000,
					 0x10000, NVOBJ_FLAG_HEAP, &ectx);
		*pobject = nv_object(ectx);
	}
	mutex_unlock(&nv_subdev(disp)->mutex);
	return ret;
}

struct nvkm_oclass
nv50_disp_cclass = {
	.handle = NV_ENGCTX(DISP, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_disp_data_ctor,
		.dtor = _nvkm_engctx_dtor,
		.init = _nvkm_engctx_init,
		.fini = _nvkm_engctx_fini,
		.rd32 = _nvkm_engctx_rd32,
		.wr32 = _nvkm_engctx_wr32,
	},
};

/*******************************************************************************
 * Display engine implementation
 ******************************************************************************/

static void
nv50_disp_vblank_fini(struct nvkm_event *event, int type, int head)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	struct nvkm_device *device = disp->engine.subdev.device;
	nvkm_mask(device, 0x61002c, (4 << head), 0);
}

static void
nv50_disp_vblank_init(struct nvkm_event *event, int type, int head)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	struct nvkm_device *device = disp->engine.subdev.device;
	nvkm_mask(device, 0x61002c, (4 << head), (4 << head));
}

const struct nvkm_event_func
nv50_disp_vblank_func = {
	.ctor = nvkm_disp_vblank_ctor,
	.init = nv50_disp_vblank_init,
	.fini = nv50_disp_vblank_fini,
};

static const struct nvkm_enum
nv50_disp_intr_error_type[] = {
	{ 3, "ILLEGAL_MTHD" },
	{ 4, "INVALID_VALUE" },
	{ 5, "INVALID_STATE" },
	{ 7, "INVALID_HANDLE" },
	{}
};

static const struct nvkm_enum
nv50_disp_intr_error_code[] = {
	{ 0x00, "" },
	{}
};

static void
nv50_disp_intr_error(struct nv50_disp *disp, int chid)
{
	struct nv50_disp_impl *impl = (void *)nv_object(disp)->oclass;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
	u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
	u32 code = (addr & 0x00ff0000) >> 16;
	u32 type = (addr & 0x00007000) >> 12;
	u32 mthd = (addr & 0x00000ffc);
	const struct nvkm_enum *ec, *et;

	et = nvkm_enum_find(nv50_disp_intr_error_type, type);
	ec = nvkm_enum_find(nv50_disp_intr_error_code, code);

	nvkm_error(subdev,
		   "ERROR %d [%s] %02x [%s] chid %d mthd %04x data %08x\n",
		   type, et ? et->name : "", code, ec ? ec->name : "",
		   chid, mthd, data);

	if (chid == 0) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_mthd_chan(disp, NV_DBG_ERROR, chid - 0,
					    impl->mthd.core);
			break;
		default:
			break;
		}
	} else
	if (chid <= 2) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_mthd_chan(disp, NV_DBG_ERROR, chid - 1,
					    impl->mthd.base);
			break;
		default:
			break;
		}
	} else
	if (chid <= 4) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_mthd_chan(disp, NV_DBG_ERROR, chid - 3,
					    impl->mthd.ovly);
			break;
		default:
			break;
		}
	}

	nvkm_wr32(device, 0x610020, 0x00010000 << chid);
	nvkm_wr32(device, 0x610080 + (chid * 0x08), 0x90000000);
}

static struct nvkm_output *
exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
	    u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
	    struct nvbios_outp *info)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvkm_output *outp;
	u16 mask, type;

	if (or < 4) {
		type = DCB_OUTPUT_ANALOG;
		mask = 0;
	} else
	if (or < 8) {
		switch (ctrl & 0x00000f00) {
		case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
		case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
		case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
		case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
		default:
			nvkm_error(subdev, "unknown SOR mc %08x\n", ctrl);
			return NULL;
		}
		or -= 4;
	} else {
		or = or - 8;
		type = 0x0010;
		mask = 0;
		switch (ctrl & 0x00000f00) {
		case 0x00000000: type |= disp->pior.type[or]; break;
		default:
			nvkm_error(subdev, "unknown PIOR mc %08x\n", ctrl);
			return NULL;
		}
	}

	mask = 0x00c0 & (mask << 6);
	mask |= 0x0001 << or;
	mask |= 0x0100 << head;

	list_for_each_entry(outp, &disp->base.outp, head) {
		if ((outp->info.hasht & 0xff) == type &&
		    (outp->info.hashm & mask) == mask) {
			*data = nvbios_outp_match(bios, outp->info.hasht,
						  outp->info.hashm,
						  ver, hdr, cnt, len, info);
			if (!*data)
				return NULL;
			return outp;
		}
	}

	return NULL;
}

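/* exec_script() figures out which output (DAC, SOR or PIOR) is driving the
 * given head by scanning the supervisor copies of the output control
 * registers, resolves it to a DCB/VBIOS output table entry via exec_lookup()
 * above, and runs the requested script for it.  exec_clkcmp() does the same
 * but selects the script from the clock-comparison table matching the output
 * configuration and pixel clock.
 */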
static struct nvkm_output *
exec_script(struct nv50_disp *disp, int head, int id)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_output *outp;
	struct nvbios_outp info;
	u8 ver, hdr, cnt, len;
	u32 data, ctrl = 0;
	u32 reg;
	int i;

	/* DAC */
	for (i = 0; !(ctrl & (1 << head)) && i < disp->dac.nr; i++)
		ctrl = nvkm_rd32(device, 0x610b5c + (i * 8));

	/* SOR */
	if (!(ctrl & (1 << head))) {
		if (nv_device(disp)->chipset < 0x90 ||
		    nv_device(disp)->chipset == 0x92 ||
		    nv_device(disp)->chipset == 0xa0) {
			reg = 0x610b74;
		} else {
			reg = 0x610798;
		}
		for (i = 0; !(ctrl & (1 << head)) && i < disp->sor.nr; i++)
			ctrl = nvkm_rd32(device, reg + (i * 8));
		i += 4;
	}

	/* PIOR */
	if (!(ctrl & (1 << head))) {
		for (i = 0; !(ctrl & (1 << head)) && i < disp->pior.nr; i++)
			ctrl = nvkm_rd32(device, 0x610b84 + (i * 8));
		i += 8;
	}

	if (!(ctrl & (1 << head)))
		return NULL;
	i--;

	outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
	if (outp) {
		struct nvbios_init init = {
			.subdev = nv_subdev(disp),
			.bios = bios,
			.offset = info.script[id],
			.outp = &outp->info,
			.crtc = head,
			.execute = 1,
		};

		nvbios_exec(&init);
	}

	return outp;
}

static struct nvkm_output *
exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_output *outp;
	struct nvbios_outp info1;
	struct nvbios_ocfg info2;
	u8 ver, hdr, cnt, len;
	u32 data, ctrl = 0;
	u32 reg;
	int i;

	/* DAC */
	for (i = 0; !(ctrl & (1 << head)) && i < disp->dac.nr; i++)
		ctrl = nvkm_rd32(device, 0x610b58 + (i * 8));

	/* SOR */
	if (!(ctrl & (1 << head))) {
		if (nv_device(disp)->chipset < 0x90 ||
		    nv_device(disp)->chipset == 0x92 ||
		    nv_device(disp)->chipset == 0xa0) {
			reg = 0x610b70;
		} else {
			reg = 0x610794;
		}
		for (i = 0; !(ctrl & (1 << head)) && i < disp->sor.nr; i++)
			ctrl = nvkm_rd32(device, reg + (i * 8));
		i += 4;
	}

	/* PIOR */
	if (!(ctrl & (1 << head))) {
		for (i = 0; !(ctrl & (1 << head)) && i < disp->pior.nr; i++)
			ctrl = nvkm_rd32(device, 0x610b80 + (i * 8));
		i += 8;
	}

	if (!(ctrl & (1 << head)))
		return NULL;
	i--;

	outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
	if (!outp)
		return NULL;

	if (outp->info.location == 0) {
		switch (outp->info.type) {
		case DCB_OUTPUT_TMDS:
			*conf = (ctrl & 0x00000f00) >> 8;
			if (pclk >= 165000)
				*conf |= 0x0100;
			break;
		case DCB_OUTPUT_LVDS:
			*conf = disp->sor.lvdsconf;
			break;
		case DCB_OUTPUT_DP:
			*conf = (ctrl & 0x00000f00) >> 8;
			break;
		case DCB_OUTPUT_ANALOG:
		default:
			*conf = 0x00ff;
			break;
		}
	} else {
		*conf = (ctrl & 0x00000f00) >> 8;
		pclk = pclk / 2;
	}

	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
	if (data && id < 0xff) {
		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
		if (data) {
			struct nvbios_init init = {
				.subdev = nv_subdev(disp),
				.bios = bios,
				.offset = data,
				.outp = &outp->info,
				.crtc = head,
				.execute = 1,
			};

			nvbios_exec(&init);
		}
	}

	return outp;
}

static void
nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
{
	exec_script(disp, head, 1);
}

static void
nv50_disp_intr_unk20_0(struct nv50_disp *disp, int head)
{
	struct nvkm_output *outp = exec_script(disp, head, 2);

	/* the binary driver does this outside of the supervisor handling
	 * (after the third supervisor from a detach).  we (currently?)
	 * allow both detach/attach to happen in the same set of
	 * supervisor interrupts, so it would make sense to execute this
	 * (full power down?) script after all the detach phases of the
	 * supervisor handling.  like with training if needed from the
	 * second supervisor, nvidia doesn't do this, so who knows if it's
	 * entirely safe, but it does appear to work..
	 *
	 * without this script being run, on some configurations i've
	 * seen, switching from DP to TMDS on a DP connector may result
	 * in a blank screen (SOR_PWR off/on can restore it)
	 */
	if (outp && outp->info.type == DCB_OUTPUT_DP) {
		struct nvkm_output_dp *outpdp = (void *)outp;
		struct nvbios_init init = {
			.subdev = nv_subdev(disp),
			.bios = nvkm_bios(disp),
			.outp = &outp->info,
			.crtc = head,
			.offset = outpdp->info.script[4],
			.execute = 1,
		};

		nvbios_exec(&init);
		atomic_set(&outpdp->lt.done, 0);
	}
}

static void
nv50_disp_intr_unk20_1(struct nv50_disp *disp, int head)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_devinit *devinit = device->devinit;
	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
	if (pclk)
		devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
}

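/* Program the SOR's DP symbol scheduling for a head: the number of symbols
 * available per hblank/vblank period and the "transfer unit" (TU) parameters
 * that control how pixel data is packed into link symbols, derived from the
 * pixel clock, link rate/width and bits per pixel.
 */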
static void
nv50_disp_intr_unk20_2_dp(struct nv50_disp *disp, int head,
			  struct dcb_output *outp, u32 pclk)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const int link = !(outp->sorconf.link & 1);
	const int or = ffs(outp->or) - 1;
	const u32 soff = (or * 0x800);
	const u32 loff = (link * 0x080) + soff;
	const u32 ctrl = nvkm_rd32(device, 0x610794 + (or * 8));
	const u32 symbol = 100000;
	const s32 vactive = nvkm_rd32(device, 0x610af8 + (head * 0x540)) & 0xffff;
	const s32 vblanke = nvkm_rd32(device, 0x610ae8 + (head * 0x540)) & 0xffff;
	const s32 vblanks = nvkm_rd32(device, 0x610af0 + (head * 0x540)) & 0xffff;
	u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff);
	u32 clksor = nvkm_rd32(device, 0x614300 + soff);
	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
	int TU, VTUi, VTUf, VTUa;
	u64 link_data_rate, link_ratio, unk;
	u32 best_diff = 64 * symbol;
	u32 link_nr, link_bw, bits;
	u64 value;

	link_bw = (clksor & 0x000c0000) ? 270000 : 162000;
	link_nr = hweight32(dpctrl & 0x000f0000);

	/* symbols/hblank - algorithm taken from comments in tegra driver */
	value = vblanke + vactive - vblanks - 7;
	value = value * link_bw;
	do_div(value, pclk);
	value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
	nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, value);

	/* symbols/vblank - algorithm taken from comments in tegra driver */
	value = vblanks - vblanke - 25;
	value = value * link_bw;
	do_div(value, pclk);
	value = value - ((36 / link_nr) + 3) - 1;
	nvkm_mask(device, 0x61c1ec + soff, 0x00ffffff, value);

	/* watermark / activesym */
	if ((ctrl & 0xf0000) == 0x60000)
		bits = 30;
	else if ((ctrl & 0xf0000) == 0x50000)
		bits = 24;
	else
		bits = 18;

	link_data_rate = (pclk * bits / 8) / link_nr;

	/* calculate ratio of packed data rate to link symbol rate */
	link_ratio = link_data_rate * symbol;
	do_div(link_ratio, link_bw);

	for (TU = 64; TU >= 32; TU--) {
		/* calculate average number of valid symbols in each TU */
		u32 tu_valid = link_ratio * TU;
		u32 calc, diff;

		/* find a hw representation for the fraction.. */
		VTUi = tu_valid / symbol;
		calc = VTUi * symbol;
		diff = tu_valid - calc;
		if (diff) {
			if (diff >= (symbol / 2)) {
				VTUf = symbol / (symbol - diff);
				if (symbol - (VTUf * diff))
					VTUf++;

				if (VTUf <= 15) {
					VTUa = 1;
					calc += symbol - (symbol / VTUf);
				} else {
					VTUa = 0;
					VTUf = 1;
					calc += symbol;
				}
			} else {
				VTUa = 0;
				VTUf = min((int)(symbol / diff), 15);
				calc += symbol / VTUf;
			}

			diff = calc - tu_valid;
		} else {
			/* no remainder, but the hw doesn't like the fractional
			 * part to be zero.  decrement the integer part and
			 * have the fraction add a whole symbol back
			 */
			VTUa = 0;
			VTUf = 1;
			VTUi--;
		}

		if (diff < best_diff) {
			best_diff = diff;
			bestTU = TU;
			bestVTUa = VTUa;
			bestVTUf = VTUf;
			bestVTUi = VTUi;
			if (diff == 0)
				break;
		}
	}

	if (!bestTU) {
		nvkm_error(subdev, "unable to find suitable dp config\n");
		return;
	}

	/* XXX close to vbios numbers, but not right */
	unk = (symbol - link_ratio) * bestTU;
	unk *= link_ratio;
	do_div(unk, symbol);
	do_div(unk, symbol);
	unk += 6;

	nvkm_mask(device, 0x61c10c + loff, 0x000001fc, bestTU << 2);
	nvkm_mask(device, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
						       bestVTUf << 16 |
						       bestVTUi << 8 | unk);
}

static void
nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_output *outp;
	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
	u32 hval, hreg = 0x614200 + (head * 0x800);
	u32 oval, oreg;
	u32 mask, conf;

	outp = exec_clkcmp(disp, head, 0xff, pclk, &conf);
	if (!outp)
		return;

	/* we allow both encoder attach and detach operations to occur
	 * within a single supervisor (ie. modeset) sequence.  the
	 * encoder detach scripts quite often switch off power to the
	 * lanes, which requires the link to be re-trained.
	 *
	 * this is not generally an issue as the sink "must" (heh)
	 * signal an irq when it's lost sync so the driver can
	 * re-train.
	 *
	 * however, on some boards, if one does not configure at least
	 * the gpu side of the link *before* attaching, then various
	 * things can go horribly wrong (PDISP disappearing from mmio,
	 * third supervisor never happens, etc).
	 *
	 * the solution is simply to retrain here, if necessary.  last
	 * i checked, the binary driver userspace does not appear to
	 * trigger this situation (it forces an UPDATE between steps).
	 */
	if (outp->info.type == DCB_OUTPUT_DP) {
		u32 soff = (ffs(outp->info.or) - 1) * 0x08;
		u32 ctrl, datarate;

		if (outp->info.location == 0) {
			ctrl = nvkm_rd32(device, 0x610794 + soff);
			soff = 1;
		} else {
			ctrl = nvkm_rd32(device, 0x610b80 + soff);
			soff = 2;
		}

		switch ((ctrl & 0x000f0000) >> 16) {
		case 6: datarate = pclk * 30; break;
		case 5: datarate = pclk * 24; break;
		case 2:
		default:
			datarate = pclk * 18;
			break;
		}

		if (nvkm_output_dp_train(outp, datarate / soff, true))
			ERR("link not trained before attach\n");
	}

	exec_clkcmp(disp, head, 0, pclk, &conf);

	if (!outp->info.location && outp->info.type == DCB_OUTPUT_ANALOG) {
		oreg = 0x614280 + (ffs(outp->info.or) - 1) * 0x800;
		oval = 0x00000000;
		hval = 0x00000000;
		mask = 0xffffffff;
	} else
	if (!outp->info.location) {
		if (outp->info.type == DCB_OUTPUT_DP)
			nv50_disp_intr_unk20_2_dp(disp, head, &outp->info, pclk);
		oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800;
		oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
		hval = 0x00000000;
		mask = 0x00000707;
	} else {
		oreg = 0x614380 + (ffs(outp->info.or) - 1) * 0x800;
		oval = 0x00000001;
		hval = 0x00000001;
		mask = 0x00000707;
	}

	nvkm_mask(device, hreg, 0x0000000f, hval);
	nvkm_mask(device, oreg, mask, oval);
}

/* If programming a TMDS output on a SOR that can also be configured for
 * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
 *
 * It looks like the VBIOS TMDS scripts make an attempt at this, however,
 * the VBIOS scripts on at least one board I have only switch it off on
 * link 0, causing a blank display if the output has previously been
 * programmed for DisplayPort.
 */
static void
nv50_disp_intr_unk40_0_tmds(struct nv50_disp *disp,
			    struct dcb_output *outp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	const int link = !(outp->sorconf.link & 1);
	const int or = ffs(outp->or) - 1;
	const u32 loff = (or * 0x800) + (link * 0x80);
	const u16 mask = (outp->sorconf.link << 6) | outp->or;
	struct dcb_output match;
	u8 ver, hdr;

	if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match))
		nvkm_mask(device, 0x61c10c + loff, 0x00000001, 0x00000000);
}

static void
nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_output *outp;
	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
	u32 conf;

	outp = exec_clkcmp(disp, head, 1, pclk, &conf);
	if (!outp)
		return;

	if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
		nv50_disp_intr_unk40_0_tmds(disp, &outp->info);
}

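/* Supervisor interrupts arrive in three stages during a modeset (0x10, 0x20
 * and 0x40 in disp->super).  The interrupt handler latches which stage fired
 * and schedules this work function, which runs the per-head VBIOS scripts,
 * sets the head's pixel clock and reconfigures the outputs as appropriate
 * for that stage, then writes 0x610030 to let the hardware continue.
 */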
void
nv50_disp_intr_supervisor(struct work_struct *work)
{
	struct nv50_disp *disp =
		container_of(work, struct nv50_disp, supervisor);
	struct nv50_disp_impl *impl = (void *)nv_object(disp)->oclass;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 super = nvkm_rd32(device, 0x610030);
	int head;

	nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super, super);

	if (disp->super & 0x00000010) {
		nv50_disp_mthd_chan(disp, NV_DBG_DEBUG, 0, impl->mthd.core);
		for (head = 0; head < disp->head.nr; head++) {
			if (!(super & (0x00000020 << head)))
				continue;
			if (!(super & (0x00000080 << head)))
				continue;
			nv50_disp_intr_unk10_0(disp, head);
		}
	} else
	if (disp->super & 0x00000020) {
		for (head = 0; head < disp->head.nr; head++) {
			if (!(super & (0x00000080 << head)))
				continue;
			nv50_disp_intr_unk20_0(disp, head);
		}
		for (head = 0; head < disp->head.nr; head++) {
			if (!(super & (0x00000200 << head)))
				continue;
			nv50_disp_intr_unk20_1(disp, head);
		}
		for (head = 0; head < disp->head.nr; head++) {
			if (!(super & (0x00000080 << head)))
				continue;
			nv50_disp_intr_unk20_2(disp, head);
		}
	} else
	if (disp->super & 0x00000040) {
		for (head = 0; head < disp->head.nr; head++) {
			if (!(super & (0x00000080 << head)))
				continue;
			nv50_disp_intr_unk40_0(disp, head);
		}
	}

	nvkm_wr32(device, 0x610030, 0x80000000);
}

void
nv50_disp_intr(struct nvkm_subdev *subdev)
{
	struct nv50_disp *disp = (void *)subdev;
	struct nvkm_device *device = disp->base.engine.subdev.device;
	u32 intr0 = nvkm_rd32(device, 0x610020);
	u32 intr1 = nvkm_rd32(device, 0x610024);

	while (intr0 & 0x001f0000) {
		u32 chid = __ffs(intr0 & 0x001f0000) - 16;
		nv50_disp_intr_error(disp, chid);
		intr0 &= ~(0x00010000 << chid);
	}

	while (intr0 & 0x0000001f) {
		u32 chid = __ffs(intr0 & 0x0000001f);
		nv50_disp_chan_uevent_send(disp, chid);
		intr0 &= ~(0x00000001 << chid);
	}

	if (intr1 & 0x00000004) {
		nvkm_disp_vblank(&disp->base, 0);
		nvkm_wr32(device, 0x610024, 0x00000004);
	}

	if (intr1 & 0x00000008) {
		nvkm_disp_vblank(&disp->base, 1);
		nvkm_wr32(device, 0x610024, 0x00000008);
	}

	if (intr1 & 0x00000070) {
		disp->super = (intr1 & 0x00000070);
		schedule_work(&disp->supervisor);
		nvkm_wr32(device, 0x610024, disp->super);
	}
}

static int
nv50_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv50_disp *disp;
	int ret;

	ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
			       "display", &disp);
	*pobject = nv_object(disp);
	if (ret)
		return ret;

	ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &disp->uevent);
	if (ret)
		return ret;

	nv_engine(disp)->sclass = nv50_disp_main_oclass;
	nv_engine(disp)->cclass = &nv50_disp_cclass;
	nv_subdev(disp)->intr = nv50_disp_intr;
	INIT_WORK(&disp->supervisor, nv50_disp_intr_supervisor);
	disp->sclass = nv50_disp_sclass;
	disp->head.nr = 2;
	disp->dac.nr = 3;
	disp->sor.nr = 2;
	disp->pior.nr = 3;
	disp->dac.power = nv50_dac_power;
	disp->dac.sense = nv50_dac_sense;
	disp->sor.power = nv50_sor_power;
	disp->pior.power = nv50_pior_power;
	return 0;
}

struct nvkm_oclass *
nv50_disp_outp_sclass[] = {
	&nv50_pior_dp_impl.base.base,
	NULL
};

struct nvkm_oclass *
nv50_disp_oclass = &(struct nv50_disp_impl) {
	.base.base.handle = NV_ENGINE(DISP, 0x50),
	.base.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_disp_ctor,
		.dtor = _nvkm_disp_dtor,
		.init = _nvkm_disp_init,
		.fini = _nvkm_disp_fini,
	},
	.base.vblank = &nv50_disp_vblank_func,
	.base.outp = nv50_disp_outp_sclass,
	.mthd.core = &nv50_disp_core_mthd_chan,
	.mthd.base = &nv50_disp_base_mthd_chan,
	.mthd.ovly = &nv50_disp_ovly_mthd_chan,
	.mthd.prev = 0x000004,
	.head.scanoutpos = nv50_disp_main_scanoutpos,
}.base.base;