1 /* 2 * Copyright 2012 Red Hat Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"
#include "outp.h"
#include "outpdp.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <core/enum.h>
#include <core/handle.h>
#include <core/ramht.h>
#include <engine/dmaobj.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/disp.h>
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/devinit.h>
#include <subdev/fb.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/event.h>
#include <nvif/unpack.h>

/*******************************************************************************
 * EVO channel base class
 ******************************************************************************/

/* Common constructor for all EVO channel types.  The global channel id is
 * the implementation's base chid plus the head index; a bit in the parent
 * display object's ->chan mask tracks which ids are in use, so a second
 * attempt to create the same channel fails with -EBUSY.
 */
static int
nv50_disp_chan_create_(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, int head,
		       int length, void **pobject)
{
	const struct nv50_disp_chan_impl *impl = (void *)oclass->ofuncs;
	struct nv50_disp_base *base = (void *)parent;
	struct nv50_disp_chan *chan;
	int chid = impl->chid + head;
	int ret;

	/* reject duplicate creation of the same channel id */
	if (base->chan & (1 << chid))
		return -EBUSY;
	base->chan |= (1 << chid);

	/* only DMA objects may be attached to a display channel */
	ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
				  (1ULL << NVDEV_ENGINE_DMAOBJ),
				  length, pobject);
	chan = *pobject;
	if (ret)
		return ret;
	chan->chid = chid;

	/* hook per-implementation ctxdma attach/detach handlers */
	nv_parent(chan)->object_attach = impl->attach;
	nv_parent(chan)->object_detach = impl->detach;
	return 0;
}

/* Release the channel id bit taken in nv50_disp_chan_create_() and tear
 * down the namedb base object.
 */
static void
nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
{
	struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
	base->chan &= ~(1 << chan->chid);
	nvkm_namedb_destroy(&chan->base);
}

/* Disable (and acknowledge any pending) completion interrupt for the
 * channel identified by 'index' (one bit per channel in 0x610028/0x610020).
 */
static void
nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
	struct nvkm_device *device = disp->base.engine.subdev.device;
	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000000 << index);
	nvkm_wr32(device, 0x610020, 0x00000001 << index);
}

/* Clear any stale pending bit, then enable the completion interrupt for
 * the channel identified by 'index'.
 */
static void
nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
{
	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
	struct nvkm_device *device = disp->base.engine.subdev.device;
	nvkm_wr32(device, 0x610020, 0x00000001 << index);
	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000001 << index);
}

/* Deliver a (payload-less) completion notification for channel 'chid'. */
void
nv50_disp_chan_uevent_send(struct nv50_disp *disp, int chid)
{
	struct nvif_notify_uevent_rep {
	} rep;

	nvkm_event_send(&disp->uevent, 1, chid, &rep, sizeof(rep));
}

/* Validate a user request for completion notifications on this channel
 * and fill in the notify routing (type/index).  'ret' is assigned inside
 * the nvif_unvers() unpack macro before the failing return path is taken.
 */
int
nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
			   struct nvkm_notify *notify)
{
	struct nv50_disp_dmac *dmac = (void *)object;
	union {
		struct nvif_notify_uevent_req none;
	} *args = data;
	int ret;

	if (nvif_unvers(args->none)) {
		notify->size = sizeof(struct nvif_notify_uevent_rep);
		notify->types = 1;
		notify->index = dmac->base.chid;
		return 0;
	}

	return ret;
}

const struct nvkm_event_func
nv50_disp_chan_uevent = {
	.ctor = nv50_disp_chan_uevent_ctor,
	.init = nv50_disp_chan_uevent_init,
	.fini = nv50_disp_chan_uevent_fini,
};

/* Return the event source for the requested notification type; only the
 * channel-completion uevent is supported here.
 */
int
nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
		    struct nvkm_event **pevent)
{
	struct nv50_disp *disp = (void *)object->engine;
	switch (type) {
	case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
		*pevent = &disp->uevent;
		return 0;
	default:
		break;
	}
	return -EINVAL;
}

/* Map the channel's 4KiB user register window (BAR0 + 0x640000 + chid*0x1000)
 * into the client's address space.
 */
int
nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
{
	struct nv50_disp_chan *chan = (void *)object;
	*addr = nv_device_resource_start(nv_device(object), 0) +
		0x640000 + (chan->chid * 0x1000);
	*size = 0x001000;
	return 0;
}

/* Read a register from the channel's user window. */
u32
nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr)
{
	struct nv50_disp_chan *chan = (void *)object;
	struct nvkm_device *device = object->engine->subdev.device;
	return nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
}

/* Write a register in the channel's user window. */
void
nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
	struct nv50_disp_chan *chan = (void *)object;
	struct nvkm_device *device = object->engine->subdev.device;
	nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
}

/*******************************************************************************
 * EVO DMA channel base class
 ******************************************************************************/

/* Insert a ctxdma object into the display's RAMHT so EVO methods can
 * reference it by handle.  The data word packs the channel id and the
 * gpuobj node offset in the layout the hardware expects.
 */
static int
nv50_disp_dmac_object_attach(struct nvkm_object *parent,
			     struct nvkm_object *object, u32 name)
{
	struct nv50_disp_base *base = (void *)parent->parent;
	struct nv50_disp_chan *chan = (void *)parent;
	u32 addr = nv_gpuobj(object)->node->offset;
	u32 chid = chan->chid;
	u32 data = (chid << 28) | (addr << 10) | chid;
	return nvkm_ramht_insert(base->ramht, chid, name, data);
}

/* Remove a previously attached ctxdma from the display RAMHT. */
static void
nv50_disp_dmac_object_detach(struct nvkm_object *parent, int cookie)
{
	struct nv50_disp_base *base = (void *)parent->parent;
	nvkm_ramht_remove(base->ramht, cookie);
}

/* Common constructor for DMA-mode EVO channels: create the base channel,
 * then look up and validate the client's push buffer ctxdma.  Only 4KiB
 * (limit - start == 0xfff) VRAM or PCI(nosnoop) buffers are accepted;
 * ->push caches the pre-formatted PUSHBUF register value.
 */
static int
nv50_disp_dmac_create_(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, u64 pushbuf, int head,
		       int length, void **pobject)
{
	struct nvkm_client *client = nvkm_client(parent);
	struct nvkm_handle *handle;
	struct nvkm_dmaobj *dmaobj;
	struct nv50_disp_dmac *dmac;
	int ret;

	ret = nv50_disp_chan_create_(parent, engine, oclass, head,
				     length, pobject);
	dmac = *pobject;
	if (ret)
		return ret;

	handle = nvkm_client_search(client, pushbuf);
	if (!handle)
		return -ENOENT;
	dmaobj = (void *)handle->object;

	switch (nv_mclass(dmaobj)) {
	case 0x0002:
	case 0x003d:
		/* push buffer must be exactly 4KiB */
		if (dmaobj->limit - dmaobj->start != 0xfff)
			return -EINVAL;

		switch (dmaobj->target) {
		case NV_MEM_TARGET_VRAM:
			dmac->push = 0x00000001 | dmaobj->start >> 8;
			break;
		case NV_MEM_TARGET_PCI_NOSNOOP:
			dmac->push = 0x00000003 | dmaobj->start >> 8;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Destructor shared by all DMA-mode EVO channels. */
void
nv50_disp_dmac_dtor(struct nvkm_object *object)
{
	struct nv50_disp_dmac *dmac = (void *)object;
	nv50_disp_chan_destroy(&dmac->base);
}

/* Bring a DMA-mode channel up: enable error reporting, program the push
 * buffer and kick the channel, then poll (up to 2ms) for the init-busy
 * bit to clear.  Register sequence is order-sensitive.
 */
static int
nv50_disp_dmac_init(struct nvkm_object *object)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_dmac *dmac = (void *)object;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int chid = dmac->base.chid;
	int ret;

	ret = nv50_disp_chan_init(&dmac->base);
	if (ret)
		return ret;

	/* enable error reporting */
	nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);

	/* initialise channel for dma command submission */
	nvkm_wr32(device, 0x610204 + (chid * 0x0010), dmac->push);
	nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
	nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
	nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);

	/* wait for it to go inactive */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
		return -EBUSY;
	}

	return 0;
}

/* Tear a DMA-mode channel down: deactivate it, poll (up to 2ms) for the
 * state bits to clear, then mask off its error/completion interrupts.
 * A timeout only aborts when suspending; otherwise teardown continues.
 */
static int
nv50_disp_dmac_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_dmac *dmac = (void *)object;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int chid = dmac->base.chid;

	/* deactivate channel */
	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
		if (suspend)
			return -EBUSY;
	}

	/* disable error reporting and completion notifications */
	nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);

	return nv50_disp_chan_fini(&dmac->base, suspend);
}

/*******************************************************************************
 * EVO master channel object
 ******************************************************************************/

/* Debug helper: dump one method list for instance 'inst', printing each
 * method's shadow value at offset 'c' and flagging entries whose armed
 * value (offset 0) differs with "-> %08x".
 */
static void
nv50_disp_mthd_list(struct nv50_disp *disp, int debug, u32 base, int c,
		    const struct nv50_disp_mthd_list *list, int inst)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int i;

	for (i = 0; list->data[i].mthd; i++) {
		if (list->data[i].addr) {
			u32 next = nvkm_rd32(device, list->data[i].addr + base + 0);
			u32 prev = nvkm_rd32(device, list->data[i].addr + base + c);
			u32 mthd = list->data[i].mthd + (list->mthd * inst);
			const char *name = list->data[i].name;
			char mods[16];

			if (prev != next)
				snprintf(mods, sizeof(mods), "-> %08x", next);
			else
				snprintf(mods, sizeof(mods), "%13c", ' ');

			nvkm_printk_(subdev, debug, info,
				     "\t%04x: %08x %s%s%s\n",
				     mthd, prev, mods, name ? " // " : "",
				     name ? name : "");
		}
	}
}

/* Debug helper: dump every method list belonging to a channel type,
 * iterating all sub-instances (DACs, SORs, heads, ...) of each list.
 * Skipped entirely unless 'debug' is at or below the subdev debug level.
 */
void
nv50_disp_mthd_chan(struct nv50_disp *disp, int debug, int head,
		    const struct nv50_disp_mthd_chan *chan)
{
	struct nvkm_object *object = nv_object(disp);
	const struct nv50_disp_impl *impl = (void *)object->oclass;
	const struct nv50_disp_mthd_list *list;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	int i, j;

	if (debug > nv_subdev(disp)->debug)
		return;

	for (i = 0; (list = chan->data[i].mthd) != NULL; i++) {
		u32 base = head * chan->addr;
		for (j = 0; j < chan->data[i].nr; j++, base += list->addr) {
			const char *cname = chan->name;
			const char *sname = "";
			char cname_[16], sname_[16];

			/* per-head channels get the head number appended */
			if (chan->addr) {
				snprintf(cname_, sizeof(cname_), "%s %d",
					 chan->name, head);
				cname = cname_;
			}

			/* multi-instance lists get the instance appended */
			if (chan->data[i].nr > 1) {
				snprintf(sname_, sizeof(sname_), " - %s %d",
					 chan->data[i].name, j);
				sname = sname_;
			}

			nvkm_printk_(subdev, debug, info, "%s%s:\n", cname, sname);
			nv50_disp_mthd_list(disp, debug, base, impl->mthd.prev,
					    list, j);
		}
	}
}

/* Core channel method shadow-register tables: each entry maps an EVO
 * method to the MMIO address where its current value is mirrored
 * (0x000000 == no shadow register known).
 */
const struct nv50_disp_mthd_list
nv50_disp_core_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x000000 },
		{ 0x0084, 0x610bb8 },
		{ 0x0088, 0x610b9c },
		{ 0x008c, 0x000000 },
		{}
	}
};

static const struct nv50_disp_mthd_list
nv50_disp_core_mthd_dac = {
	.mthd = 0x0080,
	.addr = 0x000008,
	.data = {
		{ 0x0400, 0x610b58 },
		{ 0x0404, 0x610bdc },
		{ 0x0420, 0x610828 },
		{}
	}
};

const struct nv50_disp_mthd_list
nv50_disp_core_mthd_sor = {
	.mthd = 0x0040,
	.addr = 0x000008,
	.data = {
		{ 0x0600, 0x610b70 },
		{}
	}
};

const struct nv50_disp_mthd_list
nv50_disp_core_mthd_pior = {
	.mthd = 0x0040,
	.addr = 0x000008,
	.data = {
		{ 0x0700, 0x610b80 },
		{}
	}
};

static const struct nv50_disp_mthd_list
nv50_disp_core_mthd_head = {
	.mthd = 0x0400,
	.addr = 0x000540,
	.data = {
		{ 0x0800, 0x610ad8 },
		{ 0x0804, 0x610ad0 },
		{ 0x0808, 0x610a48 },
		{ 0x080c, 0x610a78 },
		{ 0x0810, 0x610ac0 },
		{ 0x0814, 0x610af8 },
		{ 0x0818, 0x610b00 },
		{ 0x081c, 0x610ae8 },
		{ 0x0820, 0x610af0 },
		{ 0x0824, 0x610b08 },
		{ 0x0828, 0x610b10 },
		{ 0x082c, 0x610a68 },
		{ 0x0830, 0x610a60 },
		{ 0x0834, 0x000000 },
		{ 0x0838, 0x610a40 },
		{ 0x0840, 0x610a24 },
		{ 0x0844, 0x610a2c },
		{ 0x0848, 0x610aa8 },
		{ 0x084c, 0x610ab0 },
		{ 0x0860, 0x610a84 },
		{ 0x0864, 0x610a90 },
		{ 0x0868, 0x610b18 },
		{ 0x086c, 0x610b20 },
		{ 0x0870, 0x610ac8 },
		{ 0x0874, 0x610a38 },
		{ 0x0880, 0x610a58 },
		{ 0x0884, 0x610a9c },
		{ 0x08a0, 0x610a70 },
		{ 0x08a4, 0x610a50 },
		{ 0x08a8, 0x610ae0 },
		{ 0x08c0, 0x610b28 },
		{ 0x08c4, 0x610b30 },
		{ 0x08c8, 0x610b40 },
		{ 0x08d4, 0x610b38 },
		{ 0x08d8, 0x610b48 },
		{ 0x08dc, 0x610b50 },
		{ 0x0900, 0x610a18 },
		{ 0x0904, 0x610ab8 },
		{}
	}
};

static const struct nv50_disp_mthd_chan
nv50_disp_core_mthd_chan = {
	.name = "Core",
	.addr = 0x000000,
	.data = {
		{ "Global", 1, &nv50_disp_core_mthd_base },
		{    "DAC", 3, &nv50_disp_core_mthd_dac  },
		{    "SOR", 2, &nv50_disp_core_mthd_sor  },
		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
		{   "HEAD", 2, &nv50_disp_core_mthd_head },
		{}
	}
};

/* Create the EVO core (master) channel; always head 0.  'ret' is set
 * inside the nvif_unpack() macro before the "else return ret" path.
 */
int
nv50_disp_core_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv50_disp_core_channel_dma_v0 v0;
	} *args = data;
	struct nv50_disp_dmac *mast;
	int ret;

	nvif_ioctl(parent, "create disp core channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create disp core channel dma vers %d "
				   "pushbuf %016llx\n",
			   args->v0.version, args->v0.pushbuf);
	} else
		return ret;

	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
				     0, sizeof(*mast), (void **)&mast);
	*pobject = nv_object(mast);
	if (ret)
		return ret;

	return 0;
}

/* Core channel init: like nv50_disp_dmac_init() but at the fixed core
 * channel registers, with extra "unstick" workarounds for unknown
 * channel states before (re)starting DMA submission.
 */
static int
nv50_disp_core_init(struct nvkm_object *object)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_dmac *mast = (void *)object;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret;

	ret = nv50_disp_chan_init(&mast->base);
	if (ret)
		return ret;

	/* enable error reporting */
	nvkm_mask(device, 0x610028, 0x00010000, 0x00010000);

	/* attempt to unstick channel from some unknown state */
	if ((nvkm_rd32(device, 0x610200) & 0x009f0000) == 0x00020000)
		nvkm_mask(device, 0x610200, 0x00800000, 0x00800000);
	if ((nvkm_rd32(device, 0x610200) & 0x003f0000) == 0x00030000)
		nvkm_mask(device, 0x610200, 0x00600000, 0x00600000);

	/* initialise channel for dma command submission */
	nvkm_wr32(device, 0x610204, mast->push);
	nvkm_wr32(device, 0x610208, 0x00010000);
	nvkm_wr32(device, 0x61020c, 0x00000000);
	nvkm_mask(device, 0x610200, 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x640000, 0x00000000);
	nvkm_wr32(device, 0x610200, 0x01000013);

	/* wait for it to go inactive */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200) & 0x80000000))
			break;
	) < 0) {
		nvkm_error(subdev, "core init: %08x\n",
			   nvkm_rd32(device, 0x610200));
		return -EBUSY;
	}

	return 0;
}

/* Core channel fini: deactivate, wait for idle (abort only on suspend
 * timeout), then disable error/completion interrupts.
 */
static int
nv50_disp_core_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_dmac *mast = (void *)object;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;

	/* deactivate channel */
	nvkm_mask(device, 0x610200, 0x00000010, 0x00000000);
	nvkm_mask(device, 0x610200, 0x00000003, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200) & 0x001e0000))
			break;
	) < 0) {
		nvkm_error(subdev, "core fini: %08x\n",
			   nvkm_rd32(device, 0x610200));
		if (suspend)
			return -EBUSY;
	}

	/* disable error reporting and completion notifications */
	nvkm_mask(device, 0x610028, 0x00010001, 0x00000000);

	return nv50_disp_chan_fini(&mast->base, suspend);
}

struct nv50_disp_chan_impl
nv50_disp_core_ofuncs = {
	.base.ctor = nv50_disp_core_ctor,
	.base.dtor = nv50_disp_dmac_dtor,
	.base.init = nv50_disp_core_init,
	.base.fini = nv50_disp_core_fini,
	.base.map = nv50_disp_chan_map,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 0,
	.attach = nv50_disp_dmac_object_attach,
	.detach = nv50_disp_dmac_object_detach,
};

/*******************************************************************************
 * EVO sync channel objects
 ******************************************************************************/

static const struct nv50_disp_mthd_list
nv50_disp_base_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x000000 },
		{ 0x0084, 0x0008c4 },
		{ 0x0088, 0x0008d0 },
		{ 0x008c, 0x0008dc },
		{ 0x0090, 0x0008e4 },
		{ 0x0094, 0x610884 },
		{ 0x00a0, 0x6108a0 },
		{ 0x00a4, 0x610878 },
		{ 0x00c0, 0x61086c },
		{ 0x00e0, 0x610858 },
		{ 0x00e4, 0x610860 },
		{ 0x00e8, 0x6108ac },
		{ 0x00ec, 0x6108b4 },
		{ 0x0100, 0x610894 },
		{ 0x0110, 0x6108bc },
		{ 0x0114, 0x61088c },
		{}
	}
};

const struct nv50_disp_mthd_list
nv50_disp_base_mthd_image = {
	.mthd = 0x0400,
	.addr = 0x000000,
	.data = {
		{ 0x0800, 0x6108f0 },
		{ 0x0804, 0x6108fc },
		{ 0x0808, 0x61090c },
		{ 0x080c, 0x610914 },
		{ 0x0810, 0x610904 },
		{}
	}
};

static const struct nv50_disp_mthd_chan
nv50_disp_base_mthd_chan = {
	.name = "Base",
	.addr = 0x000540,
	.data = {
		{ "Global", 1, &nv50_disp_base_mthd_base },
		{  "Image", 2, &nv50_disp_base_mthd_image },
		{}
	}
};

/* Create a per-head base (sync) channel.  NOTE(review): validates
 * head > disp->head.nr, which still permits head == head.nr; the main
 * method dispatcher below rejects head >= head.nr — confirm whether
 * this is an off-by-one upstream.
 */
int
nv50_disp_base_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv50_disp_base_channel_dma_v0 v0;
	} *args = data;
	struct nv50_disp *disp = (void *)engine;
	struct nv50_disp_dmac *dmac;
	int ret;

	nvif_ioctl(parent, "create disp base channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create disp base channel dma vers %d "
				   "pushbuf %016llx head %d\n",
			   args->v0.version, args->v0.pushbuf, args->v0.head);
		if (args->v0.head > disp->head.nr)
			return -EINVAL;
	} else
		return ret;

	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
				     args->v0.head, sizeof(*dmac),
				     (void **)&dmac);
	*pobject = nv_object(dmac);
	if (ret)
		return ret;

	return 0;
}

struct nv50_disp_chan_impl
nv50_disp_base_ofuncs = {
	.base.ctor = nv50_disp_base_ctor,
	.base.dtor = nv50_disp_dmac_dtor,
	.base.init = nv50_disp_dmac_init,
	.base.fini = nv50_disp_dmac_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 1,
	.attach = nv50_disp_dmac_object_attach,
	.detach = nv50_disp_dmac_object_detach,
};

/*******************************************************************************
 * EVO overlay channel objects
******************************************************************************/ 723 724 const struct nv50_disp_mthd_list 725 nv50_disp_ovly_mthd_base = { 726 .mthd = 0x0000, 727 .addr = 0x000000, 728 .data = { 729 { 0x0080, 0x000000 }, 730 { 0x0084, 0x0009a0 }, 731 { 0x0088, 0x0009c0 }, 732 { 0x008c, 0x0009c8 }, 733 { 0x0090, 0x6109b4 }, 734 { 0x0094, 0x610970 }, 735 { 0x00a0, 0x610998 }, 736 { 0x00a4, 0x610964 }, 737 { 0x00c0, 0x610958 }, 738 { 0x00e0, 0x6109a8 }, 739 { 0x00e4, 0x6109d0 }, 740 { 0x00e8, 0x6109d8 }, 741 { 0x0100, 0x61094c }, 742 { 0x0104, 0x610984 }, 743 { 0x0108, 0x61098c }, 744 { 0x0800, 0x6109f8 }, 745 { 0x0808, 0x610a08 }, 746 { 0x080c, 0x610a10 }, 747 { 0x0810, 0x610a00 }, 748 {} 749 } 750 }; 751 752 static const struct nv50_disp_mthd_chan 753 nv50_disp_ovly_mthd_chan = { 754 .name = "Overlay", 755 .addr = 0x000540, 756 .data = { 757 { "Global", 1, &nv50_disp_ovly_mthd_base }, 758 {} 759 } 760 }; 761 762 int 763 nv50_disp_ovly_ctor(struct nvkm_object *parent, 764 struct nvkm_object *engine, 765 struct nvkm_oclass *oclass, void *data, u32 size, 766 struct nvkm_object **pobject) 767 { 768 union { 769 struct nv50_disp_overlay_channel_dma_v0 v0; 770 } *args = data; 771 struct nv50_disp *disp = (void *)engine; 772 struct nv50_disp_dmac *dmac; 773 int ret; 774 775 nvif_ioctl(parent, "create disp overlay channel dma size %d\n", size); 776 if (nvif_unpack(args->v0, 0, 0, false)) { 777 nvif_ioctl(parent, "create disp overlay channel dma vers %d " 778 "pushbuf %016llx head %d\n", 779 args->v0.version, args->v0.pushbuf, args->v0.head); 780 if (args->v0.head > disp->head.nr) 781 return -EINVAL; 782 } else 783 return ret; 784 785 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf, 786 args->v0.head, sizeof(*dmac), 787 (void **)&dmac); 788 *pobject = nv_object(dmac); 789 if (ret) 790 return ret; 791 792 return 0; 793 } 794 795 struct nv50_disp_chan_impl 796 nv50_disp_ovly_ofuncs = { 797 .base.ctor = nv50_disp_ovly_ctor, 798 .base.dtor = 
nv50_disp_dmac_dtor, 799 .base.init = nv50_disp_dmac_init, 800 .base.fini = nv50_disp_dmac_fini, 801 .base.ntfy = nv50_disp_chan_ntfy, 802 .base.map = nv50_disp_chan_map, 803 .base.rd32 = nv50_disp_chan_rd32, 804 .base.wr32 = nv50_disp_chan_wr32, 805 .chid = 3, 806 .attach = nv50_disp_dmac_object_attach, 807 .detach = nv50_disp_dmac_object_detach, 808 }; 809 810 /******************************************************************************* 811 * EVO PIO channel base class 812 ******************************************************************************/ 813 814 static int 815 nv50_disp_pioc_create_(struct nvkm_object *parent, 816 struct nvkm_object *engine, 817 struct nvkm_oclass *oclass, int head, 818 int length, void **pobject) 819 { 820 return nv50_disp_chan_create_(parent, engine, oclass, head, 821 length, pobject); 822 } 823 824 void 825 nv50_disp_pioc_dtor(struct nvkm_object *object) 826 { 827 struct nv50_disp_pioc *pioc = (void *)object; 828 nv50_disp_chan_destroy(&pioc->base); 829 } 830 831 static int 832 nv50_disp_pioc_init(struct nvkm_object *object) 833 { 834 struct nv50_disp *disp = (void *)object->engine; 835 struct nv50_disp_pioc *pioc = (void *)object; 836 struct nvkm_subdev *subdev = &disp->base.engine.subdev; 837 struct nvkm_device *device = subdev->device; 838 int chid = pioc->base.chid; 839 int ret; 840 841 ret = nv50_disp_chan_init(&pioc->base); 842 if (ret) 843 return ret; 844 845 nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000); 846 if (nvkm_msec(device, 2000, 847 if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000)) 848 break; 849 ) < 0) { 850 nvkm_error(subdev, "ch %d timeout0: %08x\n", chid, 851 nvkm_rd32(device, 0x610200 + (chid * 0x10))); 852 return -EBUSY; 853 } 854 855 nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001); 856 if (nvkm_msec(device, 2000, 857 u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10)); 858 if ((tmp & 0x00030000) == 0x00010000) 859 break; 860 ) < 0) { 861 nvkm_error(subdev, "ch %d 
timeout1: %08x\n", chid, 862 nvkm_rd32(device, 0x610200 + (chid * 0x10))); 863 return -EBUSY; 864 } 865 866 return 0; 867 } 868 869 static int 870 nv50_disp_pioc_fini(struct nvkm_object *object, bool suspend) 871 { 872 struct nv50_disp *disp = (void *)object->engine; 873 struct nv50_disp_pioc *pioc = (void *)object; 874 struct nvkm_subdev *subdev = &disp->base.engine.subdev; 875 struct nvkm_device *device = subdev->device; 876 int chid = pioc->base.chid; 877 878 nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000); 879 if (nvkm_msec(device, 2000, 880 if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000)) 881 break; 882 ) < 0) { 883 nvkm_error(subdev, "ch %d timeout: %08x\n", chid, 884 nvkm_rd32(device, 0x610200 + (chid * 0x10))); 885 if (suspend) 886 return -EBUSY; 887 } 888 889 return nv50_disp_chan_fini(&pioc->base, suspend); 890 } 891 892 /******************************************************************************* 893 * EVO immediate overlay channel objects 894 ******************************************************************************/ 895 896 int 897 nv50_disp_oimm_ctor(struct nvkm_object *parent, 898 struct nvkm_object *engine, 899 struct nvkm_oclass *oclass, void *data, u32 size, 900 struct nvkm_object **pobject) 901 { 902 union { 903 struct nv50_disp_overlay_v0 v0; 904 } *args = data; 905 struct nv50_disp *disp = (void *)engine; 906 struct nv50_disp_pioc *pioc; 907 int ret; 908 909 nvif_ioctl(parent, "create disp overlay size %d\n", size); 910 if (nvif_unpack(args->v0, 0, 0, false)) { 911 nvif_ioctl(parent, "create disp overlay vers %d head %d\n", 912 args->v0.version, args->v0.head); 913 if (args->v0.head > disp->head.nr) 914 return -EINVAL; 915 } else 916 return ret; 917 918 ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head, 919 sizeof(*pioc), (void **)&pioc); 920 *pobject = nv_object(pioc); 921 if (ret) 922 return ret; 923 924 return 0; 925 } 926 927 struct nv50_disp_chan_impl 928 nv50_disp_oimm_ofuncs = { 
929 .base.ctor = nv50_disp_oimm_ctor, 930 .base.dtor = nv50_disp_pioc_dtor, 931 .base.init = nv50_disp_pioc_init, 932 .base.fini = nv50_disp_pioc_fini, 933 .base.ntfy = nv50_disp_chan_ntfy, 934 .base.map = nv50_disp_chan_map, 935 .base.rd32 = nv50_disp_chan_rd32, 936 .base.wr32 = nv50_disp_chan_wr32, 937 .chid = 5, 938 }; 939 940 /******************************************************************************* 941 * EVO cursor channel objects 942 ******************************************************************************/ 943 944 int 945 nv50_disp_curs_ctor(struct nvkm_object *parent, 946 struct nvkm_object *engine, 947 struct nvkm_oclass *oclass, void *data, u32 size, 948 struct nvkm_object **pobject) 949 { 950 union { 951 struct nv50_disp_cursor_v0 v0; 952 } *args = data; 953 struct nv50_disp *disp = (void *)engine; 954 struct nv50_disp_pioc *pioc; 955 int ret; 956 957 nvif_ioctl(parent, "create disp cursor size %d\n", size); 958 if (nvif_unpack(args->v0, 0, 0, false)) { 959 nvif_ioctl(parent, "create disp cursor vers %d head %d\n", 960 args->v0.version, args->v0.head); 961 if (args->v0.head > disp->head.nr) 962 return -EINVAL; 963 } else 964 return ret; 965 966 ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head, 967 sizeof(*pioc), (void **)&pioc); 968 *pobject = nv_object(pioc); 969 if (ret) 970 return ret; 971 972 return 0; 973 } 974 975 struct nv50_disp_chan_impl 976 nv50_disp_curs_ofuncs = { 977 .base.ctor = nv50_disp_curs_ctor, 978 .base.dtor = nv50_disp_pioc_dtor, 979 .base.init = nv50_disp_pioc_init, 980 .base.fini = nv50_disp_pioc_fini, 981 .base.ntfy = nv50_disp_chan_ntfy, 982 .base.map = nv50_disp_chan_map, 983 .base.rd32 = nv50_disp_chan_rd32, 984 .base.wr32 = nv50_disp_chan_wr32, 985 .chid = 7, 986 }; 987 988 /******************************************************************************* 989 * Base display object 990 ******************************************************************************/ 991 992 int 993 
nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0) 994 { 995 struct nvkm_device *device = disp->base.engine.subdev.device; 996 const u32 blanke = nvkm_rd32(device, 0x610aec + (head * 0x540)); 997 const u32 blanks = nvkm_rd32(device, 0x610af4 + (head * 0x540)); 998 const u32 total = nvkm_rd32(device, 0x610afc + (head * 0x540)); 999 union { 1000 struct nv04_disp_scanoutpos_v0 v0; 1001 } *args = data; 1002 int ret; 1003 1004 nvif_ioctl(object, "disp scanoutpos size %d\n", size); 1005 if (nvif_unpack(args->v0, 0, 0, false)) { 1006 nvif_ioctl(object, "disp scanoutpos vers %d\n", 1007 args->v0.version); 1008 args->v0.vblanke = (blanke & 0xffff0000) >> 16; 1009 args->v0.hblanke = (blanke & 0x0000ffff); 1010 args->v0.vblanks = (blanks & 0xffff0000) >> 16; 1011 args->v0.hblanks = (blanks & 0x0000ffff); 1012 args->v0.vtotal = ( total & 0xffff0000) >> 16; 1013 args->v0.htotal = ( total & 0x0000ffff); 1014 args->v0.time[0] = ktime_to_ns(ktime_get()); 1015 args->v0.vline = /* vline read locks hline */ 1016 nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff; 1017 args->v0.time[1] = ktime_to_ns(ktime_get()); 1018 args->v0.hline = 1019 nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff; 1020 } else 1021 return ret; 1022 1023 return 0; 1024 } 1025 1026 int 1027 nv50_disp_main_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) 1028 { 1029 const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine); 1030 union { 1031 struct nv50_disp_mthd_v0 v0; 1032 struct nv50_disp_mthd_v1 v1; 1033 } *args = data; 1034 struct nv50_disp *disp = (void *)object->engine; 1035 struct nvkm_output *outp = NULL; 1036 struct nvkm_output *temp; 1037 u16 type, mask = 0; 1038 int head, ret; 1039 1040 if (mthd != NV50_DISP_MTHD) 1041 return -EINVAL; 1042 1043 nvif_ioctl(object, "disp mthd size %d\n", size); 1044 if (nvif_unpack(args->v0, 0, 0, true)) { 1045 nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n", 1046 args->v0.version, args->v0.method, args->v0.head); 1047 mthd = 
args->v0.method; 1048 head = args->v0.head; 1049 } else 1050 if (nvif_unpack(args->v1, 1, 1, true)) { 1051 nvif_ioctl(object, "disp mthd vers %d mthd %02x " 1052 "type %04x mask %04x\n", 1053 args->v1.version, args->v1.method, 1054 args->v1.hasht, args->v1.hashm); 1055 mthd = args->v1.method; 1056 type = args->v1.hasht; 1057 mask = args->v1.hashm; 1058 head = ffs((mask >> 8) & 0x0f) - 1; 1059 } else 1060 return ret; 1061 1062 if (head < 0 || head >= disp->head.nr) 1063 return -ENXIO; 1064 1065 if (mask) { 1066 list_for_each_entry(temp, &disp->base.outp, head) { 1067 if ((temp->info.hasht == type) && 1068 (temp->info.hashm & mask) == mask) { 1069 outp = temp; 1070 break; 1071 } 1072 } 1073 if (outp == NULL) 1074 return -ENXIO; 1075 } 1076 1077 switch (mthd) { 1078 case NV50_DISP_SCANOUTPOS: 1079 return impl->head.scanoutpos(object, disp, data, size, head); 1080 default: 1081 break; 1082 } 1083 1084 switch (mthd * !!outp) { 1085 case NV50_DISP_MTHD_V1_DAC_PWR: 1086 return disp->dac.power(object, disp, data, size, head, outp); 1087 case NV50_DISP_MTHD_V1_DAC_LOAD: 1088 return disp->dac.sense(object, disp, data, size, head, outp); 1089 case NV50_DISP_MTHD_V1_SOR_PWR: 1090 return disp->sor.power(object, disp, data, size, head, outp); 1091 case NV50_DISP_MTHD_V1_SOR_HDA_ELD: 1092 if (!disp->sor.hda_eld) 1093 return -ENODEV; 1094 return disp->sor.hda_eld(object, disp, data, size, head, outp); 1095 case NV50_DISP_MTHD_V1_SOR_HDMI_PWR: 1096 if (!disp->sor.hdmi) 1097 return -ENODEV; 1098 return disp->sor.hdmi(object, disp, data, size, head, outp); 1099 case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: { 1100 union { 1101 struct nv50_disp_sor_lvds_script_v0 v0; 1102 } *args = data; 1103 nvif_ioctl(object, "disp sor lvds script size %d\n", size); 1104 if (nvif_unpack(args->v0, 0, 0, false)) { 1105 nvif_ioctl(object, "disp sor lvds script " 1106 "vers %d name %04x\n", 1107 args->v0.version, args->v0.script); 1108 disp->sor.lvdsconf = args->v0.script; 1109 return 0; 1110 } else 1111 
			return ret;
	}
		break;
	case NV50_DISP_MTHD_V1_SOR_DP_PWR: {
		struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
		union {
			struct nv50_disp_sor_dp_pwr_v0 v0;
		} *args = data;
		nvif_ioctl(object, "disp sor dp pwr size %d\n", size);
		if (nvif_unpack(args->v0, 0, 0, false)) {
			nvif_ioctl(object, "disp sor dp pwr vers %d state %d\n",
				   args->v0.version, args->v0.state);
			if (args->v0.state == 0) {
				/* powering down: stop IRQ handling, drop
				 * lane power, mark link training undone
				 */
				nvkm_notify_put(&outpdp->irq);
				outpdp->func->lnk_pwr(outpdp, 0);
				atomic_set(&outpdp->lt.done, 0);
				return 0;
			} else
			if (args->v0.state != 0) {
				/* powering up: (re)train the link */
				nvkm_output_dp_train(&outpdp->base, 0, true);
				return 0;
			}
		} else
			return ret;
	}
		break;
	case NV50_DISP_MTHD_V1_PIOR_PWR:
		if (!disp->pior.power)
			return -ENODEV;
		return disp->pior.power(object, disp, data, size, head, outp);
	default:
		break;
	}

	return -EINVAL;
}

/* Constructor for the user-visible display object: creates the parent
 * object exposing the per-chipset channel classes (disp->sclass) and
 * allocates the RAMHT used to hash display object handles.
 */
int
nv50_disp_main_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	struct nv50_disp *disp = (void *)engine;
	struct nv50_disp_base *base;
	int ret;

	ret = nvkm_parent_create(parent, engine, oclass, 0,
				 disp->sclass, 0, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	return nvkm_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
			      &base->ramht);
}

/* Destructor counterpart: release the RAMHT reference then tear down
 * the parent object.
 */
void
nv50_disp_main_dtor(struct nvkm_object *object)
{
	struct nv50_disp_base *base = (void *)object;
	nvkm_ramht_ref(NULL, &base->ramht);
	nvkm_parent_destroy(&base->base);
}

/* Hardware bring-up for the display object: copies capability registers
 * into the EVO-visible area, takes the display away from the VBIOS, and
 * points the hardware at our RAMHT.
 */
static int
nv50_disp_main_init(struct nvkm_object *object)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_base *base = (void *)object;
	struct nvkm_device *device = disp->base.engine.subdev.device;
	int ret, i;
	u32 tmp;

	ret = nvkm_parent_init(&base->base);
	if (ret)
		return ret;

	/* The below segments of code copying values from one register to
	 * another appear to inform EVO of the display capabilities or
	 * something similar.  NFI what the 0x614004 caps are for..
	 */
	tmp = nvkm_rd32(device, 0x614004);
	nvkm_wr32(device, 0x610184, tmp);

	/* ... CRTC caps */
	for (i = 0; i < disp->head.nr; i++) {
		tmp = nvkm_rd32(device, 0x616100 + (i * 0x800));
		nvkm_wr32(device, 0x610190 + (i * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616104 + (i * 0x800));
		nvkm_wr32(device, 0x610194 + (i * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616108 + (i * 0x800));
		nvkm_wr32(device, 0x610198 + (i * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x61610c + (i * 0x800));
		nvkm_wr32(device, 0x61019c + (i * 0x10), tmp);
	}

	/* ... DAC caps */
	for (i = 0; i < disp->dac.nr; i++) {
		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
		nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
	}

	/* ... SOR caps */
	for (i = 0; i < disp->sor.nr; i++) {
		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
		nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
	}

	/* ... PIOR caps */
	for (i = 0; i < disp->pior.nr; i++) {
		tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
		nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
	}

	/* steal display away from vbios, or something like that */
	if (nvkm_rd32(device, 0x610024) & 0x00000100) {
		nvkm_wr32(device, 0x610024, 0x00000100);
		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
		/* wait up to 2ms for the hardware to acknowledge the
		 * handover (nvkm_msec is a polling-loop macro)
		 */
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
				break;
		) < 0)
			return -EBUSY;
	}

	/* point at display engine memory area (hash table, objects) */
	nvkm_wr32(device, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);

	/* enable supervisor interrupts, disable everything else */
	nvkm_wr32(device, 0x61002c, 0x00000370);
	nvkm_wr32(device, 0x610028, 0x00000000);
	return 0;
}

/* Shutdown counterpart to init: mask all display interrupts before the
 * parent teardown runs.
 */
static int
nv50_disp_main_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_base *base = (void *)object;
	struct nvkm_device *device = disp->base.engine.subdev.device;

	/* disable all interrupts */
	nvkm_wr32(device, 0x610024, 0x00000000);
	nvkm_wr32(device, 0x610020, 0x00000000);

	return nvkm_parent_fini(&base->base, suspend);
}

/* Object function table for the user-visible display object. */
struct nvkm_ofuncs
nv50_disp_main_ofuncs = {
	.ctor = nv50_disp_main_ctor,
	.dtor = nv50_disp_main_dtor,
	.init = nv50_disp_main_init,
	.fini = nv50_disp_main_fini,
	.mthd = nv50_disp_main_mthd,
	.ntfy = nvkm_disp_ntfy,
};

static struct nvkm_oclass
nv50_disp_main_oclass[] = {
	{ NV50_DISP, &nv50_disp_main_ofuncs },
	{}
};

/* Channel classes exposed beneath the display object. */
static struct nvkm_oclass
nv50_disp_sclass[] = {
	{ NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
	{ NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
	{ NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
	{
NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
	{ NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
	{}
};

/*******************************************************************************
 * Display context, tracks instmem allocation and prevents more than one
 * client using the display hardware at any time.
 ******************************************************************************/

/* Engine-context constructor.  Channel objects share their parent's
 * context; only the device-level client gets a real allocation, and at
 * most one such context may exist at a time (-EBUSY otherwise).
 */
static int
nv50_disp_data_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	struct nv50_disp *disp = (void *)engine;
	struct nvkm_gpuobj *gpuobj;
	int ret;

	/* no context needed for channel objects... */
	if (nv_mclass(parent) != NV_DEVICE) {
		atomic_inc(&parent->refcount);
		*pobject = parent;
		return 1;
	}

	/* allocate display hardware to client */
	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, NULL,
				 0x10000, 0x10000, NVOBJ_FLAG_HEAP,
				 &gpuobj);
	*pobject = nv_object(gpuobj);
	/* enforce the single-client rule under the subdev mutex */
	mutex_lock(&nv_subdev(disp)->mutex);
	if (!list_empty(&nv_engine(disp)->contexts))
		ret = -EBUSY;
	mutex_unlock(&nv_subdev(disp)->mutex);
	return ret;
}

struct nvkm_oclass
nv50_disp_cclass = {
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_disp_data_ctor,
		.dtor = _nvkm_gpuobj_dtor,
		.init = _nvkm_gpuobj_init,
		.fini = _nvkm_gpuobj_fini,
		.rd32 = _nvkm_gpuobj_rd32,
		.wr32 = _nvkm_gpuobj_wr32,
	},
};

/*******************************************************************************
 * Display engine implementation
 ******************************************************************************/

/* Disable the vblank interrupt enable bit for this head in 0x61002c. */
static void
nv50_disp_vblank_fini(struct nvkm_event *event, int type, int head)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	struct nvkm_device *device = disp->engine.subdev.device;
	nvkm_mask(device, 0x61002c, (4 << head), 0);
}

/* Enable the vblank interrupt enable bit for this head in 0x61002c. */
static void
nv50_disp_vblank_init(struct nvkm_event *event, int type, int head)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	struct nvkm_device *device = disp->engine.subdev.device;
	nvkm_mask(device, 0x61002c, (4 << head), (4 << head));
}

const struct nvkm_event_func
nv50_disp_vblank_func = {
	.ctor = nvkm_disp_vblank_ctor,
	.init = nv50_disp_vblank_init,
	.fini = nv50_disp_vblank_fini,
};

/* Decode tables for the EVO channel error interrupt. */
static const struct nvkm_enum
nv50_disp_intr_error_type[] = {
	{ 3, "ILLEGAL_MTHD" },
	{ 4, "INVALID_VALUE" },
	{ 5, "INVALID_STATE" },
	{ 7, "INVALID_HANDLE" },
	{}
};

static const struct nvkm_enum
nv50_disp_intr_error_code[] = {
	{ 0x00, "" },
	{}
};

/* Handle an EVO channel error interrupt: decode and log the offending
 * method, optionally dump the channel's method state, then ack.
 */
static void
nv50_disp_intr_error(struct nv50_disp *disp, int chid)
{
	struct nv50_disp_impl *impl = (void *)nv_object(disp)->oclass;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
	u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
	u32 code = (addr & 0x00ff0000) >> 16;
	u32 type = (addr & 0x00007000) >> 12;
	u32 mthd = (addr & 0x00000ffc);
	const struct nvkm_enum *ec, *et;

	et = nvkm_enum_find(nv50_disp_intr_error_type, type);
	ec = nvkm_enum_find(nv50_disp_intr_error_code, code);

	nvkm_error(subdev,
		   "ERROR %d [%s] %02x [%s] chid %d mthd %04x data %08x\n",
		   type, et ? et->name : "", code, ec ?
ec->name : "",
		   chid, mthd, data);

	/* on an UPDATE (0x0080) fault, dump the appropriate channel's
	 * method state: chid 0 = core, 1-2 = base, 3-4 = overlay
	 */
	if (chid == 0) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_mthd_chan(disp, NV_DBG_ERROR, chid - 0,
					    impl->mthd.core);
			break;
		default:
			break;
		}
	} else
	if (chid <= 2) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_mthd_chan(disp, NV_DBG_ERROR, chid - 1,
					    impl->mthd.base);
			break;
		default:
			break;
		}
	} else
	if (chid <= 4) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_mthd_chan(disp, NV_DBG_ERROR, chid - 3,
					    impl->mthd.ovly);
			break;
		default:
			break;
		}
	}

	/* acknowledge the error interrupt and reset the channel's
	 * error state
	 */
	nvkm_wr32(device, 0x610020, 0x00010000 << chid);
	nvkm_wr32(device, 0x610080 + (chid * 0x08), 0x90000000);
}

/* Map an output resource index (DAC 0-3, SOR 4-7, PIOR 8+) plus its
 * control-word to a DCB output type/mask, then find the matching
 * nvkm_output and its VBIOS output table entry.  Returns NULL if the
 * control word is unrecognised or no output/table entry matches.
 */
static struct nvkm_output *
exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
	    u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
	    struct nvbios_outp *info)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvkm_output *outp;
	u16 mask, type;

	if (or < 4) {
		type = DCB_OUTPUT_ANALOG;
		mask = 0;
	} else
	if (or < 8) {
		/* SOR: protocol is encoded in bits 11:8 of the control
		 * word; mask selects sublink(s)
		 */
		switch (ctrl & 0x00000f00) {
		case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
		case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
		case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
		case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
		default:
			nvkm_error(subdev, "unknown SOR mc %08x\n", ctrl);
			return NULL;
		}
		or -= 4;
	} else {
		/* PIOR: type comes from the board's external encoder */
		or = or - 8;
		type = 0x0010;
		mask = 0;
		switch (ctrl & 0x00000f00) {
		case 0x00000000: type |= disp->pior.type[or]; break;
		default:
			nvkm_error(subdev, "unknown PIOR mc %08x\n", ctrl);
			return NULL;
		}
	}

	/* build the DCB hash mask: link bits, or bit, head bit */
	mask  = 0x00c0 & (mask << 6);
	mask |= 0x0001 << or;
	mask |= 0x0100 << head;

	list_for_each_entry(outp, &disp->base.outp, head) {
		if ((outp->info.hasht & 0xff) == type &&
		    (outp->info.hashm & mask) == mask) {
			*data = nvbios_outp_match(bios, outp->info.hasht,
						  outp->info.hashm,
						  ver, hdr, cnt, len, info);
			if (!*data)
				return NULL;
			return outp;
		}
	}

	return NULL;
}

/* Work out which output resource is being attached to the given head
 * (by scanning the armed DAC/SOR/PIOR control words), then execute the
 * requested VBIOS output script (by id) for it.
 */
static struct nvkm_output *
exec_script(struct nv50_disp *disp, int head, int id)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_output *outp;
	struct nvbios_outp info;
	u8 ver, hdr, cnt, len;
	u32 data, ctrl = 0;
	u32 reg;
	int i;

	/* DAC */
	for (i = 0; !(ctrl & (1 << head)) && i < disp->dac.nr; i++)
		ctrl = nvkm_rd32(device, 0x610b5c + (i * 8));

	/* SOR */
	if (!(ctrl & (1 << head))) {
		/* SOR control words moved between chipset revisions */
		if (nv_device(disp)->chipset < 0x90 ||
		    nv_device(disp)->chipset == 0x92 ||
		    nv_device(disp)->chipset == 0xa0) {
			reg = 0x610b74;
		} else {
			reg = 0x610798;
		}
		for (i = 0; !(ctrl & (1 << head)) && i < disp->sor.nr; i++)
			ctrl = nvkm_rd32(device, reg + (i * 8));
		/* bias i into the global or-index space (SORs start at 4) */
		i += 4;
	}

	/* PIOR */
	if (!(ctrl & (1 << head))) {
		for (i = 0; !(ctrl & (1 << head)) && i < disp->pior.nr; i++)
			ctrl = nvkm_rd32(device, 0x610b84 + (i * 8));
		/* PIORs start at global or-index 8 */
		i += 8;
	}

	if (!(ctrl & (1 << head)))
		return NULL;
	/* the loops above over-increment by one on a match */
	i--;

	outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
	if (outp) {
		struct nvbios_init init = {
			.subdev = nv_subdev(disp),
			.bios = bios,
			.offset = info.script[id],
			.outp = &outp->info,
			.crtc = head,
			.execute = 1,
		};

		nvbios_exec(&init);
	}

	return outp;
}

/* Like exec_script(), but runs the VBIOS clock-comparison script chosen
 * by the output configuration and pixel clock.  Also reports the
 * derived output configuration word via *conf; an id of 0xff means
 * "lookup only, don't execute".
 */
static struct nvkm_output *
exec_clkcmp(struct
nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_output *outp;
	struct nvbios_outp info1;
	struct nvbios_ocfg info2;
	u8 ver, hdr, cnt, len;
	u32 data, ctrl = 0;
	u32 reg;
	int i;

	/* same resource scan as exec_script(), but against the "armed"
	 * (rather than active) control words
	 */
	/* DAC */
	for (i = 0; !(ctrl & (1 << head)) && i < disp->dac.nr; i++)
		ctrl = nvkm_rd32(device, 0x610b58 + (i * 8));

	/* SOR */
	if (!(ctrl & (1 << head))) {
		if (nv_device(disp)->chipset < 0x90 ||
		    nv_device(disp)->chipset == 0x92 ||
		    nv_device(disp)->chipset == 0xa0) {
			reg = 0x610b70;
		} else {
			reg = 0x610794;
		}
		for (i = 0; !(ctrl & (1 << head)) && i < disp->sor.nr; i++)
			ctrl = nvkm_rd32(device, reg + (i * 8));
		i += 4;
	}

	/* PIOR */
	if (!(ctrl & (1 << head))) {
		for (i = 0; !(ctrl & (1 << head)) && i < disp->pior.nr; i++)
			ctrl = nvkm_rd32(device, 0x610b80 + (i * 8));
		i += 8;
	}

	if (!(ctrl & (1 << head)))
		return NULL;
	i--;

	outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
	if (!outp)
		return NULL;

	/* derive the configuration word used to select the clock script */
	if (outp->info.location == 0) {
		switch (outp->info.type) {
		case DCB_OUTPUT_TMDS:
			*conf = (ctrl & 0x00000f00) >> 8;
			/* dual-link threshold */
			if (pclk >= 165000)
				*conf |= 0x0100;
			break;
		case DCB_OUTPUT_LVDS:
			/* set earlier via NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT */
			*conf = disp->sor.lvdsconf;
			break;
		case DCB_OUTPUT_DP:
			*conf = (ctrl & 0x00000f00) >> 8;
			break;
		case DCB_OUTPUT_ANALOG:
		default:
			*conf = 0x00ff;
			break;
		}
	} else {
		/* external (PIOR) encoders run at half the pixel clock */
		*conf = (ctrl & 0x00000f00) >> 8;
		pclk = pclk / 2;
	}

	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
	if (data && id < 0xff) {
		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
		if (data) {
			struct nvbios_init init = {
				.subdev = nv_subdev(disp),
				.bios = bios,
				.offset = data,
				.outp = &outp->info,
				.crtc = head,
				.execute = 1,
			};

			nvbios_exec(&init);
		}
	}

	return outp;
}

/* Supervisor 1, per-head: run VBIOS script 1 (encoder detach prep). */
static void
nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
{
	exec_script(disp, head, 1);
}

/* Supervisor 2, step 0: run VBIOS script 2 (encoder detach), plus an
 * extra DP power-down script — see comment below.
 */
static void
nv50_disp_intr_unk20_0(struct nv50_disp *disp, int head)
{
	struct nvkm_output *outp = exec_script(disp, head, 2);

	/* the binary driver does this outside of the supervisor handling
	 * (after the third supervisor from a detach).  we (currently?)
	 * allow both detach/attach to happen in the same set of
	 * supervisor interrupts, so it would make sense to execute this
	 * (full power down?) script after all the detach phases of the
	 * supervisor handling.  like with training if needed from the
	 * second supervisor, nvidia doesn't do this, so who knows if it's
	 * entirely safe, but it does appear to work..
	 *
	 * without this script being run, on some configurations i've
	 * seen, switching from DP to TMDS on a DP connector may result
	 * in a blank screen (SOR_PWR off/on can restore it)
	 */
	if (outp && outp->info.type == DCB_OUTPUT_DP) {
		struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
		struct nvbios_init init = {
			.subdev = nv_subdev(disp),
			.bios = nvkm_bios(disp),
			.outp = &outp->info,
			.crtc = head,
			.offset = outpdp->info.script[4],
			.execute = 1,
		};

		nvbios_exec(&init);
		/* link power was dropped; force retraining later */
		atomic_set(&outpdp->lt.done, 0);
	}
}

/* Supervisor 2, step 1: program the head's pixel clock PLL. */
static void
nv50_disp_intr_unk20_1(struct nv50_disp *disp, int head)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_devinit *devinit = device->devinit;
	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
	if (pclk)
		devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
}

/* Compute and program the DP TU (transfer unit) / watermark parameters
 * for a SOR driving a DP link, based on link rate/width and the mode's
 * timings.  Algorithm largely mirrors comments in the tegra driver.
 */
static void
nv50_disp_intr_unk20_2_dp(struct nv50_disp *disp, int head,
			  struct dcb_output *outp, u32 pclk)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const int link = !(outp->sorconf.link & 1);
	const int   or = ffs(outp->or) - 1;
	const u32 soff = ( or * 0x800);
	const u32 loff = (link * 0x080) + soff;
	const u32 ctrl = nvkm_rd32(device, 0x610794 + (or * 8));
	/* fixed-point scale used for the TU fraction search below */
	const u32 symbol = 100000;
	const s32 vactive = nvkm_rd32(device, 0x610af8 + (head * 0x540)) & 0xffff;
	const s32 vblanke = nvkm_rd32(device, 0x610ae8 + (head * 0x540)) & 0xffff;
	const s32 vblanks = nvkm_rd32(device, 0x610af0 + (head * 0x540)) & 0xffff;
	u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff);
	u32 clksor = nvkm_rd32(device, 0x614300 + soff);
	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
	int TU, VTUi, VTUf, VTUa;
	u64 link_data_rate, link_ratio, unk;
	u32 best_diff = 64 * symbol;
	u32 link_nr, link_bw, bits;
	u64 value;

	/* link rate in 10s of kHz, lane count from the enabled-lane mask.
	 * NOTE(review): if no lanes are enabled (dpctrl & 0x000f0000 == 0)
	 * link_nr is 0 and the divisions below divide by zero — confirm
	 * callers guarantee the link is configured before this runs.
	 */
	link_bw = (clksor & 0x000c0000) ? 270000 : 162000;
	link_nr = hweight32(dpctrl & 0x000f0000);

	/* symbols/hblank - algorithm taken from comments in tegra driver */
	value = vblanke + vactive - vblanks - 7;
	value = value * link_bw;
	do_div(value, pclk);
	value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
	nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, value);

	/* symbols/vblank - algorithm taken from comments in tegra driver */
	value = vblanks - vblanke - 25;
	value = value * link_bw;
	do_div(value, pclk);
	value = value - ((36 / link_nr) + 3) - 1;
	nvkm_mask(device, 0x61c1ec + soff, 0x00ffffff, value);

	/* watermark / activesym */
	if      ((ctrl & 0xf0000) == 0x60000) bits = 30;
	else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
	else                                  bits = 18;

	link_data_rate = (pclk * bits / 8) / link_nr;

	/* calculate ratio of packed data rate to link symbol rate */
	link_ratio = link_data_rate * symbol;
	do_div(link_ratio, link_bw);

	/* search TU sizes 64..32 for the integer/fraction/adjust triple
	 * that best matches the required symbol ratio
	 */
	for (TU = 64; TU >= 32; TU--) {
		/* calculate average number of valid symbols in each TU */
		u32 tu_valid = link_ratio * TU;
		u32 calc, diff;

		/* find a hw representation for the fraction.. */
		VTUi = tu_valid / symbol;
		calc = VTUi * symbol;
		diff = tu_valid - calc;
		if (diff) {
			if (diff >= (symbol / 2)) {
				VTUf = symbol / (symbol - diff);
				if (symbol - (VTUf * diff))
					VTUf++;

				if (VTUf <= 15) {
					VTUa = 1;
					calc += symbol - (symbol / VTUf);
				} else {
					VTUa = 0;
					VTUf = 1;
					calc += symbol;
				}
			} else {
				VTUa = 0;
				VTUf = min((int)(symbol / diff), 15);
				calc += symbol / VTUf;
			}

			diff = calc - tu_valid;
		} else {
			/* no remainder, but the hw doesn't like the fractional
			 * part to be zero.  decrement the integer part and
			 * have the fraction add a whole symbol back
			 */
			VTUa = 0;
			VTUf = 1;
			VTUi--;
		}

		if (diff < best_diff) {
			best_diff = diff;
			bestTU = TU;
			bestVTUa = VTUa;
			bestVTUf = VTUf;
			bestVTUi = VTUi;
			if (diff == 0)
				break;
		}
	}

	if (!bestTU) {
		nvkm_error(subdev, "unable to find suitable dp config\n");
		return;
	}

	/* XXX close to vbios numbers, but not right */
	unk  = (symbol - link_ratio) * bestTU;
	unk *= link_ratio;
	do_div(unk, symbol);
	do_div(unk, symbol);
	unk += 6;

	nvkm_mask(device, 0x61c10c + loff, 0x000001fc, bestTU << 2);
	nvkm_mask(device, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
						       bestVTUf << 16 |
						       bestVTUi << 8 | unk);
}

/* Supervisor 2, step 2: run the attach clock script, retrain DP if
 * needed (see comment below), then program the head/output routing
 * registers for the encoder type (DAC / SOR / PIOR).
 */
static void
nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_output *outp;
	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
	u32 hval, hreg = 0x614200 + (head * 0x800);
	u32 oval, oreg;
	u32 mask, conf;

	/* id 0xff = lookup only; just resolve the output and conf word */
	outp = exec_clkcmp(disp, head, 0xff, pclk, &conf);
	if (!outp)
		return;

	/* we allow both encoder attach and detach operations to occur
	 * within a single supervisor (ie. modeset) sequence.  the
	 * encoder detach scripts quite often switch off power to the
	 * lanes, which requires the link to be re-trained.
	 *
	 * this is not generally an issue as the sink "must" (heh)
	 * signal an irq when it's lost sync so the driver can
	 * re-train.
	 *
	 * however, on some boards, if one does not configure at least
	 * the gpu side of the link *before* attaching, then various
	 * things can go horribly wrong (PDISP disappearing from mmio,
	 * third supervisor never happens, etc).
	 *
	 * the solution is simply to retrain here, if necessary.  last
	 * i checked, the binary driver userspace does not appear to
	 * trigger this situation (it forces an UPDATE between steps).
	 */
	if (outp->info.type == DCB_OUTPUT_DP) {
		u32 soff = (ffs(outp->info.or) - 1) * 0x08;
		u32 ctrl, datarate;

		/* soff is reused as a divisor afterwards: 1 for internal
		 * (SOR) encoders, 2 for external (PIOR) encoders
		 */
		if (outp->info.location == 0) {
			ctrl = nvkm_rd32(device, 0x610794 + soff);
			soff = 1;
		} else {
			ctrl = nvkm_rd32(device, 0x610b80 + soff);
			soff = 2;
		}

		/* bits-per-pixel from the control word's depth field */
		switch ((ctrl & 0x000f0000) >> 16) {
		case 6: datarate = pclk * 30; break;
		case 5: datarate = pclk * 24; break;
		case 2:
		default:
			datarate = pclk * 18;
			break;
		}

		if (nvkm_output_dp_train(outp, datarate / soff, true))
			OUTP_ERR(outp, "link not trained before attach");
	}

	/* now run the real clock-comparison script (id 0) */
	exec_clkcmp(disp, head, 0, pclk, &conf);

	if (!outp->info.location && outp->info.type == DCB_OUTPUT_ANALOG) {
		oreg = 0x614280 + (ffs(outp->info.or) - 1) * 0x800;
		oval = 0x00000000;
		hval = 0x00000000;
		mask = 0xffffffff;
	} else
	if (!outp->info.location) {
		if (outp->info.type == DCB_OUTPUT_DP)
			nv50_disp_intr_unk20_2_dp(disp, head, &outp->info, pclk);
		oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800;
		oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
		hval = 0x00000000;
		mask = 0x00000707;
	} else {
		oreg = 0x614380 + (ffs(outp->info.or) - 1) * 0x800;
		oval = 0x00000001;
		hval = 0x00000001;
		mask = 0x00000707;
	}

	nvkm_mask(device, hreg, 0x0000000f, hval);
	nvkm_mask(device, oreg, mask, oval);
}

/* If programming a TMDS output on a SOR that can also be configured for
 * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
1891 * 1892 * It looks like the VBIOS TMDS scripts make an attempt at this, however, 1893 * the VBIOS scripts on at least one board I have only switch it off on 1894 * link 0, causing a blank display if the output has previously been 1895 * programmed for DisplayPort. 1896 */ 1897 static void 1898 nv50_disp_intr_unk40_0_tmds(struct nv50_disp *disp, 1899 struct dcb_output *outp) 1900 { 1901 struct nvkm_device *device = disp->base.engine.subdev.device; 1902 struct nvkm_bios *bios = device->bios; 1903 const int link = !(outp->sorconf.link & 1); 1904 const int or = ffs(outp->or) - 1; 1905 const u32 loff = (or * 0x800) + (link * 0x80); 1906 const u16 mask = (outp->sorconf.link << 6) | outp->or; 1907 struct dcb_output match; 1908 u8 ver, hdr; 1909 1910 if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match)) 1911 nvkm_mask(device, 0x61c10c + loff, 0x00000001, 0x00000000); 1912 } 1913 1914 static void 1915 nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head) 1916 { 1917 struct nvkm_device *device = disp->base.engine.subdev.device; 1918 struct nvkm_output *outp; 1919 u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff; 1920 u32 conf; 1921 1922 outp = exec_clkcmp(disp, head, 1, pclk, &conf); 1923 if (!outp) 1924 return; 1925 1926 if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS) 1927 nv50_disp_intr_unk40_0_tmds(disp, &outp->info); 1928 } 1929 1930 void 1931 nv50_disp_intr_supervisor(struct work_struct *work) 1932 { 1933 struct nv50_disp *disp = 1934 container_of(work, struct nv50_disp, supervisor); 1935 struct nv50_disp_impl *impl = (void *)nv_object(disp)->oclass; 1936 struct nvkm_subdev *subdev = &disp->base.engine.subdev; 1937 struct nvkm_device *device = subdev->device; 1938 u32 super = nvkm_rd32(device, 0x610030); 1939 int head; 1940 1941 nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super, super); 1942 1943 if (disp->super & 0x00000010) { 1944 nv50_disp_mthd_chan(disp, NV_DBG_DEBUG, 0, impl->mthd.core); 1945 for 
(head = 0; head < disp->head.nr; head++) { 1946 if (!(super & (0x00000020 << head))) 1947 continue; 1948 if (!(super & (0x00000080 << head))) 1949 continue; 1950 nv50_disp_intr_unk10_0(disp, head); 1951 } 1952 } else 1953 if (disp->super & 0x00000020) { 1954 for (head = 0; head < disp->head.nr; head++) { 1955 if (!(super & (0x00000080 << head))) 1956 continue; 1957 nv50_disp_intr_unk20_0(disp, head); 1958 } 1959 for (head = 0; head < disp->head.nr; head++) { 1960 if (!(super & (0x00000200 << head))) 1961 continue; 1962 nv50_disp_intr_unk20_1(disp, head); 1963 } 1964 for (head = 0; head < disp->head.nr; head++) { 1965 if (!(super & (0x00000080 << head))) 1966 continue; 1967 nv50_disp_intr_unk20_2(disp, head); 1968 } 1969 } else 1970 if (disp->super & 0x00000040) { 1971 for (head = 0; head < disp->head.nr; head++) { 1972 if (!(super & (0x00000080 << head))) 1973 continue; 1974 nv50_disp_intr_unk40_0(disp, head); 1975 } 1976 } 1977 1978 nvkm_wr32(device, 0x610030, 0x80000000); 1979 } 1980 1981 void 1982 nv50_disp_intr(struct nvkm_subdev *subdev) 1983 { 1984 struct nv50_disp *disp = (void *)subdev; 1985 struct nvkm_device *device = disp->base.engine.subdev.device; 1986 u32 intr0 = nvkm_rd32(device, 0x610020); 1987 u32 intr1 = nvkm_rd32(device, 0x610024); 1988 1989 while (intr0 & 0x001f0000) { 1990 u32 chid = __ffs(intr0 & 0x001f0000) - 16; 1991 nv50_disp_intr_error(disp, chid); 1992 intr0 &= ~(0x00010000 << chid); 1993 } 1994 1995 while (intr0 & 0x0000001f) { 1996 u32 chid = __ffs(intr0 & 0x0000001f); 1997 nv50_disp_chan_uevent_send(disp, chid); 1998 intr0 &= ~(0x00000001 << chid); 1999 } 2000 2001 if (intr1 & 0x00000004) { 2002 nvkm_disp_vblank(&disp->base, 0); 2003 nvkm_wr32(device, 0x610024, 0x00000004); 2004 } 2005 2006 if (intr1 & 0x00000008) { 2007 nvkm_disp_vblank(&disp->base, 1); 2008 nvkm_wr32(device, 0x610024, 0x00000008); 2009 } 2010 2011 if (intr1 & 0x00000070) { 2012 disp->super = (intr1 & 0x00000070); 2013 schedule_work(&disp->supervisor); 2014 
nvkm_wr32(device, 0x610024, disp->super); 2015 } 2016 } 2017 2018 static int 2019 nv50_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 2020 struct nvkm_oclass *oclass, void *data, u32 size, 2021 struct nvkm_object **pobject) 2022 { 2023 struct nv50_disp *disp; 2024 int ret; 2025 2026 ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP", 2027 "display", &disp); 2028 *pobject = nv_object(disp); 2029 if (ret) 2030 return ret; 2031 2032 ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &disp->uevent); 2033 if (ret) 2034 return ret; 2035 2036 nv_engine(disp)->sclass = nv50_disp_main_oclass; 2037 nv_engine(disp)->cclass = &nv50_disp_cclass; 2038 nv_subdev(disp)->intr = nv50_disp_intr; 2039 INIT_WORK(&disp->supervisor, nv50_disp_intr_supervisor); 2040 disp->sclass = nv50_disp_sclass; 2041 disp->head.nr = 2; 2042 disp->dac.nr = 3; 2043 disp->sor.nr = 2; 2044 disp->pior.nr = 3; 2045 disp->dac.power = nv50_dac_power; 2046 disp->dac.sense = nv50_dac_sense; 2047 disp->sor.power = nv50_sor_power; 2048 disp->pior.power = nv50_pior_power; 2049 return 0; 2050 } 2051 2052 struct nvkm_oclass * 2053 nv50_disp_oclass = &(struct nv50_disp_impl) { 2054 .base.base.handle = NV_ENGINE(DISP, 0x50), 2055 .base.base.ofuncs = &(struct nvkm_ofuncs) { 2056 .ctor = nv50_disp_ctor, 2057 .dtor = _nvkm_disp_dtor, 2058 .init = _nvkm_disp_init, 2059 .fini = _nvkm_disp_fini, 2060 }, 2061 .base.outp.internal.crt = nv50_dac_output_new, 2062 .base.outp.internal.tmds = nv50_sor_output_new, 2063 .base.outp.internal.lvds = nv50_sor_output_new, 2064 .base.outp.external.tmds = nv50_pior_output_new, 2065 .base.outp.external.dp = nv50_pior_dp_new, 2066 .base.vblank = &nv50_disp_vblank_func, 2067 .mthd.core = &nv50_disp_core_mthd_chan, 2068 .mthd.base = &nv50_disp_base_mthd_chan, 2069 .mthd.ovly = &nv50_disp_ovly_mthd_chan, 2070 .mthd.prev = 0x000004, 2071 .head.scanoutpos = nv50_disp_main_scanoutpos, 2072 }.base.base; 2073