/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"
#include "outp.h"
#include "outpdp.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <core/enum.h>
#include <core/handle.h>
#include <core/ramht.h>
#include <engine/dmaobj.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/disp.h>
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/devinit.h>
#include <subdev/fb.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/event.h>
#include <nvif/unpack.h>

/*******************************************************************************
 * EVO channel base class
 ******************************************************************************/

static int
nv50_disp_chan_create_(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, int head,
		       int length, void **pobject)
{
	const struct nv50_disp_chan_impl *impl = (void *)oclass->ofuncs;
	struct nv50_disp_base *base = (void *)parent;
	struct nv50_disp_chan *chan;
	int chid = impl->chid + head;
	int ret;

	if (base->chan & (1 << chid))
		return -EBUSY;
	base->chan |= (1 << chid);

	ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
				  (1ULL << NVDEV_ENGINE_DMAOBJ),
				  length, pobject);
	chan = *pobject;
	if (ret)
		return ret;
	chan->chid = chid;

	nv_parent(chan)->object_attach = impl->attach;
	nv_parent(chan)->object_detach = impl->detach;
	return 0;
}

static void
nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
{
	struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
	base->chan &= ~(1 << chan->chid);
	nvkm_namedb_destroy(&chan->base);
}

static void
nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
	struct nvkm_device *device = disp->base.engine.subdev.device;
	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000000 << index);
	nvkm_wr32(device, 0x610020, 0x00000001 << index);
}

static void
nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
{
	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
	struct nvkm_device *device = disp->base.engine.subdev.device;
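	/* ack any stale completion interrupt for this channel, then unmask
	 * it; mirrors the disable/ack sequence in nv50_disp_chan_uevent_fini()
	 */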
	nvkm_wr32(device, 0x610020, 0x00000001 << index);
	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000001 << index);
}

void
nv50_disp_chan_uevent_send(struct nv50_disp *disp, int chid)
{
	struct nvif_notify_uevent_rep {
	} rep;

	nvkm_event_send(&disp->uevent, 1, chid, &rep, sizeof(rep));
}

int
nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
			   struct nvkm_notify *notify)
{
	struct nv50_disp_dmac *dmac = (void *)object;
	union {
		struct nvif_notify_uevent_req none;
	} *args = data;
	int ret;

	if (nvif_unvers(args->none)) {
		notify->size = sizeof(struct nvif_notify_uevent_rep);
		notify->types = 1;
		notify->index = dmac->base.chid;
		return 0;
	}

	return ret;
}

const struct nvkm_event_func
nv50_disp_chan_uevent = {
	.ctor = nv50_disp_chan_uevent_ctor,
	.init = nv50_disp_chan_uevent_init,
	.fini = nv50_disp_chan_uevent_fini,
};

int
nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
		    struct nvkm_event **pevent)
{
	struct nv50_disp *disp = (void *)object->engine;
	switch (type) {
	case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
		*pevent = &disp->uevent;
		return 0;
	default:
		break;
	}
	return -EINVAL;
}

int
nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
{
	struct nv50_disp_chan *chan = (void *)object;
	*addr = nv_device_resource_start(nv_device(object), 0) +
		0x640000 + (chan->chid * 0x1000);
	*size = 0x001000;
	return 0;
}

u32
nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr)
{
	struct nv50_disp_chan *chan = (void *)object;
	struct nvkm_device *device = object->engine->subdev.device;
	return nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
}

void
nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
	struct nv50_disp_chan *chan = (void *)object;
	struct nvkm_device *device = object->engine->subdev.device;
	nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
}

/*******************************************************************************
 * EVO DMA channel base class
 ******************************************************************************/

static int
nv50_disp_dmac_object_attach(struct nvkm_object *parent,
			     struct nvkm_object *object, u32 name)
{
	struct nv50_disp_base *base = (void *)parent->parent;
	struct nv50_disp_chan *chan = (void *)parent;
	u32 addr = nv_gpuobj(object)->node->offset;
	u32 chid = chan->chid;
	u32 data = (chid << 28) | (addr << 10) | chid;
	return nvkm_ramht_insert(base->ramht, NULL, chid, 0, name, data);
}

static void
nv50_disp_dmac_object_detach(struct nvkm_object *parent, int cookie)
{
	struct nv50_disp_base *base = (void *)parent->parent;
	nvkm_ramht_remove(base->ramht, cookie);
}

static int
nv50_disp_dmac_create_(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, u64 pushbuf, int head,
		       int length, void **pobject)
{
	struct nvkm_client *client = nvkm_client(parent);
	struct nvkm_handle *handle;
	struct nvkm_dmaobj *dmaobj;
	struct nv50_disp_dmac *dmac;
	int ret;

	ret = nv50_disp_chan_create_(parent, engine, oclass, head,
				     length, pobject);
	dmac = *pobject;
	if (ret)
		return ret;

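	/* resolve the client-supplied pushbuf handle; it must name a 4KiB
	 * DMA object in VRAM or non-snooped PCI memory (validated below)
	 */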
	handle = nvkm_client_search(client, pushbuf);
	if (!handle)
		return -ENOENT;
	dmaobj = (void *)handle->object;

	switch (nv_mclass(dmaobj)) {
	case 0x0002:
	case 0x003d:
		if (dmaobj->limit - dmaobj->start != 0xfff)
			return -EINVAL;

		switch (dmaobj->target) {
		case NV_MEM_TARGET_VRAM:
			dmac->push = 0x00000001 | dmaobj->start >> 8;
			break;
		case NV_MEM_TARGET_PCI_NOSNOOP:
			dmac->push = 0x00000003 | dmaobj->start >> 8;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

void
nv50_disp_dmac_dtor(struct nvkm_object *object)
{
	struct nv50_disp_dmac *dmac = (void *)object;
	nv50_disp_chan_destroy(&dmac->base);
}

static int
nv50_disp_dmac_init(struct nvkm_object *object)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_dmac *dmac = (void *)object;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int chid = dmac->base.chid;
	int ret;

	ret = nv50_disp_chan_init(&dmac->base);
	if (ret)
		return ret;

	/* enable error reporting */
	nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);

	/* initialise channel for dma command submission */
	nvkm_wr32(device, 0x610204 + (chid * 0x0010), dmac->push);
	nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
	nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
	nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);

	/* wait for it to go inactive */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
		return -EBUSY;
	}

	return 0;
}

static int
nv50_disp_dmac_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_dmac *dmac = (void *)object;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int chid = dmac->base.chid;

	/* deactivate channel */
	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
		if (suspend)
			return -EBUSY;
	}

	/* disable error reporting and completion notifications */
	nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);

	return nv50_disp_chan_fini(&dmac->base, suspend);
}

/*******************************************************************************
 * EVO master channel object
 ******************************************************************************/

static void
nv50_disp_mthd_list(struct nv50_disp *disp, int debug, u32 base, int c,
		    const struct nv50_disp_mthd_list *list, int inst)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int i;

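	/* print each method's value, flagging entries where the two register
	 * copies (offset 0 and offset 'c') differ
	 */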
	for (i = 0; list->data[i].mthd; i++) {
		if (list->data[i].addr) {
			u32 next = nvkm_rd32(device, list->data[i].addr + base + 0);
			u32 prev = nvkm_rd32(device, list->data[i].addr + base + c);
			u32 mthd = list->data[i].mthd + (list->mthd * inst);
			const char *name = list->data[i].name;
			char mods[16];

			if (prev != next)
				snprintf(mods, sizeof(mods), "-> %08x", next);
			else
				snprintf(mods, sizeof(mods), "%13c", ' ');

			nvkm_printk_(subdev, debug, info,
				     "\t%04x: %08x %s%s%s\n",
				     mthd, prev, mods, name ? " // " : "",
				     name ? name : "");
		}
	}
}

void
nv50_disp_mthd_chan(struct nv50_disp *disp, int debug, int head,
		    const struct nv50_disp_mthd_chan *chan)
{
	struct nvkm_object *object = nv_object(disp);
	const struct nv50_disp_impl *impl = (void *)object->oclass;
	const struct nv50_disp_mthd_list *list;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	int i, j;

	if (debug > nv_subdev(disp)->debug)
		return;

	for (i = 0; (list = chan->data[i].mthd) != NULL; i++) {
		u32 base = head * chan->addr;
		for (j = 0; j < chan->data[i].nr; j++, base += list->addr) {
			const char *cname = chan->name;
			const char *sname = "";
			char cname_[16], sname_[16];

			if (chan->addr) {
				snprintf(cname_, sizeof(cname_), "%s %d",
					 chan->name, head);
				cname = cname_;
			}

			if (chan->data[i].nr > 1) {
				snprintf(sname_, sizeof(sname_), " - %s %d",
					 chan->data[i].name, j);
				sname = sname_;
			}

			nvkm_printk_(subdev, debug, info, "%s%s:\n", cname, sname);
			nv50_disp_mthd_list(disp, debug, base, impl->mthd.prev,
					    list, j);
		}
	}
}

const struct nv50_disp_mthd_list
nv50_disp_core_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x000000 },
		{ 0x0084, 0x610bb8 },
		{ 0x0088, 0x610b9c },
		{ 0x008c, 0x000000 },
		{}
	}
};

static const struct nv50_disp_mthd_list
nv50_disp_core_mthd_dac = {
	.mthd = 0x0080,
	.addr = 0x000008,
	.data = {
		{ 0x0400, 0x610b58 },
		{ 0x0404, 0x610bdc },
		{ 0x0420, 0x610828 },
		{}
	}
};

const struct nv50_disp_mthd_list
nv50_disp_core_mthd_sor = {
	.mthd = 0x0040,
	.addr = 0x000008,
	.data = {
		{ 0x0600, 0x610b70 },
		{}
	}
};

const struct nv50_disp_mthd_list
nv50_disp_core_mthd_pior = {
	.mthd = 0x0040,
	.addr = 0x000008,
	.data = {
		{ 0x0700, 0x610b80 },
		{}
	}
};

static const struct nv50_disp_mthd_list
nv50_disp_core_mthd_head = {
	.mthd = 0x0400,
	.addr = 0x000540,
	.data = {
		{ 0x0800, 0x610ad8 },
		{ 0x0804, 0x610ad0 },
		{ 0x0808, 0x610a48 },
		{ 0x080c, 0x610a78 },
		{ 0x0810, 0x610ac0 },
		{ 0x0814, 0x610af8 },
		{ 0x0818, 0x610b00 },
		{ 0x081c, 0x610ae8 },
		{ 0x0820, 0x610af0 },
		{ 0x0824, 0x610b08 },
		{ 0x0828, 0x610b10 },
		{ 0x082c, 0x610a68 },
		{ 0x0830, 0x610a60 },
		{ 0x0834, 0x000000 },
		{ 0x0838, 0x610a40 },
		{ 0x0840, 0x610a24 },
		{ 0x0844, 0x610a2c },
		{ 0x0848, 0x610aa8 },
		{ 0x084c, 0x610ab0 },
		{ 0x0860, 0x610a84 },
		{ 0x0864, 0x610a90 },
		{ 0x0868, 0x610b18 },
		{ 0x086c, 0x610b20 },
		{ 0x0870, 0x610ac8 },
		{ 0x0874, 0x610a38 },
		{ 0x0880, 0x610a58 },
		{ 0x0884, 0x610a9c },
		{ 0x08a0, 0x610a70 },
		{ 0x08a4, 0x610a50 },
		{ 0x08a8, 0x610ae0 },
		{ 0x08c0, 0x610b28 },
		{ 0x08c4, 0x610b30 },
		{ 0x08c8, 0x610b40 },
		{ 0x08d4, 0x610b38 },
		{ 0x08d8, 0x610b48 },
		{ 0x08dc, 0x610b50 },
		{ 0x0900, 0x610a18 },
		{ 0x0904, 0x610ab8 },
		{}
	}
};

static const struct nv50_disp_mthd_chan
nv50_disp_core_mthd_chan = {
	.name = "Core",
	.addr = 0x000000,
	.data = {
		{ "Global", 1, &nv50_disp_core_mthd_base },
		{    "DAC", 3, &nv50_disp_core_mthd_dac },
		{    "SOR", 2, &nv50_disp_core_mthd_sor },
		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
		{   "HEAD", 2, &nv50_disp_core_mthd_head },
		{}
	}
};

int
nv50_disp_core_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv50_disp_core_channel_dma_v0 v0;
	} *args = data;
	struct nv50_disp_dmac *mast;
	int ret;

	nvif_ioctl(parent, "create disp core channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create disp core channel dma vers %d "
				   "pushbuf %016llx\n",
			   args->v0.version, args->v0.pushbuf);
	} else
		return ret;

	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
				     0, sizeof(*mast), (void **)&mast);
	*pobject = nv_object(mast);
	if (ret)
		return ret;

	return 0;
}

static int
nv50_disp_core_init(struct nvkm_object *object)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_dmac *mast = (void *)object;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret;

	ret = nv50_disp_chan_init(&mast->base);
	if (ret)
		return ret;

	/* enable error reporting */
	nvkm_mask(device, 0x610028, 0x00010000, 0x00010000);

	/* attempt to unstick channel from some unknown state */
	if ((nvkm_rd32(device, 0x610200) & 0x009f0000) == 0x00020000)
		nvkm_mask(device, 0x610200, 0x00800000, 0x00800000);
	if ((nvkm_rd32(device, 0x610200) & 0x003f0000) == 0x00030000)
		nvkm_mask(device, 0x610200, 0x00600000, 0x00600000);

	/* initialise channel for dma command submission */
	nvkm_wr32(device, 0x610204, mast->push);
	nvkm_wr32(device, 0x610208, 0x00010000);
	nvkm_wr32(device, 0x61020c, 0x00000000);
	nvkm_mask(device, 0x610200, 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x640000, 0x00000000);
	nvkm_wr32(device, 0x610200, 0x01000013);

	/* wait for it to go inactive */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200) & 0x80000000))
			break;
	) < 0) {
		nvkm_error(subdev, "core init: %08x\n",
			   nvkm_rd32(device, 0x610200));
		return -EBUSY;
	}

	return 0;
}

static int
nv50_disp_core_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_dmac *mast = (void *)object;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;

	/* deactivate channel */
	nvkm_mask(device, 0x610200, 0x00000010, 0x00000000);
	nvkm_mask(device, 0x610200, 0x00000003, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200) & 0x001e0000))
			break;
	) < 0) {
		nvkm_error(subdev, "core fini: %08x\n",
			   nvkm_rd32(device, 0x610200));
		if (suspend)
			return -EBUSY;
	}

	/* disable error reporting and completion notifications */
	nvkm_mask(device, 0x610028, 0x00010001, 0x00000000);

	return nv50_disp_chan_fini(&mast->base, suspend);
}

struct nv50_disp_chan_impl
nv50_disp_core_ofuncs = {
	.base.ctor = nv50_disp_core_ctor,
	.base.dtor = nv50_disp_dmac_dtor,
	.base.init = nv50_disp_core_init,
	.base.fini = nv50_disp_core_fini,
	.base.map = nv50_disp_chan_map,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 0,
	.attach = nv50_disp_dmac_object_attach,
	.detach = nv50_disp_dmac_object_detach,
};

/*******************************************************************************
 * EVO sync channel objects
 ******************************************************************************/

static const struct nv50_disp_mthd_list
nv50_disp_base_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x000000 },
		{ 0x0084, 0x0008c4 },
		{ 0x0088, 0x0008d0 },
		{ 0x008c, 0x0008dc },
		{ 0x0090, 0x0008e4 },
		{ 0x0094, 0x610884 },
		{ 0x00a0, 0x6108a0 },
		{ 0x00a4, 0x610878 },
		{ 0x00c0, 0x61086c },
		{ 0x00e0, 0x610858 },
		{ 0x00e4, 0x610860 },
		{ 0x00e8, 0x6108ac },
		{ 0x00ec, 0x6108b4 },
		{ 0x0100, 0x610894 },
		{ 0x0110, 0x6108bc },
		{ 0x0114, 0x61088c },
		{}
	}
};

const struct nv50_disp_mthd_list
nv50_disp_base_mthd_image = {
	.mthd = 0x0400,
	.addr = 0x000000,
	.data = {
		{ 0x0800, 0x6108f0 },
		{ 0x0804, 0x6108fc },
		{ 0x0808, 0x61090c },
		{ 0x080c, 0x610914 },
		{ 0x0810, 0x610904 },
		{}
	}
};

static const struct nv50_disp_mthd_chan
nv50_disp_base_mthd_chan = {
	.name = "Base",
	.addr = 0x000540,
	.data = {
		{ "Global", 1, &nv50_disp_base_mthd_base },
		{  "Image", 2, &nv50_disp_base_mthd_image },
		{}
	}
};

int
nv50_disp_base_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv50_disp_base_channel_dma_v0 v0;
	} *args = data;
	struct nv50_disp *disp = (void *)engine;
	struct nv50_disp_dmac *dmac;
	int ret;

	nvif_ioctl(parent, "create disp base channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create disp base channel dma vers %d "
				   "pushbuf %016llx head %d\n",
			   args->v0.version, args->v0.pushbuf, args->v0.head);
		if (args->v0.head > disp->head.nr)
			return -EINVAL;
	} else
		return ret;

	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
				     args->v0.head, sizeof(*dmac),
				     (void **)&dmac);
	*pobject = nv_object(dmac);
	if (ret)
		return ret;

	return 0;
}

struct nv50_disp_chan_impl
nv50_disp_base_ofuncs = {
	.base.ctor = nv50_disp_base_ctor,
	.base.dtor = nv50_disp_dmac_dtor,
	.base.init = nv50_disp_dmac_init,
	.base.fini = nv50_disp_dmac_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 1,
	.attach = nv50_disp_dmac_object_attach,
	.detach = nv50_disp_dmac_object_detach,
};

/*******************************************************************************
 * EVO overlay channel objects
 ******************************************************************************/

const struct nv50_disp_mthd_list
nv50_disp_ovly_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x000000 },
		{ 0x0084, 0x0009a0 },
		{ 0x0088, 0x0009c0 },
		{ 0x008c, 0x0009c8 },
		{ 0x0090, 0x6109b4 },
		{ 0x0094, 0x610970 },
		{ 0x00a0, 0x610998 },
		{ 0x00a4, 0x610964 },
		{ 0x00c0, 0x610958 },
		{ 0x00e0, 0x6109a8 },
		{ 0x00e4, 0x6109d0 },
		{ 0x00e8, 0x6109d8 },
		{ 0x0100, 0x61094c },
		{ 0x0104, 0x610984 },
		{ 0x0108, 0x61098c },
		{ 0x0800, 0x6109f8 },
		{ 0x0808, 0x610a08 },
		{ 0x080c, 0x610a10 },
		{ 0x0810, 0x610a00 },
		{}
	}
};

static const struct nv50_disp_mthd_chan
nv50_disp_ovly_mthd_chan = {
	.name = "Overlay",
	.addr = 0x000540,
	.data = {
		{ "Global", 1, &nv50_disp_ovly_mthd_base },
		{}
	}
};

int
nv50_disp_ovly_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv50_disp_overlay_channel_dma_v0 v0;
	} *args = data;
	struct nv50_disp *disp = (void *)engine;
	struct nv50_disp_dmac *dmac;
	int ret;

	nvif_ioctl(parent, "create disp overlay channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create disp overlay channel dma vers %d "
				   "pushbuf %016llx head %d\n",
			   args->v0.version, args->v0.pushbuf, args->v0.head);
		if (args->v0.head > disp->head.nr)
			return -EINVAL;
	} else
		return ret;

	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
				     args->v0.head, sizeof(*dmac),
				     (void **)&dmac);
	*pobject = nv_object(dmac);
	if (ret)
		return ret;

	return 0;
}

struct nv50_disp_chan_impl
nv50_disp_ovly_ofuncs = {
	.base.ctor = nv50_disp_ovly_ctor,
	.base.dtor = nv50_disp_dmac_dtor,
	.base.init = nv50_disp_dmac_init,
	.base.fini = nv50_disp_dmac_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 3,
	.attach = nv50_disp_dmac_object_attach,
	.detach = nv50_disp_dmac_object_detach,
};

/*******************************************************************************
 * EVO PIO channel base class
 ******************************************************************************/

static int
nv50_disp_pioc_create_(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, int head,
		       int length, void **pobject)
{
	return nv50_disp_chan_create_(parent, engine, oclass, head,
				      length, pobject);
}

void
nv50_disp_pioc_dtor(struct nvkm_object *object)
{
	struct nv50_disp_pioc *pioc = (void *)object;
	nv50_disp_chan_destroy(&pioc->base);
}

static int
nv50_disp_pioc_init(struct nvkm_object *object)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_pioc *pioc = (void *)object;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int chid = pioc->base.chid;
	int ret;

	ret = nv50_disp_chan_init(&pioc->base);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d timeout0: %08x\n", chid,
			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
		return -EBUSY;
	}

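	/* first phase complete; now enable the channel and wait for its
	 * state field (bits 16:17) to report the 0x00010000 (active) value
	 */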
	nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001);
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10));
		if ((tmp & 0x00030000) == 0x00010000)
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d timeout1: %08x\n", chid,
			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
		return -EBUSY;
	}

	return 0;
}

static int
nv50_disp_pioc_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_pioc *pioc = (void *)object;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int chid = pioc->base.chid;

	nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d timeout: %08x\n", chid,
			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
		if (suspend)
			return -EBUSY;
	}

	return nv50_disp_chan_fini(&pioc->base, suspend);
}

/*******************************************************************************
 * EVO immediate overlay channel objects
 ******************************************************************************/

int
nv50_disp_oimm_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv50_disp_overlay_v0 v0;
	} *args = data;
	struct nv50_disp *disp = (void *)engine;
	struct nv50_disp_pioc *pioc;
	int ret;

	nvif_ioctl(parent, "create disp overlay size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create disp overlay vers %d head %d\n",
			   args->v0.version, args->v0.head);
		if (args->v0.head > disp->head.nr)
			return -EINVAL;
	} else
		return ret;

	ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
				     sizeof(*pioc), (void **)&pioc);
	*pobject = nv_object(pioc);
	if (ret)
		return ret;

	return 0;
}

struct nv50_disp_chan_impl
nv50_disp_oimm_ofuncs = {
	.base.ctor = nv50_disp_oimm_ctor,
	.base.dtor = nv50_disp_pioc_dtor,
	.base.init = nv50_disp_pioc_init,
	.base.fini = nv50_disp_pioc_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 5,
};

/*******************************************************************************
 * EVO cursor channel objects
 ******************************************************************************/

int
nv50_disp_curs_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv50_disp_cursor_v0 v0;
	} *args = data;
	struct nv50_disp *disp = (void *)engine;
	struct nv50_disp_pioc *pioc;
	int ret;

	nvif_ioctl(parent, "create disp cursor size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create disp cursor vers %d head %d\n",
			   args->v0.version, args->v0.head);
		if (args->v0.head > disp->head.nr)
			return -EINVAL;
	} else
		return ret;

	ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
				     sizeof(*pioc), (void **)&pioc);
	*pobject = nv_object(pioc);
	if (ret)
		return ret;

	return 0;
}

struct nv50_disp_chan_impl
nv50_disp_curs_ofuncs = {
	.base.ctor = nv50_disp_curs_ctor,
	.base.dtor = nv50_disp_pioc_dtor,
	.base.init = nv50_disp_pioc_init,
	.base.fini = nv50_disp_pioc_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 7,
};

/*******************************************************************************
 * Base display object
 ******************************************************************************/

int
nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	const u32 blanke = nvkm_rd32(device, 0x610aec + (head * 0x540));
	const u32 blanks = nvkm_rd32(device, 0x610af4 + (head * 0x540));
	const u32 total  = nvkm_rd32(device, 0x610afc + (head * 0x540));
	union {
		struct nv04_disp_scanoutpos_v0 v0;
	} *args = data;
	int ret;

	nvif_ioctl(object, "disp scanoutpos size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object, "disp scanoutpos vers %d\n",
			   args->v0.version);
		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
		args->v0.hblanke = (blanke & 0x0000ffff);
		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
		args->v0.hblanks = (blanks & 0x0000ffff);
		args->v0.vtotal  = ( total & 0xffff0000) >> 16;
		args->v0.htotal  = ( total & 0x0000ffff);
		args->v0.time[0] = ktime_to_ns(ktime_get());
		args->v0.vline = /* vline read locks hline */
			nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff;
		args->v0.time[1] = ktime_to_ns(ktime_get());
		args->v0.hline =
			nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff;
	} else
		return ret;

	return 0;
}

int
nv50_disp_main_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine);
	union {
		struct nv50_disp_mthd_v0 v0;
		struct nv50_disp_mthd_v1 v1;
	} *args = data;
	struct nv50_disp *disp = (void *)object->engine;
	struct nvkm_output *outp = NULL;
	struct nvkm_output *temp;
	u16 type, mask = 0;
	int head, ret;

	if (mthd != NV50_DISP_MTHD)
		return -EINVAL;

	nvif_ioctl(object, "disp mthd size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, true)) {
		nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
			   args->v0.version, args->v0.method, args->v0.head);
		mthd = args->v0.method;
		head = args->v0.head;
	} else
	if (nvif_unpack(args->v1, 1, 1, true)) {
		nvif_ioctl(object, "disp mthd vers %d mthd %02x "
				   "type %04x mask %04x\n",
			   args->v1.version, args->v1.method,
			   args->v1.hasht, args->v1.hashm);
		mthd = args->v1.method;
		type = args->v1.hasht;
		mask = args->v1.hashm;
		head = ffs((mask >> 8) & 0x0f) - 1;
	} else
		return ret;

	if (head < 0 || head >= disp->head.nr)
		return -ENXIO;

	if (mask) {
		list_for_each_entry(temp, &disp->base.outp, head) {
			if ((temp->info.hasht == type) &&
			    (temp->info.hashm & mask) == mask) {
				outp = temp;
				break;
			}
		}
		if (outp == NULL)
			return -ENXIO;
	}

	switch (mthd) {
	case NV50_DISP_SCANOUTPOS:
		return impl->head.scanoutpos(object, disp, data, size, head);
	default:
		break;
	}

	switch (mthd * !!outp) {
	case NV50_DISP_MTHD_V1_DAC_PWR:
		return disp->dac.power(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_DAC_LOAD:
		return disp->dac.sense(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_SOR_PWR:
		return disp->sor.power(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_SOR_HDA_ELD:
		if (!disp->sor.hda_eld)
			return -ENODEV;
		return disp->sor.hda_eld(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_SOR_HDMI_PWR:
		if (!disp->sor.hdmi)
			return -ENODEV;
		return disp->sor.hdmi(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: {
		union {
			struct nv50_disp_sor_lvds_script_v0 v0;
		} *args = data;
		nvif_ioctl(object, "disp sor lvds script size %d\n", size);
		if (nvif_unpack(args->v0, 0, 0, false)) {
			nvif_ioctl(object, "disp sor lvds script "
					   "vers %d name %04x\n",
				   args->v0.version, args->v0.script);
			disp->sor.lvdsconf = args->v0.script;
			return 0;
		} else
			return ret;
	}
		break;
	case NV50_DISP_MTHD_V1_SOR_DP_PWR: {
		struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
		union {
			struct nv50_disp_sor_dp_pwr_v0 v0;
		} *args = data;
		nvif_ioctl(object, "disp sor dp pwr size %d\n", size);
		if (nvif_unpack(args->v0, 0, 0, false)) {
			nvif_ioctl(object, "disp sor dp pwr vers %d state %d\n",
				   args->v0.version, args->v0.state);
			if (args->v0.state == 0) {
				nvkm_notify_put(&outpdp->irq);
				outpdp->func->lnk_pwr(outpdp, 0);
				atomic_set(&outpdp->lt.done, 0);
				return 0;
			} else
			if (args->v0.state != 0) {
				nvkm_output_dp_train(&outpdp->base, 0, true);
				return 0;
			}
		} else
			return ret;
	}
		break;
	case NV50_DISP_MTHD_V1_PIOR_PWR:
		if (!disp->pior.power)
			return -ENODEV;
		return disp->pior.power(object, disp, data, size, head, outp);
	default:
		break;
	}

	return -EINVAL;
}

int
nv50_disp_main_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	struct nv50_disp *disp = (void *)engine;
	struct nv50_disp_base *base;
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_gpuobj *instmem = (void *)parent;
	int ret;

	ret = nvkm_parent_create(parent, engine, oclass, 0,
				 disp->sclass, 0, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	return nvkm_ramht_new(device, 0x1000, 0, instmem, &base->ramht);
}

void
nv50_disp_main_dtor(struct nvkm_object *object)
{
	struct nv50_disp_base *base = (void *)object;
	nvkm_ramht_del(&base->ramht);
	nvkm_parent_destroy(&base->base);
}

static int
nv50_disp_main_init(struct nvkm_object *object)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_base *base = (void *)object;
	struct nvkm_device *device = disp->base.engine.subdev.device;
	int ret, i;
	u32 tmp;

	ret = nvkm_parent_init(&base->base);
	if (ret)
		return ret;

	/* The below segments of code copying values from one register to
	 * another appear to inform EVO of the display capabilities or
	 * something similar. NFI what the 0x614004 caps are for..
	 */
	tmp = nvkm_rd32(device, 0x614004);
	nvkm_wr32(device, 0x610184, tmp);

	/* ... CRTC caps */
	for (i = 0; i < disp->head.nr; i++) {
		tmp = nvkm_rd32(device, 0x616100 + (i * 0x800));
		nvkm_wr32(device, 0x610190 + (i * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616104 + (i * 0x800));
		nvkm_wr32(device, 0x610194 + (i * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616108 + (i * 0x800));
		nvkm_wr32(device, 0x610198 + (i * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x61610c + (i * 0x800));
		nvkm_wr32(device, 0x61019c + (i * 0x10), tmp);
	}

	/* ... DAC caps */
	for (i = 0; i < disp->dac.nr; i++) {
		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
		nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
	}

	/* ... SOR caps */
	for (i = 0; i < disp->sor.nr; i++) {
		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
		nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
	}

	/* ... PIOR caps */
	for (i = 0; i < disp->pior.nr; i++) {
		tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
		nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
	}

	/* steal display away from vbios, or something like that */
	if (nvkm_rd32(device, 0x610024) & 0x00000100) {
		nvkm_wr32(device, 0x610024, 0x00000100);
		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
				break;
		) < 0)
			return -EBUSY;
	}

	/* point at display engine memory area (hash table, objects) */
	nvkm_wr32(device, 0x610010, (base->ramht->gpuobj->addr >> 8) | 9);

	/* enable supervisor interrupts, disable everything else */
	nvkm_wr32(device, 0x61002c, 0x00000370);
	nvkm_wr32(device, 0x610028, 0x00000000);
	return 0;
}

static int
nv50_disp_main_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_base *base = (void *)object;
	struct nvkm_device *device = disp->base.engine.subdev.device;

	/* disable all interrupts */
	nvkm_wr32(device, 0x610024, 0x00000000);
	nvkm_wr32(device, 0x610020, 0x00000000);

	return nvkm_parent_fini(&base->base, suspend);
}

struct nvkm_ofuncs
nv50_disp_main_ofuncs = {
	.ctor = nv50_disp_main_ctor,
	.dtor = nv50_disp_main_dtor,
	.init = nv50_disp_main_init,
	.fini = nv50_disp_main_fini,
	.mthd = nv50_disp_main_mthd,
	.ntfy = nvkm_disp_ntfy,
};

static struct nvkm_oclass
nv50_disp_main_oclass[] = {
	{ NV50_DISP, &nv50_disp_main_ofuncs },
	{}
};

static struct nvkm_oclass
nv50_disp_sclass[] = {
	{ NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
	{ NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
	{ NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
	{ NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
	{ NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
	{}
};

/*******************************************************************************
 * Display context, tracks instmem allocation and prevents more than one
 * client using the display hardware at any time.
 ******************************************************************************/

static int
nv50_disp_data_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	struct nv50_disp *disp = (void *)engine;
	struct nvkm_gpuobj *gpuobj;
	int ret;

	/* no context needed for channel objects... */
	if (nv_mclass(parent) != NV_DEVICE) {
		atomic_inc(&parent->refcount);
		*pobject = parent;
		return 1;
	}

	/* allocate display hardware to client */
	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, NULL,
				 0x10000, 0x10000, NVOBJ_FLAG_HEAP,
				 &gpuobj);
	*pobject = nv_object(gpuobj);
	mutex_lock(&nv_subdev(disp)->mutex);
	if (!list_empty(&nv_engine(disp)->contexts))
		ret = -EBUSY;
	mutex_unlock(&nv_subdev(disp)->mutex);
	return ret;
}

struct nvkm_oclass
nv50_disp_cclass = {
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_disp_data_ctor,
		.dtor = _nvkm_gpuobj_dtor,
		.init = _nvkm_gpuobj_init,
		.fini = _nvkm_gpuobj_fini,
		.rd32 = _nvkm_gpuobj_rd32,
		.wr32 = _nvkm_gpuobj_wr32,
	},
};

/*******************************************************************************
 * Display engine implementation
 ******************************************************************************/

static void
nv50_disp_vblank_fini(struct nvkm_event *event, int type, int head)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	struct nvkm_device *device = disp->engine.subdev.device;
	nvkm_mask(device, 0x61002c, (4 << head), 0);
}

static void
nv50_disp_vblank_init(struct nvkm_event *event, int type, int head)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	struct nvkm_device *device = disp->engine.subdev.device;
	nvkm_mask(device, 0x61002c, (4 << head), (4 << head));
}

const struct nvkm_event_func
nv50_disp_vblank_func = {
	.ctor = nvkm_disp_vblank_ctor,
	.init = nv50_disp_vblank_init,
	.fini = nv50_disp_vblank_fini,
};

static const struct nvkm_enum
nv50_disp_intr_error_type[] = {
	{ 3, "ILLEGAL_MTHD" },
	{ 4, "INVALID_VALUE" },
	{ 5, "INVALID_STATE" },
	{ 7, "INVALID_HANDLE" },
	{}
};

static const struct nvkm_enum
nv50_disp_intr_error_code[] = {
	{ 0x00, "" },
	{}
};

static void
nv50_disp_intr_error(struct nv50_disp *disp, int chid)
{
	struct nv50_disp_impl *impl = (void *)nv_object(disp)->oclass;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
	u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
	u32 code = (addr & 0x00ff0000) >> 16;
	u32 type = (addr & 0x00007000) >> 12;
	u32 mthd = (addr & 0x00000ffc);
	const struct nvkm_enum *ec, *et;

	et = nvkm_enum_find(nv50_disp_intr_error_type, type);
	ec = nvkm_enum_find(nv50_disp_intr_error_code, code);

	nvkm_error(subdev,
		   "ERROR %d [%s] %02x [%s] chid %d mthd %04x data %08x\n",
		   type, et ? et->name : "", code, ec ? ec->name : "",
		   chid, mthd, data);

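	/* for method 0x0080 (UPDATE), dump the offending channel's method
	 * state to make the error easier to diagnose
	 */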
	if (chid == 0) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_mthd_chan(disp, NV_DBG_ERROR, chid - 0,
					    impl->mthd.core);
			break;
		default:
			break;
		}
	} else
	if (chid <= 2) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_mthd_chan(disp, NV_DBG_ERROR, chid - 1,
					    impl->mthd.base);
			break;
		default:
			break;
		}
	} else
	if (chid <= 4) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_mthd_chan(disp, NV_DBG_ERROR, chid - 3,
					    impl->mthd.ovly);
			break;
		default:
			break;
		}
	}

	nvkm_wr32(device, 0x610020, 0x00010000 << chid);
	nvkm_wr32(device, 0x610080 + (chid * 0x08), 0x90000000);
}

static struct nvkm_output *
exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
	    u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
	    struct nvbios_outp *info)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvkm_output *outp;
	u16 mask, type;

	if (or < 4) {
		type = DCB_OUTPUT_ANALOG;
		mask = 0;
	} else
	if (or < 8) {
		switch (ctrl & 0x00000f00) {
		case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
		case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
		case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
		case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
		default:
			nvkm_error(subdev, "unknown SOR mc %08x\n", ctrl);
			return NULL;
		}
		or -= 4;
	} else {
		or = or - 8;
		type = 0x0010;
		mask = 0;
		switch (ctrl & 0x00000f00) {
		case 0x00000000: type |= disp->pior.type[or]; break;
		default:
			nvkm_error(subdev, "unknown PIOR mc %08x\n", ctrl);
			return NULL;
		}
	}

	mask  = 0x00c0 & (mask << 6);
	mask |= 0x0001 << or;
	mask |= 0x0100 << head;

	list_for_each_entry(outp, &disp->base.outp, head) {
		if ((outp->info.hasht & 0xff) == type &&
		    (outp->info.hashm & mask) == mask) {
			*data = nvbios_outp_match(bios, outp->info.hasht,
						  outp->info.hashm,
						  ver, hdr, cnt, len, info);
			if (!*data)
				return NULL;
			return outp;
		}
	}

	return NULL;
}

static struct nvkm_output *
exec_script(struct nv50_disp *disp, int head, int id)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_output *outp;
	struct nvbios_outp info;
	u8 ver, hdr, cnt, len;
	u32 data, ctrl = 0;
	u32 reg;
	int i;

	/* DAC */
	for (i = 0; !(ctrl & (1 << head)) && i < disp->dac.nr; i++)
		ctrl = nvkm_rd32(device, 0x610b5c + (i * 8));

	/* SOR */
	if (!(ctrl & (1 << head))) {
		if (nv_device(disp)->chipset < 0x90 ||
		    nv_device(disp)->chipset == 0x92 ||
		    nv_device(disp)->chipset == 0xa0) {
			reg = 0x610b74;
		} else {
			reg = 0x610798;
		}
		for (i = 0; !(ctrl & (1 << head)) && i < disp->sor.nr; i++)
			ctrl = nvkm_rd32(device, reg + (i * 8));
		i += 4;
	}

	/* PIOR */
	if (!(ctrl & (1 << head))) {
		for (i = 0; !(ctrl & (1 << head)) && i < disp->pior.nr; i++)
			ctrl = nvkm_rd32(device, 0x610b84 + (i * 8));
		i += 8;
	}

	if (!(ctrl & (1 << head)))
		return NULL;
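	/* the scan loops above leave 'i' one past the matching output; step
	 * back to recover the OR index (DACs first, SORs from 4, PIORs from 8)
	 */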
	i--;

	outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
	if (outp) {
		struct nvbios_init init = {
			.subdev = nv_subdev(disp),
			.bios = bios,
			.offset = info.script[id],
			.outp = &outp->info,
			.crtc = head,
			.execute = 1,
		};

		nvbios_exec(&init);
	}

	return outp;
}

static struct nvkm_output *
exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_output *outp;
	struct nvbios_outp info1;
	struct nvbios_ocfg info2;
	u8 ver, hdr, cnt, len;
	u32 data, ctrl = 0;
	u32 reg;
	int i;

	/* DAC */
	for (i = 0; !(ctrl & (1 << head)) && i < disp->dac.nr; i++)
		ctrl = nvkm_rd32(device, 0x610b58 + (i * 8));

	/* SOR */
	if (!(ctrl & (1 << head))) {
		if (nv_device(disp)->chipset < 0x90 ||
		    nv_device(disp)->chipset == 0x92 ||
		    nv_device(disp)->chipset == 0xa0) {
			reg = 0x610b70;
		} else {
			reg = 0x610794;
		}
		for (i = 0; !(ctrl & (1 << head)) && i < disp->sor.nr; i++)
			ctrl = nvkm_rd32(device, reg + (i * 8));
		i += 4;
	}

	/* PIOR */
	if (!(ctrl & (1 << head))) {
		for (i = 0; !(ctrl & (1 << head)) && i < disp->pior.nr; i++)
			ctrl = nvkm_rd32(device, 0x610b80 + (i * 8));
		i += 8;
	}

	if (!(ctrl & (1 << head)))
		return NULL;
	i--;

	outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
	if (!outp)
		return NULL;

	if (outp->info.location == 0) {
		switch (outp->info.type) {
		case DCB_OUTPUT_TMDS:
			*conf = (ctrl & 0x00000f00) >> 8;
			if (pclk >= 165000)
				*conf |= 0x0100;
			break;
		case DCB_OUTPUT_LVDS:
			*conf = disp->sor.lvdsconf;
			break;
		case DCB_OUTPUT_DP:
			*conf = (ctrl & 0x00000f00) >> 8;
			break;
		case DCB_OUTPUT_ANALOG:
		default:
			*conf = 0x00ff;
			break;
		}
	} else {
		*conf = (ctrl & 0x00000f00) >> 8;
		pclk = pclk / 2;
	}

	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
	if (data && id < 0xff) {
		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
		if (data) {
			struct nvbios_init init = {
				.subdev = nv_subdev(disp),
				.bios = bios,
				.offset = data,
				.outp = &outp->info,
				.crtc = head,
				.execute = 1,
			};

			nvbios_exec(&init);
		}
	}

	return outp;
}

static void
nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
{
	exec_script(disp, head, 1);
}

static void
nv50_disp_intr_unk20_0(struct nv50_disp *disp, int head)
{
	struct nvkm_output *outp = exec_script(disp, head, 2);

	/* the binary driver does this outside of the supervisor handling
	 * (after the third supervisor from a detach).  we (currently?)
	 * allow both detach/attach to happen in the same set of
	 * supervisor interrupts, so it would make sense to execute this
	 * (full power down?) script after all the detach phases of the
	 * supervisor handling.  like with training if needed from the
	 * second supervisor, nvidia doesn't do this, so who knows if it's
	 * entirely safe, but it does appear to work..
	 *
	 * without this script being run, on some configurations i've
	 * seen, switching from DP to TMDS on a DP connector may result
	 * in a blank screen (SOR_PWR off/on can restore it)
	 */
	if (outp && outp->info.type == DCB_OUTPUT_DP) {
		struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
		struct nvbios_init init = {
			.subdev = nv_subdev(disp),
			.bios = nvkm_bios(disp),
			.outp = &outp->info,
			.crtc = head,
			.offset = outpdp->info.script[4],
			.execute = 1,
		};

		nvbios_exec(&init);
		atomic_set(&outpdp->lt.done, 0);
	}
}

static void
nv50_disp_intr_unk20_1(struct nv50_disp *disp, int head)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_devinit *devinit = device->devinit;
	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
	if (pclk)
		devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
}

static void
nv50_disp_intr_unk20_2_dp(struct nv50_disp *disp, int head,
			  struct dcb_output *outp, u32 pclk)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const int link = !(outp->sorconf.link & 1);
	const int or = ffs(outp->or) - 1;
	const u32 soff = (or * 0x800);
	const u32 loff = (link * 0x080) + soff;
	const u32 ctrl = nvkm_rd32(device, 0x610794 + (or * 8));
	const u32 symbol = 100000;
	const s32 vactive = nvkm_rd32(device, 0x610af8 + (head * 0x540)) & 0xffff;
	const s32 vblanke = nvkm_rd32(device, 0x610ae8 + (head * 0x540)) & 0xffff;
	const s32 vblanks = nvkm_rd32(device, 0x610af0 + (head * 0x540)) & 0xffff;
	u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff);
	u32 clksor = nvkm_rd32(device, 0x614300 + soff);
	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
	int TU, VTUi, VTUf, VTUa;
	u64 link_data_rate, link_ratio, unk;
	u32 best_diff = 64 * symbol;
	u32 link_nr, link_bw, bits;
	u64 value;

	link_bw = (clksor & 0x000c0000) ? 270000 : 162000;
	link_nr = hweight32(dpctrl & 0x000f0000);

	/* symbols/hblank - algorithm taken from comments in tegra driver */
	value = vblanke + vactive - vblanks - 7;
	value = value * link_bw;
	do_div(value, pclk);
	value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
	nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, value);

	/* symbols/vblank - algorithm taken from comments in tegra driver */
	value = vblanks - vblanke - 25;
	value = value * link_bw;
	do_div(value, pclk);
	value = value - ((36 / link_nr) + 3) - 1;
	nvkm_mask(device, 0x61c1ec + soff, 0x00ffffff, value);

	/* watermark / activesym */
	if      ((ctrl & 0xf0000) == 0x60000) bits = 30;
	else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
	else                                  bits = 18;

	link_data_rate = (pclk * bits / 8) / link_nr;

	/* calculate ratio of packed data rate to link symbol rate */
	link_ratio = link_data_rate * symbol;
	do_div(link_ratio, link_bw);

	for (TU = 64; TU >= 32; TU--) {
		/* calculate average number of valid symbols in each TU */
		u32 tu_valid = link_ratio * TU;
		u32 calc, diff;

		/* find a hw representation for the fraction.. */
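		/* integer part goes in VTUi; the fractional part is
		 * approximated as 1/VTUf of a symbol, added either directly
		 * (VTUa=0) or as a whole symbol minus 1/VTUf (VTUa=1)
		 */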
		VTUi = tu_valid / symbol;
		calc = VTUi * symbol;
		diff = tu_valid - calc;
		if (diff) {
			if (diff >= (symbol / 2)) {
				VTUf = symbol / (symbol - diff);
				if (symbol - (VTUf * diff))
					VTUf++;

				if (VTUf <= 15) {
					VTUa = 1;
					calc += symbol - (symbol / VTUf);
				} else {
					VTUa = 0;
					VTUf = 1;
					calc += symbol;
				}
			} else {
				VTUa = 0;
				VTUf = min((int)(symbol / diff), 15);
				calc += symbol / VTUf;
			}

			diff = calc - tu_valid;
		} else {
			/* no remainder, but the hw doesn't like the fractional
			 * part to be zero.  decrement the integer part and
			 * have the fraction add a whole symbol back
			 */
			VTUa = 0;
			VTUf = 1;
			VTUi--;
		}

		if (diff < best_diff) {
			best_diff = diff;
			bestTU = TU;
			bestVTUa = VTUa;
			bestVTUf = VTUf;
			bestVTUi = VTUi;
			if (diff == 0)
				break;
		}
	}

	if (!bestTU) {
		nvkm_error(subdev, "unable to find suitable dp config\n");
		return;
	}

	/* XXX close to vbios numbers, but not right */
	unk  = (symbol - link_ratio) * bestTU;
	unk *= link_ratio;
	do_div(unk, symbol);
	do_div(unk, symbol);
	unk += 6;

	nvkm_mask(device, 0x61c10c + loff, 0x000001fc, bestTU << 2);
	nvkm_mask(device, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
						       bestVTUf << 16 |
						       bestVTUi << 8 | unk);
}

static void
nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_output *outp;
	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
	u32 hval, hreg = 0x614200 + (head * 0x800);
	u32 oval, oreg;
	u32 mask, conf;

	outp = exec_clkcmp(disp, head, 0xff, pclk, &conf);
	if (!outp)
		return;

	/* we allow both encoder attach and detach operations to occur
	 * within a single supervisor (ie. modeset) sequence.  the
	 * encoder detach scripts quite often switch off power to the
	 * lanes, which requires the link to be re-trained.
	 *
	 * this is not generally an issue as the sink "must" (heh)
	 * signal an irq when it's lost sync so the driver can
	 * re-train.
	 *
	 * however, on some boards, if one does not configure at least
	 * the gpu side of the link *before* attaching, then various
	 * things can go horribly wrong (PDISP disappearing from mmio,
	 * third supervisor never happens, etc).
	 *
	 * the solution is simply to retrain here, if necessary.  last
	 * i checked, the binary driver userspace does not appear to
	 * trigger this situation (it forces an UPDATE between steps).
	 */
	if (outp->info.type == DCB_OUTPUT_DP) {
		u32 soff = (ffs(outp->info.or) - 1) * 0x08;
		u32 ctrl, datarate;

		if (outp->info.location == 0) {
			ctrl = nvkm_rd32(device, 0x610794 + soff);
			soff = 1;
		} else {
			ctrl = nvkm_rd32(device, 0x610b80 + soff);
			soff = 2;
		}

		switch ((ctrl & 0x000f0000) >> 16) {
		case 6: datarate = pclk * 30; break;
		case 5: datarate = pclk * 24; break;
		case 2:
		default:
			datarate = pclk * 18;
			break;
		}

		if (nvkm_output_dp_train(outp, datarate / soff, true))
			OUTP_ERR(outp, "link not trained before attach");
	}

	exec_clkcmp(disp, head, 0, pclk, &conf);

	if (!outp->info.location && outp->info.type == DCB_OUTPUT_ANALOG) {
		oreg = 0x614280 + (ffs(outp->info.or) - 1) * 0x800;
		oval = 0x00000000;
		hval = 0x00000000;
		mask = 0xffffffff;
	} else
	if (!outp->info.location) {
		if (outp->info.type == DCB_OUTPUT_DP)
			nv50_disp_intr_unk20_2_dp(disp, head, &outp->info, pclk);
		oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800;
		oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
		hval = 0x00000000;
		mask = 0x00000707;
	} else {
		oreg = 0x614380 + (ffs(outp->info.or) - 1) * 0x800;
		oval = 0x00000001;
		hval = 0x00000001;
		mask = 0x00000707;
	}

	nvkm_mask(device, hreg, 0x0000000f, hval);
	nvkm_mask(device, oreg, mask, oval);
}

/* If programming a TMDS output on a SOR that can also be configured for
 * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
 *
 * It looks like the VBIOS TMDS scripts make an attempt at this, however,
 * the VBIOS scripts on at least one board I have only switch it off on
 * link 0, causing a blank display if the output has previously been
 * programmed for DisplayPort.
 */
static void
nv50_disp_intr_unk40_0_tmds(struct nv50_disp *disp,
			    struct dcb_output *outp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	const int link = !(outp->sorconf.link & 1);
	const int or = ffs(outp->or) - 1;
	const u32 loff = (or * 0x800) + (link * 0x80);
	const u16 mask = (outp->sorconf.link << 6) | outp->or;
	struct dcb_output match;
	u8 ver, hdr;

	if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match))
		nvkm_mask(device, 0x61c10c + loff, 0x00000001, 0x00000000);
}

static void
nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_output *outp;
	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
	u32 conf;

	outp = exec_clkcmp(disp, head, 1, pclk, &conf);
	if (!outp)
		return;

	if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
		nv50_disp_intr_unk40_0_tmds(disp, &outp->info);
}

void
nv50_disp_intr_supervisor(struct work_struct *work)
{
	struct nv50_disp *disp =
		container_of(work, struct nv50_disp, supervisor);
	struct nv50_disp_impl *impl = (void *)nv_object(disp)->oclass;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 super = nvkm_rd32(device, 0x610030);
	int head;

	nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super, super);

	if (disp->super & 0x00000010) {
		nv50_disp_mthd_chan(disp, NV_DBG_DEBUG, 0, impl->mthd.core);
		for (head = 0; head < disp->head.nr; head++) {
			if (!(super & (0x00000020 << head)))
				continue;
			if (!(super & (0x00000080 << head)))
				continue;
			nv50_disp_intr_unk10_0(disp, head);
		}
	} else
	if (disp->super & 0x00000020) {
		for (head = 0; head < disp->head.nr; head++) {
			if (!(super & (0x00000080 << head)))
				continue;
			nv50_disp_intr_unk20_0(disp, head);
		}
		for (head = 0; head < disp->head.nr; head++) {
			if (!(super & (0x00000200 << head)))
				continue;
			nv50_disp_intr_unk20_1(disp, head);
		}
		for (head = 0; head < disp->head.nr; head++) {
			if (!(super & (0x00000080 << head)))
				continue;
			nv50_disp_intr_unk20_2(disp, head);
		}
	} else
	if (disp->super & 0x00000040) {
		for (head = 0; head < disp->head.nr; head++) {
			if (!(super & (0x00000080 << head)))
				continue;
			nv50_disp_intr_unk40_0(disp, head);
		}
	}

	nvkm_wr32(device, 0x610030, 0x80000000);
}

void
nv50_disp_intr(struct nvkm_subdev *subdev)
{
	struct nv50_disp *disp = (void *)subdev;
	struct nvkm_device *device = disp->base.engine.subdev.device;
	u32 intr0 = nvkm_rd32(device, 0x610020);
	u32 intr1 = nvkm_rd32(device, 0x610024);

	while (intr0 & 0x001f0000) {
		u32 chid = __ffs(intr0 & 0x001f0000) - 16;
		nv50_disp_intr_error(disp, chid);
		intr0 &= ~(0x00010000 << chid);
	}

	while (intr0 & 0x0000001f) {
		u32 chid = __ffs(intr0 & 0x0000001f);
		nv50_disp_chan_uevent_send(disp, chid);
		intr0 &= ~(0x00000001 << chid);
	}

	if (intr1 & 0x00000004) {
		nvkm_disp_vblank(&disp->base, 0);
		nvkm_wr32(device, 0x610024, 0x00000004);
	}

	if (intr1 & 0x00000008) {
		nvkm_disp_vblank(&disp->base, 1);
		nvkm_wr32(device, 0x610024, 0x00000008);
	}

	if (intr1 & 0x00000070) {
		disp->super = (intr1 & 0x00000070);
		schedule_work(&disp->supervisor);
		nvkm_wr32(device, 0x610024, disp->super);
	}
}

static int
nv50_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv50_disp *disp;
	int ret;

	ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
			       "display", &disp);
	*pobject = nv_object(disp);
	if (ret)
		return ret;

	ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &disp->uevent);
	if (ret)
		return ret;

	nv_engine(disp)->sclass = nv50_disp_main_oclass;
	nv_engine(disp)->cclass = &nv50_disp_cclass;
	nv_subdev(disp)->intr = nv50_disp_intr;
	INIT_WORK(&disp->supervisor, nv50_disp_intr_supervisor);
	disp->sclass = nv50_disp_sclass;
	disp->head.nr = 2;
	disp->dac.nr = 3;
	disp->sor.nr = 2;
	disp->pior.nr = 3;
	disp->dac.power = nv50_dac_power;
	disp->dac.sense = nv50_dac_sense;
	disp->sor.power = nv50_sor_power;
	disp->pior.power = nv50_pior_power;
	return 0;
}

struct nvkm_oclass *
nv50_disp_oclass = &(struct nv50_disp_impl) {
	.base.base.handle = NV_ENGINE(DISP, 0x50),
	.base.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_disp_ctor,
		.dtor = _nvkm_disp_dtor,
		.init = _nvkm_disp_init,
		.fini = _nvkm_disp_fini,
	},
	.base.outp.internal.crt = nv50_dac_output_new,
	.base.outp.internal.tmds = nv50_sor_output_new,
	.base.outp.internal.lvds = nv50_sor_output_new,
	.base.outp.external.tmds = nv50_pior_output_new,
	.base.outp.external.dp = nv50_pior_dp_new,
	.base.vblank = &nv50_disp_vblank_func,
	.mthd.core = &nv50_disp_core_mthd_chan,
	.mthd.base = &nv50_disp_base_mthd_chan,
	.mthd.ovly = &nv50_disp_ovly_mthd_chan,
	.mthd.prev = 0x000004,
	.head.scanoutpos = nv50_disp_main_scanoutpos,
}.base.base;