1 /* 2 * Copyright 2012 Red Hat Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
21 * 22 * Authors: Ben Skeggs 23 */ 24 #include "nv50.h" 25 #include "outp.h" 26 #include "outpdp.h" 27 28 #include <core/client.h> 29 #include <core/engctx.h> 30 #include <core/enum.h> 31 #include <core/handle.h> 32 #include <core/ramht.h> 33 #include <engine/dmaobj.h> 34 #include <subdev/bios.h> 35 #include <subdev/bios/dcb.h> 36 #include <subdev/bios/disp.h> 37 #include <subdev/bios/init.h> 38 #include <subdev/bios/pll.h> 39 #include <subdev/devinit.h> 40 #include <subdev/fb.h> 41 #include <subdev/timer.h> 42 43 #include <nvif/class.h> 44 #include <nvif/event.h> 45 #include <nvif/unpack.h> 46 47 /******************************************************************************* 48 * EVO channel base class 49 ******************************************************************************/ 50 51 static int 52 nv50_disp_chan_create_(struct nvkm_object *parent, 53 struct nvkm_object *engine, 54 struct nvkm_oclass *oclass, int head, 55 int length, void **pobject) 56 { 57 const struct nv50_disp_chan_impl *impl = (void *)oclass->ofuncs; 58 struct nv50_disp_base *base = (void *)parent; 59 struct nv50_disp_chan *chan; 60 int chid = impl->chid + head; 61 int ret; 62 63 if (base->chan & (1 << chid)) 64 return -EBUSY; 65 base->chan |= (1 << chid); 66 67 ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL, 68 (1ULL << NVDEV_ENGINE_DMAOBJ), 69 length, pobject); 70 chan = *pobject; 71 if (ret) 72 return ret; 73 chan->chid = chid; 74 75 nv_parent(chan)->object_attach = impl->attach; 76 nv_parent(chan)->object_detach = impl->detach; 77 return 0; 78 } 79 80 static void 81 nv50_disp_chan_destroy(struct nv50_disp_chan *chan) 82 { 83 struct nv50_disp_base *base = (void *)nv_object(chan)->parent; 84 base->chan &= ~(1 << chan->chid); 85 nvkm_namedb_destroy(&chan->base); 86 } 87 88 static void 89 nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index) 90 { 91 struct nv50_disp *disp = container_of(event, typeof(*disp), uevent); 92 struct nvkm_device *device = 
disp->base.engine.subdev.device; 93 nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000000 << index); 94 nvkm_wr32(device, 0x610020, 0x00000001 << index); 95 } 96 97 static void 98 nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index) 99 { 100 struct nv50_disp *disp = container_of(event, typeof(*disp), uevent); 101 struct nvkm_device *device = disp->base.engine.subdev.device; 102 nvkm_wr32(device, 0x610020, 0x00000001 << index); 103 nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000001 << index); 104 } 105 106 void 107 nv50_disp_chan_uevent_send(struct nv50_disp *disp, int chid) 108 { 109 struct nvif_notify_uevent_rep { 110 } rep; 111 112 nvkm_event_send(&disp->uevent, 1, chid, &rep, sizeof(rep)); 113 } 114 115 int 116 nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size, 117 struct nvkm_notify *notify) 118 { 119 struct nv50_disp_dmac *dmac = (void *)object; 120 union { 121 struct nvif_notify_uevent_req none; 122 } *args = data; 123 int ret; 124 125 if (nvif_unvers(args->none)) { 126 notify->size = sizeof(struct nvif_notify_uevent_rep); 127 notify->types = 1; 128 notify->index = dmac->base.chid; 129 return 0; 130 } 131 132 return ret; 133 } 134 135 const struct nvkm_event_func 136 nv50_disp_chan_uevent = { 137 .ctor = nv50_disp_chan_uevent_ctor, 138 .init = nv50_disp_chan_uevent_init, 139 .fini = nv50_disp_chan_uevent_fini, 140 }; 141 142 int 143 nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type, 144 struct nvkm_event **pevent) 145 { 146 struct nv50_disp *disp = (void *)object->engine; 147 switch (type) { 148 case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT: 149 *pevent = &disp->uevent; 150 return 0; 151 default: 152 break; 153 } 154 return -EINVAL; 155 } 156 157 int 158 nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size) 159 { 160 struct nv50_disp_chan *chan = (void *)object; 161 *addr = nv_device_resource_start(nv_device(object), 0) + 162 0x640000 + (chan->chid * 0x1000); 163 *size = 
0x001000; 164 return 0; 165 } 166 167 u32 168 nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr) 169 { 170 struct nv50_disp_chan *chan = (void *)object; 171 struct nvkm_device *device = object->engine->subdev.device; 172 return nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr); 173 } 174 175 void 176 nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data) 177 { 178 struct nv50_disp_chan *chan = (void *)object; 179 struct nvkm_device *device = object->engine->subdev.device; 180 nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data); 181 } 182 183 /******************************************************************************* 184 * EVO DMA channel base class 185 ******************************************************************************/ 186 187 static int 188 nv50_disp_dmac_object_attach(struct nvkm_object *parent, 189 struct nvkm_object *object, u32 name) 190 { 191 struct nv50_disp_base *base = (void *)parent->parent; 192 struct nv50_disp_chan *chan = (void *)parent; 193 u32 addr = nv_gpuobj(object)->node->offset; 194 u32 chid = chan->chid; 195 u32 data = (chid << 28) | (addr << 10) | chid; 196 return nvkm_ramht_insert(base->ramht, chid, name, data); 197 } 198 199 static void 200 nv50_disp_dmac_object_detach(struct nvkm_object *parent, int cookie) 201 { 202 struct nv50_disp_base *base = (void *)parent->parent; 203 nvkm_ramht_remove(base->ramht, cookie); 204 } 205 206 static int 207 nv50_disp_dmac_create_(struct nvkm_object *parent, 208 struct nvkm_object *engine, 209 struct nvkm_oclass *oclass, u32 pushbuf, int head, 210 int length, void **pobject) 211 { 212 struct nv50_disp_dmac *dmac; 213 int ret; 214 215 ret = nv50_disp_chan_create_(parent, engine, oclass, head, 216 length, pobject); 217 dmac = *pobject; 218 if (ret) 219 return ret; 220 221 dmac->pushdma = (void *)nvkm_handle_ref(parent, pushbuf); 222 if (!dmac->pushdma) 223 return -ENOENT; 224 225 switch (nv_mclass(dmac->pushdma)) { 226 case 0x0002: 227 case 0x003d: 228 
if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff) 229 return -EINVAL; 230 231 switch (dmac->pushdma->target) { 232 case NV_MEM_TARGET_VRAM: 233 dmac->push = 0x00000001 | dmac->pushdma->start >> 8; 234 break; 235 case NV_MEM_TARGET_PCI_NOSNOOP: 236 dmac->push = 0x00000003 | dmac->pushdma->start >> 8; 237 break; 238 default: 239 return -EINVAL; 240 } 241 break; 242 default: 243 return -EINVAL; 244 } 245 246 return 0; 247 } 248 249 void 250 nv50_disp_dmac_dtor(struct nvkm_object *object) 251 { 252 struct nv50_disp_dmac *dmac = (void *)object; 253 nvkm_object_ref(NULL, (struct nvkm_object **)&dmac->pushdma); 254 nv50_disp_chan_destroy(&dmac->base); 255 } 256 257 static int 258 nv50_disp_dmac_init(struct nvkm_object *object) 259 { 260 struct nv50_disp *disp = (void *)object->engine; 261 struct nv50_disp_dmac *dmac = (void *)object; 262 struct nvkm_device *device = disp->base.engine.subdev.device; 263 int chid = dmac->base.chid; 264 int ret; 265 266 ret = nv50_disp_chan_init(&dmac->base); 267 if (ret) 268 return ret; 269 270 /* enable error reporting */ 271 nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid); 272 273 /* initialise channel for dma command submission */ 274 nvkm_wr32(device, 0x610204 + (chid * 0x0010), dmac->push); 275 nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000); 276 nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid); 277 nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010); 278 nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000); 279 nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013); 280 281 /* wait for it to go inactive */ 282 if (nvkm_msec(device, 2000, 283 if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000)) 284 break; 285 ) < 0) { 286 nv_error(dmac, "init timeout, 0x%08x\n", 287 nvkm_rd32(device, 0x610200 + (chid * 0x10))); 288 return -EBUSY; 289 } 290 291 return 0; 292 } 293 294 static int 295 nv50_disp_dmac_fini(struct nvkm_object *object, bool suspend) 296 { 297 
struct nv50_disp *disp = (void *)object->engine; 298 struct nv50_disp_dmac *dmac = (void *)object; 299 struct nvkm_device *device = disp->base.engine.subdev.device; 300 int chid = dmac->base.chid; 301 302 /* deactivate channel */ 303 nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000); 304 nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000); 305 if (nvkm_msec(device, 2000, 306 if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000)) 307 break; 308 ) < 0) { 309 nv_error(dmac, "fini timeout, 0x%08x\n", 310 nvkm_rd32(device, 0x610200 + (chid * 0x10))); 311 if (suspend) 312 return -EBUSY; 313 } 314 315 /* disable error reporting and completion notifications */ 316 nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid); 317 318 return nv50_disp_chan_fini(&dmac->base, suspend); 319 } 320 321 /******************************************************************************* 322 * EVO master channel object 323 ******************************************************************************/ 324 325 static void 326 nv50_disp_mthd_list(struct nv50_disp *disp, int debug, u32 base, int c, 327 const struct nv50_disp_mthd_list *list, int inst) 328 { 329 struct nvkm_device *device = disp->base.engine.subdev.device; 330 struct nvkm_object *object = nv_object(disp); 331 int i; 332 333 for (i = 0; list->data[i].mthd; i++) { 334 if (list->data[i].addr) { 335 u32 next = nvkm_rd32(device, list->data[i].addr + base + 0); 336 u32 prev = nvkm_rd32(device, list->data[i].addr + base + c); 337 u32 mthd = list->data[i].mthd + (list->mthd * inst); 338 const char *name = list->data[i].name; 339 char mods[16]; 340 341 if (prev != next) 342 snprintf(mods, sizeof(mods), "-> 0x%08x", next); 343 else 344 snprintf(mods, sizeof(mods), "%13c", ' '); 345 346 nv_printk_(object, debug, "\t0x%04x: 0x%08x %s%s%s\n", 347 mthd, prev, mods, name ? " // " : "", 348 name ? 
name : ""); 349 } 350 } 351 } 352 353 void 354 nv50_disp_mthd_chan(struct nv50_disp *disp, int debug, int head, 355 const struct nv50_disp_mthd_chan *chan) 356 { 357 struct nvkm_object *object = nv_object(disp); 358 const struct nv50_disp_impl *impl = (void *)object->oclass; 359 const struct nv50_disp_mthd_list *list; 360 int i, j; 361 362 if (debug > nv_subdev(disp)->debug) 363 return; 364 365 for (i = 0; (list = chan->data[i].mthd) != NULL; i++) { 366 u32 base = head * chan->addr; 367 for (j = 0; j < chan->data[i].nr; j++, base += list->addr) { 368 const char *cname = chan->name; 369 const char *sname = ""; 370 char cname_[16], sname_[16]; 371 372 if (chan->addr) { 373 snprintf(cname_, sizeof(cname_), "%s %d", 374 chan->name, head); 375 cname = cname_; 376 } 377 378 if (chan->data[i].nr > 1) { 379 snprintf(sname_, sizeof(sname_), " - %s %d", 380 chan->data[i].name, j); 381 sname = sname_; 382 } 383 384 nv_printk_(object, debug, "%s%s:\n", cname, sname); 385 nv50_disp_mthd_list(disp, debug, base, impl->mthd.prev, 386 list, j); 387 } 388 } 389 } 390 391 const struct nv50_disp_mthd_list 392 nv50_disp_core_mthd_base = { 393 .mthd = 0x0000, 394 .addr = 0x000000, 395 .data = { 396 { 0x0080, 0x000000 }, 397 { 0x0084, 0x610bb8 }, 398 { 0x0088, 0x610b9c }, 399 { 0x008c, 0x000000 }, 400 {} 401 } 402 }; 403 404 static const struct nv50_disp_mthd_list 405 nv50_disp_core_mthd_dac = { 406 .mthd = 0x0080, 407 .addr = 0x000008, 408 .data = { 409 { 0x0400, 0x610b58 }, 410 { 0x0404, 0x610bdc }, 411 { 0x0420, 0x610828 }, 412 {} 413 } 414 }; 415 416 const struct nv50_disp_mthd_list 417 nv50_disp_core_mthd_sor = { 418 .mthd = 0x0040, 419 .addr = 0x000008, 420 .data = { 421 { 0x0600, 0x610b70 }, 422 {} 423 } 424 }; 425 426 const struct nv50_disp_mthd_list 427 nv50_disp_core_mthd_pior = { 428 .mthd = 0x0040, 429 .addr = 0x000008, 430 .data = { 431 { 0x0700, 0x610b80 }, 432 {} 433 } 434 }; 435 436 static const struct nv50_disp_mthd_list 437 nv50_disp_core_mthd_head = { 438 .mthd = 
0x0400, 439 .addr = 0x000540, 440 .data = { 441 { 0x0800, 0x610ad8 }, 442 { 0x0804, 0x610ad0 }, 443 { 0x0808, 0x610a48 }, 444 { 0x080c, 0x610a78 }, 445 { 0x0810, 0x610ac0 }, 446 { 0x0814, 0x610af8 }, 447 { 0x0818, 0x610b00 }, 448 { 0x081c, 0x610ae8 }, 449 { 0x0820, 0x610af0 }, 450 { 0x0824, 0x610b08 }, 451 { 0x0828, 0x610b10 }, 452 { 0x082c, 0x610a68 }, 453 { 0x0830, 0x610a60 }, 454 { 0x0834, 0x000000 }, 455 { 0x0838, 0x610a40 }, 456 { 0x0840, 0x610a24 }, 457 { 0x0844, 0x610a2c }, 458 { 0x0848, 0x610aa8 }, 459 { 0x084c, 0x610ab0 }, 460 { 0x0860, 0x610a84 }, 461 { 0x0864, 0x610a90 }, 462 { 0x0868, 0x610b18 }, 463 { 0x086c, 0x610b20 }, 464 { 0x0870, 0x610ac8 }, 465 { 0x0874, 0x610a38 }, 466 { 0x0880, 0x610a58 }, 467 { 0x0884, 0x610a9c }, 468 { 0x08a0, 0x610a70 }, 469 { 0x08a4, 0x610a50 }, 470 { 0x08a8, 0x610ae0 }, 471 { 0x08c0, 0x610b28 }, 472 { 0x08c4, 0x610b30 }, 473 { 0x08c8, 0x610b40 }, 474 { 0x08d4, 0x610b38 }, 475 { 0x08d8, 0x610b48 }, 476 { 0x08dc, 0x610b50 }, 477 { 0x0900, 0x610a18 }, 478 { 0x0904, 0x610ab8 }, 479 {} 480 } 481 }; 482 483 static const struct nv50_disp_mthd_chan 484 nv50_disp_core_mthd_chan = { 485 .name = "Core", 486 .addr = 0x000000, 487 .data = { 488 { "Global", 1, &nv50_disp_core_mthd_base }, 489 { "DAC", 3, &nv50_disp_core_mthd_dac }, 490 { "SOR", 2, &nv50_disp_core_mthd_sor }, 491 { "PIOR", 3, &nv50_disp_core_mthd_pior }, 492 { "HEAD", 2, &nv50_disp_core_mthd_head }, 493 {} 494 } 495 }; 496 497 int 498 nv50_disp_core_ctor(struct nvkm_object *parent, 499 struct nvkm_object *engine, 500 struct nvkm_oclass *oclass, void *data, u32 size, 501 struct nvkm_object **pobject) 502 { 503 union { 504 struct nv50_disp_core_channel_dma_v0 v0; 505 } *args = data; 506 struct nv50_disp_dmac *mast; 507 int ret; 508 509 nv_ioctl(parent, "create disp core channel dma size %d\n", size); 510 if (nvif_unpack(args->v0, 0, 0, false)) { 511 nv_ioctl(parent, "create disp core channel dma vers %d " 512 "pushbuf %08x\n", 513 args->v0.version, args->v0.pushbuf); 514 } 
else 515 return ret; 516 517 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf, 518 0, sizeof(*mast), (void **)&mast); 519 *pobject = nv_object(mast); 520 if (ret) 521 return ret; 522 523 return 0; 524 } 525 526 static int 527 nv50_disp_core_init(struct nvkm_object *object) 528 { 529 struct nv50_disp *disp = (void *)object->engine; 530 struct nv50_disp_dmac *mast = (void *)object; 531 struct nvkm_device *device = disp->base.engine.subdev.device; 532 int ret; 533 534 ret = nv50_disp_chan_init(&mast->base); 535 if (ret) 536 return ret; 537 538 /* enable error reporting */ 539 nvkm_mask(device, 0x610028, 0x00010000, 0x00010000); 540 541 /* attempt to unstick channel from some unknown state */ 542 if ((nvkm_rd32(device, 0x610200) & 0x009f0000) == 0x00020000) 543 nvkm_mask(device, 0x610200, 0x00800000, 0x00800000); 544 if ((nvkm_rd32(device, 0x610200) & 0x003f0000) == 0x00030000) 545 nvkm_mask(device, 0x610200, 0x00600000, 0x00600000); 546 547 /* initialise channel for dma command submission */ 548 nvkm_wr32(device, 0x610204, mast->push); 549 nvkm_wr32(device, 0x610208, 0x00010000); 550 nvkm_wr32(device, 0x61020c, 0x00000000); 551 nvkm_mask(device, 0x610200, 0x00000010, 0x00000010); 552 nvkm_wr32(device, 0x640000, 0x00000000); 553 nvkm_wr32(device, 0x610200, 0x01000013); 554 555 /* wait for it to go inactive */ 556 if (nvkm_msec(device, 2000, 557 if (!(nvkm_rd32(device, 0x610200) & 0x80000000)) 558 break; 559 ) < 0) { 560 nv_error(mast, "init: 0x%08x\n", nvkm_rd32(device, 0x610200)); 561 return -EBUSY; 562 } 563 564 return 0; 565 } 566 567 static int 568 nv50_disp_core_fini(struct nvkm_object *object, bool suspend) 569 { 570 struct nv50_disp *disp = (void *)object->engine; 571 struct nv50_disp_dmac *mast = (void *)object; 572 struct nvkm_device *device = disp->base.engine.subdev.device; 573 574 /* deactivate channel */ 575 nvkm_mask(device, 0x610200, 0x00000010, 0x00000000); 576 nvkm_mask(device, 0x610200, 0x00000003, 0x00000000); 577 if 
(nvkm_msec(device, 2000, 578 if (!(nvkm_rd32(device, 0x610200) & 0x001e0000)) 579 break; 580 ) < 0) { 581 nv_error(mast, "fini: 0x%08x\n", nvkm_rd32(device, 0x610200)); 582 if (suspend) 583 return -EBUSY; 584 } 585 586 /* disable error reporting and completion notifications */ 587 nvkm_mask(device, 0x610028, 0x00010001, 0x00000000); 588 589 return nv50_disp_chan_fini(&mast->base, suspend); 590 } 591 592 struct nv50_disp_chan_impl 593 nv50_disp_core_ofuncs = { 594 .base.ctor = nv50_disp_core_ctor, 595 .base.dtor = nv50_disp_dmac_dtor, 596 .base.init = nv50_disp_core_init, 597 .base.fini = nv50_disp_core_fini, 598 .base.map = nv50_disp_chan_map, 599 .base.ntfy = nv50_disp_chan_ntfy, 600 .base.rd32 = nv50_disp_chan_rd32, 601 .base.wr32 = nv50_disp_chan_wr32, 602 .chid = 0, 603 .attach = nv50_disp_dmac_object_attach, 604 .detach = nv50_disp_dmac_object_detach, 605 }; 606 607 /******************************************************************************* 608 * EVO sync channel objects 609 ******************************************************************************/ 610 611 static const struct nv50_disp_mthd_list 612 nv50_disp_base_mthd_base = { 613 .mthd = 0x0000, 614 .addr = 0x000000, 615 .data = { 616 { 0x0080, 0x000000 }, 617 { 0x0084, 0x0008c4 }, 618 { 0x0088, 0x0008d0 }, 619 { 0x008c, 0x0008dc }, 620 { 0x0090, 0x0008e4 }, 621 { 0x0094, 0x610884 }, 622 { 0x00a0, 0x6108a0 }, 623 { 0x00a4, 0x610878 }, 624 { 0x00c0, 0x61086c }, 625 { 0x00e0, 0x610858 }, 626 { 0x00e4, 0x610860 }, 627 { 0x00e8, 0x6108ac }, 628 { 0x00ec, 0x6108b4 }, 629 { 0x0100, 0x610894 }, 630 { 0x0110, 0x6108bc }, 631 { 0x0114, 0x61088c }, 632 {} 633 } 634 }; 635 636 const struct nv50_disp_mthd_list 637 nv50_disp_base_mthd_image = { 638 .mthd = 0x0400, 639 .addr = 0x000000, 640 .data = { 641 { 0x0800, 0x6108f0 }, 642 { 0x0804, 0x6108fc }, 643 { 0x0808, 0x61090c }, 644 { 0x080c, 0x610914 }, 645 { 0x0810, 0x610904 }, 646 {} 647 } 648 }; 649 650 static const struct nv50_disp_mthd_chan 651 
nv50_disp_base_mthd_chan = { 652 .name = "Base", 653 .addr = 0x000540, 654 .data = { 655 { "Global", 1, &nv50_disp_base_mthd_base }, 656 { "Image", 2, &nv50_disp_base_mthd_image }, 657 {} 658 } 659 }; 660 661 int 662 nv50_disp_base_ctor(struct nvkm_object *parent, 663 struct nvkm_object *engine, 664 struct nvkm_oclass *oclass, void *data, u32 size, 665 struct nvkm_object **pobject) 666 { 667 union { 668 struct nv50_disp_base_channel_dma_v0 v0; 669 } *args = data; 670 struct nv50_disp *disp = (void *)engine; 671 struct nv50_disp_dmac *dmac; 672 int ret; 673 674 nv_ioctl(parent, "create disp base channel dma size %d\n", size); 675 if (nvif_unpack(args->v0, 0, 0, false)) { 676 nv_ioctl(parent, "create disp base channel dma vers %d " 677 "pushbuf %08x head %d\n", 678 args->v0.version, args->v0.pushbuf, args->v0.head); 679 if (args->v0.head > disp->head.nr) 680 return -EINVAL; 681 } else 682 return ret; 683 684 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf, 685 args->v0.head, sizeof(*dmac), 686 (void **)&dmac); 687 *pobject = nv_object(dmac); 688 if (ret) 689 return ret; 690 691 return 0; 692 } 693 694 struct nv50_disp_chan_impl 695 nv50_disp_base_ofuncs = { 696 .base.ctor = nv50_disp_base_ctor, 697 .base.dtor = nv50_disp_dmac_dtor, 698 .base.init = nv50_disp_dmac_init, 699 .base.fini = nv50_disp_dmac_fini, 700 .base.ntfy = nv50_disp_chan_ntfy, 701 .base.map = nv50_disp_chan_map, 702 .base.rd32 = nv50_disp_chan_rd32, 703 .base.wr32 = nv50_disp_chan_wr32, 704 .chid = 1, 705 .attach = nv50_disp_dmac_object_attach, 706 .detach = nv50_disp_dmac_object_detach, 707 }; 708 709 /******************************************************************************* 710 * EVO overlay channel objects 711 ******************************************************************************/ 712 713 const struct nv50_disp_mthd_list 714 nv50_disp_ovly_mthd_base = { 715 .mthd = 0x0000, 716 .addr = 0x000000, 717 .data = { 718 { 0x0080, 0x000000 }, 719 { 0x0084, 0x0009a0 }, 
720 { 0x0088, 0x0009c0 }, 721 { 0x008c, 0x0009c8 }, 722 { 0x0090, 0x6109b4 }, 723 { 0x0094, 0x610970 }, 724 { 0x00a0, 0x610998 }, 725 { 0x00a4, 0x610964 }, 726 { 0x00c0, 0x610958 }, 727 { 0x00e0, 0x6109a8 }, 728 { 0x00e4, 0x6109d0 }, 729 { 0x00e8, 0x6109d8 }, 730 { 0x0100, 0x61094c }, 731 { 0x0104, 0x610984 }, 732 { 0x0108, 0x61098c }, 733 { 0x0800, 0x6109f8 }, 734 { 0x0808, 0x610a08 }, 735 { 0x080c, 0x610a10 }, 736 { 0x0810, 0x610a00 }, 737 {} 738 } 739 }; 740 741 static const struct nv50_disp_mthd_chan 742 nv50_disp_ovly_mthd_chan = { 743 .name = "Overlay", 744 .addr = 0x000540, 745 .data = { 746 { "Global", 1, &nv50_disp_ovly_mthd_base }, 747 {} 748 } 749 }; 750 751 int 752 nv50_disp_ovly_ctor(struct nvkm_object *parent, 753 struct nvkm_object *engine, 754 struct nvkm_oclass *oclass, void *data, u32 size, 755 struct nvkm_object **pobject) 756 { 757 union { 758 struct nv50_disp_overlay_channel_dma_v0 v0; 759 } *args = data; 760 struct nv50_disp *disp = (void *)engine; 761 struct nv50_disp_dmac *dmac; 762 int ret; 763 764 nv_ioctl(parent, "create disp overlay channel dma size %d\n", size); 765 if (nvif_unpack(args->v0, 0, 0, false)) { 766 nv_ioctl(parent, "create disp overlay channel dma vers %d " 767 "pushbuf %08x head %d\n", 768 args->v0.version, args->v0.pushbuf, args->v0.head); 769 if (args->v0.head > disp->head.nr) 770 return -EINVAL; 771 } else 772 return ret; 773 774 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf, 775 args->v0.head, sizeof(*dmac), 776 (void **)&dmac); 777 *pobject = nv_object(dmac); 778 if (ret) 779 return ret; 780 781 return 0; 782 } 783 784 struct nv50_disp_chan_impl 785 nv50_disp_ovly_ofuncs = { 786 .base.ctor = nv50_disp_ovly_ctor, 787 .base.dtor = nv50_disp_dmac_dtor, 788 .base.init = nv50_disp_dmac_init, 789 .base.fini = nv50_disp_dmac_fini, 790 .base.ntfy = nv50_disp_chan_ntfy, 791 .base.map = nv50_disp_chan_map, 792 .base.rd32 = nv50_disp_chan_rd32, 793 .base.wr32 = nv50_disp_chan_wr32, 794 .chid = 3, 795 
.attach = nv50_disp_dmac_object_attach, 796 .detach = nv50_disp_dmac_object_detach, 797 }; 798 799 /******************************************************************************* 800 * EVO PIO channel base class 801 ******************************************************************************/ 802 803 static int 804 nv50_disp_pioc_create_(struct nvkm_object *parent, 805 struct nvkm_object *engine, 806 struct nvkm_oclass *oclass, int head, 807 int length, void **pobject) 808 { 809 return nv50_disp_chan_create_(parent, engine, oclass, head, 810 length, pobject); 811 } 812 813 void 814 nv50_disp_pioc_dtor(struct nvkm_object *object) 815 { 816 struct nv50_disp_pioc *pioc = (void *)object; 817 nv50_disp_chan_destroy(&pioc->base); 818 } 819 820 static int 821 nv50_disp_pioc_init(struct nvkm_object *object) 822 { 823 struct nv50_disp *disp = (void *)object->engine; 824 struct nv50_disp_pioc *pioc = (void *)object; 825 struct nvkm_device *device = disp->base.engine.subdev.device; 826 int chid = pioc->base.chid; 827 int ret; 828 829 ret = nv50_disp_chan_init(&pioc->base); 830 if (ret) 831 return ret; 832 833 nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000); 834 if (nvkm_msec(device, 2000, 835 if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000)) 836 break; 837 ) < 0) { 838 nv_error(pioc, "timeout0: 0x%08x\n", 839 nvkm_rd32(device, 0x610200 + (chid * 0x10))); 840 return -EBUSY; 841 } 842 843 nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001); 844 if (nvkm_msec(device, 2000, 845 u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10)); 846 if ((tmp & 0x00030000) == 0x00010000) 847 break; 848 ) < 0) { 849 nv_error(pioc, "timeout1: 0x%08x\n", 850 nvkm_rd32(device, 0x610200 + (chid * 0x10))); 851 return -EBUSY; 852 } 853 854 return 0; 855 } 856 857 static int 858 nv50_disp_pioc_fini(struct nvkm_object *object, bool suspend) 859 { 860 struct nv50_disp *disp = (void *)object->engine; 861 struct nv50_disp_pioc *pioc = (void *)object; 862 struct nvkm_device 
*device = disp->base.engine.subdev.device; 863 int chid = pioc->base.chid; 864 865 nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000); 866 if (nvkm_msec(device, 2000, 867 if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000)) 868 break; 869 ) < 0) { 870 nv_error(pioc, "timeout: 0x%08x\n", 871 nvkm_rd32(device, 0x610200 + (chid * 0x10))); 872 if (suspend) 873 return -EBUSY; 874 } 875 876 return nv50_disp_chan_fini(&pioc->base, suspend); 877 } 878 879 /******************************************************************************* 880 * EVO immediate overlay channel objects 881 ******************************************************************************/ 882 883 int 884 nv50_disp_oimm_ctor(struct nvkm_object *parent, 885 struct nvkm_object *engine, 886 struct nvkm_oclass *oclass, void *data, u32 size, 887 struct nvkm_object **pobject) 888 { 889 union { 890 struct nv50_disp_overlay_v0 v0; 891 } *args = data; 892 struct nv50_disp *disp = (void *)engine; 893 struct nv50_disp_pioc *pioc; 894 int ret; 895 896 nv_ioctl(parent, "create disp overlay size %d\n", size); 897 if (nvif_unpack(args->v0, 0, 0, false)) { 898 nv_ioctl(parent, "create disp overlay vers %d head %d\n", 899 args->v0.version, args->v0.head); 900 if (args->v0.head > disp->head.nr) 901 return -EINVAL; 902 } else 903 return ret; 904 905 ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head, 906 sizeof(*pioc), (void **)&pioc); 907 *pobject = nv_object(pioc); 908 if (ret) 909 return ret; 910 911 return 0; 912 } 913 914 struct nv50_disp_chan_impl 915 nv50_disp_oimm_ofuncs = { 916 .base.ctor = nv50_disp_oimm_ctor, 917 .base.dtor = nv50_disp_pioc_dtor, 918 .base.init = nv50_disp_pioc_init, 919 .base.fini = nv50_disp_pioc_fini, 920 .base.ntfy = nv50_disp_chan_ntfy, 921 .base.map = nv50_disp_chan_map, 922 .base.rd32 = nv50_disp_chan_rd32, 923 .base.wr32 = nv50_disp_chan_wr32, 924 .chid = 5, 925 }; 926 927 
/******************************************************************************* 928 * EVO cursor channel objects 929 ******************************************************************************/ 930 931 int 932 nv50_disp_curs_ctor(struct nvkm_object *parent, 933 struct nvkm_object *engine, 934 struct nvkm_oclass *oclass, void *data, u32 size, 935 struct nvkm_object **pobject) 936 { 937 union { 938 struct nv50_disp_cursor_v0 v0; 939 } *args = data; 940 struct nv50_disp *disp = (void *)engine; 941 struct nv50_disp_pioc *pioc; 942 int ret; 943 944 nv_ioctl(parent, "create disp cursor size %d\n", size); 945 if (nvif_unpack(args->v0, 0, 0, false)) { 946 nv_ioctl(parent, "create disp cursor vers %d head %d\n", 947 args->v0.version, args->v0.head); 948 if (args->v0.head > disp->head.nr) 949 return -EINVAL; 950 } else 951 return ret; 952 953 ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head, 954 sizeof(*pioc), (void **)&pioc); 955 *pobject = nv_object(pioc); 956 if (ret) 957 return ret; 958 959 return 0; 960 } 961 962 struct nv50_disp_chan_impl 963 nv50_disp_curs_ofuncs = { 964 .base.ctor = nv50_disp_curs_ctor, 965 .base.dtor = nv50_disp_pioc_dtor, 966 .base.init = nv50_disp_pioc_init, 967 .base.fini = nv50_disp_pioc_fini, 968 .base.ntfy = nv50_disp_chan_ntfy, 969 .base.map = nv50_disp_chan_map, 970 .base.rd32 = nv50_disp_chan_rd32, 971 .base.wr32 = nv50_disp_chan_wr32, 972 .chid = 7, 973 }; 974 975 /******************************************************************************* 976 * Base display object 977 ******************************************************************************/ 978 979 int 980 nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0) 981 { 982 struct nvkm_device *device = disp->base.engine.subdev.device; 983 const u32 blanke = nvkm_rd32(device, 0x610aec + (head * 0x540)); 984 const u32 blanks = nvkm_rd32(device, 0x610af4 + (head * 0x540)); 985 const u32 total = nvkm_rd32(device, 0x610afc + (head * 0x540)); 986 union { 987 struct 
nv04_disp_scanoutpos_v0 v0;
	} *args = data;
	int ret;

	nv_ioctl(object, "disp scanoutpos size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
		/* blanke/blanks/total pack vertical position in the high
		 * 16 bits and horizontal position in the low 16 bits.
		 */
		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
		args->v0.hblanke = (blanke & 0x0000ffff);
		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
		args->v0.hblanks = (blanks & 0x0000ffff);
		args->v0.vtotal  = ( total & 0xffff0000) >> 16;
		args->v0.htotal  = ( total & 0x0000ffff);
		/* timestamps taken immediately before and after the line
		 * counter reads, so userspace can bound when the sample
		 * was taken.
		 */
		args->v0.time[0] = ktime_to_ns(ktime_get());
		args->v0.vline = /* vline read locks hline */
			nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff;
		args->v0.time[1] = ktime_to_ns(ktime_get());
		args->v0.hline =
			nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff;
	} else
		return ret;

	return 0;
}

/* Top-level method dispatcher for the NV50_DISP object.
 *
 * Two argument layouts are accepted: v0 selects a method by head index,
 * v1 selects a method by output (DCB hash type/mask).  After validation
 * the call is routed to the per-method implementation hooks on the disp
 * structure.
 *
 * Returns 0 or a method-specific result on success, negative errno on
 * failure.  NOTE: nvif_unpack() assigns 'ret' as a macro side effect,
 * which is why the bare 'return ret' on the unpack-failure paths is
 * valid.
 */
int
nv50_disp_main_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine);
	union {
		struct nv50_disp_mthd_v0 v0;
		struct nv50_disp_mthd_v1 v1;
	} *args = data;
	struct nv50_disp *disp = (void *)object->engine;
	struct nvkm_output *outp = NULL;
	struct nvkm_output *temp;
	u16 type, mask = 0;
	int head, ret;

	if (mthd != NV50_DISP_MTHD)
		return -EINVAL;

	nv_ioctl(object, "disp mthd size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, true)) {
		nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
			 args->v0.version, args->v0.method, args->v0.head);
		mthd = args->v0.method;
		head = args->v0.head;
	} else
	if (nvif_unpack(args->v1, 1, 1, true)) {
		nv_ioctl(object, "disp mthd vers %d mthd %02x "
				 "type %04x mask %04x\n",
			 args->v1.version, args->v1.method,
			 args->v1.hasht, args->v1.hashm);
		mthd = args->v1.method;
		type = args->v1.hasht;
		mask = args->v1.hashm;
		/* head index is encoded in bits 8..11 of hashm; ffs()-1
		 * yields -1 when no bit is set, rejected just below.
		 */
		head = ffs((mask >> 8) & 0x0f) - 1;
	} else
		return ret;

	if (head < 0 || head >= disp->head.nr)
		return -ENXIO;

	/* v1 calls target a specific output: find it by DCB hash. */
	if (mask) {
		list_for_each_entry(temp, &disp->base.outp, head) {
			if ((temp->info.hasht         == type) &&
			    (temp->info.hashm & mask) == mask) {
				outp = temp;
				break;
			}
		}
		if (outp == NULL)
			return -ENXIO;
	}

	switch (mthd) {
	case NV50_DISP_SCANOUTPOS:
		return impl->head.scanoutpos(object, disp, data, size, head);
	default:
		break;
	}

	/* Multiplying by !!outp forces the default (-EINVAL) case for the
	 * per-output V1 methods below when no output was matched above.
	 */
	switch (mthd * !!outp) {
	case NV50_DISP_MTHD_V1_DAC_PWR:
		return disp->dac.power(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_DAC_LOAD:
		return disp->dac.sense(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_SOR_PWR:
		return disp->sor.power(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_SOR_HDA_ELD:
		if (!disp->sor.hda_eld)
			return -ENODEV;
		return disp->sor.hda_eld(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_SOR_HDMI_PWR:
		if (!disp->sor.hdmi)
			return -ENODEV;
		return disp->sor.hdmi(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: {
		union {
			struct nv50_disp_sor_lvds_script_v0 v0;
		} *args = data;
		nv_ioctl(object, "disp sor lvds script size %d\n", size);
		if (nvif_unpack(args->v0, 0, 0, false)) {
			nv_ioctl(object, "disp sor lvds script "
					 "vers %d name %04x\n",
				 args->v0.version, args->v0.script);
			/* Only records the script id; it is consumed later
			 * by exec_clkcmp() for LVDS outputs.
			 */
			disp->sor.lvdsconf = args->v0.script;
			return 0;
		} else
			return ret;
	}
		break;
	case NV50_DISP_MTHD_V1_SOR_DP_PWR: {
		struct nvkm_output_dp *outpdp = (void *)outp;
		union {
			struct nv50_disp_sor_dp_pwr_v0 v0;
		} *args = data;
		nv_ioctl(object, "disp sor dp pwr size %d\n", size);
		if (nvif_unpack(args->v0, 0, 0, false)) {
			nv_ioctl(object, "disp sor dp pwr vers %d state %d\n",
				 args->v0.version, args->v0.state);
			if (args->v0.state == 0) {
				/* Power down: stop IRQ notifications, drop
				 * link power, and mark link training undone.
				 */
				nvkm_notify_put(&outpdp->irq);
				((struct nvkm_output_dp_impl *)nv_oclass(outp))
					->lnk_pwr(outpdp, 0);
				atomic_set(&outpdp->lt.done, 0);
				return 0;
			} else
			if (args->v0.state != 0) {
				nvkm_output_dp_train(&outpdp->base, 0, true);
				return 0;
			}
		} else
			return ret;
	}
		break;
	case NV50_DISP_MTHD_V1_PIOR_PWR:
		if (!disp->pior.power)
			return -ENODEV;
		return disp->pior.power(object, disp, data, size, head, outp);
	default:
		break;
	}

	return -EINVAL;
}

/* Create the per-client NV50_DISP "main" object, including the RAMHT
 * hash table used to look up EVO objects bound to display channels.
 */
int
nv50_disp_main_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	struct nv50_disp *disp = (void *)engine;
	struct nv50_disp_base *base;
	int ret;

	ret = nvkm_parent_create(parent, engine, oclass, 0,
				 disp->sclass, 0, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	return nvkm_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
			      &base->ramht);
}

/* Destructor: release the RAMHT reference, then the base object. */
void
nv50_disp_main_dtor(struct nvkm_object *object)
{
	struct nv50_disp_base *base = (void *)object;
	nvkm_ramht_ref(NULL, &base->ramht);
	nvkm_parent_destroy(&base->base);
}

/* One-time hardware bring-up for the display engine: mirrors capability
 * registers into the EVO-visible area, takes ownership from the VBIOS,
 * points hardware at the hash table, and unmasks supervisor interrupts.
 */
static int
nv50_disp_main_init(struct nvkm_object *object)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_base *base = (void *)object;
	struct nvkm_device *device = disp->base.engine.subdev.device;
	int ret, i;
	u32 tmp;

	ret = nvkm_parent_init(&base->base);
	if (ret)
		return ret;

	/* The below segments of code copying values from one register to
	 * another appear to inform EVO of the display capabilities or
	 * something similar.  NFI what the 0x614004 caps are for..
	 */
	tmp = nvkm_rd32(device, 0x614004);
	nvkm_wr32(device, 0x610184, tmp);

	/* ... CRTC caps */
	for (i = 0; i < disp->head.nr; i++) {
		tmp = nvkm_rd32(device, 0x616100 + (i * 0x800));
		nvkm_wr32(device, 0x610190 + (i * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616104 + (i * 0x800));
		nvkm_wr32(device, 0x610194 + (i * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616108 + (i * 0x800));
		nvkm_wr32(device, 0x610198 + (i * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x61610c + (i * 0x800));
		nvkm_wr32(device, 0x61019c + (i * 0x10), tmp);
	}

	/* ... DAC caps */
	for (i = 0; i < disp->dac.nr; i++) {
		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
		nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
	}

	/* ... SOR caps */
	for (i = 0; i < disp->sor.nr; i++) {
		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
		nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
	}

	/* ... PIOR caps */
	for (i = 0; i < disp->pior.nr; i++) {
		tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
		nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
	}

	/* steal display away from vbios, or something like that */
	if (nvkm_rd32(device, 0x610024) & 0x00000100) {
		nvkm_wr32(device, 0x610024, 0x00000100);
		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
		/* poll (up to 2s) for the hardware to acknowledge the
		 * handover; bail out rather than continue with the VBIOS
		 * still in control.
		 */
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
				break;
		) < 0)
			return -EBUSY;
	}

	/* point at display engine memory area (hash table, objects) */
	nvkm_wr32(device, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);

	/* enable supervisor interrupts, disable everything else */
	nvkm_wr32(device, 0x61002c, 0x00000370);
	nvkm_wr32(device, 0x610028, 0x00000000);
	return 0;
}

/* Teardown counterpart of init: mask every display interrupt source
 * before handing control back.
 */
static int
nv50_disp_main_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_disp *disp = (void *)object->engine;
	struct nv50_disp_base *base = (void *)object;
	struct nvkm_device *device = disp->base.engine.subdev.device;

	/* disable all interrupts */
	nvkm_wr32(device, 0x610024, 0x00000000);
	nvkm_wr32(device, 0x610020, 0x00000000);

	return nvkm_parent_fini(&base->base, suspend);
}

struct nvkm_ofuncs
nv50_disp_main_ofuncs = {
	.ctor = nv50_disp_main_ctor,
	.dtor = nv50_disp_main_dtor,
	.init = nv50_disp_main_init,
	.fini = nv50_disp_main_fini,
	.mthd = nv50_disp_main_mthd,
	.ntfy = nvkm_disp_ntfy,
};

/* Class exposed on the engine itself (the "main" disp object). */
static struct nvkm_oclass
nv50_disp_main_oclass[] = {
	{ NV50_DISP, &nv50_disp_main_ofuncs },
	{}
};

/* Channel classes instantiable under the main disp object. */
static struct nvkm_oclass
nv50_disp_sclass[] = {
	{ NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
	{ NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
	{ NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
	{ NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
	{ NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
	{}
};

/*******************************************************************************
 * Display context, tracks instmem allocation and prevents more than one
 * client using the display hardware at any time.
 ******************************************************************************/

static int
nv50_disp_data_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	struct nv50_disp *disp = (void *)engine;
	struct nvkm_engctx *ectx;
	int ret = -EBUSY;

	/* no context needed for channel objects...
	 */
	if (nv_mclass(parent) != NV_DEVICE) {
		/* share the parent object instead of allocating a context;
		 * returning 1 signals "existing object reused" to the core.
		 */
		atomic_inc(&parent->refcount);
		*pobject = parent;
		return 1;
	}

	/* allocate display hardware to client */
	mutex_lock(&nv_subdev(disp)->mutex);
	if (list_empty(&nv_engine(disp)->contexts)) {
		ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0x10000,
					 0x10000, NVOBJ_FLAG_HEAP, &ectx);
		*pobject = nv_object(ectx);
	}
	/* if another client already holds a context, ret keeps its
	 * -EBUSY initialiser.
	 */
	mutex_unlock(&nv_subdev(disp)->mutex);
	return ret;
}

struct nvkm_oclass
nv50_disp_cclass = {
	.handle = NV_ENGCTX(DISP, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_disp_data_ctor,
		.dtor = _nvkm_engctx_dtor,
		.init = _nvkm_engctx_init,
		.fini = _nvkm_engctx_fini,
		.rd32 = _nvkm_engctx_rd32,
		.wr32 = _nvkm_engctx_wr32,
	},
};

/*******************************************************************************
 * Display engine implementation
 ******************************************************************************/

/* Mask the vblank interrupt for 'head' (bit 2+head of 0x61002c). */
static void
nv50_disp_vblank_fini(struct nvkm_event *event, int type, int head)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	struct nvkm_device *device = disp->engine.subdev.device;
	nvkm_mask(device, 0x61002c, (4 << head), 0);
}

/* Unmask the vblank interrupt for 'head'. */
static void
nv50_disp_vblank_init(struct nvkm_event *event, int type, int head)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	struct nvkm_device *device = disp->engine.subdev.device;
	nvkm_mask(device, 0x61002c, (4 << head), (4 << head));
}

const struct nvkm_event_func
nv50_disp_vblank_func = {
	.ctor = nvkm_disp_vblank_ctor,
	.init = nv50_disp_vblank_init,
	.fini = nv50_disp_vblank_fini,
};

static const struct nvkm_enum
nv50_disp_intr_error_type[] = {
	{ 3, "ILLEGAL_MTHD" },
	{ 4, "INVALID_VALUE" },
	{ 5, "INVALID_STATE" },
	{ 7, "INVALID_HANDLE" },
	{}
};

static const struct nvkm_enum
nv50_disp_intr_error_code[] = {
	{ 0x00, "" },
	{}
};

/* Decode and log an EVO channel error interrupt, dump the offending
 * method state for known channels, then acknowledge and re-arm the
 * channel's error reporting.
 */
static void
nv50_disp_intr_error(struct nv50_disp *disp, int chid)
{
	struct nv50_disp_impl *impl = (void *)nv_object(disp)->oclass;
	struct nvkm_device *device = disp->base.engine.subdev.device;
	u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
	u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
	/* addr packs: error code (23:16), error type (14:12), method (11:2) */
	u32 code = (addr & 0x00ff0000) >> 16;
	u32 type = (addr & 0x00007000) >> 12;
	u32 mthd = (addr & 0x00000ffc);
	const struct nvkm_enum *ec, *et;
	char ecunk[6], etunk[6];

	et = nvkm_enum_find(nv50_disp_intr_error_type, type);
	if (!et)
		snprintf(etunk, sizeof(etunk), "UNK%02X", type);

	ec = nvkm_enum_find(nv50_disp_intr_error_code, code);
	if (!ec)
		snprintf(ecunk, sizeof(ecunk), "UNK%02X", code);

	nv_error(disp, "%s [%s] chid %d mthd 0x%04x data 0x%08x\n",
		 et ? et->name : etunk, ec ? ec->name : ecunk,
		 chid, mthd, data);

	/* On an UPDATE (0x0080) failure, dump the relevant channel's
	 * method state: chid 0 = core, 1..2 = base, 3..4 = overlay.
	 */
	if (chid == 0) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_mthd_chan(disp, NV_DBG_ERROR, chid - 0,
					    impl->mthd.core);
			break;
		default:
			break;
		}
	} else
	if (chid <= 2) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_mthd_chan(disp, NV_DBG_ERROR, chid - 1,
					    impl->mthd.base);
			break;
		default:
			break;
		}
	} else
	if (chid <= 4) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_mthd_chan(disp, NV_DBG_ERROR, chid - 3,
					    impl->mthd.ovly);
			break;
		default:
			break;
		}
	}

	/* acknowledge the interrupt and reset the channel's error state */
	nvkm_wr32(device, 0x610020, 0x00010000 << chid);
	nvkm_wr32(device, 0x610080 + (chid * 0x08), 0x90000000);
}

/* Translate an OR index + control word into a DCB output type/mask and
 * find the matching nvkm_output plus its VBIOS output table entry.
 * OR numbering: 0-3 DAC, 4-7 SOR, 8+ PIOR.  Returns NULL when no DCB
 * match exists.
 */
static struct nvkm_output *
exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
	    u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
	    struct nvbios_outp *info)
{
	struct nvkm_bios *bios = nvkm_bios(disp);
	struct nvkm_output *outp;
	u16 mask, type;

	if (or < 4) {
		type = DCB_OUTPUT_ANALOG;
		mask = 0;
	} else
	if (or < 8) {
		/* SOR: protocol selected by bits 11:8 of the control word */
		switch (ctrl & 0x00000f00) {
		case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
		case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
		case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
		case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
		default:
			nv_error(disp, "unknown SOR mc 0x%08x\n", ctrl);
			return NULL;
		}
		or  -= 4;
	} else {
		or   = or - 8;
		type = 0x0010;
		mask = 0;
		switch (ctrl & 0x00000f00) {
		case 0x00000000: type |= disp->pior.type[or]; break;
		default:
			nv_error(disp, "unknown PIOR mc 0x%08x\n", ctrl);
			return NULL;
		}
	}

	/* build the DCB hash mask: link bits (7:6), OR bit, head bit */
	mask  = 0x00c0 & (mask << 6);
	mask |= 0x0001 << or;
	mask |= 0x0100 << head;

	list_for_each_entry(outp, &disp->base.outp, head) {
		if ((outp->info.hasht & 0xff) == type &&
		    (outp->info.hashm & mask) == mask) {
			*data = nvbios_outp_match(bios, outp->info.hasht,
						  outp->info.hashm,
						  ver, hdr, cnt, len, info);
			if (!*data)
				return NULL;
			return outp;
		}
	}

	return NULL;
}

/* Find the OR currently driving 'head' and execute VBIOS output script
 * 'id' for it.  ORs are probed in DAC, SOR, PIOR order; 'i' ends up as
 * a global OR index (+4 for SORs, +8 for PIORs) fed to exec_lookup().
 * Returns the output acted upon, or NULL if no OR owns the head.
 */
static struct nvkm_output *
exec_script(struct nv50_disp *disp, int head, int id)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_output *outp;
	struct nvbios_outp info;
	u8  ver, hdr, cnt, len;
	u32 data, ctrl = 0;
	u32 reg;
	int i;

	/* DAC */
	for (i = 0; !(ctrl & (1 << head)) && i < disp->dac.nr; i++)
		ctrl = nvkm_rd32(device, 0x610b5c + (i * 8));

	/* SOR */
	if (!(ctrl & (1 << head))) {
		/* SOR control registers moved on later chipsets */
		if (nv_device(disp)->chipset  < 0x90 ||
		    nv_device(disp)->chipset == 0x92 ||
		    nv_device(disp)->chipset == 0xa0) {
			reg = 0x610b74;
		} else {
			reg = 0x610798;
		}
		for (i = 0; !(ctrl & (1 << head)) && i < disp->sor.nr; i++)
			ctrl = nvkm_rd32(device, reg + (i * 8));
		i += 4;
	}

	/* PIOR */
	if (!(ctrl & (1 << head))) {
		for (i = 0; !(ctrl & (1 << head)) && i < disp->pior.nr; i++)
			ctrl = nvkm_rd32(device, 0x610b84 + (i * 8));
		i += 8;
	}

	if (!(ctrl & (1 << head)))
		return NULL;
	i--;	/* loops above overshoot by one */

	outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
	if (outp) {
		struct nvbios_init init = {
			.subdev = nv_subdev(disp),
			.bios = bios,
			.offset = info.script[id],
			.outp = &outp->info,
			.crtc = head,
			.execute = 1,
		};

		nvbios_exec(&init);
	}

	return outp;
}

/* Like exec_script(), but executes a VBIOS clock-comparison script for
 * the OR driving 'head', selected by pixel clock.  Also derives the
 * output configuration word (*conf) used to pick the script.  An 'id'
 * of 0xff only performs the lookup without running a clock script.
 */
static struct nvkm_output *
exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_output *outp;
	struct nvbios_outp info1;
	struct nvbios_ocfg info2;
	u8  ver, hdr, cnt, len;
	u32 data, ctrl = 0;
	u32 reg;
	int i;

	/* DAC */
	for (i = 0; !(ctrl & (1 << head)) && i < disp->dac.nr; i++)
		ctrl = nvkm_rd32(device, 0x610b58 + (i * 8));

	/* SOR */
	if (!(ctrl & (1 << head))) {
		/* same chipset-dependent register split as exec_script() */
		if (nv_device(disp)->chipset  < 0x90 ||
		    nv_device(disp)->chipset == 0x92 ||
		    nv_device(disp)->chipset == 0xa0) {
			reg = 0x610b70;
		} else {
			reg = 0x610794;
		}
		for (i = 0; !(ctrl & (1 << head)) && i < disp->sor.nr; i++)
			ctrl = nvkm_rd32(device, reg + (i * 8));
		i += 4;
	}

	/* PIOR */
	if (!(ctrl & (1 << head))) {
		for (i = 0; !(ctrl & (1 << head)) && i < disp->pior.nr; i++)
			ctrl = nvkm_rd32(device, 0x610b80 + (i * 8));
		i += 8;
	}

	if (!(ctrl & (1 << head)))
		return NULL;
	i--;

	outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
	if (!outp)
		return NULL;

	if (outp->info.location == 0) {
		switch (outp->info.type) {
		case DCB_OUTPUT_TMDS:
			*conf = (ctrl & 0x00000f00) >> 8;
			/* dual-link flag above single-link TMDS limit */
			if (pclk >= 165000)
				*conf |= 0x0100;
			break;
		case DCB_OUTPUT_LVDS:
			*conf = disp->sor.lvdsconf;
			break;
		case DCB_OUTPUT_DP:
			*conf = (ctrl & 0x00000f00) >> 8;
			break;
		case DCB_OUTPUT_ANALOG:
		default:
			*conf = 0x00ff;
			break;
		}
	} else {
		/* PIOR path: clock scripts keyed on half the pixel clock */
		*conf = (ctrl & 0x00000f00) >> 8;
		pclk = pclk / 2;
	}

	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
	if (data && id < 0xff) {
		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
		if (data) {
			struct nvbios_init init = {
				.subdev = nv_subdev(disp),
				.bios = bios,
				.offset = data,
				.outp =
&outp->info,
				.crtc = head,
				.execute = 1,
			};

			nvbios_exec(&init);
		}
	}

	return outp;
}

/* Supervisor 1, per-head: run output script 1 (encoder disconnect). */
static void
nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
{
	exec_script(disp, head, 1);
}

/* Supervisor 2, phase 0: run output script 2 for the head, plus the DP
 * power-down script where applicable (see comment below).
 */
static void
nv50_disp_intr_unk20_0(struct nv50_disp *disp, int head)
{
	struct nvkm_output *outp = exec_script(disp, head, 2);

	/* the binary driver does this outside of the supervisor handling
	 * (after the third supervisor from a detach).  we (currently?)
	 * allow both detach/attach to happen in the same set of
	 * supervisor interrupts, so it would make sense to execute this
	 * (full power down?) script after all the detach phases of the
	 * supervisor handling.  like with training if needed from the
	 * second supervisor, nvidia doesn't do this, so who knows if it's
	 * entirely safe, but it does appear to work..
	 *
	 * without this script being run, on some configurations i've
	 * seen, switching from DP to TMDS on a DP connector may result
	 * in a blank screen (SOR_PWR off/on can restore it)
	 */
	if (outp && outp->info.type == DCB_OUTPUT_DP) {
		struct nvkm_output_dp *outpdp = (void *)outp;
		struct nvbios_init init = {
			.subdev = nv_subdev(disp),
			.bios = nvkm_bios(disp),
			.outp = &outp->info,
			.crtc = head,
			.offset = outpdp->info.script[4],
			.execute = 1,
		};

		nvbios_exec(&init);
		atomic_set(&outpdp->lt.done, 0);
	}
}

/* Supervisor 2, phase 1: program the head's video PLL if a pixel clock
 * was requested.
 */
static void
nv50_disp_intr_unk20_1(struct nv50_disp *disp, int head)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_devinit *devinit = device->devinit;
	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
	if (pclk)
		devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
}

/* Compute and program DP timeslot/watermark configuration for the SOR
 * attached to 'head': hblank/vblank symbol counts, then a search for
 * the best TU (transfer unit) size and its fractional representation.
 */
static void
nv50_disp_intr_unk20_2_dp(struct nv50_disp *disp, int head,
			  struct dcb_output *outp, u32 pclk)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	const int link = !(outp->sorconf.link & 1);
	const int   or = ffs(outp->or) - 1;
	const u32 soff = (  or * 0x800);
	const u32 loff = (link * 0x080) + soff;
	const u32 ctrl = nvkm_rd32(device, 0x610794 + (or * 8));
	const u32 symbol = 100000;	/* fixed-point scale for fractions */
	const s32 vactive = nvkm_rd32(device, 0x610af8 + (head * 0x540)) & 0xffff;
	const s32 vblanke = nvkm_rd32(device, 0x610ae8 + (head * 0x540)) & 0xffff;
	const s32 vblanks = nvkm_rd32(device, 0x610af0 + (head * 0x540)) & 0xffff;
	u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff);
	u32 clksor = nvkm_rd32(device, 0x614300 + soff);
	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
	int TU, VTUi, VTUf, VTUa;
	u64 link_data_rate, link_ratio, unk;
	u32 best_diff = 64 * symbol;
	u32 link_nr, link_bw, bits;
	u64 value;

	/* link rate (kHz) and active lane count from current hw state */
	link_bw = (clksor & 0x000c0000) ? 270000 : 162000;
	link_nr = hweight32(dpctrl & 0x000f0000);

	/* symbols/hblank - algorithm taken from comments in tegra driver */
	value = vblanke + vactive - vblanks - 7;
	value = value * link_bw;
	do_div(value, pclk);
	value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
	nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, value);

	/* symbols/vblank - algorithm taken from comments in tegra driver */
	value = vblanks - vblanke - 25;
	value = value * link_bw;
	do_div(value, pclk);
	value = value - ((36 / link_nr) + 3) - 1;
	nvkm_mask(device, 0x61c1ec + soff, 0x00ffffff, value);

	/* watermark / activesym */
	if      ((ctrl & 0xf0000) == 0x60000) bits = 30;
	else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
	else                                  bits = 18;

	link_data_rate = (pclk * bits / 8) / link_nr;

	/* calculate ratio of packed data rate to link symbol rate */
	link_ratio = link_data_rate * symbol;
	do_div(link_ratio,
link_bw);

	for (TU = 64; TU >= 32; TU--) {
		/* calculate average number of valid symbols in each TU */
		u32 tu_valid = link_ratio * TU;
		u32 calc, diff;

		/* find a hw representation for the fraction.. */
		VTUi = tu_valid / symbol;
		calc = VTUi * symbol;
		diff = tu_valid - calc;
		if (diff) {
			if (diff >= (symbol / 2)) {
				VTUf = symbol / (symbol - diff);
				if (symbol - (VTUf * diff))
					VTUf++;

				if (VTUf <= 15) {
					VTUa = 1;
					calc += symbol - (symbol / VTUf);
				} else {
					VTUa = 0;
					VTUf = 1;
					calc += symbol;
				}
			} else {
				VTUa = 0;
				VTUf = min((int)(symbol / diff), 15);
				calc += symbol / VTUf;
			}

			diff = calc - tu_valid;
		} else {
			/* no remainder, but the hw doesn't like the fractional
			 * part to be zero.  decrement the integer part and
			 * have the fraction add a whole symbol back
			 */
			VTUa = 0;
			VTUf = 1;
			VTUi--;
		}

		/* keep the TU whose representable fraction is closest */
		if (diff < best_diff) {
			best_diff = diff;
			bestTU = TU;
			bestVTUa = VTUa;
			bestVTUf = VTUf;
			bestVTUi = VTUi;
			if (diff == 0)
				break;
		}
	}

	if (!bestTU) {
		nv_error(disp, "unable to find suitable dp config\n");
		return;
	}

	/* XXX close to vbios numbers, but not right */
	unk  = (symbol - link_ratio) * bestTU;
	unk *= link_ratio;
	do_div(unk, symbol);
	do_div(unk, symbol);
	unk += 6;

	nvkm_mask(device, 0x61c10c + loff, 0x000001fc, bestTU << 2);
	nvkm_mask(device, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
						       bestVTUf << 16 |
						       bestVTUi << 8 | unk);
}

/* Supervisor 2, phase 2: attach the encoder to the head, retraining DP
 * links beforehand where necessary (see long comment in body).
 */
static void
nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_output *outp;
	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
	u32 hval, hreg = 0x614200 + (head * 0x800);
	u32 oval, oreg;
	u32 mask, conf;

	/* id 0xff: lookup only, no clock script executed yet */
	outp = exec_clkcmp(disp, head, 0xff, pclk, &conf);
	if (!outp)
		return;

	/* we allow both encoder attach and detach operations to occur
	 * within a single supervisor (ie. modeset) sequence.  the
	 * encoder detach scripts quite often switch off power to the
	 * lanes, which requires the link to be re-trained.
	 *
	 * this is not generally an issue as the sink "must" (heh)
	 * signal an irq when it's lost sync so the driver can
	 * re-train.
	 *
	 * however, on some boards, if one does not configure at least
	 * the gpu side of the link *before* attaching, then various
	 * things can go horribly wrong (PDISP disappearing from mmio,
	 * third supervisor never happens, etc).
	 *
	 * the solution is simply to retrain here, if necessary.  last
	 * i checked, the binary driver userspace does not appear to
	 * trigger this situation (it forces an UPDATE between steps).
	 */
	if (outp->info.type == DCB_OUTPUT_DP) {
		u32 soff = (ffs(outp->info.or) - 1) * 0x08;
		u32 ctrl, datarate;

		/* 'soff' is reused below as a rate divisor: 1 for SOR,
		 * 2 for PIOR — presumably PIOR DP links carry half the
		 * rate; TODO confirm.
		 */
		if (outp->info.location == 0) {
			ctrl = nvkm_rd32(device, 0x610794 + soff);
			soff = 1;
		} else {
			ctrl = nvkm_rd32(device, 0x610b80 + soff);
			soff = 2;
		}

		/* data rate scaled by bits/pixel (30/24/18) from ctrl */
		switch ((ctrl & 0x000f0000) >> 16) {
		case 6: datarate = pclk * 30; break;
		case 5: datarate = pclk * 24; break;
		case 2:
		default:
			datarate = pclk * 18;
			break;
		}

		if (nvkm_output_dp_train(outp, datarate / soff, true))
			ERR("link not trained before attach\n");
	}

	exec_clkcmp(disp, head, 0, pclk, &conf);

	/* pick per-OR control register and values: DAC, SOR, or PIOR */
	if (!outp->info.location && outp->info.type == DCB_OUTPUT_ANALOG) {
		oreg = 0x614280 + (ffs(outp->info.or) - 1) * 0x800;
		oval = 0x00000000;
		hval = 0x00000000;
		mask = 0xffffffff;
	} else
	if (!outp->info.location) {
		if (outp->info.type == DCB_OUTPUT_DP)
			nv50_disp_intr_unk20_2_dp(disp, head, &outp->info, pclk);
		oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800;
		oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
		hval = 0x00000000;
		mask = 0x00000707;
	} else {
		oreg = 0x614380 + (ffs(outp->info.or) - 1) * 0x800;
		oval = 0x00000001;
		hval = 0x00000001;
		mask = 0x00000707;
	}

	nvkm_mask(device, hreg, 0x0000000f, hval);
	nvkm_mask(device, oreg, mask, oval);
}

/* If programming a TMDS output on a SOR that can also be configured for
 * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
 *
 * It looks like the VBIOS TMDS scripts make an attempt at this, however,
 * the VBIOS scripts on at least one board I have only switch it off on
 * link 0, causing a blank display if the output has previously been
 * programmed for DisplayPort.
 */
static void
nv50_disp_intr_unk40_0_tmds(struct nv50_disp *disp,
			    struct dcb_output *outp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	const int link = !(outp->sorconf.link & 1);
	const int   or = ffs(outp->or) - 1;
	const u32 loff = (or * 0x800) + (link * 0x80);
	const u16 mask = (outp->sorconf.link << 6) | outp->or;
	struct dcb_output match;
	u8  ver, hdr;

	/* only relevant if the DCB also lists a DP entry for this SOR/link */
	if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match))
		nvkm_mask(device, 0x61c10c + loff, 0x00000001, 0x00000000);
}

/* Supervisor 3, per-head: run clock script 1 and apply the TMDS/DP
 * control fixup above where needed.
 */
static void
nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_output *outp;
	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
	u32 conf;

	outp = exec_clkcmp(disp, head, 1, pclk, &conf);
	if (!outp)
		return;

	if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
		nv50_disp_intr_unk40_0_tmds(disp, &outp->info);
}

void
nv50_disp_intr_supervisor(struct work_struct *work)
{
	struct nv50_disp *disp =
		container_of(work, struct nv50_disp, supervisor);
	struct nv50_disp_impl *impl = (void *)nv_object(disp)->oclass;
	struct nvkm_device *device = disp->base.engine.subdev.device;
	u32 super = nvkm_rd32(device, 0x610030);
	int head;

	nv_debug(disp, "supervisor 0x%08x 0x%08x\n", disp->super, super);

	/* disp->super holds which supervisor stage (0x10/0x20/0x40) was
	 * latched by nv50_disp_intr(); 'super' selects affected heads.
	 */
	if (disp->super & 0x00000010) {
		nv50_disp_mthd_chan(disp, NV_DBG_DEBUG, 0, impl->mthd.core);
		for (head = 0; head < disp->head.nr; head++) {
			if (!(super & (0x00000020 << head)))
				continue;
			if (!(super & (0x00000080 << head)))
				continue;
			nv50_disp_intr_unk10_0(disp, head);
		}
	} else
	if (disp->super & 0x00000020) {
		for (head = 0; head < disp->head.nr; head++) {
			if (!(super & (0x00000080 << head)))
				continue;
			nv50_disp_intr_unk20_0(disp, head);
		}
		for (head = 0; head < disp->head.nr; head++) {
			if (!(super & (0x00000200 << head)))
				continue;
			nv50_disp_intr_unk20_1(disp, head);
		}
		for (head = 0; head < disp->head.nr; head++) {
			if (!(super & (0x00000080 << head)))
				continue;
			nv50_disp_intr_unk20_2(disp, head);
		}
	} else
	if (disp->super & 0x00000040) {
		for (head = 0; head < disp->head.nr; head++) {
			if (!(super & (0x00000080 << head)))
				continue;
			nv50_disp_intr_unk40_0(disp, head);
		}
	}

	/* signal stage completion to hardware */
	nvkm_wr32(device, 0x610030, 0x80000000);
}

/* Top-level PDISP interrupt handler: channel errors, channel user
 * events, per-head vblanks, and supervisor stages (deferred to the
 * workqueue since they execute VBIOS scripts).
 */
void
nv50_disp_intr(struct nvkm_subdev *subdev)
{
	struct nv50_disp *disp = (void *)subdev;
	struct nvkm_device *device = disp->base.engine.subdev.device;
	u32 intr0 = nvkm_rd32(device, 0x610020);
	u32 intr1 = nvkm_rd32(device, 0x610024);

	/* per-channel error interrupts (bits 20:16) */
	while (intr0 & 0x001f0000) {
		u32 chid = __ffs(intr0 & 0x001f0000) - 16;
		nv50_disp_intr_error(disp, chid);
		intr0 &= ~(0x00010000 << chid);
	}

	/* per-channel notification interrupts (bits 4:0) */
	while (intr0 & 0x0000001f) {
		u32 chid = __ffs(intr0 & 0x0000001f);
		nv50_disp_chan_uevent_send(disp, chid);
		intr0 &= ~(0x00000001 << chid);
	}

	if (intr1 & 0x00000004) {
		nvkm_disp_vblank(&disp->base, 0);
		nvkm_wr32(device, 0x610024, 0x00000004);
	}

	if (intr1 & 0x00000008) {
		nvkm_disp_vblank(&disp->base, 1);
		nvkm_wr32(device, 0x610024, 0x00000008);
	}

	/* supervisor request: record the stage, ack, handle from work */
	if (intr1 & 0x00000070) {
		disp->super = (intr1 & 0x00000070);
		schedule_work(&disp->supervisor);
		nvkm_wr32(device, 0x610024, disp->super);
	}
}

/* Engine constructor: wires up interrupt/supervisor handling, the
 * channel user-event source, and the NV50 hardware topology (2 heads,
 * 3 DACs, 2 SORs, 3 PIORs) plus per-OR method implementations.
 */
static int
nv50_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv50_disp *disp;
	int ret;

	ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
			       "display", &disp);
	*pobject = nv_object(disp);
	if (ret)
		return ret;

	ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &disp->uevent);
	if (ret)
		return ret;

	nv_engine(disp)->sclass = nv50_disp_main_oclass;
	nv_engine(disp)->cclass = &nv50_disp_cclass;
	nv_subdev(disp)->intr = nv50_disp_intr;
	INIT_WORK(&disp->supervisor, nv50_disp_intr_supervisor);
	disp->sclass = nv50_disp_sclass;
	disp->head.nr = 2;
	disp->dac.nr = 3;
	disp->sor.nr = 2;
	disp->pior.nr = 3;
	disp->dac.power = nv50_dac_power;
	disp->dac.sense = nv50_dac_sense;
	disp->sor.power = nv50_sor_power;
	disp->pior.power = nv50_pior_power;
	return 0;
}

struct nvkm_oclass *
nv50_disp_outp_sclass[] = {
	&nv50_pior_dp_impl.base.base,
	NULL
};

struct nvkm_oclass *
nv50_disp_oclass = &(struct nv50_disp_impl) {
	.base.base.handle = NV_ENGINE(DISP, 0x50),
	.base.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_disp_ctor,
		.dtor = _nvkm_disp_dtor,
		.init = _nvkm_disp_init,
		.fini = _nvkm_disp_fini,
	},
	.base.vblank = &nv50_disp_vblank_func,
	.base.outp =  nv50_disp_outp_sclass,
	.mthd.core = &nv50_disp_core_mthd_chan,
	.mthd.base = &nv50_disp_base_mthd_chan,
	.mthd.ovly = &nv50_disp_ovly_mthd_chan,
	.mthd.prev = 0x000004,
	.head.scanoutpos = nv50_disp_main_scanoutpos,
}.base.base;