/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "disp.h"
#include "atom.h"
#include "core.h"
#include "head.h"
#include "wndw.h"
#include "handles.h"

#include <linux/dma-mapping.h>
#include <linux/hdmi.h>
#include <linux/component.h>
#include <linux/iopoll.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include <nvif/push507c.h>

#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/event.h>
#include <nvif/if0012.h>
#include <nvif/if0014.h>
#include <nvif/timer.h>

#include <nvhw/class/cl507c.h>
#include <nvhw/class/cl507d.h>
#include <nvhw/class/cl837d.h>
#include <nvhw/class/cl887d.h>
#include <nvhw/class/cl907d.h>
#include <nvhw/class/cl917d.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_fence.h"

#include <subdev/bios/dp.h>

/******************************************************************************
 * EVO channel
 *****************************************************************************/

static int
nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size,
		 struct nv50_chan *chan)
{
	struct nvif_sclass *sclass;
	int ret, i, n;

	chan->device = device;

	ret = n = nvif_object_sclass_get(disp, &sclass);
	if (ret < 0)
		return ret;

	while (oclass[0]) {
		for (i = 0; i < n; i++) {
			if (sclass[i].oclass == oclass[0]) {
				ret = nvif_object_ctor(disp, "kmsChan", 0,
						       oclass[0], data, size,
						       &chan->user);
				if (ret == 0)
					nvif_object_map(&chan->user, NULL, 0);
				nvif_object_sclass_put(&sclass);
				return ret;
			}
		}
		oclass++;
	}

	nvif_object_sclass_put(&sclass);
	return -ENOSYS;
}

static void
nv50_chan_destroy(struct nv50_chan *chan)
{
	nvif_object_dtor(&chan->user);
}

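/* Illustrative note (not functional code): nv50_chan_create() negotiates the
 * channel class by walking the caller's preference-ordered, zero-terminated
 * class list against the classes the display object actually exposes, e.g.:
 *
 *	static const s32 oclass[] = { GK110_DISP_CORE_CHANNEL_DMA,
 *				      GK104_DISP_CORE_CHANNEL_DMA, 0 };
 *
 * The first match is constructed and mapped; -ENOSYS means the display
 * engine supports none of the requested classes.
 */
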
/******************************************************************************
 * DMA EVO channel
 *****************************************************************************/

void
nv50_dmac_destroy(struct nv50_dmac *dmac)
{
	nvif_object_dtor(&dmac->vram);
	nvif_object_dtor(&dmac->sync);

	nv50_chan_destroy(&dmac->base);

	nvif_mem_dtor(&dmac->_push.mem);
}

static void
nv50_dmac_kick(struct nvif_push *push)
{
	struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);

	dmac->cur = push->cur - (u32 __iomem *)dmac->_push.mem.object.map.ptr;
	if (dmac->put != dmac->cur) {
		/* Push buffer fetches are not coherent with BAR1, we need to ensure
		 * writes have been flushed right through to VRAM before writing PUT.
		 */
		if (dmac->push->mem.type & NVIF_MEM_VRAM) {
			struct nvif_device *device = dmac->base.device;
			nvif_wr32(&device->object, 0x070000, 0x00000001);
			nvif_msec(device, 2000,
				if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
					break;
			);
		}

		NVIF_WV32(&dmac->base.user, NV507C, PUT, PTR, dmac->cur);
		dmac->put = dmac->cur;
	}

	push->bgn = push->cur;
}

static int
nv50_dmac_free(struct nv50_dmac *dmac)
{
	u32 get = NVIF_RV32(&dmac->base.user, NV507C, GET, PTR);
	if (get > dmac->cur) /* NVIDIA stays 5 dwords away from GET; do the same. */
		return get - dmac->cur - 5;
	return dmac->max - dmac->cur;
}

static int
nv50_dmac_wind(struct nv50_dmac *dmac)
{
	/* Wait for GET to depart from the beginning of the push buffer to
	 * prevent writing PUT == GET, which would be ignored by HW.
	 */
	u32 get = NVIF_RV32(&dmac->base.user, NV507C, GET, PTR);
	if (get == 0) {
		/* Corner-case, HW idle, but non-committed work pending. */
		if (dmac->put == 0)
			nv50_dmac_kick(dmac->push);

		if (nvif_msec(dmac->base.device, 2000,
			if (NVIF_TV32(&dmac->base.user, NV507C, GET, PTR, >, 0))
				break;
		) < 0)
			return -ETIMEDOUT;
	}

	PUSH_RSVD(dmac->push, PUSH_JUMP(dmac->push, 0));
	dmac->cur = 0;
	return 0;
}

static int
nv50_dmac_wait(struct nvif_push *push, u32 size)
{
	struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
	int free;

	if (WARN_ON(size > dmac->max))
		return -EINVAL;

	dmac->cur = push->cur - (u32 __iomem *)dmac->_push.mem.object.map.ptr;
	if (dmac->cur + size >= dmac->max) {
		int ret = nv50_dmac_wind(dmac);
		if (ret)
			return ret;

		push->cur = dmac->_push.mem.object.map.ptr;
		push->cur = push->cur + dmac->cur;
		nv50_dmac_kick(push);
	}

	if (nvif_msec(dmac->base.device, 2000,
		if ((free = nv50_dmac_free(dmac)) >= size)
			break;
	) < 0) {
		WARN_ON(1);
		return -ETIMEDOUT;
	}

	push->bgn = dmac->_push.mem.object.map.ptr;
	push->bgn = push->bgn + dmac->cur;
	push->cur = push->bgn;
	push->end = push->cur + free;
	return 0;
}

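/* Illustrative note on the ring arithmetic above: with a 4KiB push buffer,
 * max = 0x1000/4 - 1 = 1023 dwords.  If GET = 0x200 and cur = 0x040,
 * nv50_dmac_free() returns 0x200 - 0x040 - 5 = 443 dwords (keeping the
 * 5-dword guard gap).  If GET <= cur, only the dwords up to max are usable,
 * and nv50_dmac_wind() must jump back to offset 0 before continuing.
 */
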
MODULE_PARM_DESC(kms_vram_pushbuf, "Place EVO/NVD push buffers in VRAM (default: auto)");
static int nv50_dmac_vram_pushbuf = -1;
module_param_named(kms_vram_pushbuf, nv50_dmac_vram_pushbuf, int, 0400);

int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
		 struct nv50_dmac *dmac)
{
	struct nouveau_cli *cli = (void *)device->object.client;
	struct nvif_disp_chan_v0 *args = data;
	u8 type = NVIF_MEM_COHERENT;
	int ret;

	mutex_init(&dmac->lock);

	/* Pascal added support for 47-bit physical addresses, but some
	 * parts of EVO still only accept 40-bit PAs.
	 *
	 * To avoid issues on systems with large amounts of RAM, and on
	 * systems where an IOMMU maps pages at a high address, we need
	 * to allocate push buffers in VRAM instead.
	 *
	 * This appears to match NVIDIA's behaviour on Pascal.
	 */
	if ((nv50_dmac_vram_pushbuf > 0) ||
	    (nv50_dmac_vram_pushbuf < 0 && device->info.family == NV_DEVICE_INFO_V0_PASCAL))
		type |= NVIF_MEM_VRAM;

	ret = nvif_mem_ctor_map(&cli->mmu, "kmsChanPush", type, 0x1000,
				&dmac->_push.mem);
	if (ret)
		return ret;

	dmac->ptr = dmac->_push.mem.object.map.ptr;
	dmac->_push.wait = nv50_dmac_wait;
	dmac->_push.kick = nv50_dmac_kick;
	dmac->push = &dmac->_push;
	dmac->push->bgn = dmac->_push.mem.object.map.ptr;
	dmac->push->cur = dmac->push->bgn;
	dmac->push->end = dmac->push->bgn;
	dmac->max = 0x1000/4 - 1;

	/* EVO channels are affected by a HW bug where the last 12 DWORDs
	 * of the push buffer aren't able to be used safely.
	 */
	if (disp->oclass < GV100_DISP)
		dmac->max -= 12;

	args->pushbuf = nvif_handle(&dmac->_push.mem.object);

	ret = nv50_chan_create(device, disp, oclass, head, data, size,
			       &dmac->base);
	if (ret)
		return ret;

	if (syncbuf < 0)
		return 0;

	ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
			       NV_DMA_IN_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_VRAM,
					.access = NV_DMA_V0_ACCESS_RDWR,
					.start = syncbuf + 0x0000,
					.limit = syncbuf + 0x0fff,
			       }, sizeof(struct nv_dma_v0),
			       &dmac->sync);
	if (ret)
		return ret;

	ret = nvif_object_ctor(&dmac->base.user, "kmsVramCtxDma", NV50_DISP_HANDLE_VRAM,
			       NV_DMA_IN_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_VRAM,
					.access = NV_DMA_V0_ACCESS_RDWR,
					.start = 0,
					.limit = device->info.ram_user - 1,
			       }, sizeof(struct nv_dma_v0),
			       &dmac->vram);
	if (ret)
		return ret;

	return ret;
}

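/* Illustrative note: the two ctxdma objects above expose VRAM windows to the
 * channel via well-known handles.  The sync object is a 4KiB window over the
 * notifier/semaphore buffer (e.g. syncbuf = 0x10000 covers 0x10000..0x10fff),
 * while the vram object spans all of user-visible VRAM (0..ram_user - 1) so
 * framebuffers can be referenced by offset alone.
 */
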
/******************************************************************************
 * Output path helpers
 *****************************************************************************/
static void
nv50_outp_dump_caps(struct nouveau_drm *drm,
		    struct nouveau_encoder *outp)
{
	NV_DEBUG(drm, "%s caps: dp_interlace=%d\n",
		 outp->base.base.name, outp->caps.dp_interlace);
}

static int
nv50_outp_atomic_check_view(struct drm_encoder *encoder,
			    struct drm_crtc_state *crtc_state,
			    struct drm_connector_state *conn_state,
			    struct drm_display_mode *native_mode)
{
	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_display_mode *mode = &crtc_state->mode;
	struct drm_connector *connector = conn_state->connector;
	struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);

	NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
	asyc->scaler.full = false;
	if (!native_mode)
		return 0;

	if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
		switch (connector->connector_type) {
		case DRM_MODE_CONNECTOR_LVDS:
		case DRM_MODE_CONNECTOR_eDP:
			/* Don't force the scaler for EDID modes with the same
			 * size as the native one (e.g. a different refresh rate).
			 */
			if (mode->hdisplay == native_mode->hdisplay &&
			    mode->vdisplay == native_mode->vdisplay &&
			    mode->type & DRM_MODE_TYPE_DRIVER)
				break;
			mode = native_mode;
			asyc->scaler.full = true;
			break;
		default:
			break;
		}
	} else {
		mode = native_mode;
	}

	if (!drm_mode_equal(adjusted_mode, mode)) {
		drm_mode_copy(adjusted_mode, mode);
		crtc_state->mode_changed = true;
	}

	return 0;
}

static int
nv50_outp_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	struct drm_connector *connector = conn_state->connector;
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
	int ret;

	ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
					  nv_connector->native_mode);
	if (ret)
		return ret;

	if (crtc_state->mode_changed || crtc_state->connectors_changed)
		asyh->or.bpc = connector->display_info.bpc;

	return 0;
}

struct nouveau_connector *
nv50_outp_get_new_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp)
{
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct drm_encoder *encoder = to_drm_encoder(outp);
	int i;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->best_encoder == encoder)
			return nouveau_connector(connector);
	}

	return NULL;
}

struct nouveau_connector *
nv50_outp_get_old_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp)
{
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct drm_encoder *encoder = to_drm_encoder(outp);
	int i;

	for_each_old_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->best_encoder == encoder)
			return nouveau_connector(connector);
	}

	return NULL;
}

static struct nouveau_crtc *
nv50_outp_get_new_crtc(const struct drm_atomic_state *state, const struct nouveau_encoder *outp)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	const u32 mask = drm_encoder_mask(&outp->base.base);
	int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->encoder_mask & mask)
			return nouveau_crtc(crtc);
	}

	return NULL;
}

/******************************************************************************
 * DAC
 *****************************************************************************/
static void
nv50_dac_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_core *core = nv50_disp(encoder->dev)->core;
	const u32 ctrl = NVDEF(NV507D, DAC_SET_CONTROL, OWNER, NONE);

	core->func->dac->ctrl(core, nv_encoder->outp.or.id, ctrl, NULL);
	nv_encoder->crtc = NULL;
	nvif_outp_release(&nv_encoder->outp);
}

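/* Illustrative note: DAC_SET_CONTROL's OWNER field routes the output to a
 * head.  The HEAD0/HEAD1 values come from the NV507D class (the original
 * two-head EVO core channel); HEAD2/HEAD3 only exist in the NV907D
 * OWNER_MASK encoding, hence the mixed defines in the switch below.
 */
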
static void
nv50_dac_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
	struct nv50_head_atom *asyh =
		nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
	struct nv50_core *core = nv50_disp(encoder->dev)->core;
	u32 ctrl = 0;

	switch (nv_crtc->index) {
	case 0: ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, OWNER, HEAD0); break;
	case 1: ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, OWNER, HEAD1); break;
	case 2: ctrl |= NVDEF(NV907D, DAC_SET_CONTROL, OWNER_MASK, HEAD2); break;
	case 3: ctrl |= NVDEF(NV907D, DAC_SET_CONTROL, OWNER_MASK, HEAD3); break;
	default:
		WARN_ON(1);
		break;
	}

	ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, PROTOCOL, RGB_CRT);

	nvif_outp_acquire_rgb_crt(&nv_encoder->outp);

	core->func->dac->ctrl(core, nv_encoder->outp.or.id, ctrl, asyh);
	asyh->or.depth = 0;

	nv_encoder->crtc = &nv_crtc->base;
}

static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	u32 loadval;
	int ret;

	loadval = nouveau_drm(encoder->dev)->vbios.dactestval;
	if (loadval == 0)
		loadval = 340;

	ret = nvif_outp_load_detect(&nv_encoder->outp, loadval);
	if (ret <= 0)
		return connector_status_disconnected;

	return connector_status_connected;
}

static const struct drm_encoder_helper_funcs
nv50_dac_help = {
	.atomic_check = nv50_outp_atomic_check,
	.atomic_enable = nv50_dac_atomic_enable,
	.atomic_disable = nv50_dac_atomic_disable,
	.detect = nv50_dac_detect
};

static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

	nvif_outp_dtor(&nv_encoder->outp);

	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs
nv50_dac_func = {
	.destroy = nv50_dac_destroy,
};

static int
nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nv50_disp *disp = nv50_disp(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nvkm_i2c_bus *bus;
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type = DRM_MODE_ENCODER_DAC;

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;

	bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
	if (bus)
		nv_encoder->i2c = &bus->i2c;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
			 "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_dac_help);

	drm_connector_attach_encoder(connector, encoder);
	return nvif_outp_ctor(disp->disp, nv_encoder->base.base.name, dcbe->id, &nv_encoder->outp);
}

/*
 * audio component binding for ELD notification
 */
static void
nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port,
				int dev_id)
{
	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 port, dev_id);
}

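/* Illustrative note: the callback below is the consumer side of the ELD
 * notification above.  The HDA codec driver passes the OR index as 'port'
 * and the head index as 'dev_id'; we hand back the ELD cached on the
 * connector when audio was enabled, clamped to the caller's buffer size.
 */
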
static int
nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
			     bool *enabled, unsigned char *buf, int max_bytes)
{
	struct drm_device *drm_dev = dev_get_drvdata(kdev);
	struct nouveau_drm *drm = nouveau_drm(drm_dev);
	struct drm_encoder *encoder;
	struct nouveau_encoder *nv_encoder;
	struct nouveau_crtc *nv_crtc;
	int ret = 0;

	*enabled = false;

	mutex_lock(&drm->audio.lock);

	drm_for_each_encoder(encoder, drm->dev) {
		struct nouveau_connector *nv_connector = NULL;

		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST)
			continue; /* TODO */

		nv_encoder = nouveau_encoder(encoder);
		nv_connector = nouveau_connector(nv_encoder->audio.connector);
		nv_crtc = nouveau_crtc(nv_encoder->crtc);

		if (!nv_crtc || nv_encoder->outp.or.id != port || nv_crtc->index != dev_id)
			continue;

		*enabled = nv_encoder->audio.enabled;
		if (*enabled) {
			ret = drm_eld_size(nv_connector->base.eld);
			memcpy(buf, nv_connector->base.eld,
			       min(max_bytes, ret));
		}
		break;
	}

	mutex_unlock(&drm->audio.lock);

	return ret;
}

static const struct drm_audio_component_ops nv50_audio_component_ops = {
	.get_eld = nv50_audio_component_get_eld,
};

static int
nv50_audio_component_bind(struct device *kdev, struct device *hda_kdev,
			  void *data)
{
	struct drm_device *drm_dev = dev_get_drvdata(kdev);
	struct nouveau_drm *drm = nouveau_drm(drm_dev);
	struct drm_audio_component *acomp = data;

	if (WARN_ON(!device_link_add(hda_kdev, kdev, DL_FLAG_STATELESS)))
		return -ENOMEM;

	drm_modeset_lock_all(drm_dev);
	acomp->ops = &nv50_audio_component_ops;
	acomp->dev = kdev;
	drm->audio.component = acomp;
	drm_modeset_unlock_all(drm_dev);
	return 0;
}

static void
nv50_audio_component_unbind(struct device *kdev, struct device *hda_kdev,
			    void *data)
{
	struct drm_device *drm_dev = dev_get_drvdata(kdev);
	struct nouveau_drm *drm = nouveau_drm(drm_dev);
	struct drm_audio_component *acomp = data;

	drm_modeset_lock_all(drm_dev);
	drm->audio.component = NULL;
	acomp->ops = NULL;
	acomp->dev = NULL;
	drm_modeset_unlock_all(drm_dev);
}

static const struct component_ops nv50_audio_component_bind_ops = {
	.bind   = nv50_audio_component_bind,
	.unbind = nv50_audio_component_unbind,
};

static void
nv50_audio_component_init(struct nouveau_drm *drm)
{
	if (component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
		return;

	drm->audio.component_registered = true;
	mutex_init(&drm->audio.lock);
}

static void
nv50_audio_component_fini(struct nouveau_drm *drm)
{
	if (!drm->audio.component_registered)
		return;

	component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
	drm->audio.component_registered = false;
	mutex_destroy(&drm->audio.lock);
}

/******************************************************************************
 * Audio
 *****************************************************************************/
static bool
nv50_audio_supported(struct drm_encoder *encoder)
{
	struct nv50_disp *disp = nv50_disp(encoder->dev);

	if (disp->disp->object.oclass <= GT200_DISP ||
	    disp->disp->object.oclass == GT206_DISP)
		return false;

	return true;
}

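/* Illustrative note on ordering in the helpers below: audio state is updated
 * under drm->audio.lock (the same lock get_eld() takes), but the component
 * notify is deliberately sent after the lock is dropped, so the HDA driver
 * can immediately call back into get_eld() without deadlocking.
 */
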
static void
nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nvif_outp *outp = &nv_encoder->outp;

	if (!nv50_audio_supported(encoder))
		return;

	mutex_lock(&drm->audio.lock);
	if (nv_encoder->audio.enabled) {
		nv_encoder->audio.enabled = false;
		nv_encoder->audio.connector = NULL;
		nvif_outp_hda_eld(&nv_encoder->outp, nv_crtc->index, NULL, 0);
	}
	mutex_unlock(&drm->audio.lock);

	nv50_audio_component_eld_notify(drm->audio.component, outp->or.id, nv_crtc->index);
}

static void
nv50_audio_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
		  struct nouveau_connector *nv_connector, struct drm_atomic_state *state,
		  struct drm_display_mode *mode)
{
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nvif_outp *outp = &nv_encoder->outp;

	if (!nv50_audio_supported(encoder) || !drm_detect_monitor_audio(nv_connector->edid))
		return;

	mutex_lock(&drm->audio.lock);

	nvif_outp_hda_eld(&nv_encoder->outp, nv_crtc->index, nv_connector->base.eld,
			  drm_eld_size(nv_connector->base.eld));
	nv_encoder->audio.enabled = true;
	nv_encoder->audio.connector = &nv_connector->base;

	mutex_unlock(&drm->audio.lock);

	nv50_audio_component_eld_notify(drm->audio.component, outp->or.id, nv_crtc->index);
}

/******************************************************************************
 * HDMI
 *****************************************************************************/
static void
nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
		 struct nouveau_connector *nv_connector, struct drm_atomic_state *state,
		 struct drm_display_mode *mode, bool hda)
{
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_hdmi_info *hdmi = &nv_connector->base.display_info.hdmi;
	union hdmi_infoframe infoframe = { 0 };
	const u8 rekey = 56; /* binary driver, and tegra, constant */
	u8 scdc = 0;
	u32 max_ac_packet;
	struct {
		struct nvif_outp_infoframe_v0 infoframe;
		u8 data[17];
	} args = { 0 };
	int ret, size;

	max_ac_packet  = mode->htotal - mode->hdisplay;
	max_ac_packet -= rekey;
	max_ac_packet -= 18; /* constant from tegra */
	max_ac_packet /= 32;

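	/* Illustrative example of the arithmetic above: for a CEA 1920x1080@60
	 * mode with htotal = 2200, the horizontal blanking is 280 pixels;
	 * subtracting the 56-pixel rekey and the 18-pixel overhead leaves 206,
	 * so max_ac_packet = 206 / 32 = 6 audio/aux packets per line.
	 */
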
	if (hdmi->scdc.scrambling.supported) {
		const bool high_tmds_clock_ratio = mode->clock > 340000;

		ret = drm_scdc_readb(nv_encoder->i2c, SCDC_TMDS_CONFIG, &scdc);
		if (ret < 0) {
			NV_ERROR(drm, "Failure to read SCDC_TMDS_CONFIG: %d\n", ret);
			return;
		}

		scdc &= ~(SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 | SCDC_SCRAMBLING_ENABLE);
		if (high_tmds_clock_ratio || hdmi->scdc.scrambling.low_rates)
			scdc |= SCDC_SCRAMBLING_ENABLE;
		if (high_tmds_clock_ratio)
			scdc |= SCDC_TMDS_BIT_CLOCK_RATIO_BY_40;

		ret = drm_scdc_writeb(nv_encoder->i2c, SCDC_TMDS_CONFIG, scdc);
		if (ret < 0)
			NV_ERROR(drm, "Failure to write SCDC_TMDS_CONFIG = 0x%02x: %d\n",
				 scdc, ret);
	}

	ret = nvif_outp_acquire_tmds(&nv_encoder->outp, nv_crtc->index, true,
				     max_ac_packet, rekey, scdc, hda);
	if (ret)
		return;

	/* AVI InfoFrame. */
	args.infoframe.version = 0;
	args.infoframe.head = nv_crtc->index;

	if (!drm_hdmi_avi_infoframe_from_display_mode(&infoframe.avi, &nv_connector->base, mode)) {
		drm_hdmi_avi_infoframe_quant_range(&infoframe.avi, &nv_connector->base, mode,
						   HDMI_QUANTIZATION_RANGE_FULL);

		size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data));
	} else {
		size = 0;
	}

	nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_AVI, &args.infoframe, size);

	/* Vendor InfoFrame. */
	memset(&args.data, 0, sizeof(args.data));
	if (!drm_hdmi_vendor_infoframe_from_display_mode(&infoframe.vendor.hdmi,
							 &nv_connector->base, mode))
		size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data));
	else
		size = 0;

	nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_VSI, &args.infoframe, size);

	nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);
}

/******************************************************************************
 * MST
 *****************************************************************************/
#define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)

struct nv50_mstc {
	struct nv50_mstm *mstm;
	struct drm_dp_mst_port *port;
	struct drm_connector connector;

	struct drm_display_mode *native;
	struct edid *edid;
};

struct nv50_msto {
	struct drm_encoder encoder;

	/* head is statically assigned on msto creation */
	struct nv50_head *head;
	struct nv50_mstc *mstc;
	bool disabled;
	bool enabled;
};

struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder)
{
	struct nv50_msto *msto;

	if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST)
		return nouveau_encoder(encoder);

	msto = nv50_msto(encoder);
	if (!msto->mstc)
		return NULL;
	return msto->mstc->mstm->outp;
}

static void
nv50_msto_cleanup(struct drm_atomic_state *state,
		  struct drm_dp_mst_topology_state *mst_state,
		  struct drm_dp_mst_topology_mgr *mgr,
		  struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct drm_dp_mst_atomic_payload *payload =
		drm_atomic_get_mst_payload_state(mst_state, msto->mstc->port);

	NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);

	if (msto->disabled) {
		msto->mstc = NULL;
		msto->disabled = false;
	} else if (msto->enabled) {
		drm_dp_add_payload_part2(mgr, state, payload);
		msto->enabled = false;
	}
}

static void
nv50_msto_prepare(struct drm_atomic_state *state,
		  struct drm_dp_mst_topology_state *mst_state,
		  struct drm_dp_mst_topology_mgr *mgr,
		  struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;
	struct drm_dp_mst_atomic_payload *payload;

	NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);

	payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);

	// TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
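	/* Illustrative note: payload programming is a two-step dance.  Slot
	 * allocation (part1) and the VCPI update happen here in prepare();
	 * part2, which confirms the payload after the ACT handshake, runs
	 * later from nv50_msto_cleanup().
	 */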
	if (msto->disabled) {
		drm_dp_remove_payload(mgr, mst_state, payload, payload);

		nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
	} else {
		if (msto->enabled)
			drm_dp_add_payload_part1(mgr, mst_state, payload);

		nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index,
				      payload->vc_start_slot, payload->time_slots,
				      payload->pbn, payload->time_slots * mst_state->pbn_div);
	}
}

static int
nv50_msto_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct drm_dp_mst_topology_state *mst_state;
	struct nv50_mstc *mstc = nv50_mstc(connector);
	struct nv50_mstm *mstm = mstc->mstm;
	struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
	int slots;
	int ret;

	ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
					  mstc->native);
	if (ret)
		return ret;

	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	/*
	 * When restoring duplicated states, we need to make sure that the bw
	 * remains the same and avoid recalculating it, as the connector's bpc
	 * may have changed after the state was duplicated
	 */
	if (!state->duplicated) {
		const int clock = crtc_state->adjusted_mode.clock;

		asyh->or.bpc = connector->display_info.bpc;
		asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3,
						    false);
	}

	mst_state = drm_atomic_get_mst_topology_state(state, &mstm->mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	if (!mst_state->pbn_div) {
		struct nouveau_encoder *outp = mstc->mstm->outp;

		mst_state->pbn_div = drm_dp_get_vc_payload_bw(&mstm->mgr,
							      outp->dp.link_bw, outp->dp.link_nr);
	}

	slots = drm_dp_atomic_find_time_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn);
	if (slots < 0)
		return slots;

	asyh->dp.tu = slots;

	return 0;
}

static u8
nv50_dp_bpc_to_depth(unsigned int bpc)
{
	switch (bpc) {
	case  6: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444;
	case  8: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444;
	case 10:
	default: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444;
	}
}

static void
nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nv50_msto *msto = nv50_msto(encoder);
	struct nv50_head *head = msto->head;
	struct nv50_head_atom *asyh =
		nv50_head_atom(drm_atomic_get_new_crtc_state(state, &head->base.base));
	struct nv50_mstc *mstc = NULL;
	struct nv50_mstm *mstm = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 proto;

	drm_connector_list_iter_begin(encoder->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->best_encoder == &msto->encoder) {
			mstc = nv50_mstc(connector);
			mstm = mstc->mstm;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (WARN_ON(!mstc))
		return;

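	/* Illustrative note: the first active stream on this MST link also
	 * acquires the physical DP output below (links counts active MSTOs);
	 * the last one to disable releases it again in nv50_mstm_prepare().
	 */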
	if (!mstm->links++) {
		/*XXX: MST audio. */
		nvif_outp_acquire_dp(&mstm->outp->outp, mstm->outp->dp.dpcd, 0, 0, false, true);
	}

	if (mstm->outp->outp.or.link & 1)
		proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_A;
	else
		proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B;

	mstm->outp->update(mstm->outp, head->base.index, asyh, proto,
			   nv50_dp_bpc_to_depth(asyh->or.bpc));

	msto->mstc = mstc;
	msto->enabled = true;
	mstm->modified = true;
}

static void
nv50_msto_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nv50_msto *msto = nv50_msto(encoder);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;

	mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
	mstm->modified = true;
	if (!--mstm->links)
		mstm->disabled = true;
	msto->disabled = true;
}

static const struct drm_encoder_helper_funcs
nv50_msto_help = {
	.atomic_disable = nv50_msto_atomic_disable,
	.atomic_enable = nv50_msto_atomic_enable,
	.atomic_check = nv50_msto_atomic_check,
};

static void
nv50_msto_destroy(struct drm_encoder *encoder)
{
	struct nv50_msto *msto = nv50_msto(encoder);
	drm_encoder_cleanup(&msto->encoder);
	kfree(msto);
}

static const struct drm_encoder_funcs
nv50_msto = {
	.destroy = nv50_msto_destroy,
};

static struct nv50_msto *
nv50_msto_new(struct drm_device *dev, struct nv50_head *head, int id)
{
	struct nv50_msto *msto;
	int ret;

	msto = kzalloc(sizeof(*msto), GFP_KERNEL);
	if (!msto)
		return ERR_PTR(-ENOMEM);

	ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
			       DRM_MODE_ENCODER_DPMST, "mst-%d", id);
	if (ret) {
		kfree(msto);
		return ERR_PTR(ret);
	}

	drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
	msto->encoder.possible_crtcs = drm_crtc_mask(&head->base.base);
	msto->head = head;
	return msto;
}

static struct drm_encoder *
nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
			      struct drm_atomic_state *state)
{
	struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
											 connector);
	struct nv50_mstc *mstc = nv50_mstc(connector);
	struct drm_crtc *crtc = connector_state->crtc;

	if (!(mstc->mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
		return NULL;

	return &nv50_head(crtc)->msto->encoder;
}

static enum drm_mode_status
nv50_mstc_mode_valid(struct drm_connector *connector,
		     struct drm_display_mode *mode)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	struct nouveau_encoder *outp = mstc->mstm->outp;

	/* TODO: calculate the PBN from the dotclock and validate against the
	 * MSTB's max possible PBN
	 */

	return nv50_dp_mode_valid(connector, outp, mode, NULL);
}

static int
nv50_mstc_get_modes(struct drm_connector *connector)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	int ret = 0;

	mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
	drm_connector_update_edid_property(&mstc->connector, mstc->edid);
	if (mstc->edid)
		ret = drm_add_edid_modes(&mstc->connector, mstc->edid);

	/*
	 * XXX: Since we don't use HDR in userspace quite yet, limit the bpc
	 * to 8 to save bandwidth on the topology.  In the future, we'll want
	 * to properly fix this by dynamically selecting the highest possible
	 * bpc that would fit in the topology
	 */
	if (connector->display_info.bpc)
		connector->display_info.bpc =
			clamp(connector->display_info.bpc, 6U, 8U);
	else
		connector->display_info.bpc = 8;

	if (mstc->native)
		drm_mode_destroy(mstc->connector.dev, mstc->native);
	mstc->native = nouveau_conn_native_mode(&mstc->connector);
	return ret;
}

static int
nv50_mstc_atomic_check(struct drm_connector *connector,
		       struct drm_atomic_state *state)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	struct drm_dp_mst_topology_mgr *mgr = &mstc->mstm->mgr;

	return drm_dp_atomic_release_time_slots(state, mgr, mstc->port);
}

static int
nv50_mstc_detect(struct drm_connector *connector,
		 struct drm_modeset_acquire_ctx *ctx, bool force)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	int ret;

	if (drm_connector_is_unregistered(connector))
		return connector_status_disconnected;

	ret = pm_runtime_get_sync(connector->dev->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_autosuspend(connector->dev->dev);
		return connector_status_disconnected;
	}

	ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
				     mstc->port);
	if (ret != connector_status_connected)
		goto out;

out:
	pm_runtime_mark_last_busy(connector->dev->dev);
	pm_runtime_put_autosuspend(connector->dev->dev);
	return ret;
}

static const struct drm_connector_helper_funcs
nv50_mstc_help = {
	.get_modes = nv50_mstc_get_modes,
	.mode_valid = nv50_mstc_mode_valid,
	.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
	.atomic_check = nv50_mstc_atomic_check,
	.detect_ctx = nv50_mstc_detect,
};

static void
nv50_mstc_destroy(struct drm_connector *connector)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);

	drm_connector_cleanup(&mstc->connector);
	drm_dp_mst_put_port_malloc(mstc->port);

	kfree(mstc);
}

static const struct drm_connector_funcs
nv50_mstc = {
	.reset = nouveau_conn_reset,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = nv50_mstc_destroy,
	.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
	.atomic_destroy_state = nouveau_conn_atomic_destroy_state,
	.atomic_set_property = nouveau_conn_atomic_set_property,
	.atomic_get_property = nouveau_conn_atomic_get_property,
};

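/* Illustrative note: each MST connector created below is attached to every
 * MSTO/head the parent output can drive (dcb->heads), so the atomic helper
 * can route it to whichever head is free; atomic_best_encoder above then
 * picks the MSTO statically tied to the chosen CRTC.
 */
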
static int
nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
	      const char *path, struct nv50_mstc **pmstc)
{
	struct drm_device *dev = mstm->outp->base.base.dev;
	struct drm_crtc *crtc;
	struct nv50_mstc *mstc;
	int ret;

	if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
		return -ENOMEM;
	mstc->mstm = mstm;
	mstc->port = port;

	ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		kfree(*pmstc);
		*pmstc = NULL;
		return ret;
	}

	drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);

	mstc->connector.funcs->reset(&mstc->connector);
	nouveau_conn_attach_properties(&mstc->connector);

	drm_for_each_crtc(crtc, dev) {
		if (!(mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
			continue;

		drm_connector_attach_encoder(&mstc->connector,
					     &nv50_head(crtc)->msto->encoder);
	}

	drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
	drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
	drm_connector_set_path_property(&mstc->connector, path);
	drm_dp_mst_get_port_malloc(port);
	return 0;
}

static void
nv50_mstm_cleanup(struct drm_atomic_state *state,
		  struct drm_dp_mst_topology_state *mst_state,
		  struct nv50_mstm *mstm)
{
	struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
	struct drm_encoder *encoder;

	NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
	drm_dp_check_act_status(&mstm->mgr);

	drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			struct nv50_msto *msto = nv50_msto(encoder);
			struct nv50_mstc *mstc = msto->mstc;
			if (mstc && mstc->mstm == mstm)
				nv50_msto_cleanup(state, mst_state, &mstm->mgr, msto);
		}
	}

	mstm->modified = false;
}

static void
nv50_mstm_prepare(struct drm_atomic_state *state,
		  struct drm_dp_mst_topology_state *mst_state,
		  struct nv50_mstm *mstm)
{
	struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
	struct drm_encoder *encoder;

	NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);

	/* Disable payloads first */
	drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			struct nv50_msto *msto = nv50_msto(encoder);
			struct nv50_mstc *mstc = msto->mstc;
			if (mstc && mstc->mstm == mstm && msto->disabled)
				nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
		}
	}

	/* Add payloads for new heads, while also updating the start slots of any unmodified (but
	 * active) heads that may have had their VC slots shifted left after the previous step
	 */
	drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			struct nv50_msto *msto = nv50_msto(encoder);
			struct nv50_mstc *mstc = msto->mstc;
			if (mstc && mstc->mstm == mstm && !msto->disabled)
				nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
		}
	}

	if (mstm->disabled) {
		if (!mstm->links)
			nvif_outp_release(&mstm->outp->outp);
		mstm->disabled = false;
	}
}

static struct drm_connector *
nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_port *port, const char *path)
{
	struct nv50_mstm *mstm = nv50_mstm(mgr);
	struct nv50_mstc *mstc;
	int ret;

	ret = nv50_mstc_new(mstm, port, path, &mstc);
	if (ret)
		return NULL;

	return &mstc->connector;
}

static const struct drm_dp_mst_topology_cbs
nv50_mstm = {
	.add_connector = nv50_mstm_add_connector,
};

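/* Illustrative note: the service loop below follows the DP MST ESI contract.
 * Reading DP_SINK_COUNT_ESI fetches 8 bytes of event state, the hpd_irq
 * helper dispatches whatever it understands, and writing bytes 1..3 back
 * acks the serviced events; the loop repeats until nothing was handled.
 */
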
bool
nv50_mstm_service(struct nouveau_drm *drm,
		  struct nouveau_connector *nv_connector,
		  struct nv50_mstm *mstm)
{
	struct drm_dp_aux *aux = &nv_connector->aux;
	bool handled = true, ret = true;
	int rc;
	u8 esi[8] = {};

	while (handled) {
		rc = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
		if (rc != 8) {
			ret = false;
			break;
		}

		drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
		if (!handled)
			break;

		rc = drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1],
				       3);
		if (rc != 3) {
			ret = false;
			break;
		}
	}

	if (!ret)
		NV_DEBUG(drm, "Failed to handle ESI on %s: %d\n",
			 nv_connector->base.name, rc);

	return ret;
}

void
nv50_mstm_remove(struct nv50_mstm *mstm)
{
	mstm->is_mst = false;
	drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
}

int
nv50_mstm_detect(struct nouveau_encoder *outp)
{
	struct nv50_mstm *mstm = outp->dp.mstm;
	struct drm_dp_aux *aux;
	int ret;

	if (!mstm || !mstm->can_mst)
		return 0;

	aux = mstm->mgr.aux;

	/* Clear any leftover MST state we didn't set ourselves by first
	 * disabling MST if it was already enabled
	 */
	ret = drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
	if (ret < 0)
		return ret;

	/* And start enabling */
	ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, true);
	if (ret)
		return ret;

	mstm->is_mst = true;
	return 1;
}

static void
nv50_mstm_fini(struct nouveau_encoder *outp)
{
	struct nv50_mstm *mstm = outp->dp.mstm;

	if (!mstm)
		return;

	/* Don't change the MST state of this connector until we've finished
	 * resuming, since we can't safely grab hpd_irq_lock in our resume
	 * path to protect mstm->is_mst without potentially deadlocking
	 */
	mutex_lock(&outp->dp.hpd_irq_lock);
	mstm->suspended = true;
	mutex_unlock(&outp->dp.hpd_irq_lock);

	if (mstm->is_mst)
		drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
}

static void
nv50_mstm_init(struct nouveau_encoder *outp, bool runtime)
{
	struct nv50_mstm *mstm = outp->dp.mstm;
	int ret = 0;

	if (!mstm)
		return;

	if (mstm->is_mst) {
		ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime);
		if (ret == -1)
			nv50_mstm_remove(mstm);
	}

	mutex_lock(&outp->dp.hpd_irq_lock);
	mstm->suspended = false;
	mutex_unlock(&outp->dp.hpd_irq_lock);

	if (ret == -1)
		drm_kms_helper_hotplug_event(mstm->mgr.dev);
}

static void
nv50_mstm_del(struct nv50_mstm **pmstm)
{
	struct nv50_mstm *mstm = *pmstm;
	if (mstm) {
		drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
		kfree(*pmstm);
		*pmstm = NULL;
	}
}

static int
nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
	      int conn_base_id, struct nv50_mstm **pmstm)
{
	const int max_payloads = hweight8(outp->dcb->heads);
	struct drm_device *dev = outp->base.base.dev;
	struct nv50_mstm *mstm;
	int ret;

	if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
		return -ENOMEM;
	mstm->outp = outp;
	mstm->mgr.cbs = &nv50_mstm;

	ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
					   max_payloads, conn_base_id);
	if (ret)
		return ret;

	return 0;
}

/******************************************************************************
 * SOR
 *****************************************************************************/
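/* Illustrative note: nv_encoder->ctrl below caches the SOR_SET_CONTROL word
 * across heads.  OWNER is a per-head bitmask on this hardware, so enabling
 * sets BIT(head) plus PROTOCOL, while disabling clears BIT(head) and only
 * zeroes the whole word once no head owns the SOR any more.
 */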
static void
nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
		struct nv50_head_atom *asyh, u8 proto, u8 depth)
{
	struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
	struct nv50_core *core = disp->core;

	if (!asyh) {
		nv_encoder->ctrl &= ~BIT(head);
		if (NVDEF_TEST(nv_encoder->ctrl, NV507D, SOR_SET_CONTROL, OWNER, ==, NONE))
			nv_encoder->ctrl = 0;
	} else {
		nv_encoder->ctrl |= NVVAL(NV507D, SOR_SET_CONTROL, PROTOCOL, proto);
		nv_encoder->ctrl |= BIT(head);
		asyh->or.depth = depth;
	}

	core->func->sor->ctrl(core, nv_encoder->outp.or.id, nv_encoder->ctrl, asyh);
}

/* TODO: Should we extend this to PWM-only backlights?
 * Also, should we add a DRM helper for waiting for the backlight to acknowledge
 * that the panel backlight has been shut off?  Intel doesn't seem to do this, and
 * uses a fixed time delay from the vbios…
 */
static void
nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
	struct nouveau_connector *nv_connector = nv50_outp_get_old_connector(state, nv_encoder);
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
	struct nouveau_backlight *backlight = nv_connector->backlight;
#endif
	struct drm_dp_aux *aux = &nv_connector->aux;
	int ret;
	u8 pwr;

#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
	if (backlight && backlight->uses_dpcd) {
		ret = drm_edp_backlight_disable(aux, &backlight->edp_info);
		if (ret < 0)
			NV_ERROR(drm, "Failed to disable backlight on [CONNECTOR:%d:%s]: %d\n",
				 nv_connector->base.base.id, nv_connector->base.name, ret);
	}
#endif

	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
		ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);

		if (ret == 0) {
			pwr &= ~DP_SET_POWER_MASK;
			pwr |=  DP_SET_POWER_D3;
			drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
		}
	}

	nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
	nv50_audio_disable(encoder, nv_crtc);
	nvif_outp_release(&nv_encoder->outp);
	nv_encoder->crtc = NULL;
}

static void
nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
	struct nv50_head_atom *asyh =
		nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct nvif_outp *outp = &nv_encoder->outp;
	struct drm_device *dev = encoder->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_connector *nv_connector;
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
	struct nouveau_backlight *backlight;
#endif
	struct nvbios *bios = &drm->vbios;
	bool lvds_dual = false, lvds_8bpc = false, hda = false;
	u8 proto = NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM;
	u8 depth = NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT;

	nv_connector = nv50_outp_get_new_connector(state, nv_encoder);
	nv_encoder->crtc = &nv_crtc->base;

	if ((disp->disp->object.oclass == GT214_DISP ||
	     disp->disp->object.oclass >= GF110_DISP) &&
	    drm_detect_monitor_audio(nv_connector->edid))
		hda = true;

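	/* Illustrative note: the switch below picks the SOR protocol and pixel
	 * depth per DCB output type: TMDS chooses single vs dual link (and
	 * HDMI packet setup), LVDS derives link width and bpc from VBIOS/EDID,
	 * and DP maps bpc straight through nv50_dp_bpc_to_depth().
	 */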
	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_TMDS:
		if (disp->disp->object.oclass == NV50_DISP ||
		    !drm_detect_hdmi_monitor(nv_connector->edid))
			nvif_outp_acquire_tmds(outp, nv_crtc->index, false, 0, 0, 0, false);
		else
			nv50_hdmi_enable(encoder, nv_crtc, nv_connector, state, mode, hda);

		if (nv_encoder->outp.or.link & 1) {
			proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A;
			/* Only enable dual-link if:
			 *  - We need to (i.e. mode clock >= 165 MHz)
			 *  - DCB says we can
			 *  - It's not an HDMI monitor, since there's no
			 *    dual-link on HDMI.
			 */
			if (mode->clock >= 165000 &&
			    nv_encoder->dcb->duallink_possible &&
			    !drm_detect_hdmi_monitor(nv_connector->edid))
				proto = NV507D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS;
		} else {
			proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B;
		}
		break;
	case DCB_OUTPUT_LVDS:
		proto = NV507D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM;

		if (bios->fp_no_ddc) {
			lvds_dual = bios->fp.dual_link;
			lvds_8bpc = bios->fp.if_is_24bit;
		} else {
			if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
				if (((u8 *)nv_connector->edid)[121] == 2)
					lvds_dual = true;
			} else
			if (mode->clock >= bios->fp.duallink_transition_clk) {
				lvds_dual = true;
			}

			if (lvds_dual) {
				if (bios->fp.strapless_is_24bit & 2)
					lvds_8bpc = true;
			} else {
				if (bios->fp.strapless_is_24bit & 1)
					lvds_8bpc = true;
			}

			if (asyh->or.bpc == 8)
				lvds_8bpc = true;
		}

		nvif_outp_acquire_lvds(&nv_encoder->outp, lvds_dual, lvds_8bpc);
		break;
	case DCB_OUTPUT_DP:
		nvif_outp_acquire_dp(&nv_encoder->outp, nv_encoder->dp.dpcd, 0, 0, hda, false);
		depth = nv50_dp_bpc_to_depth(asyh->or.bpc);

		if (nv_encoder->outp.or.link & 1)
			proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_A;
		else
			proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B;

		nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);

#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
		backlight = nv_connector->backlight;
		if (backlight && backlight->uses_dpcd)
			drm_edp_backlight_enable(&nv_connector->aux, &backlight->edp_info,
						 (u16)backlight->dev->props.brightness);
#endif

		break;
	default:
		BUG();
		break;
	}

	nv_encoder->update(nv_encoder, nv_crtc->index, asyh, proto, depth);
}

static const struct drm_encoder_helper_funcs
nv50_sor_help = {
	.atomic_check = nv50_outp_atomic_check,
	.atomic_enable = nv50_sor_atomic_enable,
	.atomic_disable = nv50_sor_atomic_disable,
};

static void
nv50_sor_destroy(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

	nvif_outp_dtor(&nv_encoder->outp);

	nv50_mstm_del(&nv_encoder->dp.mstm);
	drm_encoder_cleanup(encoder);

	if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
		mutex_destroy(&nv_encoder->dp.hpd_irq_lock);

	kfree(encoder);
}

static const struct drm_encoder_funcs
nv50_sor_func = {
	.destroy = nv50_sor_destroy,
};

bool nv50_has_mst(struct nouveau_drm *drm)
{
	struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
	u32 data;
	u8 ver, hdr, cnt, len;

	data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len);
	return data && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04);
}

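/* Illustrative note: nv50_has_mst() above gates MST on the VBIOS DP table; a
 * table version of at least 0x40 with bit 2 set in the byte at offset 0x08
 * is taken to mean the board supports multi-stream.
 */
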
static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	struct nv50_disp *disp = nv50_disp(connector->dev);
	int type, ret;

	switch (dcbe->type) {
	case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
	case DCB_OUTPUT_TMDS:
	case DCB_OUTPUT_DP:
	default:
		type = DRM_MODE_ENCODER_TMDS;
		break;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	nv_encoder->update = nv50_sor_update;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
			 "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_sor_help);

	drm_connector_attach_encoder(connector, encoder);

	disp->core->func->sor->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
	nv50_outp_dump_caps(drm, nv_encoder);

	if (dcbe->type == DCB_OUTPUT_DP) {
		struct nvkm_i2c_aux *aux =
			nvkm_i2c_aux_find(i2c, dcbe->i2c_index);

		mutex_init(&nv_encoder->dp.hpd_irq_lock);

		if (aux) {
			if (disp->disp->object.oclass < GF110_DISP) {
				/* HW has no support for address-only
				 * transactions, so we're required to
				 * use custom I2C-over-AUX code.
				 */
				nv_encoder->i2c = &aux->i2c;
			} else {
				nv_encoder->i2c = &nv_connector->aux.ddc;
			}
			nv_encoder->aux = aux;
		}

		if (nv_connector->type != DCB_CONNECTOR_eDP &&
		    nv50_has_mst(drm)) {
			ret = nv50_mstm_new(nv_encoder, &nv_connector->aux,
					    16, nv_connector->base.base.id,
					    &nv_encoder->dp.mstm);
			if (ret)
				return ret;
		}
	} else {
		struct nvkm_i2c_bus *bus =
			nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
		if (bus)
			nv_encoder->i2c = &bus->i2c;
	}

	return nvif_outp_ctor(disp->disp, nv_encoder->base.base.name, dcbe->id, &nv_encoder->outp);
}

/******************************************************************************
 * PIOR
 *****************************************************************************/
static int
nv50_pior_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
	if (ret)
		return ret;
	crtc_state->adjusted_mode.clock *= 2;
	return 0;
}

static void
nv50_pior_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_core *core = nv50_disp(encoder->dev)->core;
	const u32 ctrl = NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, NONE);

	core->func->pior->ctrl(core, nv_encoder->outp.or.id, ctrl, NULL);
	nv_encoder->crtc = NULL;
	nvif_outp_release(&nv_encoder->outp);
}

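/* Illustrative note: in the enable path below, asyh->or.bpc maps directly to
 * the PIOR PIXEL_DEPTH field (6/8/10 bpc -> 18/24/30bpp 4:4:4), and both the
 * TMDS and DP cases program the EXT_TMDS_ENC protocol, since the heavy
 * lifting is done by the external encoder chip on the board.
 */
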
static void
nv50_pior_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
	struct nv50_head_atom *asyh =
		nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
	struct nv50_core *core = nv50_disp(encoder->dev)->core;
	u32 ctrl = 0;

	switch (nv_crtc->index) {
	case 0: ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, HEAD0); break;
	case 1: ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, HEAD1); break;
	default:
		WARN_ON(1);
		break;
	}

	switch (asyh->or.bpc) {
	case 10: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444; break;
	case  8: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444; break;
	case  6: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444; break;
	default: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT; break;
	}

	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_TMDS:
		ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, PROTOCOL, EXT_TMDS_ENC);
		nvif_outp_acquire_tmds(&nv_encoder->outp, false, false, 0, 0, 0, false);
		break;
	case DCB_OUTPUT_DP:
		ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, PROTOCOL, EXT_TMDS_ENC);
		nvif_outp_acquire_dp(&nv_encoder->outp, nv_encoder->dp.dpcd, 0, 0, false, false);
		break;
	default:
		BUG();
		break;
	}

	core->func->pior->ctrl(core, nv_encoder->outp.or.id, ctrl, asyh);
	nv_encoder->crtc = &nv_crtc->base;
}

static const struct drm_encoder_helper_funcs
nv50_pior_help = {
	.atomic_check = nv50_pior_atomic_check,
	.atomic_enable = nv50_pior_atomic_enable,
	.atomic_disable = nv50_pior_atomic_disable,
};

static void
nv50_pior_destroy(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

	nvif_outp_dtor(&nv_encoder->outp);

	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs
nv50_pior_func = {
	.destroy = nv50_pior_destroy,
};

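/* Illustrative note: nv50_pior_create() below looks the DDC/AUX channel up
 * via the external-device index (NVKM_I2C_BUS_EXT/NVKM_I2C_AUX_EXT) rather
 * than the usual i2c_index, because the bus belongs to the external encoder
 * rather than to one of the GPU's own outputs.
 */
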
static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct drm_device *dev = connector->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv50_disp *disp = nv50_disp(dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nvkm_i2c_bus *bus = NULL;
	struct nvkm_i2c_aux *aux = NULL;
	struct i2c_adapter *ddc;
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type;

	switch (dcbe->type) {
	case DCB_OUTPUT_TMDS:
		bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
		ddc = bus ? &bus->i2c : NULL;
		type = DRM_MODE_ENCODER_TMDS;
		break;
	case DCB_OUTPUT_DP:
		aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
		ddc = aux ? &aux->i2c : NULL;
		type = DRM_MODE_ENCODER_TMDS;
		break;
	default:
		return -ENODEV;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	nv_encoder->i2c = ddc;
	nv_encoder->aux = aux;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
			 "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_pior_help);

	drm_connector_attach_encoder(connector, encoder);

	disp->core->func->pior->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
	nv50_outp_dump_caps(drm, nv_encoder);

	return nvif_outp_ctor(disp->disp, nv_encoder->base.base.name, dcbe->id, &nv_encoder->outp);
}

/******************************************************************************
 * Atomic
 *****************************************************************************/

static void
nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *mst_state;
	struct nouveau_drm *drm = nouveau_drm(state->dev);
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct nv50_core *core = disp->core;
	struct nv50_mstm *mstm;
	int i;

	NV_ATOMIC(drm, "commit core %08x\n", interlock[NV50_DISP_INTERLOCK_BASE]);

	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		mstm = nv50_mstm(mgr);
		if (mstm->modified)
			nv50_mstm_prepare(state, mst_state, mstm);
	}

	core->func->ntfy_init(disp->sync, NV50_DISP_CORE_NTFY);
	core->func->update(core, interlock, true);
	if (core->func->ntfy_wait_done(disp->sync, NV50_DISP_CORE_NTFY,
				       disp->core->chan.base.device))
		NV_ERROR(drm, "core notifier timeout\n");

	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		mstm = nv50_mstm(mgr);
		if (mstm->modified)
			nv50_mstm_cleanup(state, mst_state, mstm);
	}
}

static void
nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock)
{
	struct drm_plane_state *new_plane_state;
	struct drm_plane *plane;
	int i;

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw *wndw = nv50_wndw(plane);
		if (interlock[wndw->interlock.type] & wndw->interlock.data) {
			if (wndw->func->update)
				wndw->func->update(wndw, interlock);
		}
	}
}
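/* Commit the software state to the hardware: heads, planes and output
 * paths are disabled first, flushed if anything was torn down, then
 * re-enabled and flushed again.  interlock[] accumulates, per channel
 * type, which window channel updates the core channel update must
 * interlock with.
 */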
static void
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct drm_crtc *crtc;
	struct drm_plane_state *new_plane_state;
	struct drm_plane *plane;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_atom *atom = nv50_atom(state);
	struct nv50_core *core = disp->core;
	struct nv50_outp_atom *outp, *outt;
	u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {};
	int i;
	bool flushed = false;

	NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
	nv50_crc_atomic_stop_reporting(state);
	drm_atomic_helper_wait_for_fences(dev, state, false);
	drm_atomic_helper_wait_for_dependencies(state);
	drm_dp_mst_atomic_wait_for_dependencies(state);
	drm_atomic_helper_update_legacy_modeset_state(dev, state);
	drm_atomic_helper_calc_timestamping_constants(state);

	if (atom->lock_core)
		mutex_lock(&disp->mutex);

	/* Disable head(s). */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_head *head = nv50_head(crtc);

		NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
			  asyh->clr.mask, asyh->set.mask);

		if (old_crtc_state->active && !new_crtc_state->active) {
			pm_runtime_put_noidle(dev->dev);
			drm_crtc_vblank_off(crtc);
		}

		if (asyh->clr.mask) {
			nv50_head_flush_clr(head, asyh, atom->flush_disable);
			interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
		}
	}

	/* Disable plane(s). */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
			  asyw->clr.mask, asyw->set.mask);
		if (!asyw->clr.mask)
			continue;

		nv50_wndw_flush_clr(wndw, interlock, atom->flush_disable, asyw);
	}

	/* Disable output path(s). */
	list_for_each_entry(outp, &atom->outp, head) {
		const struct drm_encoder_helper_funcs *help;
		struct drm_encoder *encoder;

		encoder = outp->encoder;
		help = encoder->helper_private;

		NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
			  outp->clr.mask, outp->set.mask);

		if (outp->clr.mask) {
			help->atomic_disable(encoder, state);
			interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
			if (outp->flush_disable) {
				nv50_disp_atomic_commit_wndw(state, interlock);
				nv50_disp_atomic_commit_core(state, interlock);
				memset(interlock, 0x00, sizeof(interlock));

				flushed = true;
			}
		}
	}
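	/* flush_disable is only set when an MST encoder is being torn down
	 * (see nv50_disp_outp_atomic_check_clr()): those disables have to
	 * reach the hardware before the MST payload cleanup performed by
	 * nv50_disp_atomic_commit_core(), hence the immediate flushes.
	 */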
	/* Flush disable. */
	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
		if (atom->flush_disable) {
			nv50_disp_atomic_commit_wndw(state, interlock);
			nv50_disp_atomic_commit_core(state, interlock);
			memset(interlock, 0x00, sizeof(interlock));

			flushed = true;
		}
	}

	if (flushed)
		nv50_crc_atomic_release_notifier_contexts(state);
	nv50_crc_atomic_init_notifier_contexts(state);

	/* Update output path(s). */
	list_for_each_entry_safe(outp, outt, &atom->outp, head) {
		const struct drm_encoder_helper_funcs *help;
		struct drm_encoder *encoder;

		encoder = outp->encoder;
		help = encoder->helper_private;

		NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
			  outp->set.mask, outp->clr.mask);

		if (outp->set.mask) {
			help->atomic_enable(encoder, state);
			interlock[NV50_DISP_INTERLOCK_CORE] = 1;
		}

		list_del(&outp->head);
		kfree(outp);
	}

	/* Update head(s). */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_head *head = nv50_head(crtc);

		NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
			  asyh->set.mask, asyh->clr.mask);

		if (asyh->set.mask) {
			nv50_head_flush_set(head, asyh);
			interlock[NV50_DISP_INTERLOCK_CORE] = 1;
		}

		if (new_crtc_state->active) {
			if (!old_crtc_state->active) {
				drm_crtc_vblank_on(crtc);
				pm_runtime_get_noresume(dev->dev);
			}
			if (new_crtc_state->event)
				drm_crtc_vblank_get(crtc);
		}
	}

	/* Update window->head assignment.
	 *
	 * This has to happen in an update that's not interlocked with
	 * any window channels to avoid hitting HW error checks.
	 *
	 * TODO: Proper handling of window ownership (Turing apparently
	 * supports non-fixed mappings).
	 */
	if (core->assign_windows) {
		core->func->wndw.owner(core);
		nv50_disp_atomic_commit_core(state, interlock);
		core->assign_windows = false;
		interlock[NV50_DISP_INTERLOCK_CORE] = 0;
	}

	/* Finish updating head(s)...
	 *
	 * NVD is rather picky about both where window assignments can change,
	 * *and* about certain core and window channel states matching.
	 *
	 * The EFI GOP driver on newer GPUs configures window channels with a
	 * different output format to what we do, and the core channel update
	 * in the assign_windows case above would result in a state mismatch.
	 *
	 * Delay some of the head update until after that point to work around
	 * the issue.  This only affects the initial modeset.
	 *
	 * TODO: handle this better when adding flexible window mapping
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_head *head = nv50_head(crtc);

		NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
			  asyh->set.mask, asyh->clr.mask);

		if (asyh->set.mask) {
			nv50_head_flush_set_wndw(head, asyh);
			interlock[NV50_DISP_INTERLOCK_CORE] = 1;
		}
	}

	/* Update plane(s). */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
			  asyw->set.mask, asyw->clr.mask);
		if (!asyw->set.mask &&
		    (!asyw->clr.mask || atom->flush_disable))
			continue;

		nv50_wndw_flush_set(wndw, interlock, asyw);
	}

	/* Flush update. */
	nv50_disp_atomic_commit_wndw(state, interlock);
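	/* Legacy cursor updates that don't interlock with any other channel
	 * can use a cheaper core update that skips the notifier round-trip
	 * done in nv50_disp_atomic_commit_core().
	 */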
	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
		if (interlock[NV50_DISP_INTERLOCK_BASE] ||
		    interlock[NV50_DISP_INTERLOCK_OVLY] ||
		    interlock[NV50_DISP_INTERLOCK_WNDW] ||
		    !atom->state.legacy_cursor_update)
			nv50_disp_atomic_commit_core(state, interlock);
		else
			disp->core->func->update(disp->core, interlock, false);
	}

	if (atom->lock_core)
		mutex_unlock(&disp->mutex);

	/* Wait for HW to signal completion. */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);
		int ret = nv50_wndw_wait_armed(wndw, asyw);
		if (ret)
			NV_ERROR(drm, "%s: timeout\n", plane->name);
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->event) {
			unsigned long flags;
			/* Get correct count/ts if racing with vblank irq */
			if (new_crtc_state->active)
				drm_crtc_accurate_vblank_count(crtc);
			spin_lock_irqsave(&crtc->dev->event_lock, flags);
			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
			spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

			new_crtc_state->event = NULL;
			if (new_crtc_state->active)
				drm_crtc_vblank_put(crtc);
		}
	}

	nv50_crc_atomic_start_reporting(state);
	if (!flushed)
		nv50_crc_atomic_release_notifier_contexts(state);

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_helper_commit_cleanup_done(state);
	drm_atomic_state_put(state);

	/* Drop the RPM ref we got from nv50_disp_atomic_commit() */
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

static void
nv50_disp_atomic_commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, typeof(*state), commit_work);
	nv50_disp_atomic_commit_tail(state);
}
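/* Atomic commit entry point.  A runtime PM reference is held for the
 * duration of this function, and a second one is taken for the commit
 * tail, which releases it when done; nonblocking commits run the tail
 * from a worker on system_unbound_wq.
 */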
static int
nv50_disp_atomic_commit(struct drm_device *dev,
			struct drm_atomic_state *state, bool nonblock)
{
	struct drm_plane_state *new_plane_state;
	struct drm_plane *plane;
	int ret, i;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_autosuspend(dev->dev);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		goto done;

	INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		goto done;

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret)
			goto err_cleanup;
	}

	ret = drm_atomic_helper_swap_state(state, true);
	if (ret)
		goto err_cleanup;

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		if (asyw->set.image)
			nv50_wndw_ntfy_enable(wndw, asyw);
	}

	drm_atomic_state_get(state);

	/*
	 * Grab another RPM ref for the commit tail, which will release the
	 * ref when it's finished
	 */
	pm_runtime_get_noresume(dev->dev);

	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		nv50_disp_atomic_commit_tail(state);

err_cleanup:
	if (ret)
		drm_atomic_helper_cleanup_planes(dev, state);
done:
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static struct nv50_outp_atom *
nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
{
	struct nv50_outp_atom *outp;

	list_for_each_entry(outp, &atom->outp, head) {
		if (outp->encoder == encoder)
			return outp;
	}

	outp = kzalloc(sizeof(*outp), GFP_KERNEL);
	if (!outp)
		return ERR_PTR(-ENOMEM);

	list_add(&outp->head, &atom->outp);
	outp->encoder = encoder;
	return outp;
}

static int
nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
				struct drm_connector_state *old_connector_state)
{
	struct drm_encoder *encoder = old_connector_state->best_encoder;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	struct nv50_outp_atom *outp;

	if (!(crtc = old_connector_state->crtc))
		return 0;

	old_crtc_state = drm_atomic_get_old_crtc_state(&atom->state, crtc);
	new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
	if (old_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		outp = nv50_disp_outp_atomic_add(atom, encoder);
		if (IS_ERR(outp))
			return PTR_ERR(outp);

		if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			outp->flush_disable = true;
			atom->flush_disable = true;
		}
		outp->clr.ctrl = true;
		atom->lock_core = true;
	}

	return 0;
}

static int
nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
				struct drm_connector_state *connector_state)
{
	struct drm_encoder *encoder = connector_state->best_encoder;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	struct nv50_outp_atom *outp;

	if (!(crtc = connector_state->crtc))
		return 0;

	new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
	if (new_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		outp = nv50_disp_outp_atomic_add(atom, encoder);
		if (IS_ERR(outp))
			return PTR_ERR(outp);

		outp->set.ctrl = true;
		atom->lock_core = true;
	}

	return 0;
}
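/* Driver atomic_check: pre-assign the fixed window->head mappings where
 * the hardware requires them, pull affected planes in when colour
 * management changes, run the core DRM checks, then record which output
 * paths need their ctrl state cleared and/or set around this commit.
 */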
static int
nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct nv50_atom *atom = nv50_atom(state);
	struct nv50_core *core = nv50_disp(dev)->core;
	struct drm_connector_state *old_connector_state, *new_connector_state;
	struct drm_connector *connector;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	struct nv50_head *head;
	struct nv50_head_atom *asyh;
	int ret, i;

	if (core->assign_windows && core->func->head->static_wndw_map) {
		drm_for_each_crtc(crtc, dev) {
			new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
			if (IS_ERR(new_crtc_state))
				return PTR_ERR(new_crtc_state);

			head = nv50_head(crtc);
			asyh = nv50_head_atom(new_crtc_state);
			core->func->head->static_wndw_map(head, asyh);
		}
	}

	/* We need to handle colour management on a per-plane basis. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->color_mgmt_changed) {
			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				return ret;
		}
	}

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		ret = nv50_disp_outp_atomic_check_clr(atom, old_connector_state);
		if (ret)
			return ret;

		ret = nv50_disp_outp_atomic_check_set(atom, new_connector_state);
		if (ret)
			return ret;
	}

	ret = drm_dp_mst_atomic_check(state);
	if (ret)
		return ret;

	nv50_crc_atomic_check_outp(atom);

	return 0;
}

static void
nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
{
	struct nv50_atom *atom = nv50_atom(state);
	struct nv50_outp_atom *outp, *outt;

	list_for_each_entry_safe(outp, outt, &atom->outp, head) {
		list_del(&outp->head);
		kfree(outp);
	}

	drm_atomic_state_default_clear(state);
}

static void
nv50_disp_atomic_state_free(struct drm_atomic_state *state)
{
	struct nv50_atom *atom = nv50_atom(state);
	drm_atomic_state_default_release(&atom->state);
	kfree(atom);
}

static struct drm_atomic_state *
nv50_disp_atomic_state_alloc(struct drm_device *dev)
{
	struct nv50_atom *atom;
	if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
	    drm_atomic_state_init(dev, &atom->state) < 0) {
		kfree(atom);
		return NULL;
	}
	INIT_LIST_HEAD(&atom->outp);
	return &atom->state;
}

static const struct drm_mode_config_funcs
nv50_disp_func = {
	.fb_create = nouveau_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = nv50_disp_atomic_check,
	.atomic_commit = nv50_disp_atomic_commit,
	.atomic_state_alloc = nv50_disp_atomic_state_alloc,
	.atomic_state_clear = nv50_disp_atomic_state_clear,
	.atomic_state_free = nv50_disp_atomic_state_free,
};

static const struct drm_mode_config_helper_funcs
nv50_disp_helper_func = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};

/******************************************************************************
 * Init
 *****************************************************************************/

static void
nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_encoder *encoder;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST)
			nv50_mstm_fini(nouveau_encoder(encoder));
	}

	if (!runtime)
		cancel_work_sync(&drm->hpd_work);
}

static int
nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
{
	struct nv50_core *core = nv50_disp(dev)->core;
	struct drm_encoder *encoder;

	if (resume || runtime)
		core->func->init(core);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			struct nouveau_encoder *nv_encoder =
				nouveau_encoder(encoder);
			nv50_mstm_init(nv_encoder, runtime);
		}
	}

	return 0;
}
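/* Tear down in reverse order of nv50_display_create(): audio component,
 * caps object, core channel, then the shared sync buffer.
 */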
static void
nv50_display_destroy(struct drm_device *dev)
{
	struct nv50_disp *disp = nv50_disp(dev);

	nv50_audio_component_fini(nouveau_drm(dev));

	nvif_object_unmap(&disp->caps);
	nvif_object_dtor(&disp->caps);
	nv50_core_del(&disp->core);

	nouveau_bo_unmap(disp->sync);
	if (disp->sync)
		nouveau_bo_unpin(disp->sync);
	nouveau_bo_ref(NULL, &disp->sync);

	nouveau_display(dev)->priv = NULL;
	kfree(disp);
}
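/* One-time display setup: allocate the shared notifier/semaphore buffer
 * and the core EVO/NVD channel, pick format modifiers and cursor limits
 * for the chipset, create a head per hardware CRTC (plus MST encoders
 * where supported), then instantiate encoders/connectors from the VBIOS
 * DCB table.
 */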
int
nv50_display_create(struct drm_device *dev)
{
	struct nvif_device *device = &nouveau_drm(dev)->client.device;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct dcb_table *dcb = &drm->vbios.dcb;
	struct drm_connector *connector, *tmp;
	struct nv50_disp *disp;
	struct dcb_output *dcbe;
	int crtcs, ret, i;
	bool has_mst = nv50_has_mst(drm);

	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
	if (!disp)
		return -ENOMEM;

	mutex_init(&disp->mutex);

	nouveau_display(dev)->priv = disp;
	nouveau_display(dev)->dtor = nv50_display_destroy;
	nouveau_display(dev)->init = nv50_display_init;
	nouveau_display(dev)->fini = nv50_display_fini;
	disp->disp = &nouveau_display(dev)->disp;
	dev->mode_config.funcs = &nv50_disp_func;
	dev->mode_config.helper_private = &nv50_disp_helper_func;
	dev->mode_config.quirk_addfb_prefer_xbgr_30bpp = true;
	dev->mode_config.normalize_zpos = true;

	/* small shared memory area we use for notifiers and semaphores */
	ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
			     NOUVEAU_GEM_DOMAIN_VRAM,
			     0, 0x0000, NULL, NULL, &disp->sync);
	if (!ret) {
		ret = nouveau_bo_pin(disp->sync, NOUVEAU_GEM_DOMAIN_VRAM, true);
		if (!ret) {
			ret = nouveau_bo_map(disp->sync);
			if (ret)
				nouveau_bo_unpin(disp->sync);
		}
		if (ret)
			nouveau_bo_ref(NULL, &disp->sync);
	}

	if (ret)
		goto out;

	/* allocate master evo channel */
	ret = nv50_core_new(drm, &disp->core);
	if (ret)
		goto out;

	disp->core->func->init(disp->core);
	if (disp->core->func->caps_init) {
		ret = disp->core->func->caps_init(drm, disp);
		if (ret)
			goto out;
	}

	/* Assign the correct format modifiers */
	if (disp->disp->object.oclass >= TU102_DISP)
		nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
	else
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
		nouveau_display(dev)->format_modifiers = disp90xx_modifiers;
	else
		nouveau_display(dev)->format_modifiers = disp50xx_modifiers;

	/* FIXME: 256x256 cursors are supported on Kepler, however unlike
	 * Maxwell and later generations Kepler requires that we use small
	 * pages (4K) for cursor scanout surfaces.  The proper fix for this is
	 * to teach nouveau to migrate fbs being used for the cursor plane to
	 * small page allocations in prepare_fb().  When this is implemented,
	 * we should also force large pages (128K) for ovly fbs in order to
	 * fix Kepler ovlys.
	 * But until then, just limit cursors to 128x128 - which is small
	 * enough to avoid ever using large pages.
	 */
	if (disp->disp->object.oclass >= GM107_DISP) {
		dev->mode_config.cursor_width = 256;
		dev->mode_config.cursor_height = 256;
	} else if (disp->disp->object.oclass >= GK104_DISP) {
		dev->mode_config.cursor_width = 128;
		dev->mode_config.cursor_height = 128;
	} else {
		dev->mode_config.cursor_width = 64;
		dev->mode_config.cursor_height = 64;
	}

	/* create crtc objects to represent the hw heads */
	if (disp->disp->object.oclass >= GV100_DISP)
		crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
	else
	if (disp->disp->object.oclass >= GF110_DISP)
		crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
	else
		crtcs = 0x3;

	for (i = 0; i < fls(crtcs); i++) {
		struct nv50_head *head;

		if (!(crtcs & (1 << i)))
			continue;

		head = nv50_head_create(dev, i);
		if (IS_ERR(head)) {
			ret = PTR_ERR(head);
			goto out;
		}

		if (has_mst) {
			head->msto = nv50_msto_new(dev, head, i);
			if (IS_ERR(head->msto)) {
				ret = PTR_ERR(head->msto);
				head->msto = NULL;
				goto out;
			}

			/*
			 * FIXME: This is a hack to work around the following
			 * issues:
			 *
			 * https://gitlab.gnome.org/GNOME/mutter/issues/759
			 * https://gitlab.freedesktop.org/xorg/xserver/merge_requests/277
			 *
			 * Once these issues are closed, this should be
			 * removed.
			 */
			head->msto->encoder.possible_crtcs = crtcs;
		}
	}

	/* create encoder/connector objects based on VBIOS DCB table */
	for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
		connector = nouveau_connector_create(dev, dcbe);
		if (IS_ERR(connector))
			continue;

		if (dcbe->location == DCB_LOC_ON_CHIP) {
			switch (dcbe->type) {
			case DCB_OUTPUT_TMDS:
			case DCB_OUTPUT_LVDS:
			case DCB_OUTPUT_DP:
				ret = nv50_sor_create(connector, dcbe);
				break;
			case DCB_OUTPUT_ANALOG:
				ret = nv50_dac_create(connector, dcbe);
				break;
			default:
				ret = -ENODEV;
				break;
			}
		} else {
			ret = nv50_pior_create(connector, dcbe);
		}

		if (ret) {
			NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
				dcbe->location, dcbe->type,
				ffs(dcbe->or) - 1, ret);
			ret = 0;
		}
	}

	/* cull any connectors we created that don't have an encoder */
	list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
		if (connector->possible_encoders)
			continue;

		NV_WARN(drm, "%s has no encoders, removing\n",
			connector->name);
		connector->funcs->destroy(connector);
	}

	/* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
	dev->vblank_disable_immediate = true;

	nv50_audio_component_init(drm);

out:
	if (ret)
		nv50_display_destroy(dev);
	return ret;
}

/******************************************************************************
 * Format modifiers
 *****************************************************************************/
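/* Each entry decodes as DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h)
 * per the legends below: e.g. (0, 1, 1, 0x7a, 4) is uncompressed (c=0),
 * sector layout 1, GOB height/page kind generation 1, page kind 0x7a,
 * with blocks 2^4 GOBs high (h=4).
 */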
/****************************************************************
 *            Log2(block height) ----------------------------+  *
 *            Page Kind ----------------------------------+  |  *
 *            Gob Height/Page Kind Generation ------+   |    |  *
 *                          Sector layout -------+  |   |    |  *
 *                          Compression ------+  |  |   |    |  */
const u64 disp50xx_modifiers[] = { /*         |  |  |   |    |  */
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 0),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 1),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 2),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 3),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 4),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 5),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 0),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 1),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 2),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 3),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 4),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 5),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 0),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 1),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 2),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 3),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 4),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 5),
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/****************************************************************
 *            Log2(block height) ----------------------------+  *
 *            Page Kind ----------------------------------+  |  *
 *            Gob Height/Page Kind Generation ------+   |    |  *
 *                          Sector layout -------+  |   |    |  *
 *                          Compression ------+  |  |   |    |  */
const u64 disp90xx_modifiers[] = { /*         |  |  |   |    |  */
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 0),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 1),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 2),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 3),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 4),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 5),
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
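/* The Turing+ (TU102_DISP and later) table referenced from
 * nv50_display_create(), wndwc57e_modifiers, is defined alongside the
 * C57E window code rather than here.
 */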