// SPDX-License-Identifier: MIT
#include <linux/string.h>
#include <drm/drm_crtc.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>

#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/timer.h>

#include "nouveau_drv.h"
#include "core.h"
#include "head.h"
#include "wndw.h"
#include "handles.h"
#include "crc.h"

static const char * const nv50_crc_sources[] = {
        [NV50_CRC_SOURCE_NONE] = "none",
        [NV50_CRC_SOURCE_AUTO] = "auto",
        [NV50_CRC_SOURCE_RG] = "rg",
        [NV50_CRC_SOURCE_OUTP_ACTIVE] = "outp-active",
        [NV50_CRC_SOURCE_OUTP_COMPLETE] = "outp-complete",
        [NV50_CRC_SOURCE_OUTP_INACTIVE] = "outp-inactive",
};

static int nv50_crc_parse_source(const char *buf, enum nv50_crc_source *s)
{
        int i;

        if (!buf) {
                *s = NV50_CRC_SOURCE_NONE;
                return 0;
        }

        i = match_string(nv50_crc_sources, ARRAY_SIZE(nv50_crc_sources), buf);
        if (i < 0)
                return i;

        *s = i;
        return 0;
}

/* DRM CRC hooks for validating and enumerating the CRC sources we expose */
int
nv50_crc_verify_source(struct drm_crtc *crtc, const char *source_name,
                       size_t *values_cnt)
{
        struct nouveau_drm *drm = nouveau_drm(crtc->dev);
        enum nv50_crc_source source;

        if (nv50_crc_parse_source(source_name, &source) < 0) {
                NV_DEBUG(drm, "unknown source %s\n", source_name);
                return -EINVAL;
        }

        *values_cnt = 1;
        return 0;
}

const char *const *nv50_crc_get_sources(struct drm_crtc *crtc, size_t *count)
{
        *count = ARRAY_SIZE(nv50_crc_sources);
        return nv50_crc_sources;
}

static void
nv50_crc_program_ctx(struct nv50_head *head,
                     struct nv50_crc_notifier_ctx *ctx)
{
        struct nv50_disp *disp = nv50_disp(head->base.base.dev);
        struct nv50_core *core = disp->core;
        u32 interlock[NV50_DISP_INTERLOCK__SIZE] = { 0 };

        core->func->crc->set_ctx(head, ctx);
        core->func->update(core, interlock, false);
}

static void nv50_crc_ctx_flip_work(struct kthread_work *base)
{
        struct drm_vblank_work *work = to_drm_vblank_work(base);
        struct nv50_crc *crc = container_of(work, struct nv50_crc, flip_work);
        struct nv50_head *head = container_of(crc, struct nv50_head, crc);
        struct drm_crtc *crtc = &head->base.base;
        struct nv50_disp *disp = nv50_disp(crtc->dev);
        u8 new_idx = crc->ctx_idx ^ 1;

        /*
         * We don't want to accidentally wait for longer than the vblank, so
         * try again on the next vblank if we can't grab the lock.
         */
        if (!mutex_trylock(&disp->mutex)) {
                DRM_DEV_DEBUG_KMS(crtc->dev->dev,
                                  "Lock contended, delaying CRC ctx flip for head-%d\n",
                                  head->base.index);
                drm_vblank_work_schedule(work,
                                         drm_crtc_vblank_count(crtc) + 1,
                                         true);
                return;
        }

        DRM_DEV_DEBUG_KMS(crtc->dev->dev,
                          "Flipping notifier ctx for head %d (%d -> %d)\n",
                          drm_crtc_index(crtc), crc->ctx_idx, new_idx);

        nv50_crc_program_ctx(head, NULL);
        nv50_crc_program_ctx(head, &crc->ctx[new_idx]);
        mutex_unlock(&disp->mutex);

        spin_lock_irq(&crc->lock);
        crc->ctx_changed = true;
        spin_unlock_irq(&crc->lock);
}

static inline void nv50_crc_reset_ctx(struct nv50_crc_notifier_ctx *ctx)
{
        memset_io(ctx->mem.object.map.ptr, 0, ctx->mem.object.map.size);
}
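
/* Drain any newly written CRC entries from the current notifier context and
 * report them to userspace, advancing the frame counter for each one. We
 * stop at the first empty entry, since the hardware fills them in order.
 */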
static void
nv50_crc_get_entries(struct nv50_head *head,
                     const struct nv50_crc_func *func,
                     enum nv50_crc_source source)
{
        struct drm_crtc *crtc = &head->base.base;
        struct nv50_crc *crc = &head->crc;
        u32 output_crc;

        while (crc->entry_idx < func->num_entries) {
                /*
                 * While Nvidia's documentation says CRCs are written on each
                 * subsequent vblank after being enabled, in practice they
                 * aren't written immediately.
                 */
                output_crc = func->get_entry(head, &crc->ctx[crc->ctx_idx],
                                             source, crc->entry_idx);
                if (!output_crc)
                        return;

                drm_crtc_add_crc_entry(crtc, true, crc->frame, &output_crc);
                crc->frame++;
                crc->entry_idx++;
        }
}
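
/*
 * Handle a vblank on this head: pull completed CRC entries out of the
 * current notifier context, and if a context flip has finished, switch over
 * to the new context and schedule the next flip so it lands before the
 * notifier runs out of entries.
 */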
void nv50_crc_handle_vblank(struct nv50_head *head)
{
        struct drm_crtc *crtc = &head->base.base;
        struct nv50_crc *crc = &head->crc;
        const struct nv50_crc_func *func =
                nv50_disp(head->base.base.dev)->core->func->crc;
        struct nv50_crc_notifier_ctx *ctx;
        bool need_reschedule = false;

        if (!func)
                return;

        /*
         * We don't lose events if we aren't able to report CRCs until the
         * next vblank, so only report CRCs if the locks we need aren't
         * contended, to prevent missing an actual vblank event.
         */
        if (!spin_trylock(&crc->lock))
                return;

        if (!crc->src)
                goto out;

        ctx = &crc->ctx[crc->ctx_idx];
        if (crc->ctx_changed && func->ctx_finished(head, ctx)) {
                nv50_crc_get_entries(head, func, crc->src);

                crc->ctx_idx ^= 1;
                crc->entry_idx = 0;
                crc->ctx_changed = false;

                /*
                 * Unfortunately, when notifier contexts are changed during
                 * CRC capture, we will inevitably lose the CRC entry for the
                 * frame where the hardware actually latched onto the first
                 * UPDATE. According to Nvidia's hardware engineers, there's
                 * no workaround for this.
                 *
                 * Now, we could try to be smart here and calculate the number
                 * of missed CRCs based on audit timestamps, but those were
                 * removed starting with Volta. Since we always flush our
                 * updates back-to-back without waiting, we'll just be
                 * optimistic and assume we always miss exactly one frame.
                 */
                DRM_DEV_DEBUG_KMS(head->base.base.dev->dev,
                                  "Notifier ctx flip for head-%d finished, lost CRC for frame %llu\n",
                                  head->base.index, crc->frame);
                crc->frame++;

                nv50_crc_reset_ctx(ctx);
                need_reschedule = true;
        }

        nv50_crc_get_entries(head, func, crc->src);

        if (need_reschedule)
                drm_vblank_work_schedule(&crc->flip_work,
                                         drm_crtc_vblank_count(crtc)
                                         + crc->flip_threshold
                                         - crc->entry_idx,
                                         true);

out:
        spin_unlock(&crc->lock);
}

static void nv50_crc_wait_ctx_finished(struct nv50_head *head,
                                       const struct nv50_crc_func *func,
                                       struct nv50_crc_notifier_ctx *ctx)
{
        struct drm_device *dev = head->base.base.dev;
        struct nouveau_drm *drm = nouveau_drm(dev);
        s64 ret;

        ret = nvif_msec(&drm->client.device, 50,
                        if (func->ctx_finished(head, ctx)) break;);
        if (ret == -ETIMEDOUT)
                NV_ERROR(drm,
                         "CRC notifier ctx for head %d not finished after 50ms\n",
                         head->base.index);
        else if (ret)
                NV_ATOMIC(drm,
                          "CRC notifier ctx for head-%d finished after %lldns\n",
                          head->base.index, ret);
}

void nv50_crc_atomic_stop_reporting(struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        int i;

        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                struct nv50_head *head = nv50_head(crtc);
                struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
                struct nv50_crc *crc = &head->crc;

                if (!asyh->clr.crc)
                        continue;

                spin_lock_irq(&crc->lock);
                crc->src = NV50_CRC_SOURCE_NONE;
                spin_unlock_irq(&crc->lock);

                drm_crtc_vblank_put(crtc);
                drm_vblank_work_cancel_sync(&crc->flip_work);

                NV_ATOMIC(nouveau_drm(crtc->dev),
                          "CRC reporting on vblank for head-%d disabled\n",
                          head->base.index);

                /* CRC generation is still enabled in hw; we'll just report
                 * any remaining CRC entries ourselves after it gets disabled
                 * in hardware.
                 */
        }
}

void nv50_crc_atomic_init_notifier_contexts(struct drm_atomic_state *state)
{
        struct drm_crtc_state *new_crtc_state;
        struct drm_crtc *crtc;
        int i;

        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                struct nv50_head *head = nv50_head(crtc);
                struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
                struct nv50_crc *crc = &head->crc;
                int i;

                if (!asyh->set.crc)
                        continue;

                crc->entry_idx = 0;
                crc->ctx_changed = false;
                for (i = 0; i < ARRAY_SIZE(crc->ctx); i++)
                        nv50_crc_reset_ctx(&crc->ctx[i]);
        }
}

void nv50_crc_atomic_release_notifier_contexts(struct drm_atomic_state *state)
{
        const struct nv50_crc_func *func =
                nv50_disp(state->dev)->core->func->crc;
        struct drm_crtc_state *new_crtc_state;
        struct drm_crtc *crtc;
        int i;

        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                struct nv50_head *head = nv50_head(crtc);
                struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
                struct nv50_crc *crc = &head->crc;
                struct nv50_crc_notifier_ctx *ctx = &crc->ctx[crc->ctx_idx];

                if (!asyh->clr.crc)
                        continue;

                if (crc->ctx_changed) {
                        nv50_crc_wait_ctx_finished(head, func, ctx);
                        ctx = &crc->ctx[crc->ctx_idx ^ 1];
                }
                nv50_crc_wait_ctx_finished(head, func, ctx);
        }
}
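
/*
 * Enable vblank CRC reporting for any heads that just had CRC capture
 * enabled in this atomic commit: seed the frame counter from the current
 * vblank count and schedule the first notifier-context flip.
 */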
void nv50_crc_atomic_start_reporting(struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        int i;

        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                struct nv50_head *head = nv50_head(crtc);
                struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
                struct nv50_crc *crc = &head->crc;
                u64 vbl_count;

                if (!asyh->set.crc)
                        continue;

                drm_crtc_vblank_get(crtc);

                spin_lock_irq(&crc->lock);
                vbl_count = drm_crtc_vblank_count(crtc);
                crc->frame = vbl_count;
                crc->src = asyh->crc.src;
                drm_vblank_work_schedule(&crc->flip_work,
                                         vbl_count + crc->flip_threshold,
                                         true);
                spin_unlock_irq(&crc->lock);

                NV_ATOMIC(nouveau_drm(crtc->dev),
                          "CRC reporting on vblank for head-%d enabled\n",
                          head->base.index);
        }
}
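
/*
 * Atomic check for a single head: decide whether this commit needs to
 * enable, disable, or reprogram CRC capture, and on Volta+ pick a wndw
 * channel owned by this head to drive the capture.
 */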
int nv50_crc_atomic_check_head(struct nv50_head *head,
                               struct nv50_head_atom *asyh,
                               struct nv50_head_atom *armh)
{
        struct nv50_atom *atom = nv50_atom(asyh->state.state);
        struct drm_device *dev = head->base.base.dev;
        struct nv50_disp *disp = nv50_disp(dev);
        bool changed = armh->crc.src != asyh->crc.src;

        if (!armh->crc.src && !asyh->crc.src) {
                asyh->set.crc = false;
                asyh->clr.crc = false;
                return 0;
        }

        /* While we don't care about entry tags, Volta+ hw always needs the
         * controlling wndw channel programmed to a wndw that's owned by our
         * head.
         */
        if (asyh->crc.src && disp->disp->object.oclass >= GV100_DISP &&
            !(BIT(asyh->crc.wndw) & asyh->wndw.owned)) {
                if (!asyh->wndw.owned) {
                        /* TODO: once we support flexible channel ownership,
                         * we should write some code here to handle attempting
                         * to "steal" a plane: e.g. take a plane that is
                         * currently not-visible and owned by another head,
                         * and reassign it to this head. If we fail to do so,
                         * we should reject the mode outright, as CRC capture
                         * then becomes impossible.
                         */
                        NV_ATOMIC(nouveau_drm(dev),
                                  "No available wndws for CRC readback\n");
                        return -EINVAL;
                }
                asyh->crc.wndw = ffs(asyh->wndw.owned) - 1;
        }

        if (drm_atomic_crtc_needs_modeset(&asyh->state) || changed ||
            armh->crc.wndw != asyh->crc.wndw) {
                asyh->clr.crc = armh->crc.src && armh->state.active;
                asyh->set.crc = asyh->crc.src && asyh->state.active;
                if (changed)
                        asyh->set.or |= armh->or.crc_raster !=
                                        asyh->or.crc_raster;

                if (asyh->clr.crc && asyh->set.crc)
                        atom->flush_disable = true;
        } else {
                asyh->set.crc = false;
                asyh->clr.crc = false;
        }

        return 0;
}

void nv50_crc_atomic_check_outp(struct nv50_atom *atom)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        int i;

        if (atom->flush_disable)
                return;

        for_each_oldnew_crtc_in_state(&atom->state, crtc, old_crtc_state,
                                      new_crtc_state, i) {
                struct nv50_head_atom *armh = nv50_head_atom(old_crtc_state);
                struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
                struct nv50_outp_atom *outp_atom;
                struct nouveau_encoder *outp =
                        nv50_real_outp(nv50_head_atom_get_encoder(armh));
                struct drm_encoder *encoder = &outp->base.base;

                if (!asyh->clr.crc)
                        continue;

                /*
                 * Re-programming ORs can't be done in the same flush as
                 * disabling CRCs.
                 */
                list_for_each_entry(outp_atom, &atom->outp, head) {
                        if (outp_atom->encoder == encoder) {
                                if (outp_atom->set.mask) {
                                        atom->flush_disable = true;
                                        return;
                                } else {
                                        break;
                                }
                        }
                }
        }
}

static enum nv50_crc_source_type
nv50_crc_source_type(struct nouveau_encoder *outp,
                     enum nv50_crc_source source)
{
        struct dcb_output *dcbe = outp->dcb;

        switch (source) {
        case NV50_CRC_SOURCE_NONE: return NV50_CRC_SOURCE_TYPE_NONE;
        case NV50_CRC_SOURCE_RG:   return NV50_CRC_SOURCE_TYPE_RG;
        default:                   break;
        }

        if (dcbe->location != DCB_LOC_ON_CHIP)
                return NV50_CRC_SOURCE_TYPE_PIOR;

        switch (dcbe->type) {
        case DCB_OUTPUT_DP:     return NV50_CRC_SOURCE_TYPE_SF;
        case DCB_OUTPUT_ANALOG: return NV50_CRC_SOURCE_TYPE_DAC;
        default:                return NV50_CRC_SOURCE_TYPE_SOR;
        }
}

void nv50_crc_atomic_set(struct nv50_head *head,
                         struct nv50_head_atom *asyh)
{
        struct drm_crtc *crtc = &head->base.base;
        struct drm_device *dev = crtc->dev;
        struct nv50_crc *crc = &head->crc;
        const struct nv50_crc_func *func = nv50_disp(dev)->core->func->crc;
        struct nouveau_encoder *outp =
                nv50_real_outp(nv50_head_atom_get_encoder(asyh));

        func->set_src(head, outp->or,
                      nv50_crc_source_type(outp, asyh->crc.src),
                      &crc->ctx[crc->ctx_idx], asyh->crc.wndw);
}

void nv50_crc_atomic_clr(struct nv50_head *head)
{
        const struct nv50_crc_func *func =
                nv50_disp(head->base.base.dev)->core->func->crc;

        func->set_src(head, 0, NV50_CRC_SOURCE_TYPE_NONE, NULL, 0);
}
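
/*
 * Raster region the hardware generates CRCs over: just the active raster,
 * the complete raster (including blanking), or only the inactive (blanking)
 * region.
 */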
#define NV50_CRC_RASTER_ACTIVE          0
#define NV50_CRC_RASTER_COMPLETE        1
#define NV50_CRC_RASTER_INACTIVE        2

static inline int
nv50_crc_raster_type(enum nv50_crc_source source)
{
        switch (source) {
        case NV50_CRC_SOURCE_NONE:
        case NV50_CRC_SOURCE_AUTO:
        case NV50_CRC_SOURCE_RG:
        case NV50_CRC_SOURCE_OUTP_ACTIVE:
                return NV50_CRC_RASTER_ACTIVE;
        case NV50_CRC_SOURCE_OUTP_COMPLETE:
                return NV50_CRC_RASTER_COMPLETE;
        case NV50_CRC_SOURCE_OUTP_INACTIVE:
                return NV50_CRC_RASTER_INACTIVE;
        }

        return 0;
}

/* We handle mapping the memory for CRC notifiers ourselves, since each
 * notifier needs its own handle.
 */
static inline int
nv50_crc_ctx_init(struct nv50_head *head, struct nvif_mmu *mmu,
                  struct nv50_crc_notifier_ctx *ctx, size_t len, int idx)
{
        struct nv50_core *core = nv50_disp(head->base.base.dev)->core;
        int ret;

        ret = nvif_mem_init_map(mmu, NVIF_MEM_VRAM, len, &ctx->mem);
        if (ret)
                return ret;

        ret = nvif_object_init(&core->chan.base.user,
                               NV50_DISP_HANDLE_CRC_CTX(head, idx),
                               NV_DMA_IN_MEMORY,
                               &(struct nv_dma_v0) {
                                        .target = NV_DMA_V0_TARGET_VRAM,
                                        .access = NV_DMA_V0_ACCESS_RDWR,
                                        .start = ctx->mem.addr,
                                        .limit = ctx->mem.addr
                                                 + ctx->mem.size - 1,
                               }, sizeof(struct nv_dma_v0),
                               &ctx->ntfy);
        if (ret)
                goto fail_fini;

        return 0;

fail_fini:
        nvif_mem_fini(&ctx->mem);
        return ret;
}

static inline void
nv50_crc_ctx_fini(struct nv50_crc_notifier_ctx *ctx)
{
        nvif_object_fini(&ctx->ntfy);
        nvif_mem_fini(&ctx->mem);
}

/* Called by the DRM CRC core (e.g. on writes to the crtc's crc/control file
 * in debugfs) to switch the CRC source for this head, using a full atomic
 * commit to reprogram the hardware.
 */
int nv50_crc_set_source(struct drm_crtc *crtc, const char *source_str)
{
        struct drm_device *dev = crtc->dev;
        struct drm_atomic_state *state;
        struct drm_modeset_acquire_ctx ctx;
        struct nv50_head *head = nv50_head(crtc);
        struct nv50_crc *crc = &head->crc;
        const struct nv50_crc_func *func = nv50_disp(dev)->core->func->crc;
        struct nvif_mmu *mmu = &nouveau_drm(dev)->client.mmu;
        struct nv50_head_atom *asyh;
        struct drm_crtc_state *crtc_state;
        enum nv50_crc_source source;
        int ret = 0, ctx_flags = 0, i;

        ret = nv50_crc_parse_source(source_str, &source);
        if (ret)
                return ret;

        /*
         * We don't want the user to accidentally interrupt us while we're
         * disabling CRCs, so only acquire the modeset locks interruptibly
         * when enabling them.
         */
        if (source)
                ctx_flags |= DRM_MODESET_ACQUIRE_INTERRUPTIBLE;
        drm_modeset_acquire_init(&ctx, ctx_flags);

        state = drm_atomic_state_alloc(dev);
        if (!state) {
                ret = -ENOMEM;
                goto out_acquire_fini;
        }
        state->acquire_ctx = &ctx;

        if (source) {
                for (i = 0; i < ARRAY_SIZE(head->crc.ctx); i++) {
                        ret = nv50_crc_ctx_init(head, mmu, &crc->ctx[i],
                                                func->notifier_len, i);
                        if (ret)
                                goto out_ctx_fini;
                }
        }

retry:
        crtc_state = drm_atomic_get_crtc_state(state, &head->base.base);
        if (IS_ERR(crtc_state)) {
                ret = PTR_ERR(crtc_state);
                if (ret == -EDEADLK)
                        goto deadlock;
                else if (ret)
                        goto out_drop_locks;
        }
        asyh = nv50_head_atom(crtc_state);
        asyh->crc.src = source;
        asyh->or.crc_raster = nv50_crc_raster_type(source);

        ret = drm_atomic_commit(state);
        if (ret == -EDEADLK)
                goto deadlock;
        else if (ret)
                goto out_drop_locks;

        if (!source) {
                /*
                 * If the user specified a custom flip threshold through
                 * debugfs, reset it.
                 */
                crc->flip_threshold = func->flip_threshold;
        }

out_drop_locks:
        drm_modeset_drop_locks(&ctx);
out_ctx_fini:
        if (!source || ret) {
                for (i = 0; i < ARRAY_SIZE(crc->ctx); i++)
                        nv50_crc_ctx_fini(&crc->ctx[i]);
        }
        drm_atomic_state_put(state);
out_acquire_fini:
        drm_modeset_acquire_fini(&ctx);
        return ret;

deadlock:
        drm_atomic_state_clear(state);
        drm_modeset_backoff(&ctx);
        goto retry;
}
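
/*
 * debugfs interface for tuning the notifier-context flip threshold before a
 * capture. Assuming the usual DRM debugfs layout, this is exposed as
 * dri/<minor>/crtc-<N>/nv_crc/flip_threshold; writing -1 restores the
 * hardware default, and writes are rejected while a capture is running.
 */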
static int
nv50_crc_debugfs_flip_threshold_get(struct seq_file *m, void *data)
{
        struct nv50_head *head = m->private;
        struct drm_crtc *crtc = &head->base.base;
        struct nv50_crc *crc = &head->crc;
        int ret;

        ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
        if (ret)
                return ret;

        seq_printf(m, "%d\n", crc->flip_threshold);

        drm_modeset_unlock(&crtc->mutex);
        return ret;
}

static int
nv50_crc_debugfs_flip_threshold_open(struct inode *inode, struct file *file)
{
        return single_open(file, nv50_crc_debugfs_flip_threshold_get,
                           inode->i_private);
}

static ssize_t
nv50_crc_debugfs_flip_threshold_set(struct file *file,
                                    const char __user *ubuf, size_t len,
                                    loff_t *offp)
{
        struct seq_file *m = file->private_data;
        struct nv50_head *head = m->private;
        struct nv50_head_atom *armh;
        struct drm_crtc *crtc = &head->base.base;
        struct nouveau_drm *drm = nouveau_drm(crtc->dev);
        struct nv50_crc *crc = &head->crc;
        const struct nv50_crc_func *func =
                nv50_disp(crtc->dev)->core->func->crc;
        int value, ret;

        ret = kstrtoint_from_user(ubuf, len, 10, &value);
        if (ret)
                return ret;

        if (value > func->flip_threshold)
                return -EINVAL;
        else if (value == -1)
                value = func->flip_threshold;
        else if (value < -1)
                return -EINVAL;

        ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
        if (ret)
                return ret;

        armh = nv50_head_atom(crtc->state);
        if (armh->crc.src) {
                ret = -EBUSY;
                goto out;
        }

        NV_DEBUG(drm,
                 "Changing CRC flip threshold for next capture on head-%d to %d\n",
                 head->base.index, value);
        crc->flip_threshold = value;
        ret = len;

out:
        drm_modeset_unlock(&crtc->mutex);
        return ret;
}

static const struct file_operations nv50_crc_flip_threshold_fops = {
        .owner = THIS_MODULE,
        .open = nv50_crc_debugfs_flip_threshold_open,
        .read = seq_read,
        .write = nv50_crc_debugfs_flip_threshold_set,
};

int nv50_head_crc_late_register(struct nv50_head *head)
{
        struct drm_crtc *crtc = &head->base.base;
        const struct nv50_crc_func *func =
                nv50_disp(crtc->dev)->core->func->crc;
        struct dentry *root;

        if (!func || !crtc->debugfs_entry)
                return 0;

        root = debugfs_create_dir("nv_crc", crtc->debugfs_entry);
        debugfs_create_file("flip_threshold", 0644, root, head,
                            &nv50_crc_flip_threshold_fops);

        return 0;
}

static inline void
nv50_crc_init_head(struct nv50_disp *disp, const struct nv50_crc_func *func,
                   struct nv50_head *head)
{
        struct nv50_crc *crc = &head->crc;

        crc->flip_threshold = func->flip_threshold;
        spin_lock_init(&crc->lock);
        drm_vblank_work_init(&crc->flip_work, &head->base.base,
                             nv50_crc_ctx_flip_work);
}

void nv50_crc_init(struct drm_device *dev)
{
        struct nv50_disp *disp = nv50_disp(dev);
        struct drm_crtc *crtc;
        const struct nv50_crc_func *func = disp->core->func->crc;

        if (!func)
                return;

        drm_for_each_crtc(crtc, dev)
                nv50_crc_init_head(disp, func, nv50_head(crtc));
}