// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/bits.h>

#include <drm/drm_atomic.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
#include <drm/drm_self_refresh_helper.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60

#define CONVERT_S3_15(val) \
	(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))

static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;

	if (!crtc)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}

static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name)
{
	if (!src_name ||
	    !strcmp(src_name, "none"))
		return DPU_CRTC_CRC_SOURCE_NONE;
	if (!strcmp(src_name, "auto") ||
	    !strcmp(src_name, "lm"))
		return DPU_CRTC_CRC_SOURCE_LAYER_MIXER;
	if (!strcmp(src_name, "encoder"))
		return DPU_CRTC_CRC_SOURCE_ENCODER;

	return DPU_CRTC_CRC_SOURCE_INVALID;
}

static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc,
		const char *src_name, size_t *values_cnt)
{
	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
	struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);

	if (source < 0) {
		DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index);
		return -EINVAL;
	}

	if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) {
		*values_cnt = crtc_state->num_mixers;
	} else if (source == DPU_CRTC_CRC_SOURCE_ENCODER) {
		struct drm_encoder *drm_enc;

		*values_cnt = 0;

		drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
			*values_cnt += dpu_encoder_get_crc_values_cnt(drm_enc);
	}

	return 0;
}

static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state)
{
	struct dpu_crtc_mixer *m;
	int i;

	for (i = 0; i < crtc_state->num_mixers; ++i) {
		m = &crtc_state->mixers[i];

		if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
			continue;

		/* Calculate MISR over 1 frame */
		m->hw_lm->ops.setup_misr(m->hw_lm, true, 1);
	}
}
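/* arm MISR collection on every encoder attached to this crtc's state */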
static void dpu_crtc_setup_encoder_misr(struct drm_crtc *crtc)
{
	struct drm_encoder *drm_enc;

	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_setup_misr(drm_enc);
}

static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
	enum dpu_crtc_crc_source current_source;
	struct dpu_crtc_state *crtc_state;
	struct drm_device *drm_dev = crtc->dev;

	bool was_enabled;
	bool enable = false;
	int ret = 0;

	if (source < 0) {
		DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index);
		return -EINVAL;
	}

	ret = drm_modeset_lock(&crtc->mutex, NULL);
	if (ret)
		return ret;

	enable = (source != DPU_CRTC_CRC_SOURCE_NONE);
	crtc_state = to_dpu_crtc_state(crtc->state);

	spin_lock_irq(&drm_dev->event_lock);
	current_source = crtc_state->crc_source;
	spin_unlock_irq(&drm_dev->event_lock);

	was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE);

	if (!was_enabled && enable) {
		ret = drm_crtc_vblank_get(crtc);
		if (ret)
			goto cleanup;
	} else if (was_enabled && !enable) {
		drm_crtc_vblank_put(crtc);
	}

	spin_lock_irq(&drm_dev->event_lock);
	crtc_state->crc_source = source;
	spin_unlock_irq(&drm_dev->event_lock);

	crtc_state->crc_frame_skip_count = 0;

	if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
		dpu_crtc_setup_lm_misr(crtc_state);
	else if (source == DPU_CRTC_CRC_SOURCE_ENCODER)
		dpu_crtc_setup_encoder_misr(crtc);
	else
		ret = -EINVAL;

cleanup:
	drm_modeset_unlock(&crtc->mutex);

	return ret;
}

static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder = get_encoder_from_crtc(crtc);

	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
		return 0;
	}

	return dpu_encoder_get_vsync_count(encoder);
}

static int dpu_crtc_get_lm_crc(struct drm_crtc *crtc,
		struct dpu_crtc_state *crtc_state)
{
	struct dpu_crtc_mixer *m;
	u32 crcs[CRTC_DUAL_MIXERS];

	int rc = 0;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers));

	for (i = 0; i < crtc_state->num_mixers; ++i) {

		m = &crtc_state->mixers[i];

		if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
			continue;

		rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]);

		if (rc) {
			if (rc != -ENODATA)
				DRM_DEBUG_DRIVER("MISR read failed\n");
			return rc;
		}
	}

	return drm_crtc_add_crc_entry(crtc, true,
			drm_crtc_accurate_vblank_count(crtc), crcs);
}

static int dpu_crtc_get_encoder_crc(struct drm_crtc *crtc)
{
	struct drm_encoder *drm_enc;
	int rc, pos = 0;
	u32 crcs[INTF_MAX];

	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) {
		rc = dpu_encoder_get_crc(drm_enc, crcs, pos);
		if (rc < 0) {
			if (rc != -ENODATA)
				DRM_DEBUG_DRIVER("MISR read failed\n");

			return rc;
		}

		pos += rc;
	}

	return drm_crtc_add_crc_entry(crtc, true,
			drm_crtc_accurate_vblank_count(crtc), crcs);
}
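/*
 * Collect CRC values from the configured source (layer mixer or encoder
 * MISRs), skipping the first couple of frames after the source is enabled.
 */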
static int dpu_crtc_get_crc(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);

	/* Skip first 2 frames in case of "uncooked" CRCs */
	if (crtc_state->crc_frame_skip_count < 2) {
		crtc_state->crc_frame_skip_count++;
		return 0;
	}

	if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
		return dpu_crtc_get_lm_crc(crtc, crtc_state);
	else if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_ENCODER)
		return dpu_crtc_get_encoder_crc(crtc);

	return -EINVAL;
}

static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
					  bool in_vblank_irq,
					  int *vpos, int *hpos,
					  ktime_t *stime, ktime_t *etime,
					  const struct drm_display_mode *mode)
{
	unsigned int pipe = crtc->index;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return false;
	}

	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
	 * the end of VFP. Translate the porch values relative to the line
	 * counter positions.
	 */

	vactive_start = vsw + vbp + 1;
	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;

	if (stime)
		*stime = ktime_get();

	line = dpu_encoder_get_linecount(encoder);

	if (line < vactive_start)
		line -= vactive_start;
	else if (line > vactive_end)
		line = line - vfp_end - vactive_start;
	else
		line -= vactive_start;

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}

static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	uint32_t fg_alpha, bg_alpha;

	fg_alpha = pstate->base.alpha >> 8;
	bg_alpha = 0xff - fg_alpha;

	/* default to opaque blending */
	if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
	    !format->alpha_enable) {
		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_BG_CONST;
	} else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_FG_PIXEL;
		if (fg_alpha != 0xff) {
			bg_alpha = fg_alpha;
			blend_op |= DPU_BLEND_BG_MOD_ALPHA |
				DPU_BLEND_BG_INV_MOD_ALPHA;
		} else {
			blend_op |= DPU_BLEND_BG_INV_ALPHA;
		}
	} else {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL;
		if (fg_alpha != 0xff) {
			bg_alpha = fg_alpha;
			blend_op |= DPU_BLEND_FG_MOD_ALPHA |
				DPU_BLEND_FG_INV_MOD_ALPHA |
				DPU_BLEND_BG_MOD_ALPHA |
				DPU_BLEND_BG_INV_MOD_ALPHA;
		} else {
			blend_op |= DPU_BLEND_BG_INV_ALPHA;
		}
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				fg_alpha, bg_alpha, blend_op);

	DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
			 &format->base.pixel_format, format->alpha_enable, blend_op);
}
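/*
 * Program each layer mixer's output rectangle from the per-mixer bounds
 * computed in _dpu_crtc_setup_lm_bounds().
 */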
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc,
				       struct drm_plane *plane,
				       struct dpu_crtc_mixer *mixer,
				       u32 num_mixers,
				       enum dpu_stage stage,
				       struct dpu_format *format,
				       uint64_t modifier,
				       struct dpu_sw_pipe *pipe,
				       unsigned int stage_idx,
				       struct dpu_hw_stage_cfg *stage_cfg)
{
	uint32_t lm_idx;
	enum dpu_sspp sspp_idx;
	struct drm_plane_state *state;

	sspp_idx = pipe->sspp->idx;

	state = plane->state;

	trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
				   state, to_dpu_plane_state(state), stage_idx,
				   format->base.pixel_format,
				   modifier);

	DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
			 crtc->base.id,
			 stage,
			 plane->base.id,
			 sspp_idx - SSPP_NONE,
			 state->fb ? state->fb->base.id : -1);

	stage_cfg->stage[stage][stage_idx] = sspp_idx;
	stage_cfg->multirect_index[stage][stage_idx] = pipe->multirect_index;

	/* blend config update */
	for (lm_idx = 0; lm_idx < num_mixers; lm_idx++)
		mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl, sspp_idx);
}
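/*
 * Walk the planes attached to the crtc, add each visible plane's pipe(s)
 * to the stage configuration and update per-mixer blend and flush state.
 */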
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer,
	struct dpu_hw_stage_cfg *stage_cfg)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;

	uint32_t lm_idx;
	bool bg_alpha_enable = false;
	DECLARE_BITMAP(fetch_active, SSPP_MAX);

	memset(fetch_active, 0, sizeof(fetch_active));
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		if (!state->visible)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		set_bit(pstate->pipe.sspp->idx, fetch_active);
		_dpu_crtc_blend_setup_pipe(crtc, plane,
					   mixer, cstate->num_mixers,
					   pstate->stage,
					   format, fb ? fb->modifier : 0,
					   &pstate->pipe, 0, stage_cfg);

		if (pstate->r_pipe.sspp) {
			set_bit(pstate->r_pipe.sspp->idx, fetch_active);
			_dpu_crtc_blend_setup_pipe(crtc, plane,
						   mixer, cstate->num_mixers,
						   pstate->stage,
						   format, fb ? fb->modifier : 0,
						   &pstate->r_pipe, 1, stage_cfg);
		}

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate, format);

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	if (ctl->ops.set_active_pipes)
		ctl->ops.set_active_pipes(ctl, fetch_active);

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	struct dpu_hw_stage_cfg stage_cfg;
	int i;

	DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		mixer[i].mixer_op_mode = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		/* stage config flush mask */
		ctl->ops.update_pending_flush_mixer(ctl,
				mixer[i].hw_lm->idx);

		DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n",
				 mixer[i].hw_lm->idx - LM_0,
				 mixer[i].mixer_op_mode,
				 ctl->idx - CTL_0);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
					  &stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * @crtc: Pointer to drm crtc structure
 *
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events requested through
 * DRM_IOCTL_MODE_PAGE_FLIP that are cached in dpu_crtc->event.
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how the atomic/modeset locks work (<- and this is
	 * probably not possible)). So we'll keep the WARN_ON here for now, but
	 * really we need to figure out a better way to track our operating mode.
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;

	dpu_crtc_get_crc(crtc);

	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			 ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
 * registers this API to encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. Encoder may call different events
 * from different context - IRQ, user thread, commit_thread, etc. Each event
 * should be carefully reviewed and should be processed in proper task context
 * to avoid scheduling delay or to properly manage the irq context's bottom
 * half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
	dpu_core_perf_crtc_update(crtc, 0, false);
	_dpu_crtc_complete_flip(crtc);
}

static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];

		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}
}

static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
		struct dpu_hw_pcc_cfg *cfg)
{
	struct drm_color_ctm *ctm;

	memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));

	ctm = (struct drm_color_ctm *)state->ctm->data;

	if (!ctm)
		return;

	cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
	cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
	cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);

	cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
	cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
	cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);

	cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
	cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
	cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
}

static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state = crtc->state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_pcc_cfg cfg;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_dspp *dspp;
	int i;

	if (!state->color_mgmt_changed && !drm_atomic_crtc_needs_modeset(state))
		return;

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		dspp = mixer[i].hw_dspp;

		if (!dspp || !dspp->ops.setup_pcc)
			continue;

		if (!state->ctm) {
			dspp->ops.setup_pcc(dspp, NULL);
		} else {
			_dpu_crtc_get_pcc_coeff(state, &cfg);
			dspp->ops.setup_pcc(dspp, &cfg);
		}

		/* stage config flush mask */
		ctl->ops.update_pending_flush_dspp(ctl,
				mixer[i].hw_dspp->idx, DPU_DSPP_PCC);
	}
}
static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	_dpu_crtc_setup_cp_blocks(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}
static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	WARN_ON(dpu_crtc->event);
	spin_lock_irqsave(&dev->event_lock, flags);
	dpu_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 * required writes/flushing before crtc's "flush everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DRM_DEBUG_ATOMIC("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask) {
		if (!dpu_encoder_is_valid_for_commit(encoder)) {
			DRM_DEBUG_ATOMIC("invalid FB not kicking off crtc\n");
			goto end;
		}
	}

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder);

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
	} else
		DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder);

	reinit_completion(&dpu_crtc->frame_done_comp);

end:
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	if (cstate)
		__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);

	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_atomic_print_state(struct drm_printer *p,
		const struct drm_crtc_state *state)
{
	const struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		drm_printf(p, "\tlm[%d]=%d\n", i, cstate->mixers[i].hw_lm->idx - LM_0);
		drm_printf(p, "\tctl[%d]=%d\n", i, cstate->mixers[i].lm_ctl->idx - CTL_0);
		if (cstate->mixers[i].hw_dspp)
			drm_printf(p, "\tdspp[%d]=%d\n", i, cstate->mixers[i].hw_dspp->idx - DSPP_0);
	}
}
static void dpu_crtc_disable(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;
	unsigned long flags;
	bool release_bandwidth = false;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/*
	 * If disable is triggered while in self refresh mode,
	 * reset the encoder software state so that in enable
	 * it won't trigger a warn while assigning crtc.
	 */
	if (old_crtc_state->self_refresh_active) {
		drm_for_each_encoder_mask(encoder, crtc->dev,
					  old_crtc_state->encoder_mask) {
			dpu_encoder_assign_crtc(encoder, NULL);
		}
		return;
	}

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/*
		 * In video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;

		/*
		 * If disable is triggered while PSR is active (e.g. screen
		 * dim in PSR), we need the encoder->crtc connection to
		 * process the device sleep, so preserve it during the PSR
		 * sequence.
		 */
		if (!crtc->state->self_refresh_active)
			dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}
static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *encoder;
	bool request_bandwidth = false;
	struct drm_crtc_state *old_crtc_state;

	old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/*
		 * In video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			request_bandwidth = true;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	if (request_bandwidth)
		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	if (!old_crtc_state->self_refresh_active) {
		drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
			dpu_encoder_assign_crtc(encoder, crtc);
	}

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate)
{
	struct drm_crtc *crtc = cstate->crtc;
	struct drm_encoder *encoder;

	drm_for_each_encoder_mask(encoder, crtc->dev, cstate->encoder_mask) {
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_CMD)
			return true;
	}

	return false;
}

static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;

	int rc = 0;

	bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);

	if (!crtc_state->enable || !crtc_state->active) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, crtc_state->enable,
				crtc_state->active);
		memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
		return 0;
	}

	DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (crtc_state->active_changed)
		crtc_state->mode_changed = true;

	if (cstate->num_mixers)
		_dpu_crtc_setup_lm_bounds(crtc, crtc_state);

	/* FIXME: move this to dpu_plane_atomic_check? */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct dpu_plane_state *dpu_pstate = to_dpu_plane_state(pstate);

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			return rc;
		}

		if (!pstate->visible)
			continue;

		dpu_pstate->needs_dirtyfb = needs_dirtyfb;
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, crtc_state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		return rc;
	}

	return 0;
}
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
			mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
			m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
			out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst_x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tsspp[0]:%s\n",
			   pstate->pipe.sspp->cap->name);
		seq_printf(s, "\tmultirect[0]: mode: %d index: %d\n",
			pstate->pipe.multirect_mode, pstate->pipe.multirect_index);
		if (pstate->r_pipe.sspp) {
			seq_printf(s, "\tsspp[1]:%s\n",
				   pstate->r_pipe.sspp->cap->name);
			seq_printf(s, "\tmultirect[1]: mode: %d index: %d\n",
				   pstate->r_pipe.multirect_mode,
				   pstate->r_pipe.multirect_index);
		}

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);

/* dump the current performance state (clock rate and bandwidth votes) */
static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
	seq_printf(s, "max_per_pipe_ib: %llu\n",
			dpu_crtc->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_create_file("status", 0400,
			crtc->debugfs_entry,
			dpu_crtc, &_dpu_debugfs_status_fops);
	debugfs_create_file("state", 0600,
			crtc->debugfs_entry,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.atomic_print_state = dpu_crtc_atomic_print_state,
	.late_register = dpu_crtc_late_register,
	.verify_crc_source = dpu_crtc_verify_crc_source,
	.set_crc_source = dpu_crtc_set_crc_source,
	.enable_vblank = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
	.get_vblank_counter = dpu_crtc_get_vblank_counter,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
	.get_scanout_position = dpu_crtc_get_scanout_position,
};
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
			       struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	int i, ret;

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				  NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	drm_crtc_enable_color_mgmt(crtc, 0, true, 0);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	ret = drm_self_refresh_helper_init(crtc);
	if (ret) {
		DPU_ERROR("Failed to initialize %s with self-refresh helpers %d\n",
			crtc->name, ret);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}