1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved. 5 * Copyright (C) 2013 Red Hat 6 * Author: Rob Clark <robdclark@gmail.com> 7 */ 8 9 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ 10 #include <linux/sort.h> 11 #include <linux/debugfs.h> 12 #include <linux/ktime.h> 13 #include <linux/bits.h> 14 15 #include <drm/drm_atomic.h> 16 #include <drm/drm_blend.h> 17 #include <drm/drm_crtc.h> 18 #include <drm/drm_flip_work.h> 19 #include <drm/drm_framebuffer.h> 20 #include <drm/drm_mode.h> 21 #include <drm/drm_probe_helper.h> 22 #include <drm/drm_rect.h> 23 #include <drm/drm_vblank.h> 24 25 #include "dpu_kms.h" 26 #include "dpu_hw_lm.h" 27 #include "dpu_hw_ctl.h" 28 #include "dpu_hw_dspp.h" 29 #include "dpu_crtc.h" 30 #include "dpu_plane.h" 31 #include "dpu_encoder.h" 32 #include "dpu_vbif.h" 33 #include "dpu_core_perf.h" 34 #include "dpu_trace.h" 35 36 /* layer mixer index on dpu_crtc */ 37 #define LEFT_MIXER 0 38 #define RIGHT_MIXER 1 39 40 /* timeout in ms waiting for frame done */ 41 #define DPU_CRTC_FRAME_DONE_TIMEOUT_MS 60 42 43 #define CONVERT_S3_15(val) \ 44 (((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0)) 45 46 static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc) 47 { 48 struct msm_drm_private *priv = crtc->dev->dev_private; 49 50 return to_dpu_kms(priv->kms); 51 } 52 53 static void dpu_crtc_destroy(struct drm_crtc *crtc) 54 { 55 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 56 57 if (!crtc) 58 return; 59 60 drm_crtc_cleanup(crtc); 61 kfree(dpu_crtc); 62 } 63 64 static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc) 65 { 66 struct drm_device *dev = crtc->dev; 67 struct drm_encoder *encoder; 68 69 drm_for_each_encoder(encoder, dev) 70 if (encoder->crtc == crtc) 71 return encoder; 72 73 return NULL; 74 } 75 76 static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name) 77 { 78 if (!src_name || 79 !strcmp(src_name, "none")) 80 return DPU_CRTC_CRC_SOURCE_NONE; 81 if (!strcmp(src_name, "auto") || 82 !strcmp(src_name, "lm")) 83 return DPU_CRTC_CRC_SOURCE_LAYER_MIXER; 84 if (!strcmp(src_name, "encoder")) 85 return DPU_CRTC_CRC_SOURCE_ENCODER; 86 87 return DPU_CRTC_CRC_SOURCE_INVALID; 88 } 89 90 static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc, 91 const char *src_name, size_t *values_cnt) 92 { 93 enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name); 94 struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state); 95 96 if (source < 0) { 97 DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index); 98 return -EINVAL; 99 } 100 101 if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) { 102 *values_cnt = crtc_state->num_mixers; 103 } else if (source == DPU_CRTC_CRC_SOURCE_ENCODER) { 104 struct drm_encoder *drm_enc; 105 106 *values_cnt = 0; 107 108 drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) 109 *values_cnt += dpu_encoder_get_crc_values_cnt(drm_enc); 110 } 111 112 return 0; 113 } 114 115 static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state) 116 { 117 struct dpu_crtc_mixer *m; 118 int i; 119 120 for (i = 0; i < crtc_state->num_mixers; ++i) { 121 m = &crtc_state->mixers[i]; 122 123 if (!m->hw_lm || !m->hw_lm->ops.setup_misr) 124 continue; 125 126 /* Calculate MISR over 1 frame */ 127 m->hw_lm->ops.setup_misr(m->hw_lm, true, 1); 128 } 129 } 130 131 static void dpu_crtc_setup_encoder_misr(struct drm_crtc *crtc) 132 { 133 struct drm_encoder *drm_enc; 134 135 drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) 136 dpu_encoder_setup_misr(drm_enc); 137 } 138 139 static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) 140 { 141 enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name); 142 enum dpu_crtc_crc_source current_source; 143 struct dpu_crtc_state *crtc_state; 144 struct drm_device *drm_dev = crtc->dev; 145 146 bool was_enabled; 147 bool enable = false; 148 int ret = 0; 149 150 if (source < 0) { 151 DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index); 152 return -EINVAL; 153 } 154 155 ret = drm_modeset_lock(&crtc->mutex, NULL); 156 157 if (ret) 158 return ret; 159 160 enable = (source != DPU_CRTC_CRC_SOURCE_NONE); 161 crtc_state = to_dpu_crtc_state(crtc->state); 162 163 spin_lock_irq(&drm_dev->event_lock); 164 current_source = crtc_state->crc_source; 165 spin_unlock_irq(&drm_dev->event_lock); 166 167 was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE); 168 169 if (!was_enabled && enable) { 170 ret = drm_crtc_vblank_get(crtc); 171 172 if (ret) 173 goto cleanup; 174 175 } else if (was_enabled && !enable) { 176 drm_crtc_vblank_put(crtc); 177 } 178 179 spin_lock_irq(&drm_dev->event_lock); 180 crtc_state->crc_source = source; 181 spin_unlock_irq(&drm_dev->event_lock); 182 183 crtc_state->crc_frame_skip_count = 0; 184 185 if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) 186 dpu_crtc_setup_lm_misr(crtc_state); 187 else if (source == DPU_CRTC_CRC_SOURCE_ENCODER) 188 dpu_crtc_setup_encoder_misr(crtc); 189 else 190 ret = -EINVAL; 191 192 cleanup: 193 drm_modeset_unlock(&crtc->mutex); 194 195 return ret; 196 } 197 198 static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc) 199 { 200 struct drm_encoder *encoder = get_encoder_from_crtc(crtc); 201 if (!encoder) { 202 DRM_ERROR("no encoder found for crtc %d\n", crtc->index); 203 return 0; 204 } 205 206 return dpu_encoder_get_vsync_count(encoder); 207 } 208 209 static int dpu_crtc_get_lm_crc(struct drm_crtc *crtc, 210 struct dpu_crtc_state *crtc_state) 211 { 212 struct dpu_crtc_mixer *m; 213 u32 crcs[CRTC_DUAL_MIXERS]; 214 215 int rc = 0; 216 int i; 217 218 BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers)); 219 220 for (i = 0; i < crtc_state->num_mixers; ++i) { 221 222 m = &crtc_state->mixers[i]; 223 224 if (!m->hw_lm || !m->hw_lm->ops.collect_misr) 225 continue; 226 227 rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]); 228 229 if (rc) { 230 if (rc != -ENODATA) 231 DRM_DEBUG_DRIVER("MISR read failed\n"); 232 return rc; 233 } 234 } 235 236 return drm_crtc_add_crc_entry(crtc, true, 237 drm_crtc_accurate_vblank_count(crtc), crcs); 238 } 239 240 static int dpu_crtc_get_encoder_crc(struct drm_crtc *crtc) 241 { 242 struct drm_encoder *drm_enc; 243 int rc, pos = 0; 244 u32 crcs[INTF_MAX]; 245 246 drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) { 247 rc = dpu_encoder_get_crc(drm_enc, crcs, pos); 248 if (rc < 0) { 249 if (rc != -ENODATA) 250 DRM_DEBUG_DRIVER("MISR read failed\n"); 251 252 return rc; 253 } 254 255 pos += rc; 256 } 257 258 return drm_crtc_add_crc_entry(crtc, true, 259 drm_crtc_accurate_vblank_count(crtc), crcs); 260 } 261 262 static int dpu_crtc_get_crc(struct drm_crtc *crtc) 263 { 264 struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state); 265 266 /* Skip first 2 frames in case of "uncooked" CRCs */ 267 if (crtc_state->crc_frame_skip_count < 2) { 268 crtc_state->crc_frame_skip_count++; 269 return 0; 270 } 271 272 if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) 273 return dpu_crtc_get_lm_crc(crtc, crtc_state); 274 else if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_ENCODER) 275 return dpu_crtc_get_encoder_crc(crtc); 276 277 return -EINVAL; 278 } 279 280 static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc, 281 bool in_vblank_irq, 282 int *vpos, int *hpos, 283 ktime_t *stime, ktime_t *etime, 284 const struct drm_display_mode *mode) 285 { 286 unsigned int pipe = crtc->index; 287 struct drm_encoder *encoder; 288 int line, vsw, vbp, vactive_start, vactive_end, vfp_end; 289 290 encoder = get_encoder_from_crtc(crtc); 291 if (!encoder) { 292 DRM_ERROR("no encoder found for crtc %d\n", pipe); 293 return false; 294 } 295 296 vsw = mode->crtc_vsync_end - mode->crtc_vsync_start; 297 vbp = mode->crtc_vtotal - mode->crtc_vsync_end; 298 299 /* 300 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at 301 * the end of VFP. Translate the porch values relative to the line 302 * counter positions. 303 */ 304 305 vactive_start = vsw + vbp + 1; 306 vactive_end = vactive_start + mode->crtc_vdisplay; 307 308 /* last scan line before VSYNC */ 309 vfp_end = mode->crtc_vtotal; 310 311 if (stime) 312 *stime = ktime_get(); 313 314 line = dpu_encoder_get_linecount(encoder); 315 316 if (line < vactive_start) 317 line -= vactive_start; 318 else if (line > vactive_end) 319 line = line - vfp_end - vactive_start; 320 else 321 line -= vactive_start; 322 323 *vpos = line; 324 *hpos = 0; 325 326 if (etime) 327 *etime = ktime_get(); 328 329 return true; 330 } 331 332 static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer, 333 struct dpu_plane_state *pstate, struct dpu_format *format) 334 { 335 struct dpu_hw_mixer *lm = mixer->hw_lm; 336 uint32_t blend_op; 337 uint32_t fg_alpha, bg_alpha; 338 339 fg_alpha = pstate->base.alpha >> 8; 340 bg_alpha = 0xff - fg_alpha; 341 342 /* default to opaque blending */ 343 if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE || 344 !format->alpha_enable) { 345 blend_op = DPU_BLEND_FG_ALPHA_FG_CONST | 346 DPU_BLEND_BG_ALPHA_BG_CONST; 347 } else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) { 348 blend_op = DPU_BLEND_FG_ALPHA_FG_CONST | 349 DPU_BLEND_BG_ALPHA_FG_PIXEL; 350 if (fg_alpha != 0xff) { 351 bg_alpha = fg_alpha; 352 blend_op |= DPU_BLEND_BG_MOD_ALPHA | 353 DPU_BLEND_BG_INV_MOD_ALPHA; 354 } else { 355 blend_op |= DPU_BLEND_BG_INV_ALPHA; 356 } 357 } else { 358 /* coverage blending */ 359 blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL | 360 DPU_BLEND_BG_ALPHA_FG_PIXEL; 361 if (fg_alpha != 0xff) { 362 bg_alpha = fg_alpha; 363 blend_op |= DPU_BLEND_FG_MOD_ALPHA | 364 DPU_BLEND_FG_INV_MOD_ALPHA | 365 DPU_BLEND_BG_MOD_ALPHA | 366 DPU_BLEND_BG_INV_MOD_ALPHA; 367 } else { 368 blend_op |= DPU_BLEND_BG_INV_ALPHA; 369 } 370 } 371 372 lm->ops.setup_blend_config(lm, pstate->stage, 373 fg_alpha, bg_alpha, blend_op); 374 375 DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n", 376 &format->base.pixel_format, format->alpha_enable, blend_op); 377 } 378 379 static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc) 380 { 381 struct dpu_crtc_state *crtc_state; 382 int lm_idx, lm_horiz_position; 383 384 crtc_state = to_dpu_crtc_state(crtc->state); 385 386 lm_horiz_position = 0; 387 for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) { 388 const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx]; 389 struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm; 390 struct dpu_hw_mixer_cfg cfg; 391 392 if (!lm_roi || !drm_rect_visible(lm_roi)) 393 continue; 394 395 cfg.out_width = drm_rect_width(lm_roi); 396 cfg.out_height = drm_rect_height(lm_roi); 397 cfg.right_mixer = lm_horiz_position++; 398 cfg.flags = 0; 399 hw_lm->ops.setup_mixer_out(hw_lm, &cfg); 400 } 401 } 402 403 static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, 404 struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer, 405 struct dpu_hw_stage_cfg *stage_cfg) 406 { 407 struct drm_plane *plane; 408 struct drm_framebuffer *fb; 409 struct drm_plane_state *state; 410 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); 411 struct dpu_plane_state *pstate = NULL; 412 struct dpu_format *format; 413 struct dpu_hw_ctl *ctl = mixer->lm_ctl; 414 415 u32 flush_mask; 416 uint32_t stage_idx, lm_idx; 417 int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 }; 418 bool bg_alpha_enable = false; 419 DECLARE_BITMAP(fetch_active, SSPP_MAX); 420 421 memset(fetch_active, 0, sizeof(fetch_active)); 422 drm_atomic_crtc_for_each_plane(plane, crtc) { 423 state = plane->state; 424 if (!state) 425 continue; 426 427 if (!state->visible) 428 continue; 429 430 pstate = to_dpu_plane_state(state); 431 fb = state->fb; 432 433 dpu_plane_get_ctl_flush(plane, ctl, &flush_mask); 434 set_bit(dpu_plane_pipe(plane), fetch_active); 435 436 DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n", 437 crtc->base.id, 438 pstate->stage, 439 plane->base.id, 440 dpu_plane_pipe(plane) - SSPP_VIG0, 441 state->fb ? state->fb->base.id : -1); 442 443 format = to_dpu_format(msm_framebuffer_format(pstate->base.fb)); 444 445 if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable) 446 bg_alpha_enable = true; 447 448 stage_idx = zpos_cnt[pstate->stage]++; 449 stage_cfg->stage[pstate->stage][stage_idx] = 450 dpu_plane_pipe(plane); 451 stage_cfg->multirect_index[pstate->stage][stage_idx] = 452 pstate->multirect_index; 453 454 trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane), 455 state, pstate, stage_idx, 456 dpu_plane_pipe(plane) - SSPP_VIG0, 457 format->base.pixel_format, 458 fb ? fb->modifier : 0); 459 460 /* blend config update */ 461 for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) { 462 _dpu_crtc_setup_blend_cfg(mixer + lm_idx, 463 pstate, format); 464 465 mixer[lm_idx].flush_mask |= flush_mask; 466 467 if (bg_alpha_enable && !format->alpha_enable) 468 mixer[lm_idx].mixer_op_mode = 0; 469 else 470 mixer[lm_idx].mixer_op_mode |= 471 1 << pstate->stage; 472 } 473 } 474 475 if (ctl->ops.set_active_pipes) 476 ctl->ops.set_active_pipes(ctl, fetch_active); 477 478 _dpu_crtc_program_lm_output_roi(crtc); 479 } 480 481 /** 482 * _dpu_crtc_blend_setup - configure crtc mixers 483 * @crtc: Pointer to drm crtc structure 484 */ 485 static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) 486 { 487 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 488 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); 489 struct dpu_crtc_mixer *mixer = cstate->mixers; 490 struct dpu_hw_ctl *ctl; 491 struct dpu_hw_mixer *lm; 492 struct dpu_hw_stage_cfg stage_cfg; 493 int i; 494 495 DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name); 496 497 for (i = 0; i < cstate->num_mixers; i++) { 498 mixer[i].mixer_op_mode = 0; 499 mixer[i].flush_mask = 0; 500 if (mixer[i].lm_ctl->ops.clear_all_blendstages) 501 mixer[i].lm_ctl->ops.clear_all_blendstages( 502 mixer[i].lm_ctl); 503 } 504 505 /* initialize stage cfg */ 506 memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg)); 507 508 _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg); 509 510 for (i = 0; i < cstate->num_mixers; i++) { 511 ctl = mixer[i].lm_ctl; 512 lm = mixer[i].hw_lm; 513 514 lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode); 515 516 mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl, 517 mixer[i].hw_lm->idx); 518 519 /* stage config flush mask */ 520 ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask); 521 522 DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n", 523 mixer[i].hw_lm->idx - LM_0, 524 mixer[i].mixer_op_mode, 525 ctl->idx - CTL_0, 526 mixer[i].flush_mask); 527 528 ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx, 529 &stage_cfg); 530 } 531 } 532 533 /** 534 * _dpu_crtc_complete_flip - signal pending page_flip events 535 * Any pending vblank events are added to the vblank_event_list 536 * so that the next vblank interrupt shall signal them. 537 * However PAGE_FLIP events are not handled through the vblank_event_list. 538 * This API signals any pending PAGE_FLIP events requested through 539 * DRM_IOCTL_MODE_PAGE_FLIP and are cached in the dpu_crtc->event. 540 * @crtc: Pointer to drm crtc structure 541 */ 542 static void _dpu_crtc_complete_flip(struct drm_crtc *crtc) 543 { 544 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 545 struct drm_device *dev = crtc->dev; 546 unsigned long flags; 547 548 spin_lock_irqsave(&dev->event_lock, flags); 549 if (dpu_crtc->event) { 550 DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name, 551 dpu_crtc->event); 552 trace_dpu_crtc_complete_flip(DRMID(crtc)); 553 drm_crtc_send_vblank_event(crtc, dpu_crtc->event); 554 dpu_crtc->event = NULL; 555 } 556 spin_unlock_irqrestore(&dev->event_lock, flags); 557 } 558 559 enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc) 560 { 561 struct drm_encoder *encoder; 562 563 /* 564 * TODO: This function is called from dpu debugfs and as part of atomic 565 * check. When called from debugfs, the crtc->mutex must be held to 566 * read crtc->state. However reading crtc->state from atomic check isn't 567 * allowed (unless you have a good reason, a big comment, and a deep 568 * understanding of how the atomic/modeset locks work (<- and this is 569 * probably not possible)). So we'll keep the WARN_ON here for now, but 570 * really we need to figure out a better way to track our operating mode 571 */ 572 WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 573 574 /* TODO: Returns the first INTF_MODE, could there be multiple values? */ 575 drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) 576 return dpu_encoder_get_intf_mode(encoder); 577 578 return INTF_MODE_NONE; 579 } 580 581 void dpu_crtc_vblank_callback(struct drm_crtc *crtc) 582 { 583 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 584 585 /* keep statistics on vblank callback - with auto reset via debugfs */ 586 if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0) 587 dpu_crtc->vblank_cb_time = ktime_get(); 588 else 589 dpu_crtc->vblank_cb_count++; 590 591 dpu_crtc_get_crc(crtc); 592 593 drm_crtc_handle_vblank(crtc); 594 trace_dpu_crtc_vblank_cb(DRMID(crtc)); 595 } 596 597 static void dpu_crtc_frame_event_work(struct kthread_work *work) 598 { 599 struct dpu_crtc_frame_event *fevent = container_of(work, 600 struct dpu_crtc_frame_event, work); 601 struct drm_crtc *crtc = fevent->crtc; 602 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 603 unsigned long flags; 604 bool frame_done = false; 605 606 DPU_ATRACE_BEGIN("crtc_frame_event"); 607 608 DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event, 609 ktime_to_ns(fevent->ts)); 610 611 if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE 612 | DPU_ENCODER_FRAME_EVENT_ERROR 613 | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) { 614 615 if (atomic_read(&dpu_crtc->frame_pending) < 1) { 616 /* ignore vblank when not pending */ 617 } else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) { 618 /* release bandwidth and other resources */ 619 trace_dpu_crtc_frame_event_done(DRMID(crtc), 620 fevent->event); 621 dpu_core_perf_crtc_release_bw(crtc); 622 } else { 623 trace_dpu_crtc_frame_event_more_pending(DRMID(crtc), 624 fevent->event); 625 } 626 627 if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE 628 | DPU_ENCODER_FRAME_EVENT_ERROR)) 629 frame_done = true; 630 } 631 632 if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD) 633 DPU_ERROR("crtc%d ts:%lld received panel dead event\n", 634 crtc->base.id, ktime_to_ns(fevent->ts)); 635 636 if (frame_done) 637 complete_all(&dpu_crtc->frame_done_comp); 638 639 spin_lock_irqsave(&dpu_crtc->spin_lock, flags); 640 list_add_tail(&fevent->list, &dpu_crtc->frame_event_list); 641 spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags); 642 DPU_ATRACE_END("crtc_frame_event"); 643 } 644 645 /* 646 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module 647 * registers this API to encoder for all frame event callbacks like 648 * frame_error, frame_done, idle_timeout, etc. Encoder may call different events 649 * from different context - IRQ, user thread, commit_thread, etc. Each event 650 * should be carefully reviewed and should be processed in proper task context 651 * to avoid schedulin delay or properly manage the irq context's bottom half 652 * processing. 653 */ 654 static void dpu_crtc_frame_event_cb(void *data, u32 event) 655 { 656 struct drm_crtc *crtc = (struct drm_crtc *)data; 657 struct dpu_crtc *dpu_crtc; 658 struct msm_drm_private *priv; 659 struct dpu_crtc_frame_event *fevent; 660 unsigned long flags; 661 u32 crtc_id; 662 663 /* Nothing to do on idle event */ 664 if (event & DPU_ENCODER_FRAME_EVENT_IDLE) 665 return; 666 667 dpu_crtc = to_dpu_crtc(crtc); 668 priv = crtc->dev->dev_private; 669 crtc_id = drm_crtc_index(crtc); 670 671 trace_dpu_crtc_frame_event_cb(DRMID(crtc), event); 672 673 spin_lock_irqsave(&dpu_crtc->spin_lock, flags); 674 fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list, 675 struct dpu_crtc_frame_event, list); 676 if (fevent) 677 list_del_init(&fevent->list); 678 spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags); 679 680 if (!fevent) { 681 DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event); 682 return; 683 } 684 685 fevent->event = event; 686 fevent->crtc = crtc; 687 fevent->ts = ktime_get(); 688 kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work); 689 } 690 691 void dpu_crtc_complete_commit(struct drm_crtc *crtc) 692 { 693 trace_dpu_crtc_complete_commit(DRMID(crtc)); 694 dpu_core_perf_crtc_update(crtc, 0, false); 695 _dpu_crtc_complete_flip(crtc); 696 } 697 698 static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc, 699 struct drm_crtc_state *state) 700 { 701 struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); 702 struct drm_display_mode *adj_mode = &state->adjusted_mode; 703 u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers; 704 int i; 705 706 for (i = 0; i < cstate->num_mixers; i++) { 707 struct drm_rect *r = &cstate->lm_bounds[i]; 708 r->x1 = crtc_split_width * i; 709 r->y1 = 0; 710 r->x2 = r->x1 + crtc_split_width; 711 r->y2 = adj_mode->vdisplay; 712 713 trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r); 714 } 715 } 716 717 static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state, 718 struct dpu_hw_pcc_cfg *cfg) 719 { 720 struct drm_color_ctm *ctm; 721 722 memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg)); 723 724 ctm = (struct drm_color_ctm *)state->ctm->data; 725 726 if (!ctm) 727 return; 728 729 cfg->r.r = CONVERT_S3_15(ctm->matrix[0]); 730 cfg->g.r = CONVERT_S3_15(ctm->matrix[1]); 731 cfg->b.r = CONVERT_S3_15(ctm->matrix[2]); 732 733 cfg->r.g = CONVERT_S3_15(ctm->matrix[3]); 734 cfg->g.g = CONVERT_S3_15(ctm->matrix[4]); 735 cfg->b.g = CONVERT_S3_15(ctm->matrix[5]); 736 737 cfg->r.b = CONVERT_S3_15(ctm->matrix[6]); 738 cfg->g.b = CONVERT_S3_15(ctm->matrix[7]); 739 cfg->b.b = CONVERT_S3_15(ctm->matrix[8]); 740 } 741 742 static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc) 743 { 744 struct drm_crtc_state *state = crtc->state; 745 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); 746 struct dpu_crtc_mixer *mixer = cstate->mixers; 747 struct dpu_hw_pcc_cfg cfg; 748 struct dpu_hw_ctl *ctl; 749 struct dpu_hw_dspp *dspp; 750 int i; 751 752 753 if (!state->color_mgmt_changed) 754 return; 755 756 for (i = 0; i < cstate->num_mixers; i++) { 757 ctl = mixer[i].lm_ctl; 758 dspp = mixer[i].hw_dspp; 759 760 if (!dspp || !dspp->ops.setup_pcc) 761 continue; 762 763 if (!state->ctm) { 764 dspp->ops.setup_pcc(dspp, NULL); 765 } else { 766 _dpu_crtc_get_pcc_coeff(state, &cfg); 767 dspp->ops.setup_pcc(dspp, &cfg); 768 } 769 770 mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl, 771 mixer[i].hw_dspp->idx); 772 773 /* stage config flush mask */ 774 ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask); 775 776 DRM_DEBUG_ATOMIC("lm %d, ctl %d, flush mask 0x%x\n", 777 mixer[i].hw_lm->idx - DSPP_0, 778 ctl->idx - CTL_0, 779 mixer[i].flush_mask); 780 } 781 } 782 783 static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, 784 struct drm_atomic_state *state) 785 { 786 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); 787 struct drm_encoder *encoder; 788 789 if (!crtc->state->enable) { 790 DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n", 791 crtc->base.id, crtc->state->enable); 792 return; 793 } 794 795 DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id); 796 797 _dpu_crtc_setup_lm_bounds(crtc, crtc->state); 798 799 /* encoder will trigger pending mask now */ 800 drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) 801 dpu_encoder_trigger_kickoff_pending(encoder); 802 803 /* 804 * If no mixers have been allocated in dpu_crtc_atomic_check(), 805 * it means we are trying to flush a CRTC whose state is disabled: 806 * nothing else needs to be done. 807 */ 808 if (unlikely(!cstate->num_mixers)) 809 return; 810 811 _dpu_crtc_blend_setup(crtc); 812 813 _dpu_crtc_setup_cp_blocks(crtc); 814 815 /* 816 * PP_DONE irq is only used by command mode for now. 817 * It is better to request pending before FLUSH and START trigger 818 * to make sure no pp_done irq missed. 819 * This is safe because no pp_done will happen before SW trigger 820 * in command mode. 821 */ 822 } 823 824 static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, 825 struct drm_atomic_state *state) 826 { 827 struct dpu_crtc *dpu_crtc; 828 struct drm_device *dev; 829 struct drm_plane *plane; 830 struct msm_drm_private *priv; 831 unsigned long flags; 832 struct dpu_crtc_state *cstate; 833 834 if (!crtc->state->enable) { 835 DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n", 836 crtc->base.id, crtc->state->enable); 837 return; 838 } 839 840 DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id); 841 842 dpu_crtc = to_dpu_crtc(crtc); 843 cstate = to_dpu_crtc_state(crtc->state); 844 dev = crtc->dev; 845 priv = dev->dev_private; 846 847 if (crtc->index >= ARRAY_SIZE(priv->event_thread)) { 848 DPU_ERROR("invalid crtc index[%d]\n", crtc->index); 849 return; 850 } 851 852 WARN_ON(dpu_crtc->event); 853 spin_lock_irqsave(&dev->event_lock, flags); 854 dpu_crtc->event = crtc->state->event; 855 crtc->state->event = NULL; 856 spin_unlock_irqrestore(&dev->event_lock, flags); 857 858 /* 859 * If no mixers has been allocated in dpu_crtc_atomic_check(), 860 * it means we are trying to flush a CRTC whose state is disabled: 861 * nothing else needs to be done. 862 */ 863 if (unlikely(!cstate->num_mixers)) 864 return; 865 866 /* update performance setting before crtc kickoff */ 867 dpu_core_perf_crtc_update(crtc, 1, false); 868 869 /* 870 * Final plane updates: Give each plane a chance to complete all 871 * required writes/flushing before crtc's "flush 872 * everything" call below. 873 */ 874 drm_atomic_crtc_for_each_plane(plane, crtc) { 875 if (dpu_crtc->smmu_state.transition_error) 876 dpu_plane_set_error(plane, true); 877 dpu_plane_flush(plane); 878 } 879 880 /* Kickoff will be scheduled by outer layer */ 881 } 882 883 /** 884 * dpu_crtc_destroy_state - state destroy hook 885 * @crtc: drm CRTC 886 * @state: CRTC state object to release 887 */ 888 static void dpu_crtc_destroy_state(struct drm_crtc *crtc, 889 struct drm_crtc_state *state) 890 { 891 struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); 892 893 DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id); 894 895 __drm_atomic_helper_crtc_destroy_state(state); 896 897 kfree(cstate); 898 } 899 900 static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc) 901 { 902 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 903 int ret, rc = 0; 904 905 if (!atomic_read(&dpu_crtc->frame_pending)) { 906 DRM_DEBUG_ATOMIC("no frames pending\n"); 907 return 0; 908 } 909 910 DPU_ATRACE_BEGIN("frame done completion wait"); 911 ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp, 912 msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS)); 913 if (!ret) { 914 DRM_ERROR("frame done wait timed out, ret:%d\n", ret); 915 rc = -ETIMEDOUT; 916 } 917 DPU_ATRACE_END("frame done completion wait"); 918 919 return rc; 920 } 921 922 void dpu_crtc_commit_kickoff(struct drm_crtc *crtc) 923 { 924 struct drm_encoder *encoder; 925 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 926 struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); 927 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); 928 929 /* 930 * If no mixers has been allocated in dpu_crtc_atomic_check(), 931 * it means we are trying to start a CRTC whose state is disabled: 932 * nothing else needs to be done. 933 */ 934 if (unlikely(!cstate->num_mixers)) 935 return; 936 937 DPU_ATRACE_BEGIN("crtc_commit"); 938 939 drm_for_each_encoder_mask(encoder, crtc->dev, 940 crtc->state->encoder_mask) { 941 if (!dpu_encoder_is_valid_for_commit(encoder)) { 942 DRM_DEBUG_ATOMIC("invalid FB not kicking off crtc\n"); 943 goto end; 944 } 945 } 946 /* 947 * Encoder will flush/start now, unless it has a tx pending. If so, it 948 * may delay and flush at an irq event (e.g. ppdone) 949 */ 950 drm_for_each_encoder_mask(encoder, crtc->dev, 951 crtc->state->encoder_mask) 952 dpu_encoder_prepare_for_kickoff(encoder); 953 954 if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) { 955 /* acquire bandwidth and other resources */ 956 DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id); 957 } else 958 DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id); 959 960 dpu_crtc->play_count++; 961 962 dpu_vbif_clear_errors(dpu_kms); 963 964 drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) 965 dpu_encoder_kickoff(encoder); 966 967 reinit_completion(&dpu_crtc->frame_done_comp); 968 969 end: 970 DPU_ATRACE_END("crtc_commit"); 971 } 972 973 static void dpu_crtc_reset(struct drm_crtc *crtc) 974 { 975 struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL); 976 977 if (crtc->state) 978 dpu_crtc_destroy_state(crtc, crtc->state); 979 980 __drm_atomic_helper_crtc_reset(crtc, &cstate->base); 981 } 982 983 /** 984 * dpu_crtc_duplicate_state - state duplicate hook 985 * @crtc: Pointer to drm crtc structure 986 */ 987 static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc) 988 { 989 struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state); 990 991 cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL); 992 if (!cstate) { 993 DPU_ERROR("failed to allocate state\n"); 994 return NULL; 995 } 996 997 /* duplicate base helper */ 998 __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base); 999 1000 return &cstate->base; 1001 } 1002 1003 static void dpu_crtc_atomic_print_state(struct drm_printer *p, 1004 const struct drm_crtc_state *state) 1005 { 1006 const struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); 1007 int i; 1008 1009 for (i = 0; i < cstate->num_mixers; i++) { 1010 drm_printf(p, "\tlm[%d]=%d\n", i, cstate->mixers[i].hw_lm->idx - LM_0); 1011 drm_printf(p, "\tctl[%d]=%d\n", i, cstate->mixers[i].lm_ctl->idx - CTL_0); 1012 if (cstate->mixers[i].hw_dspp) 1013 drm_printf(p, "\tdspp[%d]=%d\n", i, cstate->mixers[i].hw_dspp->idx - DSPP_0); 1014 } 1015 } 1016 1017 static void dpu_crtc_disable(struct drm_crtc *crtc, 1018 struct drm_atomic_state *state) 1019 { 1020 struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, 1021 crtc); 1022 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1023 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); 1024 struct drm_encoder *encoder; 1025 unsigned long flags; 1026 bool release_bandwidth = false; 1027 1028 DRM_DEBUG_KMS("crtc%d\n", crtc->base.id); 1029 1030 /* Disable/save vblank irq handling */ 1031 drm_crtc_vblank_off(crtc); 1032 1033 drm_for_each_encoder_mask(encoder, crtc->dev, 1034 old_crtc_state->encoder_mask) { 1035 /* in video mode, we hold an extra bandwidth reference 1036 * as we cannot drop bandwidth at frame-done if any 1037 * crtc is being used in video mode. 1038 */ 1039 if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO) 1040 release_bandwidth = true; 1041 dpu_encoder_assign_crtc(encoder, NULL); 1042 } 1043 1044 /* wait for frame_event_done completion */ 1045 if (_dpu_crtc_wait_for_frame_done(crtc)) 1046 DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n", 1047 crtc->base.id, 1048 atomic_read(&dpu_crtc->frame_pending)); 1049 1050 trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc); 1051 dpu_crtc->enabled = false; 1052 1053 if (atomic_read(&dpu_crtc->frame_pending)) { 1054 trace_dpu_crtc_disable_frame_pending(DRMID(crtc), 1055 atomic_read(&dpu_crtc->frame_pending)); 1056 if (release_bandwidth) 1057 dpu_core_perf_crtc_release_bw(crtc); 1058 atomic_set(&dpu_crtc->frame_pending, 0); 1059 } 1060 1061 dpu_core_perf_crtc_update(crtc, 0, true); 1062 1063 drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) 1064 dpu_encoder_register_frame_event_callback(encoder, NULL, NULL); 1065 1066 memset(cstate->mixers, 0, sizeof(cstate->mixers)); 1067 cstate->num_mixers = 0; 1068 1069 /* disable clk & bw control until clk & bw properties are set */ 1070 cstate->bw_control = false; 1071 cstate->bw_split_vote = false; 1072 1073 if (crtc->state->event && !crtc->state->active) { 1074 spin_lock_irqsave(&crtc->dev->event_lock, flags); 1075 drm_crtc_send_vblank_event(crtc, crtc->state->event); 1076 crtc->state->event = NULL; 1077 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 1078 } 1079 1080 pm_runtime_put_sync(crtc->dev->dev); 1081 } 1082 1083 static void dpu_crtc_enable(struct drm_crtc *crtc, 1084 struct drm_atomic_state *state) 1085 { 1086 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1087 struct drm_encoder *encoder; 1088 bool request_bandwidth = false; 1089 1090 pm_runtime_get_sync(crtc->dev->dev); 1091 1092 DRM_DEBUG_KMS("crtc%d\n", crtc->base.id); 1093 1094 drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) { 1095 /* in video mode, we hold an extra bandwidth reference 1096 * as we cannot drop bandwidth at frame-done if any 1097 * crtc is being used in video mode. 1098 */ 1099 if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO) 1100 request_bandwidth = true; 1101 dpu_encoder_register_frame_event_callback(encoder, 1102 dpu_crtc_frame_event_cb, (void *)crtc); 1103 } 1104 1105 if (request_bandwidth) 1106 atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref); 1107 1108 trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc); 1109 dpu_crtc->enabled = true; 1110 1111 drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) 1112 dpu_encoder_assign_crtc(encoder, crtc); 1113 1114 /* Enable/restore vblank irq handling */ 1115 drm_crtc_vblank_on(crtc); 1116 } 1117 1118 struct plane_state { 1119 struct dpu_plane_state *dpu_pstate; 1120 const struct drm_plane_state *drm_pstate; 1121 int stage; 1122 u32 pipe_id; 1123 }; 1124 1125 static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate) 1126 { 1127 struct drm_crtc *crtc = cstate->crtc; 1128 struct drm_encoder *encoder; 1129 1130 drm_for_each_encoder_mask (encoder, crtc->dev, cstate->encoder_mask) { 1131 if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_CMD) { 1132 return true; 1133 } 1134 } 1135 1136 return false; 1137 } 1138 1139 static int dpu_crtc_atomic_check(struct drm_crtc *crtc, 1140 struct drm_atomic_state *state) 1141 { 1142 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, 1143 crtc); 1144 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1145 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state); 1146 struct plane_state *pstates; 1147 1148 const struct drm_plane_state *pstate; 1149 struct drm_plane *plane; 1150 struct drm_display_mode *mode; 1151 1152 int cnt = 0, rc = 0, mixer_width = 0, i, z_pos; 1153 1154 struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2]; 1155 int multirect_count = 0; 1156 const struct drm_plane_state *pipe_staged[SSPP_MAX]; 1157 int left_zpos_cnt = 0, right_zpos_cnt = 0; 1158 struct drm_rect crtc_rect = { 0 }; 1159 bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state); 1160 1161 pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL); 1162 1163 if (!crtc_state->enable || !crtc_state->active) { 1164 DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n", 1165 crtc->base.id, crtc_state->enable, 1166 crtc_state->active); 1167 memset(&cstate->new_perf, 0, sizeof(cstate->new_perf)); 1168 goto end; 1169 } 1170 1171 mode = &crtc_state->adjusted_mode; 1172 DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name); 1173 1174 /* force a full mode set if active state changed */ 1175 if (crtc_state->active_changed) 1176 crtc_state->mode_changed = true; 1177 1178 memset(pipe_staged, 0, sizeof(pipe_staged)); 1179 1180 if (cstate->num_mixers) { 1181 mixer_width = mode->hdisplay / cstate->num_mixers; 1182 1183 _dpu_crtc_setup_lm_bounds(crtc, crtc_state); 1184 } 1185 1186 crtc_rect.x2 = mode->hdisplay; 1187 crtc_rect.y2 = mode->vdisplay; 1188 1189 /* get plane state for all drm planes associated with crtc state */ 1190 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { 1191 struct dpu_plane_state *dpu_pstate = to_dpu_plane_state(pstate); 1192 struct drm_rect dst, clip = crtc_rect; 1193 1194 if (IS_ERR_OR_NULL(pstate)) { 1195 rc = PTR_ERR(pstate); 1196 DPU_ERROR("%s: failed to get plane%d state, %d\n", 1197 dpu_crtc->name, plane->base.id, rc); 1198 goto end; 1199 } 1200 if (cnt >= DPU_STAGE_MAX * 4) 1201 continue; 1202 1203 if (!pstate->visible) 1204 continue; 1205 1206 pstates[cnt].dpu_pstate = dpu_pstate; 1207 pstates[cnt].drm_pstate = pstate; 1208 pstates[cnt].stage = pstate->normalized_zpos; 1209 pstates[cnt].pipe_id = dpu_plane_pipe(plane); 1210 1211 dpu_pstate->needs_dirtyfb = needs_dirtyfb; 1212 1213 if (pipe_staged[pstates[cnt].pipe_id]) { 1214 multirect_plane[multirect_count].r0 = 1215 pipe_staged[pstates[cnt].pipe_id]; 1216 multirect_plane[multirect_count].r1 = pstate; 1217 multirect_count++; 1218 1219 pipe_staged[pstates[cnt].pipe_id] = NULL; 1220 } else { 1221 pipe_staged[pstates[cnt].pipe_id] = pstate; 1222 } 1223 1224 cnt++; 1225 1226 dst = drm_plane_state_dest(pstate); 1227 if (!drm_rect_intersect(&clip, &dst)) { 1228 DPU_ERROR("invalid vertical/horizontal destination\n"); 1229 DPU_ERROR("display: " DRM_RECT_FMT " plane: " 1230 DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect), 1231 DRM_RECT_ARG(&dst)); 1232 rc = -E2BIG; 1233 goto end; 1234 } 1235 } 1236 1237 for (i = 1; i < SSPP_MAX; i++) { 1238 if (pipe_staged[i]) { 1239 dpu_plane_clear_multirect(pipe_staged[i]); 1240 1241 if (is_dpu_plane_virtual(pipe_staged[i]->plane)) { 1242 DPU_ERROR( 1243 "r1 only virt plane:%d not supported\n", 1244 pipe_staged[i]->plane->base.id); 1245 rc = -EINVAL; 1246 goto end; 1247 } 1248 } 1249 } 1250 1251 z_pos = -1; 1252 for (i = 0; i < cnt; i++) { 1253 /* reset counts at every new blend stage */ 1254 if (pstates[i].stage != z_pos) { 1255 left_zpos_cnt = 0; 1256 right_zpos_cnt = 0; 1257 z_pos = pstates[i].stage; 1258 } 1259 1260 /* verify z_pos setting before using it */ 1261 if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) { 1262 DPU_ERROR("> %d plane stages assigned\n", 1263 DPU_STAGE_MAX - DPU_STAGE_0); 1264 rc = -EINVAL; 1265 goto end; 1266 } else if (pstates[i].drm_pstate->crtc_x < mixer_width) { 1267 if (left_zpos_cnt == 2) { 1268 DPU_ERROR("> 2 planes @ stage %d on left\n", 1269 z_pos); 1270 rc = -EINVAL; 1271 goto end; 1272 } 1273 left_zpos_cnt++; 1274 1275 } else { 1276 if (right_zpos_cnt == 2) { 1277 DPU_ERROR("> 2 planes @ stage %d on right\n", 1278 z_pos); 1279 rc = -EINVAL; 1280 goto end; 1281 } 1282 right_zpos_cnt++; 1283 } 1284 1285 pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0; 1286 DRM_DEBUG_ATOMIC("%s: zpos %d\n", dpu_crtc->name, z_pos); 1287 } 1288 1289 for (i = 0; i < multirect_count; i++) { 1290 if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) { 1291 DPU_ERROR( 1292 "multirect validation failed for planes (%d - %d)\n", 1293 multirect_plane[i].r0->plane->base.id, 1294 multirect_plane[i].r1->plane->base.id); 1295 rc = -EINVAL; 1296 goto end; 1297 } 1298 } 1299 1300 atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref); 1301 1302 rc = dpu_core_perf_crtc_check(crtc, crtc_state); 1303 if (rc) { 1304 DPU_ERROR("crtc%d failed performance check %d\n", 1305 crtc->base.id, rc); 1306 goto end; 1307 } 1308 1309 /* validate source split: 1310 * use pstates sorted by stage to check planes on same stage 1311 * we assume that all pipes are in source split so its valid to compare 1312 * without taking into account left/right mixer placement 1313 */ 1314 for (i = 1; i < cnt; i++) { 1315 struct plane_state *prv_pstate, *cur_pstate; 1316 struct drm_rect left_rect, right_rect; 1317 int32_t left_pid, right_pid; 1318 int32_t stage; 1319 1320 prv_pstate = &pstates[i - 1]; 1321 cur_pstate = &pstates[i]; 1322 if (prv_pstate->stage != cur_pstate->stage) 1323 continue; 1324 1325 stage = cur_pstate->stage; 1326 1327 left_pid = prv_pstate->dpu_pstate->base.plane->base.id; 1328 left_rect = drm_plane_state_dest(prv_pstate->drm_pstate); 1329 1330 right_pid = cur_pstate->dpu_pstate->base.plane->base.id; 1331 right_rect = drm_plane_state_dest(cur_pstate->drm_pstate); 1332 1333 if (right_rect.x1 < left_rect.x1) { 1334 swap(left_pid, right_pid); 1335 swap(left_rect, right_rect); 1336 } 1337 1338 /** 1339 * - planes are enumerated in pipe-priority order such that 1340 * planes with lower drm_id must be left-most in a shared 1341 * blend-stage when using source split. 1342 * - planes in source split must be contiguous in width 1343 * - planes in source split must have same dest yoff and height 1344 */ 1345 if (right_pid < left_pid) { 1346 DPU_ERROR( 1347 "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n", 1348 stage, left_pid, right_pid); 1349 rc = -EINVAL; 1350 goto end; 1351 } else if (right_rect.x1 != drm_rect_width(&left_rect)) { 1352 DPU_ERROR("non-contiguous coordinates for src split. " 1353 "stage: %d left: " DRM_RECT_FMT " right: " 1354 DRM_RECT_FMT "\n", stage, 1355 DRM_RECT_ARG(&left_rect), 1356 DRM_RECT_ARG(&right_rect)); 1357 rc = -EINVAL; 1358 goto end; 1359 } else if (left_rect.y1 != right_rect.y1 || 1360 drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) { 1361 DPU_ERROR("source split at stage: %d. invalid " 1362 "yoff/height: left: " DRM_RECT_FMT " right: " 1363 DRM_RECT_FMT "\n", stage, 1364 DRM_RECT_ARG(&left_rect), 1365 DRM_RECT_ARG(&right_rect)); 1366 rc = -EINVAL; 1367 goto end; 1368 } 1369 } 1370 1371 end: 1372 kfree(pstates); 1373 return rc; 1374 } 1375 1376 int dpu_crtc_vblank(struct drm_crtc *crtc, bool en) 1377 { 1378 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1379 struct drm_encoder *enc; 1380 1381 trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc); 1382 1383 /* 1384 * Normally we would iterate through encoder_mask in crtc state to find 1385 * attached encoders. In this case, we might be disabling vblank _after_ 1386 * encoder_mask has been cleared. 1387 * 1388 * Instead, we "assign" a crtc to the encoder in enable and clear it in 1389 * disable (which is also after encoder_mask is cleared). So instead of 1390 * using encoder mask, we'll ask the encoder to toggle itself iff it's 1391 * currently assigned to our crtc. 1392 * 1393 * Note also that this function cannot be called while crtc is disabled 1394 * since we use drm_crtc_vblank_on/off. So we don't need to worry 1395 * about the assigned crtcs being inconsistent with the current state 1396 * (which means no need to worry about modeset locks). 1397 */ 1398 list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) { 1399 trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en, 1400 dpu_crtc); 1401 1402 dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en); 1403 } 1404 1405 return 0; 1406 } 1407 1408 #ifdef CONFIG_DEBUG_FS 1409 static int _dpu_debugfs_status_show(struct seq_file *s, void *data) 1410 { 1411 struct dpu_crtc *dpu_crtc; 1412 struct dpu_plane_state *pstate = NULL; 1413 struct dpu_crtc_mixer *m; 1414 1415 struct drm_crtc *crtc; 1416 struct drm_plane *plane; 1417 struct drm_display_mode *mode; 1418 struct drm_framebuffer *fb; 1419 struct drm_plane_state *state; 1420 struct dpu_crtc_state *cstate; 1421 1422 int i, out_width; 1423 1424 dpu_crtc = s->private; 1425 crtc = &dpu_crtc->base; 1426 1427 drm_modeset_lock_all(crtc->dev); 1428 cstate = to_dpu_crtc_state(crtc->state); 1429 1430 mode = &crtc->state->adjusted_mode; 1431 out_width = mode->hdisplay / cstate->num_mixers; 1432 1433 seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id, 1434 mode->hdisplay, mode->vdisplay); 1435 1436 seq_puts(s, "\n"); 1437 1438 for (i = 0; i < cstate->num_mixers; ++i) { 1439 m = &cstate->mixers[i]; 1440 seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n", 1441 m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0, 1442 out_width, mode->vdisplay); 1443 } 1444 1445 seq_puts(s, "\n"); 1446 1447 drm_atomic_crtc_for_each_plane(plane, crtc) { 1448 pstate = to_dpu_plane_state(plane->state); 1449 state = plane->state; 1450 1451 if (!pstate || !state) 1452 continue; 1453 1454 seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id, 1455 pstate->stage); 1456 1457 if (plane->state->fb) { 1458 fb = plane->state->fb; 1459 1460 seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ", 1461 fb->base.id, (char *) &fb->format->format, 1462 fb->width, fb->height); 1463 for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i) 1464 seq_printf(s, "cpp[%d]:%u ", 1465 i, fb->format->cpp[i]); 1466 seq_puts(s, "\n\t"); 1467 1468 seq_printf(s, "modifier:%8llu ", fb->modifier); 1469 seq_puts(s, "\n"); 1470 1471 seq_puts(s, "\t"); 1472 for (i = 0; i < ARRAY_SIZE(fb->pitches); i++) 1473 seq_printf(s, "pitches[%d]:%8u ", i, 1474 fb->pitches[i]); 1475 seq_puts(s, "\n"); 1476 1477 seq_puts(s, "\t"); 1478 for (i = 0; i < ARRAY_SIZE(fb->offsets); i++) 1479 seq_printf(s, "offsets[%d]:%8u ", i, 1480 fb->offsets[i]); 1481 seq_puts(s, "\n"); 1482 } 1483 1484 seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n", 1485 state->src_x, state->src_y, state->src_w, state->src_h); 1486 1487 seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n", 1488 state->crtc_x, state->crtc_y, state->crtc_w, 1489 state->crtc_h); 1490 seq_printf(s, "\tmultirect: mode: %d index: %d\n", 1491 pstate->multirect_mode, pstate->multirect_index); 1492 1493 seq_puts(s, "\n"); 1494 } 1495 if (dpu_crtc->vblank_cb_count) { 1496 ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time); 1497 s64 diff_ms = ktime_to_ms(diff); 1498 s64 fps = diff_ms ? div_s64( 1499 dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0; 1500 1501 seq_printf(s, 1502 "vblank fps:%lld count:%u total:%llums total_framecount:%llu\n", 1503 fps, dpu_crtc->vblank_cb_count, 1504 ktime_to_ms(diff), dpu_crtc->play_count); 1505 1506 /* reset time & count for next measurement */ 1507 dpu_crtc->vblank_cb_count = 0; 1508 dpu_crtc->vblank_cb_time = ktime_set(0, 0); 1509 } 1510 1511 drm_modeset_unlock_all(crtc->dev); 1512 1513 return 0; 1514 } 1515 1516 DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status); 1517 1518 static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v) 1519 { 1520 struct drm_crtc *crtc = (struct drm_crtc *) s->private; 1521 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1522 1523 seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc)); 1524 seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc)); 1525 seq_printf(s, "core_clk_rate: %llu\n", 1526 dpu_crtc->cur_perf.core_clk_rate); 1527 seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl); 1528 seq_printf(s, "max_per_pipe_ib: %llu\n", 1529 dpu_crtc->cur_perf.max_per_pipe_ib); 1530 1531 return 0; 1532 } 1533 DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state); 1534 1535 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc) 1536 { 1537 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1538 struct dentry *debugfs_root; 1539 1540 debugfs_root = debugfs_create_dir(dpu_crtc->name, 1541 crtc->dev->primary->debugfs_root); 1542 1543 debugfs_create_file("status", 0400, 1544 debugfs_root, 1545 dpu_crtc, &_dpu_debugfs_status_fops); 1546 debugfs_create_file("state", 0600, 1547 debugfs_root, 1548 &dpu_crtc->base, 1549 &dpu_crtc_debugfs_state_fops); 1550 1551 return 0; 1552 } 1553 #else 1554 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc) 1555 { 1556 return 0; 1557 } 1558 #endif /* CONFIG_DEBUG_FS */ 1559 1560 static int dpu_crtc_late_register(struct drm_crtc *crtc) 1561 { 1562 return _dpu_crtc_init_debugfs(crtc); 1563 } 1564 1565 static const struct drm_crtc_funcs dpu_crtc_funcs = { 1566 .set_config = drm_atomic_helper_set_config, 1567 .destroy = dpu_crtc_destroy, 1568 .page_flip = drm_atomic_helper_page_flip, 1569 .reset = dpu_crtc_reset, 1570 .atomic_duplicate_state = dpu_crtc_duplicate_state, 1571 .atomic_destroy_state = dpu_crtc_destroy_state, 1572 .atomic_print_state = dpu_crtc_atomic_print_state, 1573 .late_register = dpu_crtc_late_register, 1574 .verify_crc_source = dpu_crtc_verify_crc_source, 1575 .set_crc_source = dpu_crtc_set_crc_source, 1576 .enable_vblank = msm_crtc_enable_vblank, 1577 .disable_vblank = msm_crtc_disable_vblank, 1578 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, 1579 .get_vblank_counter = dpu_crtc_get_vblank_counter, 1580 }; 1581 1582 static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = { 1583 .atomic_disable = dpu_crtc_disable, 1584 .atomic_enable = dpu_crtc_enable, 1585 .atomic_check = dpu_crtc_atomic_check, 1586 .atomic_begin = dpu_crtc_atomic_begin, 1587 .atomic_flush = dpu_crtc_atomic_flush, 1588 .get_scanout_position = dpu_crtc_get_scanout_position, 1589 }; 1590 1591 /* initialize crtc */ 1592 struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, 1593 struct drm_plane *cursor) 1594 { 1595 struct drm_crtc *crtc = NULL; 1596 struct dpu_crtc *dpu_crtc = NULL; 1597 int i; 1598 1599 dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL); 1600 if (!dpu_crtc) 1601 return ERR_PTR(-ENOMEM); 1602 1603 crtc = &dpu_crtc->base; 1604 crtc->dev = dev; 1605 1606 spin_lock_init(&dpu_crtc->spin_lock); 1607 atomic_set(&dpu_crtc->frame_pending, 0); 1608 1609 init_completion(&dpu_crtc->frame_done_comp); 1610 1611 INIT_LIST_HEAD(&dpu_crtc->frame_event_list); 1612 1613 for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) { 1614 INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list); 1615 list_add(&dpu_crtc->frame_events[i].list, 1616 &dpu_crtc->frame_event_list); 1617 kthread_init_work(&dpu_crtc->frame_events[i].work, 1618 dpu_crtc_frame_event_work); 1619 } 1620 1621 drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs, 1622 NULL); 1623 1624 drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs); 1625 1626 drm_crtc_enable_color_mgmt(crtc, 0, true, 0); 1627 1628 /* save user friendly CRTC name for later */ 1629 snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id); 1630 1631 /* initialize event handling */ 1632 spin_lock_init(&dpu_crtc->event_lock); 1633 1634 DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name); 1635 return crtc; 1636 } 1637