// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/bits.h>

#include <drm/drm_atomic.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
#include <drm/drm_self_refresh_helper.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60

/*
 * Convert a DRM CTM coefficient (S31.32 fixed point) to the 18-bit S3.15
 * format used by the PCC hardware: drop the sign bit, drop the 17
 * least-significant fractional bits and mask to 18 bits.
 */
#define CONVERT_S3_15(val) \
	(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))

static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;

	if (!crtc)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}

static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name)
{
	if (!src_name ||
	    !strcmp(src_name, "none"))
		return DPU_CRTC_CRC_SOURCE_NONE;
	if (!strcmp(src_name, "auto") ||
	    !strcmp(src_name, "lm"))
		return DPU_CRTC_CRC_SOURCE_LAYER_MIXER;
	if (!strcmp(src_name, "encoder"))
		return DPU_CRTC_CRC_SOURCE_ENCODER;

	return DPU_CRTC_CRC_SOURCE_INVALID;
}

static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc,
		const char *src_name, size_t *values_cnt)
{
	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
	struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);

	if (source < 0) {
		DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index);
		return -EINVAL;
	}

	if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) {
		*values_cnt = crtc_state->num_mixers;
	} else if (source == DPU_CRTC_CRC_SOURCE_ENCODER) {
		struct drm_encoder *drm_enc;

		*values_cnt = 0;

		drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
			*values_cnt += dpu_encoder_get_crc_values_cnt(drm_enc);
	}

	return 0;
}

static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state)
{
	struct dpu_crtc_mixer *m;
	int i;

	for (i = 0; i < crtc_state->num_mixers; ++i) {
		m = &crtc_state->mixers[i];

		if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
			continue;

		/* Calculate MISR over 1 frame */
		m->hw_lm->ops.setup_misr(m->hw_lm, true, 1);
	}
}
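/*
 * Debugfs usage sketch (assuming the generic DRM CRC ABI, which the
 * verify/set hooks in this file plug into): userspace picks a source and
 * then reads frame CRCs back, e.g.
 *
 *   echo "auto" > /sys/kernel/debug/dri/0/crtc-0/crc/control
 *   cat /sys/kernel/debug/dri/0/crtc-0/crc/data
 *
 * "auto"/"lm" selects DPU_CRTC_CRC_SOURCE_LAYER_MIXER and "encoder"
 * selects DPU_CRTC_CRC_SOURCE_ENCODER, per dpu_crtc_parse_crc_source().
 */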
static void dpu_crtc_setup_encoder_misr(struct drm_crtc *crtc)
{
	struct drm_encoder *drm_enc;

	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_setup_misr(drm_enc);
}

static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
	enum dpu_crtc_crc_source current_source;
	struct dpu_crtc_state *crtc_state;
	struct drm_device *drm_dev = crtc->dev;

	bool was_enabled;
	bool enable = false;
	int ret = 0;

	if (source < 0) {
		DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index);
		return -EINVAL;
	}

	ret = drm_modeset_lock(&crtc->mutex, NULL);
	if (ret)
		return ret;

	enable = (source != DPU_CRTC_CRC_SOURCE_NONE);
	crtc_state = to_dpu_crtc_state(crtc->state);

	spin_lock_irq(&drm_dev->event_lock);
	current_source = crtc_state->crc_source;
	spin_unlock_irq(&drm_dev->event_lock);

	was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE);

	if (!was_enabled && enable) {
		ret = drm_crtc_vblank_get(crtc);
		if (ret)
			goto cleanup;
	} else if (was_enabled && !enable) {
		drm_crtc_vblank_put(crtc);
	}

	spin_lock_irq(&drm_dev->event_lock);
	crtc_state->crc_source = source;
	spin_unlock_irq(&drm_dev->event_lock);

	crtc_state->crc_frame_skip_count = 0;

	if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
		dpu_crtc_setup_lm_misr(crtc_state);
	else if (source == DPU_CRTC_CRC_SOURCE_ENCODER)
		dpu_crtc_setup_encoder_misr(crtc);
	else
		ret = -EINVAL;

cleanup:
	drm_modeset_unlock(&crtc->mutex);

	return ret;
}

static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder = get_encoder_from_crtc(crtc);

	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
		return 0;
	}

	return dpu_encoder_get_vsync_count(encoder);
}

static int dpu_crtc_get_lm_crc(struct drm_crtc *crtc,
		struct dpu_crtc_state *crtc_state)
{
	struct dpu_crtc_mixer *m;
	u32 crcs[CRTC_DUAL_MIXERS];

	int rc = 0;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers));

	for (i = 0; i < crtc_state->num_mixers; ++i) {
		m = &crtc_state->mixers[i];

		if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
			continue;

		rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]);
		if (rc) {
			if (rc != -ENODATA)
				DRM_DEBUG_DRIVER("MISR read failed\n");
			return rc;
		}
	}

	return drm_crtc_add_crc_entry(crtc, true,
			drm_crtc_accurate_vblank_count(crtc), crcs);
}

static int dpu_crtc_get_encoder_crc(struct drm_crtc *crtc)
{
	struct drm_encoder *drm_enc;
	int rc, pos = 0;
	u32 crcs[INTF_MAX];

	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) {
		rc = dpu_encoder_get_crc(drm_enc, crcs, pos);
		if (rc < 0) {
			if (rc != -ENODATA)
				DRM_DEBUG_DRIVER("MISR read failed\n");

			return rc;
		}

		pos += rc;
	}

	return drm_crtc_add_crc_entry(crtc, true,
			drm_crtc_accurate_vblank_count(crtc), crcs);
}
static int dpu_crtc_get_crc(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);

	/* Skip first 2 frames in case of "uncooked" CRCs */
	if (crtc_state->crc_frame_skip_count < 2) {
		crtc_state->crc_frame_skip_count++;
		return 0;
	}

	if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
		return dpu_crtc_get_lm_crc(crtc, crtc_state);
	else if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_ENCODER)
		return dpu_crtc_get_encoder_crc(crtc);

	return -EINVAL;
}

static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
					  bool in_vblank_irq,
					  int *vpos, int *hpos,
					  ktime_t *stime, ktime_t *etime,
					  const struct drm_display_mode *mode)
{
	unsigned int pipe = crtc->index;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return false;
	}

	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
	 * the end of VFP. Translate the porch values relative to the line
	 * counter positions.
	 */

	vactive_start = vsw + vbp + 1;
	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;

	if (stime)
		*stime = ktime_get();

	line = dpu_encoder_get_linecount(encoder);

	if (line < vactive_start)
		line -= vactive_start;
	else if (line > vactive_end)
		line = line - vfp_end - vactive_start;
	else
		line -= vactive_start;

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}

static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	uint32_t fg_alpha, bg_alpha;

	fg_alpha = pstate->base.alpha >> 8;
	bg_alpha = 0xff - fg_alpha;

	/* default to opaque blending */
	if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
	    !format->alpha_enable) {
		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_BG_CONST;
	} else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_FG_PIXEL;
		if (fg_alpha != 0xff) {
			bg_alpha = fg_alpha;
			blend_op |= DPU_BLEND_BG_MOD_ALPHA |
				DPU_BLEND_BG_INV_MOD_ALPHA;
		} else {
			blend_op |= DPU_BLEND_BG_INV_ALPHA;
		}
	} else {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL;
		if (fg_alpha != 0xff) {
			bg_alpha = fg_alpha;
			blend_op |= DPU_BLEND_FG_MOD_ALPHA |
				DPU_BLEND_FG_INV_MOD_ALPHA |
				DPU_BLEND_BG_MOD_ALPHA |
				DPU_BLEND_BG_INV_MOD_ALPHA;
		} else {
			blend_op |= DPU_BLEND_BG_INV_ALPHA;
		}
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				fg_alpha, bg_alpha, blend_op);

	DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
			 &format->base.pixel_format, format->alpha_enable, blend_op);
}
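/*
 * A rough sketch of the blending math the flag combinations above are
 * meant to select, assuming the usual Porter-Duff definitions (fg.a is
 * the per-pixel plane alpha, Ac the constant plane alpha derived from
 * pstate->base.alpha; exact hardware behaviour may differ):
 *
 *   opaque:        out = fg
 *   premultiplied: out = fg + (1 - fg.a) * bg
 *   coverage:      out = fg.a * fg + (1 - fg.a) * bg
 *
 * with the MOD_ALPHA variants additionally scaling by Ac when Ac != 0xff.
 */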
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}
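/*
 * Illustration: for a 3840x2160 adjusted mode split across two layer
 * mixers (see _dpu_crtc_setup_lm_bounds() below), each LM is programmed
 * with out_width = 1920 and out_height = 2160, with right_mixer = 0 for
 * the left half and 1 for the right half.
 */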
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer,
	struct dpu_hw_stage_cfg *stage_cfg)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;

	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;
	DECLARE_BITMAP(fetch_active, SSPP_MAX);

	memset(fetch_active, 0, sizeof(fetch_active));
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum dpu_sspp sspp_idx;

		state = plane->state;
		if (!state)
			continue;

		if (!state->visible)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		sspp_idx = dpu_plane_pipe(plane);
		set_bit(sspp_idx, fetch_active);

		DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				sspp_idx - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					sspp_idx;
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   sspp_idx - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl,
									    sspp_idx);

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	if (ctl->ops.set_active_pipes)
		ctl->ops.set_active_pipes(ctl, fetch_active);

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	struct dpu_hw_stage_cfg stage_cfg;
	int i;

	DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		mixer[i].mixer_op_mode = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		/* stage config flush mask */
		ctl->ops.update_pending_flush_mixer(ctl,
			mixer[i].hw_lm->idx);

		DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events requested through
 * DRM_IOCTL_MODE_PAGE_FLIP and cached in dpu_crtc->event.
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how the atomic/modeset locks work (<- and this is
	 * probably not possible)). So we'll keep the WARN_ON here for now, but
	 * really we need to figure out a better way to track our operating mode
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;

	dpu_crtc_get_crc(crtc);

	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. The CRTC module
 * registers this API with the encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. The encoder may invoke these
 * callbacks from different contexts - IRQ, user thread, commit_thread, etc.
 * Each event should be carefully reviewed and should be processed in the
 * proper task context to avoid scheduling delay or to properly manage the
 * IRQ context's bottom-half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
	dpu_core_perf_crtc_update(crtc, 0, false);
	_dpu_crtc_complete_flip(crtc);
}

static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}
}

static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
		struct dpu_hw_pcc_cfg *cfg)
{
	struct drm_color_ctm *ctm;

	memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));

	ctm = (struct drm_color_ctm *)state->ctm->data;

	if (!ctm)
		return;

	cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
	cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
	cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);

	cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
	cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
	cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);

	cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
	cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
	cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
}
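/*
 * Worked example for CONVERT_S3_15(): DRM CTM coefficients are S31.32
 * fixed point, so an identity matrix entry of 1.0 arrives as 1ULL << 32.
 * Dropping the sign bit, shifting right by 17 (32 -> 15 fractional bits)
 * and masking to 18 bits yields 0x8000, i.e. 1.0 in the PCC block's
 * S3.15 format.
 */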
static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state = crtc->state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_pcc_cfg cfg;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_dspp *dspp;
	int i;

	if (!state->color_mgmt_changed && !drm_atomic_crtc_needs_modeset(state))
		return;

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		dspp = mixer[i].hw_dspp;

		if (!dspp || !dspp->ops.setup_pcc)
			continue;

		if (!state->ctm) {
			dspp->ops.setup_pcc(dspp, NULL);
		} else {
			_dpu_crtc_get_pcc_coeff(state, &cfg);
			dspp->ops.setup_pcc(dspp, &cfg);
		}

		/* stage config flush mask */
		ctl->ops.update_pending_flush_dspp(ctl,
			mixer[i].hw_dspp->idx);
	}
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	_dpu_crtc_setup_cp_blocks(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}
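/*
 * Per-frame flow, as implemented in this file: dpu_crtc_atomic_begin()
 * programs the mixers and color blocks, dpu_crtc_atomic_flush() below
 * flushes planes and updates bandwidth, and dpu_crtc_commit_kickoff()
 * (invoked by the outer commit layer, see the "Kickoff will be scheduled
 * by outer layer" note below) triggers the encoders to flush/start the
 * frame.
 */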
static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	WARN_ON(dpu_crtc->event);
	spin_lock_irqsave(&dev->event_lock, flags);
	dpu_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 * required writes/flushing before crtc's "flush
	 * everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DRM_DEBUG_ATOMIC("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}
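/*
 * Note: DPU_CRTC_FRAME_DONE_TIMEOUT_MS (60 ms) is roughly 3-4 vsync
 * periods at 60 Hz (~16.7 ms per frame), so a timeout here indicates
 * several missed frame-done interrupts rather than one slow frame.
 */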
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask) {
		if (!dpu_encoder_is_valid_for_commit(encoder)) {
			DRM_DEBUG_ATOMIC("invalid FB not kicking off crtc\n");
			goto end;
		}
	}
	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder);

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
	} else {
		DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);
	}

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder);

	reinit_completion(&dpu_crtc->frame_done_comp);

end:
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	if (cstate)
		__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);

	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_atomic_print_state(struct drm_printer *p,
					const struct drm_crtc_state *state)
{
	const struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		drm_printf(p, "\tlm[%d]=%d\n", i, cstate->mixers[i].hw_lm->idx - LM_0);
		drm_printf(p, "\tctl[%d]=%d\n", i, cstate->mixers[i].lm_ctl->idx - CTL_0);
		if (cstate->mixers[i].hw_dspp)
			drm_printf(p, "\tdspp[%d]=%d\n", i, cstate->mixers[i].hw_dspp->idx - DSPP_0);
	}
}
static void dpu_crtc_disable(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;
	unsigned long flags;
	bool release_bandwidth = false;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/*
	 * If disable is triggered while in self refresh mode,
	 * reset the encoder software state so that in enable
	 * it won't trigger a warn while assigning crtc.
	 */
	if (old_crtc_state->self_refresh_active) {
		drm_for_each_encoder_mask(encoder, crtc->dev,
					  old_crtc_state->encoder_mask) {
			dpu_encoder_assign_crtc(encoder, NULL);
		}
		return;
	}

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/*
		 * in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;

		/*
		 * If disable is triggered while PSR is active (e.g. screen
		 * dim in PSR), we need the encoder->crtc connection to
		 * process the device sleep, so preserve it during the PSR
		 * sequence.
		 */
		if (!crtc->state->self_refresh_active)
			dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}
static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *encoder;
	bool request_bandwidth = false;
	struct drm_crtc_state *old_crtc_state;

	old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/*
		 * in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			request_bandwidth = true;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	if (request_bandwidth)
		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	if (!old_crtc_state->self_refresh_active) {
		drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
			dpu_encoder_assign_crtc(encoder, crtc);
	}

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate)
{
	struct drm_crtc *crtc = cstate->crtc;
	struct drm_encoder *encoder;

	drm_for_each_encoder_mask(encoder, crtc->dev, cstate->encoder_mask) {
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_CMD)
			return true;
	}

	return false;
}
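/*
 * Command-mode interfaces only push a frame to the panel on an explicit
 * kickoff, so frontbuffer rendering relies on DRM_IOCTL_MODE_DIRTYFB to
 * trigger a flush; that is why planes on a command-mode CRTC are marked
 * as needing dirtyfb handling in dpu_crtc_atomic_check() below.
 */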
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);
	struct plane_state *pstates;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };
	bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);

	pstates = kcalloc(DPU_STAGE_MAX * 4, sizeof(*pstates), GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	if (!crtc_state->enable || !crtc_state->active) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, crtc_state->enable,
				crtc_state->active);
		memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
		goto end;
	}

	mode = &crtc_state->adjusted_mode;
	DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (crtc_state->active_changed)
		crtc_state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	if (cstate->num_mixers) {
		mixer_width = mode->hdisplay / cstate->num_mixers;

		_dpu_crtc_setup_lm_bounds(crtc, crtc_state);
	}

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct dpu_plane_state *dpu_pstate = to_dpu_plane_state(pstate);
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		if (!pstate->visible)
			continue;

		pstates[cnt].dpu_pstate = dpu_pstate;
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		dpu_pstate->needs_dirtyfb = needs_dirtyfb;

		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i])
			dpu_plane_clear_multirect(pipe_staged[i]);
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
				  DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DRM_DEBUG_ATOMIC("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, crtc_state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}
	/*
	 * validate source split:
	 * use pstates sorted by stage to check planes on same stage
	 * we assume that all pipes are in source split so its valid to compare
	 * without taking into account left/right mixer placement
	 */
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}
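/*
 * Source-split example for the validation loop above: two 960x1080
 * planes sharing stage 2 on a 1920x1080 CRTC must be laid out as
 * left = (0,0)-(960,1080) and right = (960,0)-(1920,1080): contiguous
 * in x, identical y offset and height, and the lower plane id on the
 * left.
 */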
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}
#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
			mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
			m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
			out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
					i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
					fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
					fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst_x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
	seq_printf(s, "max_per_pipe_ib: %llu\n",
			dpu_crtc->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_create_file("status", 0400,
			crtc->debugfs_entry,
			dpu_crtc, &_dpu_debugfs_status_fops);
	debugfs_create_file("state", 0600,
			crtc->debugfs_entry,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.atomic_print_state = dpu_crtc_atomic_print_state,
	.late_register = dpu_crtc_late_register,
	.verify_crc_source = dpu_crtc_verify_crc_source,
	.set_crc_source = dpu_crtc_set_crc_source,
	.enable_vblank = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
	.get_vblank_counter = dpu_crtc_get_vblank_counter,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
	.get_scanout_position = dpu_crtc_get_scanout_position,
};
/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
			       struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	int i, ret;

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	drm_crtc_enable_color_mgmt(crtc, 0, true, 0);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	ret = drm_self_refresh_helper_init(crtc);
	if (ret) {
		DPU_ERROR("Failed to initialize %s with self-refresh helpers %d\n",
			  crtc->name, ret);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}
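/*
 * Usage sketch for dpu_crtc_init(), modelled on how the DPU KMS init
 * code is expected to call it (the surrounding variables here are an
 * assumption for illustration, not part of this file):
 *
 *   crtc = dpu_crtc_init(dev, primary_plane, cursor_plane);
 *   if (IS_ERR(crtc))
 *           return PTR_ERR(crtc);
 *   priv->crtcs[priv->num_crtcs++] = crtc;
 */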