1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. 4 * Copyright (C) 2013 Red Hat 5 * Author: Rob Clark <robdclark@gmail.com> 6 */ 7 8 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ 9 #include <linux/sort.h> 10 #include <linux/debugfs.h> 11 #include <linux/ktime.h> 12 #include <linux/bits.h> 13 14 #include <drm/drm_crtc.h> 15 #include <drm/drm_flip_work.h> 16 #include <drm/drm_mode.h> 17 #include <drm/drm_probe_helper.h> 18 #include <drm/drm_rect.h> 19 #include <drm/drm_vblank.h> 20 21 #include "dpu_kms.h" 22 #include "dpu_hw_lm.h" 23 #include "dpu_hw_ctl.h" 24 #include "dpu_hw_dspp.h" 25 #include "dpu_crtc.h" 26 #include "dpu_plane.h" 27 #include "dpu_encoder.h" 28 #include "dpu_vbif.h" 29 #include "dpu_core_perf.h" 30 #include "dpu_trace.h" 31 32 #define DPU_DRM_BLEND_OP_NOT_DEFINED 0 33 #define DPU_DRM_BLEND_OP_OPAQUE 1 34 #define DPU_DRM_BLEND_OP_PREMULTIPLIED 2 35 #define DPU_DRM_BLEND_OP_COVERAGE 3 36 #define DPU_DRM_BLEND_OP_MAX 4 37 38 /* layer mixer index on dpu_crtc */ 39 #define LEFT_MIXER 0 40 #define RIGHT_MIXER 1 41 42 /* timeout in ms waiting for frame done */ 43 #define DPU_CRTC_FRAME_DONE_TIMEOUT_MS 60 44 45 #define CONVERT_S3_15(val) \ 46 (((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0)) 47 48 static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc) 49 { 50 struct msm_drm_private *priv = crtc->dev->dev_private; 51 52 return to_dpu_kms(priv->kms); 53 } 54 55 static void dpu_crtc_destroy(struct drm_crtc *crtc) 56 { 57 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 58 59 DPU_DEBUG("\n"); 60 61 if (!crtc) 62 return; 63 64 drm_crtc_cleanup(crtc); 65 kfree(dpu_crtc); 66 } 67 68 static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer, 69 struct dpu_plane_state *pstate, struct dpu_format *format) 70 { 71 struct dpu_hw_mixer *lm = mixer->hw_lm; 72 uint32_t blend_op; 73 struct drm_format_name_buf format_name; 74 75 /* default to opaque blending */ 76 blend_op = DPU_BLEND_FG_ALPHA_FG_CONST | 77 DPU_BLEND_BG_ALPHA_BG_CONST; 78 79 if (format->alpha_enable) { 80 /* coverage blending */ 81 blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL | 82 DPU_BLEND_BG_ALPHA_FG_PIXEL | 83 DPU_BLEND_BG_INV_ALPHA; 84 } 85 86 lm->ops.setup_blend_config(lm, pstate->stage, 87 0xFF, 0, blend_op); 88 89 DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n", 90 drm_get_format_name(format->base.pixel_format, &format_name), 91 format->alpha_enable, blend_op); 92 } 93 94 static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc) 95 { 96 struct dpu_crtc_state *crtc_state; 97 int lm_idx, lm_horiz_position; 98 99 crtc_state = to_dpu_crtc_state(crtc->state); 100 101 lm_horiz_position = 0; 102 for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) { 103 const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx]; 104 struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm; 105 struct dpu_hw_mixer_cfg cfg; 106 107 if (!lm_roi || !drm_rect_visible(lm_roi)) 108 continue; 109 110 cfg.out_width = drm_rect_width(lm_roi); 111 cfg.out_height = drm_rect_height(lm_roi); 112 cfg.right_mixer = lm_horiz_position++; 113 cfg.flags = 0; 114 hw_lm->ops.setup_mixer_out(hw_lm, &cfg); 115 } 116 } 117 118 static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, 119 struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer) 120 { 121 struct drm_plane *plane; 122 struct drm_framebuffer *fb; 123 struct drm_plane_state *state; 124 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); 125 struct dpu_plane_state *pstate = NULL; 126 struct dpu_format *format; 127 struct dpu_hw_ctl *ctl = mixer->lm_ctl; 128 struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg; 129 130 u32 flush_mask; 131 uint32_t stage_idx, lm_idx; 132 int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 }; 133 bool bg_alpha_enable = false; 134 135 drm_atomic_crtc_for_each_plane(plane, crtc) { 136 state = plane->state; 137 if (!state) 138 continue; 139 140 pstate = to_dpu_plane_state(state); 141 fb = state->fb; 142 143 dpu_plane_get_ctl_flush(plane, ctl, &flush_mask); 144 145 DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n", 146 crtc->base.id, 147 pstate->stage, 148 plane->base.id, 149 dpu_plane_pipe(plane) - SSPP_VIG0, 150 state->fb ? state->fb->base.id : -1); 151 152 format = to_dpu_format(msm_framebuffer_format(pstate->base.fb)); 153 154 if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable) 155 bg_alpha_enable = true; 156 157 stage_idx = zpos_cnt[pstate->stage]++; 158 stage_cfg->stage[pstate->stage][stage_idx] = 159 dpu_plane_pipe(plane); 160 stage_cfg->multirect_index[pstate->stage][stage_idx] = 161 pstate->multirect_index; 162 163 trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane), 164 state, pstate, stage_idx, 165 dpu_plane_pipe(plane) - SSPP_VIG0, 166 format->base.pixel_format, 167 fb ? fb->modifier : 0); 168 169 /* blend config update */ 170 for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) { 171 _dpu_crtc_setup_blend_cfg(mixer + lm_idx, 172 pstate, format); 173 174 mixer[lm_idx].flush_mask |= flush_mask; 175 176 if (bg_alpha_enable && !format->alpha_enable) 177 mixer[lm_idx].mixer_op_mode = 0; 178 else 179 mixer[lm_idx].mixer_op_mode |= 180 1 << pstate->stage; 181 } 182 } 183 184 _dpu_crtc_program_lm_output_roi(crtc); 185 } 186 187 /** 188 * _dpu_crtc_blend_setup - configure crtc mixers 189 * @crtc: Pointer to drm crtc structure 190 */ 191 static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) 192 { 193 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 194 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); 195 struct dpu_crtc_mixer *mixer = cstate->mixers; 196 struct dpu_hw_ctl *ctl; 197 struct dpu_hw_mixer *lm; 198 int i; 199 200 DPU_DEBUG("%s\n", dpu_crtc->name); 201 202 for (i = 0; i < cstate->num_mixers; i++) { 203 mixer[i].mixer_op_mode = 0; 204 mixer[i].flush_mask = 0; 205 if (mixer[i].lm_ctl->ops.clear_all_blendstages) 206 mixer[i].lm_ctl->ops.clear_all_blendstages( 207 mixer[i].lm_ctl); 208 } 209 210 /* initialize stage cfg */ 211 memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg)); 212 213 _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer); 214 215 for (i = 0; i < cstate->num_mixers; i++) { 216 ctl = mixer[i].lm_ctl; 217 lm = mixer[i].hw_lm; 218 219 lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode); 220 221 mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl, 222 mixer[i].hw_lm->idx); 223 224 /* stage config flush mask */ 225 ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask); 226 227 DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n", 228 mixer[i].hw_lm->idx - LM_0, 229 mixer[i].mixer_op_mode, 230 ctl->idx - CTL_0, 231 mixer[i].flush_mask); 232 233 ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx, 234 &dpu_crtc->stage_cfg); 235 } 236 } 237 238 /** 239 * _dpu_crtc_complete_flip - signal pending page_flip events 240 * Any pending vblank events are added to the vblank_event_list 241 * so that the next vblank interrupt shall signal them. 242 * However PAGE_FLIP events are not handled through the vblank_event_list. 243 * This API signals any pending PAGE_FLIP events requested through 244 * DRM_IOCTL_MODE_PAGE_FLIP and are cached in the dpu_crtc->event. 245 * @crtc: Pointer to drm crtc structure 246 */ 247 static void _dpu_crtc_complete_flip(struct drm_crtc *crtc) 248 { 249 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 250 struct drm_device *dev = crtc->dev; 251 unsigned long flags; 252 253 spin_lock_irqsave(&dev->event_lock, flags); 254 if (dpu_crtc->event) { 255 DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name, 256 dpu_crtc->event); 257 trace_dpu_crtc_complete_flip(DRMID(crtc)); 258 drm_crtc_send_vblank_event(crtc, dpu_crtc->event); 259 dpu_crtc->event = NULL; 260 } 261 spin_unlock_irqrestore(&dev->event_lock, flags); 262 } 263 264 enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc) 265 { 266 struct drm_encoder *encoder; 267 268 /* 269 * TODO: This function is called from dpu debugfs and as part of atomic 270 * check. When called from debugfs, the crtc->mutex must be held to 271 * read crtc->state. However reading crtc->state from atomic check isn't 272 * allowed (unless you have a good reason, a big comment, and a deep 273 * understanding of how the atomic/modeset locks work (<- and this is 274 * probably not possible)). So we'll keep the WARN_ON here for now, but 275 * really we need to figure out a better way to track our operating mode 276 */ 277 WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 278 279 /* TODO: Returns the first INTF_MODE, could there be multiple values? */ 280 drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) 281 return dpu_encoder_get_intf_mode(encoder); 282 283 return INTF_MODE_NONE; 284 } 285 286 void dpu_crtc_vblank_callback(struct drm_crtc *crtc) 287 { 288 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 289 290 /* keep statistics on vblank callback - with auto reset via debugfs */ 291 if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0) 292 dpu_crtc->vblank_cb_time = ktime_get(); 293 else 294 dpu_crtc->vblank_cb_count++; 295 drm_crtc_handle_vblank(crtc); 296 trace_dpu_crtc_vblank_cb(DRMID(crtc)); 297 } 298 299 static void dpu_crtc_frame_event_work(struct kthread_work *work) 300 { 301 struct dpu_crtc_frame_event *fevent = container_of(work, 302 struct dpu_crtc_frame_event, work); 303 struct drm_crtc *crtc = fevent->crtc; 304 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 305 unsigned long flags; 306 bool frame_done = false; 307 308 DPU_ATRACE_BEGIN("crtc_frame_event"); 309 310 DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event, 311 ktime_to_ns(fevent->ts)); 312 313 if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE 314 | DPU_ENCODER_FRAME_EVENT_ERROR 315 | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) { 316 317 if (atomic_read(&dpu_crtc->frame_pending) < 1) { 318 /* ignore vblank when not pending */ 319 } else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) { 320 /* release bandwidth and other resources */ 321 trace_dpu_crtc_frame_event_done(DRMID(crtc), 322 fevent->event); 323 dpu_core_perf_crtc_release_bw(crtc); 324 } else { 325 trace_dpu_crtc_frame_event_more_pending(DRMID(crtc), 326 fevent->event); 327 } 328 329 if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE) 330 dpu_core_perf_crtc_update(crtc, 0, false); 331 332 if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE 333 | DPU_ENCODER_FRAME_EVENT_ERROR)) 334 frame_done = true; 335 } 336 337 if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD) 338 DPU_ERROR("crtc%d ts:%lld received panel dead event\n", 339 crtc->base.id, ktime_to_ns(fevent->ts)); 340 341 if (frame_done) 342 complete_all(&dpu_crtc->frame_done_comp); 343 344 spin_lock_irqsave(&dpu_crtc->spin_lock, flags); 345 list_add_tail(&fevent->list, &dpu_crtc->frame_event_list); 346 spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags); 347 DPU_ATRACE_END("crtc_frame_event"); 348 } 349 350 /* 351 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module 352 * registers this API to encoder for all frame event callbacks like 353 * frame_error, frame_done, idle_timeout, etc. Encoder may call different events 354 * from different context - IRQ, user thread, commit_thread, etc. Each event 355 * should be carefully reviewed and should be processed in proper task context 356 * to avoid schedulin delay or properly manage the irq context's bottom half 357 * processing. 358 */ 359 static void dpu_crtc_frame_event_cb(void *data, u32 event) 360 { 361 struct drm_crtc *crtc = (struct drm_crtc *)data; 362 struct dpu_crtc *dpu_crtc; 363 struct msm_drm_private *priv; 364 struct dpu_crtc_frame_event *fevent; 365 unsigned long flags; 366 u32 crtc_id; 367 368 /* Nothing to do on idle event */ 369 if (event & DPU_ENCODER_FRAME_EVENT_IDLE) 370 return; 371 372 dpu_crtc = to_dpu_crtc(crtc); 373 priv = crtc->dev->dev_private; 374 crtc_id = drm_crtc_index(crtc); 375 376 trace_dpu_crtc_frame_event_cb(DRMID(crtc), event); 377 378 spin_lock_irqsave(&dpu_crtc->spin_lock, flags); 379 fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list, 380 struct dpu_crtc_frame_event, list); 381 if (fevent) 382 list_del_init(&fevent->list); 383 spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags); 384 385 if (!fevent) { 386 DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event); 387 return; 388 } 389 390 fevent->event = event; 391 fevent->crtc = crtc; 392 fevent->ts = ktime_get(); 393 kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work); 394 } 395 396 void dpu_crtc_complete_commit(struct drm_crtc *crtc) 397 { 398 trace_dpu_crtc_complete_commit(DRMID(crtc)); 399 _dpu_crtc_complete_flip(crtc); 400 } 401 402 static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc, 403 struct drm_crtc_state *state) 404 { 405 struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); 406 struct drm_display_mode *adj_mode = &state->adjusted_mode; 407 u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers; 408 int i; 409 410 for (i = 0; i < cstate->num_mixers; i++) { 411 struct drm_rect *r = &cstate->lm_bounds[i]; 412 r->x1 = crtc_split_width * i; 413 r->y1 = 0; 414 r->x2 = r->x1 + crtc_split_width; 415 r->y2 = adj_mode->vdisplay; 416 417 trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r); 418 } 419 } 420 421 static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state, 422 struct dpu_hw_pcc_cfg *cfg) 423 { 424 struct drm_color_ctm *ctm; 425 426 memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg)); 427 428 ctm = (struct drm_color_ctm *)state->ctm->data; 429 430 if (!ctm) 431 return; 432 433 cfg->r.r = CONVERT_S3_15(ctm->matrix[0]); 434 cfg->g.r = CONVERT_S3_15(ctm->matrix[1]); 435 cfg->b.r = CONVERT_S3_15(ctm->matrix[2]); 436 437 cfg->r.g = CONVERT_S3_15(ctm->matrix[3]); 438 cfg->g.g = CONVERT_S3_15(ctm->matrix[4]); 439 cfg->b.g = CONVERT_S3_15(ctm->matrix[5]); 440 441 cfg->r.b = CONVERT_S3_15(ctm->matrix[6]); 442 cfg->g.b = CONVERT_S3_15(ctm->matrix[7]); 443 cfg->b.b = CONVERT_S3_15(ctm->matrix[8]); 444 } 445 446 static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc) 447 { 448 struct drm_crtc_state *state = crtc->state; 449 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); 450 struct dpu_crtc_mixer *mixer = cstate->mixers; 451 struct dpu_hw_pcc_cfg cfg; 452 struct dpu_hw_ctl *ctl; 453 struct dpu_hw_dspp *dspp; 454 int i; 455 456 457 if (!state->color_mgmt_changed) 458 return; 459 460 for (i = 0; i < cstate->num_mixers; i++) { 461 ctl = mixer[i].lm_ctl; 462 dspp = mixer[i].hw_dspp; 463 464 if (!dspp || !dspp->ops.setup_pcc) 465 continue; 466 467 if (!state->ctm) { 468 dspp->ops.setup_pcc(dspp, NULL); 469 } else { 470 _dpu_crtc_get_pcc_coeff(state, &cfg); 471 dspp->ops.setup_pcc(dspp, &cfg); 472 } 473 474 mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl, 475 mixer[i].hw_dspp->idx); 476 477 /* stage config flush mask */ 478 ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask); 479 480 DPU_DEBUG("lm %d, ctl %d, flush mask 0x%x\n", 481 mixer[i].hw_lm->idx - DSPP_0, 482 ctl->idx - CTL_0, 483 mixer[i].flush_mask); 484 } 485 } 486 487 static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, 488 struct drm_crtc_state *old_state) 489 { 490 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); 491 struct drm_encoder *encoder; 492 493 if (!crtc->state->enable) { 494 DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n", 495 crtc->base.id, crtc->state->enable); 496 return; 497 } 498 499 DPU_DEBUG("crtc%d\n", crtc->base.id); 500 501 _dpu_crtc_setup_lm_bounds(crtc, crtc->state); 502 503 /* encoder will trigger pending mask now */ 504 drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) 505 dpu_encoder_trigger_kickoff_pending(encoder); 506 507 /* 508 * If no mixers have been allocated in dpu_crtc_atomic_check(), 509 * it means we are trying to flush a CRTC whose state is disabled: 510 * nothing else needs to be done. 511 */ 512 if (unlikely(!cstate->num_mixers)) 513 return; 514 515 _dpu_crtc_blend_setup(crtc); 516 517 _dpu_crtc_setup_cp_blocks(crtc); 518 519 /* 520 * PP_DONE irq is only used by command mode for now. 521 * It is better to request pending before FLUSH and START trigger 522 * to make sure no pp_done irq missed. 523 * This is safe because no pp_done will happen before SW trigger 524 * in command mode. 525 */ 526 } 527 528 static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, 529 struct drm_crtc_state *old_crtc_state) 530 { 531 struct dpu_crtc *dpu_crtc; 532 struct drm_device *dev; 533 struct drm_plane *plane; 534 struct msm_drm_private *priv; 535 unsigned long flags; 536 struct dpu_crtc_state *cstate; 537 538 if (!crtc->state->enable) { 539 DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n", 540 crtc->base.id, crtc->state->enable); 541 return; 542 } 543 544 DPU_DEBUG("crtc%d\n", crtc->base.id); 545 546 dpu_crtc = to_dpu_crtc(crtc); 547 cstate = to_dpu_crtc_state(crtc->state); 548 dev = crtc->dev; 549 priv = dev->dev_private; 550 551 if (crtc->index >= ARRAY_SIZE(priv->event_thread)) { 552 DPU_ERROR("invalid crtc index[%d]\n", crtc->index); 553 return; 554 } 555 556 WARN_ON(dpu_crtc->event); 557 spin_lock_irqsave(&dev->event_lock, flags); 558 dpu_crtc->event = crtc->state->event; 559 crtc->state->event = NULL; 560 spin_unlock_irqrestore(&dev->event_lock, flags); 561 562 /* 563 * If no mixers has been allocated in dpu_crtc_atomic_check(), 564 * it means we are trying to flush a CRTC whose state is disabled: 565 * nothing else needs to be done. 566 */ 567 if (unlikely(!cstate->num_mixers)) 568 return; 569 570 /* 571 * For planes without commit update, drm framework will not add 572 * those planes to current state since hardware update is not 573 * required. However, if those planes were power collapsed since 574 * last commit cycle, driver has to restore the hardware state 575 * of those planes explicitly here prior to plane flush. 576 */ 577 drm_atomic_crtc_for_each_plane(plane, crtc) 578 dpu_plane_restore(plane); 579 580 /* update performance setting before crtc kickoff */ 581 dpu_core_perf_crtc_update(crtc, 1, false); 582 583 /* 584 * Final plane updates: Give each plane a chance to complete all 585 * required writes/flushing before crtc's "flush 586 * everything" call below. 587 */ 588 drm_atomic_crtc_for_each_plane(plane, crtc) { 589 if (dpu_crtc->smmu_state.transition_error) 590 dpu_plane_set_error(plane, true); 591 dpu_plane_flush(plane); 592 } 593 594 /* Kickoff will be scheduled by outer layer */ 595 } 596 597 /** 598 * dpu_crtc_destroy_state - state destroy hook 599 * @crtc: drm CRTC 600 * @state: CRTC state object to release 601 */ 602 static void dpu_crtc_destroy_state(struct drm_crtc *crtc, 603 struct drm_crtc_state *state) 604 { 605 struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); 606 607 DPU_DEBUG("crtc%d\n", crtc->base.id); 608 609 __drm_atomic_helper_crtc_destroy_state(state); 610 611 kfree(cstate); 612 } 613 614 static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc) 615 { 616 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 617 int ret, rc = 0; 618 619 if (!atomic_read(&dpu_crtc->frame_pending)) { 620 DPU_DEBUG("no frames pending\n"); 621 return 0; 622 } 623 624 DPU_ATRACE_BEGIN("frame done completion wait"); 625 ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp, 626 msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS)); 627 if (!ret) { 628 DRM_ERROR("frame done wait timed out, ret:%d\n", ret); 629 rc = -ETIMEDOUT; 630 } 631 DPU_ATRACE_END("frame done completion wait"); 632 633 return rc; 634 } 635 636 void dpu_crtc_commit_kickoff(struct drm_crtc *crtc) 637 { 638 struct drm_encoder *encoder; 639 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 640 struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); 641 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); 642 643 /* 644 * If no mixers has been allocated in dpu_crtc_atomic_check(), 645 * it means we are trying to start a CRTC whose state is disabled: 646 * nothing else needs to be done. 647 */ 648 if (unlikely(!cstate->num_mixers)) 649 return; 650 651 DPU_ATRACE_BEGIN("crtc_commit"); 652 653 /* 654 * Encoder will flush/start now, unless it has a tx pending. If so, it 655 * may delay and flush at an irq event (e.g. ppdone) 656 */ 657 drm_for_each_encoder_mask(encoder, crtc->dev, 658 crtc->state->encoder_mask) 659 dpu_encoder_prepare_for_kickoff(encoder); 660 661 if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) { 662 /* acquire bandwidth and other resources */ 663 DPU_DEBUG("crtc%d first commit\n", crtc->base.id); 664 } else 665 DPU_DEBUG("crtc%d commit\n", crtc->base.id); 666 667 dpu_crtc->play_count++; 668 669 dpu_vbif_clear_errors(dpu_kms); 670 671 drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) 672 dpu_encoder_kickoff(encoder); 673 674 reinit_completion(&dpu_crtc->frame_done_comp); 675 DPU_ATRACE_END("crtc_commit"); 676 } 677 678 static void dpu_crtc_reset(struct drm_crtc *crtc) 679 { 680 struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL); 681 682 if (crtc->state) 683 dpu_crtc_destroy_state(crtc, crtc->state); 684 685 __drm_atomic_helper_crtc_reset(crtc, &cstate->base); 686 } 687 688 /** 689 * dpu_crtc_duplicate_state - state duplicate hook 690 * @crtc: Pointer to drm crtc structure 691 */ 692 static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc) 693 { 694 struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state); 695 696 cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL); 697 if (!cstate) { 698 DPU_ERROR("failed to allocate state\n"); 699 return NULL; 700 } 701 702 /* duplicate base helper */ 703 __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base); 704 705 return &cstate->base; 706 } 707 708 static void dpu_crtc_disable(struct drm_crtc *crtc, 709 struct drm_crtc_state *old_crtc_state) 710 { 711 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 712 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); 713 struct drm_encoder *encoder; 714 unsigned long flags; 715 bool release_bandwidth = false; 716 717 DRM_DEBUG_KMS("crtc%d\n", crtc->base.id); 718 719 /* Disable/save vblank irq handling */ 720 drm_crtc_vblank_off(crtc); 721 722 drm_for_each_encoder_mask(encoder, crtc->dev, 723 old_crtc_state->encoder_mask) { 724 /* in video mode, we hold an extra bandwidth reference 725 * as we cannot drop bandwidth at frame-done if any 726 * crtc is being used in video mode. 727 */ 728 if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO) 729 release_bandwidth = true; 730 dpu_encoder_assign_crtc(encoder, NULL); 731 } 732 733 /* wait for frame_event_done completion */ 734 if (_dpu_crtc_wait_for_frame_done(crtc)) 735 DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n", 736 crtc->base.id, 737 atomic_read(&dpu_crtc->frame_pending)); 738 739 trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc); 740 dpu_crtc->enabled = false; 741 742 if (atomic_read(&dpu_crtc->frame_pending)) { 743 trace_dpu_crtc_disable_frame_pending(DRMID(crtc), 744 atomic_read(&dpu_crtc->frame_pending)); 745 if (release_bandwidth) 746 dpu_core_perf_crtc_release_bw(crtc); 747 atomic_set(&dpu_crtc->frame_pending, 0); 748 } 749 750 dpu_core_perf_crtc_update(crtc, 0, true); 751 752 drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) 753 dpu_encoder_register_frame_event_callback(encoder, NULL, NULL); 754 755 memset(cstate->mixers, 0, sizeof(cstate->mixers)); 756 cstate->num_mixers = 0; 757 758 /* disable clk & bw control until clk & bw properties are set */ 759 cstate->bw_control = false; 760 cstate->bw_split_vote = false; 761 762 if (crtc->state->event && !crtc->state->active) { 763 spin_lock_irqsave(&crtc->dev->event_lock, flags); 764 drm_crtc_send_vblank_event(crtc, crtc->state->event); 765 crtc->state->event = NULL; 766 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 767 } 768 769 pm_runtime_put_sync(crtc->dev->dev); 770 } 771 772 static void dpu_crtc_enable(struct drm_crtc *crtc, 773 struct drm_crtc_state *old_crtc_state) 774 { 775 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 776 struct drm_encoder *encoder; 777 bool request_bandwidth = false; 778 779 pm_runtime_get_sync(crtc->dev->dev); 780 781 DRM_DEBUG_KMS("crtc%d\n", crtc->base.id); 782 783 drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) { 784 /* in video mode, we hold an extra bandwidth reference 785 * as we cannot drop bandwidth at frame-done if any 786 * crtc is being used in video mode. 787 */ 788 if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO) 789 request_bandwidth = true; 790 dpu_encoder_register_frame_event_callback(encoder, 791 dpu_crtc_frame_event_cb, (void *)crtc); 792 } 793 794 if (request_bandwidth) 795 atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref); 796 797 trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc); 798 dpu_crtc->enabled = true; 799 800 drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) 801 dpu_encoder_assign_crtc(encoder, crtc); 802 803 /* Enable/restore vblank irq handling */ 804 drm_crtc_vblank_on(crtc); 805 } 806 807 struct plane_state { 808 struct dpu_plane_state *dpu_pstate; 809 const struct drm_plane_state *drm_pstate; 810 int stage; 811 u32 pipe_id; 812 }; 813 814 static int dpu_crtc_atomic_check(struct drm_crtc *crtc, 815 struct drm_crtc_state *state) 816 { 817 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 818 struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); 819 struct plane_state *pstates; 820 821 const struct drm_plane_state *pstate; 822 struct drm_plane *plane; 823 struct drm_display_mode *mode; 824 825 int cnt = 0, rc = 0, mixer_width = 0, i, z_pos; 826 827 struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2]; 828 int multirect_count = 0; 829 const struct drm_plane_state *pipe_staged[SSPP_MAX]; 830 int left_zpos_cnt = 0, right_zpos_cnt = 0; 831 struct drm_rect crtc_rect = { 0 }; 832 833 pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL); 834 835 if (!state->enable || !state->active) { 836 DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n", 837 crtc->base.id, state->enable, state->active); 838 goto end; 839 } 840 841 mode = &state->adjusted_mode; 842 DPU_DEBUG("%s: check", dpu_crtc->name); 843 844 /* force a full mode set if active state changed */ 845 if (state->active_changed) 846 state->mode_changed = true; 847 848 memset(pipe_staged, 0, sizeof(pipe_staged)); 849 850 if (cstate->num_mixers) { 851 mixer_width = mode->hdisplay / cstate->num_mixers; 852 853 _dpu_crtc_setup_lm_bounds(crtc, state); 854 } 855 856 crtc_rect.x2 = mode->hdisplay; 857 crtc_rect.y2 = mode->vdisplay; 858 859 /* get plane state for all drm planes associated with crtc state */ 860 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { 861 struct drm_rect dst, clip = crtc_rect; 862 863 if (IS_ERR_OR_NULL(pstate)) { 864 rc = PTR_ERR(pstate); 865 DPU_ERROR("%s: failed to get plane%d state, %d\n", 866 dpu_crtc->name, plane->base.id, rc); 867 goto end; 868 } 869 if (cnt >= DPU_STAGE_MAX * 4) 870 continue; 871 872 pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate); 873 pstates[cnt].drm_pstate = pstate; 874 pstates[cnt].stage = pstate->normalized_zpos; 875 pstates[cnt].pipe_id = dpu_plane_pipe(plane); 876 877 if (pipe_staged[pstates[cnt].pipe_id]) { 878 multirect_plane[multirect_count].r0 = 879 pipe_staged[pstates[cnt].pipe_id]; 880 multirect_plane[multirect_count].r1 = pstate; 881 multirect_count++; 882 883 pipe_staged[pstates[cnt].pipe_id] = NULL; 884 } else { 885 pipe_staged[pstates[cnt].pipe_id] = pstate; 886 } 887 888 cnt++; 889 890 dst = drm_plane_state_dest(pstate); 891 if (!drm_rect_intersect(&clip, &dst)) { 892 DPU_ERROR("invalid vertical/horizontal destination\n"); 893 DPU_ERROR("display: " DRM_RECT_FMT " plane: " 894 DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect), 895 DRM_RECT_ARG(&dst)); 896 rc = -E2BIG; 897 goto end; 898 } 899 } 900 901 for (i = 1; i < SSPP_MAX; i++) { 902 if (pipe_staged[i]) { 903 dpu_plane_clear_multirect(pipe_staged[i]); 904 905 if (is_dpu_plane_virtual(pipe_staged[i]->plane)) { 906 DPU_ERROR( 907 "r1 only virt plane:%d not supported\n", 908 pipe_staged[i]->plane->base.id); 909 rc = -EINVAL; 910 goto end; 911 } 912 } 913 } 914 915 z_pos = -1; 916 for (i = 0; i < cnt; i++) { 917 /* reset counts at every new blend stage */ 918 if (pstates[i].stage != z_pos) { 919 left_zpos_cnt = 0; 920 right_zpos_cnt = 0; 921 z_pos = pstates[i].stage; 922 } 923 924 /* verify z_pos setting before using it */ 925 if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) { 926 DPU_ERROR("> %d plane stages assigned\n", 927 DPU_STAGE_MAX - DPU_STAGE_0); 928 rc = -EINVAL; 929 goto end; 930 } else if (pstates[i].drm_pstate->crtc_x < mixer_width) { 931 if (left_zpos_cnt == 2) { 932 DPU_ERROR("> 2 planes @ stage %d on left\n", 933 z_pos); 934 rc = -EINVAL; 935 goto end; 936 } 937 left_zpos_cnt++; 938 939 } else { 940 if (right_zpos_cnt == 2) { 941 DPU_ERROR("> 2 planes @ stage %d on right\n", 942 z_pos); 943 rc = -EINVAL; 944 goto end; 945 } 946 right_zpos_cnt++; 947 } 948 949 pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0; 950 DPU_DEBUG("%s: zpos %d", dpu_crtc->name, z_pos); 951 } 952 953 for (i = 0; i < multirect_count; i++) { 954 if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) { 955 DPU_ERROR( 956 "multirect validation failed for planes (%d - %d)\n", 957 multirect_plane[i].r0->plane->base.id, 958 multirect_plane[i].r1->plane->base.id); 959 rc = -EINVAL; 960 goto end; 961 } 962 } 963 964 atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref); 965 966 rc = dpu_core_perf_crtc_check(crtc, state); 967 if (rc) { 968 DPU_ERROR("crtc%d failed performance check %d\n", 969 crtc->base.id, rc); 970 goto end; 971 } 972 973 /* validate source split: 974 * use pstates sorted by stage to check planes on same stage 975 * we assume that all pipes are in source split so its valid to compare 976 * without taking into account left/right mixer placement 977 */ 978 for (i = 1; i < cnt; i++) { 979 struct plane_state *prv_pstate, *cur_pstate; 980 struct drm_rect left_rect, right_rect; 981 int32_t left_pid, right_pid; 982 int32_t stage; 983 984 prv_pstate = &pstates[i - 1]; 985 cur_pstate = &pstates[i]; 986 if (prv_pstate->stage != cur_pstate->stage) 987 continue; 988 989 stage = cur_pstate->stage; 990 991 left_pid = prv_pstate->dpu_pstate->base.plane->base.id; 992 left_rect = drm_plane_state_dest(prv_pstate->drm_pstate); 993 994 right_pid = cur_pstate->dpu_pstate->base.plane->base.id; 995 right_rect = drm_plane_state_dest(cur_pstate->drm_pstate); 996 997 if (right_rect.x1 < left_rect.x1) { 998 swap(left_pid, right_pid); 999 swap(left_rect, right_rect); 1000 } 1001 1002 /** 1003 * - planes are enumerated in pipe-priority order such that 1004 * planes with lower drm_id must be left-most in a shared 1005 * blend-stage when using source split. 1006 * - planes in source split must be contiguous in width 1007 * - planes in source split must have same dest yoff and height 1008 */ 1009 if (right_pid < left_pid) { 1010 DPU_ERROR( 1011 "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n", 1012 stage, left_pid, right_pid); 1013 rc = -EINVAL; 1014 goto end; 1015 } else if (right_rect.x1 != drm_rect_width(&left_rect)) { 1016 DPU_ERROR("non-contiguous coordinates for src split. " 1017 "stage: %d left: " DRM_RECT_FMT " right: " 1018 DRM_RECT_FMT "\n", stage, 1019 DRM_RECT_ARG(&left_rect), 1020 DRM_RECT_ARG(&right_rect)); 1021 rc = -EINVAL; 1022 goto end; 1023 } else if (left_rect.y1 != right_rect.y1 || 1024 drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) { 1025 DPU_ERROR("source split at stage: %d. invalid " 1026 "yoff/height: left: " DRM_RECT_FMT " right: " 1027 DRM_RECT_FMT "\n", stage, 1028 DRM_RECT_ARG(&left_rect), 1029 DRM_RECT_ARG(&right_rect)); 1030 rc = -EINVAL; 1031 goto end; 1032 } 1033 } 1034 1035 end: 1036 kfree(pstates); 1037 return rc; 1038 } 1039 1040 int dpu_crtc_vblank(struct drm_crtc *crtc, bool en) 1041 { 1042 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1043 struct drm_encoder *enc; 1044 1045 trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc); 1046 1047 /* 1048 * Normally we would iterate through encoder_mask in crtc state to find 1049 * attached encoders. In this case, we might be disabling vblank _after_ 1050 * encoder_mask has been cleared. 1051 * 1052 * Instead, we "assign" a crtc to the encoder in enable and clear it in 1053 * disable (which is also after encoder_mask is cleared). So instead of 1054 * using encoder mask, we'll ask the encoder to toggle itself iff it's 1055 * currently assigned to our crtc. 1056 * 1057 * Note also that this function cannot be called while crtc is disabled 1058 * since we use drm_crtc_vblank_on/off. So we don't need to worry 1059 * about the assigned crtcs being inconsistent with the current state 1060 * (which means no need to worry about modeset locks). 1061 */ 1062 list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) { 1063 trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en, 1064 dpu_crtc); 1065 1066 dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en); 1067 } 1068 1069 return 0; 1070 } 1071 1072 #ifdef CONFIG_DEBUG_FS 1073 static int _dpu_debugfs_status_show(struct seq_file *s, void *data) 1074 { 1075 struct dpu_crtc *dpu_crtc; 1076 struct dpu_plane_state *pstate = NULL; 1077 struct dpu_crtc_mixer *m; 1078 1079 struct drm_crtc *crtc; 1080 struct drm_plane *plane; 1081 struct drm_display_mode *mode; 1082 struct drm_framebuffer *fb; 1083 struct drm_plane_state *state; 1084 struct dpu_crtc_state *cstate; 1085 1086 int i, out_width; 1087 1088 dpu_crtc = s->private; 1089 crtc = &dpu_crtc->base; 1090 1091 drm_modeset_lock_all(crtc->dev); 1092 cstate = to_dpu_crtc_state(crtc->state); 1093 1094 mode = &crtc->state->adjusted_mode; 1095 out_width = mode->hdisplay / cstate->num_mixers; 1096 1097 seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id, 1098 mode->hdisplay, mode->vdisplay); 1099 1100 seq_puts(s, "\n"); 1101 1102 for (i = 0; i < cstate->num_mixers; ++i) { 1103 m = &cstate->mixers[i]; 1104 seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n", 1105 m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0, 1106 out_width, mode->vdisplay); 1107 } 1108 1109 seq_puts(s, "\n"); 1110 1111 drm_atomic_crtc_for_each_plane(plane, crtc) { 1112 pstate = to_dpu_plane_state(plane->state); 1113 state = plane->state; 1114 1115 if (!pstate || !state) 1116 continue; 1117 1118 seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id, 1119 pstate->stage); 1120 1121 if (plane->state->fb) { 1122 fb = plane->state->fb; 1123 1124 seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ", 1125 fb->base.id, (char *) &fb->format->format, 1126 fb->width, fb->height); 1127 for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i) 1128 seq_printf(s, "cpp[%d]:%u ", 1129 i, fb->format->cpp[i]); 1130 seq_puts(s, "\n\t"); 1131 1132 seq_printf(s, "modifier:%8llu ", fb->modifier); 1133 seq_puts(s, "\n"); 1134 1135 seq_puts(s, "\t"); 1136 for (i = 0; i < ARRAY_SIZE(fb->pitches); i++) 1137 seq_printf(s, "pitches[%d]:%8u ", i, 1138 fb->pitches[i]); 1139 seq_puts(s, "\n"); 1140 1141 seq_puts(s, "\t"); 1142 for (i = 0; i < ARRAY_SIZE(fb->offsets); i++) 1143 seq_printf(s, "offsets[%d]:%8u ", i, 1144 fb->offsets[i]); 1145 seq_puts(s, "\n"); 1146 } 1147 1148 seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n", 1149 state->src_x, state->src_y, state->src_w, state->src_h); 1150 1151 seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n", 1152 state->crtc_x, state->crtc_y, state->crtc_w, 1153 state->crtc_h); 1154 seq_printf(s, "\tmultirect: mode: %d index: %d\n", 1155 pstate->multirect_mode, pstate->multirect_index); 1156 1157 seq_puts(s, "\n"); 1158 } 1159 if (dpu_crtc->vblank_cb_count) { 1160 ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time); 1161 s64 diff_ms = ktime_to_ms(diff); 1162 s64 fps = diff_ms ? div_s64( 1163 dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0; 1164 1165 seq_printf(s, 1166 "vblank fps:%lld count:%u total:%llums total_framecount:%llu\n", 1167 fps, dpu_crtc->vblank_cb_count, 1168 ktime_to_ms(diff), dpu_crtc->play_count); 1169 1170 /* reset time & count for next measurement */ 1171 dpu_crtc->vblank_cb_count = 0; 1172 dpu_crtc->vblank_cb_time = ktime_set(0, 0); 1173 } 1174 1175 drm_modeset_unlock_all(crtc->dev); 1176 1177 return 0; 1178 } 1179 1180 DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status); 1181 1182 static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v) 1183 { 1184 struct drm_crtc *crtc = (struct drm_crtc *) s->private; 1185 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1186 1187 seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc)); 1188 seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc)); 1189 seq_printf(s, "core_clk_rate: %llu\n", 1190 dpu_crtc->cur_perf.core_clk_rate); 1191 seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl); 1192 seq_printf(s, "max_per_pipe_ib: %llu\n", 1193 dpu_crtc->cur_perf.max_per_pipe_ib); 1194 1195 return 0; 1196 } 1197 DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state); 1198 1199 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc) 1200 { 1201 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1202 1203 dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name, 1204 crtc->dev->primary->debugfs_root); 1205 1206 debugfs_create_file("status", 0400, 1207 dpu_crtc->debugfs_root, 1208 dpu_crtc, &_dpu_debugfs_status_fops); 1209 debugfs_create_file("state", 0600, 1210 dpu_crtc->debugfs_root, 1211 &dpu_crtc->base, 1212 &dpu_crtc_debugfs_state_fops); 1213 1214 return 0; 1215 } 1216 #else 1217 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc) 1218 { 1219 return 0; 1220 } 1221 #endif /* CONFIG_DEBUG_FS */ 1222 1223 static int dpu_crtc_late_register(struct drm_crtc *crtc) 1224 { 1225 return _dpu_crtc_init_debugfs(crtc); 1226 } 1227 1228 static void dpu_crtc_early_unregister(struct drm_crtc *crtc) 1229 { 1230 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); 1231 1232 debugfs_remove_recursive(dpu_crtc->debugfs_root); 1233 } 1234 1235 static const struct drm_crtc_funcs dpu_crtc_funcs = { 1236 .set_config = drm_atomic_helper_set_config, 1237 .destroy = dpu_crtc_destroy, 1238 .page_flip = drm_atomic_helper_page_flip, 1239 .reset = dpu_crtc_reset, 1240 .atomic_duplicate_state = dpu_crtc_duplicate_state, 1241 .atomic_destroy_state = dpu_crtc_destroy_state, 1242 .late_register = dpu_crtc_late_register, 1243 .early_unregister = dpu_crtc_early_unregister, 1244 .enable_vblank = msm_crtc_enable_vblank, 1245 .disable_vblank = msm_crtc_disable_vblank, 1246 }; 1247 1248 static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = { 1249 .atomic_disable = dpu_crtc_disable, 1250 .atomic_enable = dpu_crtc_enable, 1251 .atomic_check = dpu_crtc_atomic_check, 1252 .atomic_begin = dpu_crtc_atomic_begin, 1253 .atomic_flush = dpu_crtc_atomic_flush, 1254 }; 1255 1256 /* initialize crtc */ 1257 struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, 1258 struct drm_plane *cursor) 1259 { 1260 struct drm_crtc *crtc = NULL; 1261 struct dpu_crtc *dpu_crtc = NULL; 1262 int i; 1263 1264 dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL); 1265 if (!dpu_crtc) 1266 return ERR_PTR(-ENOMEM); 1267 1268 crtc = &dpu_crtc->base; 1269 crtc->dev = dev; 1270 1271 spin_lock_init(&dpu_crtc->spin_lock); 1272 atomic_set(&dpu_crtc->frame_pending, 0); 1273 1274 init_completion(&dpu_crtc->frame_done_comp); 1275 1276 INIT_LIST_HEAD(&dpu_crtc->frame_event_list); 1277 1278 for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) { 1279 INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list); 1280 list_add(&dpu_crtc->frame_events[i].list, 1281 &dpu_crtc->frame_event_list); 1282 kthread_init_work(&dpu_crtc->frame_events[i].work, 1283 dpu_crtc_frame_event_work); 1284 } 1285 1286 drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs, 1287 NULL); 1288 1289 drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs); 1290 1291 drm_crtc_enable_color_mgmt(crtc, 0, true, 0); 1292 1293 /* save user friendly CRTC name for later */ 1294 snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id); 1295 1296 /* initialize event handling */ 1297 spin_lock_init(&dpu_crtc->event_lock); 1298 1299 DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name); 1300 return crtc; 1301 } 1302