/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

#define DPU_DRM_BLEND_OP_NOT_DEFINED	0
#define DPU_DRM_BLEND_OP_OPAQUE		1
#define DPU_DRM_BLEND_OP_PREMULTIPLIED	2
#define DPU_DRM_BLEND_OP_COVERAGE	3
#define DPU_DRM_BLEND_OP_MAX		4

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60

static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;

	DPU_DEBUG("\n");

	/* check before use: the old code dereferenced crtc via to_dpu_crtc()
	 * ahead of this NULL test, making the test useless
	 */
	if (!crtc)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	struct drm_format_name_buf format_name;

	/* default to opaque blending */
	blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
		DPU_BLEND_BG_ALPHA_BG_CONST;

	if (format->alpha_enable) {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_INV_ALPHA;
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				0xFF, 0, blend_op);

	DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
		drm_get_format_name(format->base.pixel_format, &format_name),
		format->alpha_enable, blend_op);
}
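/*
 * _dpu_crtc_program_lm_output_roi - program output bounds on each mixer
 * @crtc: Pointer to drm crtc structure
 *
 * Pushes each mixer's share of the screen (the lm_bounds rect computed in
 * _dpu_crtc_setup_lm_bounds()) into the hardware, numbering the visible
 * mixers left to right.
 */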
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	dpu_crtc = to_dpu_crtc(crtc);
	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}
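/*
 * _dpu_crtc_blend_setup_mixer - populate stage and blend config per plane
 * @crtc: Pointer to drm crtc structure
 * @dpu_crtc: Pointer to dpu crtc structure
 * @mixer: Array of this CRTC's mixers
 *
 * Walks every plane staged on the CRTC, records its pipe in the shared
 * stage_cfg at the plane's blend stage, and derives each mixer's alpha
 * and flush configuration from the plane formats.
 */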
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);

		DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DPU_DEBUG("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
			DPU_ERROR("invalid lm or ctl assigned to mixer\n");
			return;
		}
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events requested through
 * DRM_IOCTL_MODE_PAGE_FLIP and cached in dpu_crtc->event.
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	if (!crtc || !crtc->dev) {
		DPU_ERROR("invalid crtc\n");
		return INTF_MODE_NONE;
	}

	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;
	_dpu_crtc_complete_flip(crtc);
	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

static void dpu_crtc_release_bw_unlocked(struct drm_crtc *crtc)
{
	int ret = 0;
	struct drm_modeset_acquire_ctx ctx;

	DRM_MODESET_LOCK_ALL_BEGIN(crtc->dev, ctx, 0, ret);
	dpu_core_perf_crtc_release_bw(crtc);
	DRM_MODESET_LOCK_ALL_END(ctx, ret);
	if (ret)
		DRM_ERROR("Failed to acquire modeset locks to release bw, %d\n",
			  ret);
}
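/*
 * dpu_crtc_frame_event_work - deferred handler for encoder frame events
 *
 * Runs in the per-CRTC event thread. DONE/ERROR/PANEL_DEAD events decrement
 * frame_pending; when the count drops to zero the bandwidth vote is released
 * via dpu_crtc_release_bw_unlocked(). DONE/ERROR events also wake waiters on
 * frame_done_comp. The event slot is returned to frame_event_list when
 * processing completes.
 */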
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* this should not happen */
			DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
					crtc->base.id,
					fevent->event,
					ktime_to_ns(fevent->ts),
					atomic_read(&dpu_crtc->frame_pending));
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_crtc_release_bw_unlocked(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
			dpu_core_perf_crtc_update(crtc, 0, false);

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
 * registers this API to encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. Encoder may call these events
 * from different contexts - IRQ, user thread, commit_thread, etc. Each event
 * should be carefully reviewed and should be processed in the proper task
 * context to avoid scheduling delay or to properly manage the irq context's
 * bottom half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	trace_dpu_crtc_complete_commit(DRMID(crtc));
}

static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}

	drm_mode_debug_printmodeline(adj_mode);
}
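/*
 * dpu_crtc_atomic_begin - prepare the CRTC ahead of plane updates
 *
 * Recomputes per-mixer bounds for the new state, caches any pending
 * page-flip event on the dpu_crtc, lets the attached encoders arm their
 * pending kickoff masks, and programs the blend configuration if mixers
 * were allocated for this state.
 */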
static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_encoder *encoder;
	struct drm_device *dev;
	unsigned long flags;
	struct dpu_crtc_smmu_state_data *smmu_state;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	smmu_state = &dpu_crtc->smmu_state;

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	if (dpu_crtc->event) {
		WARN_ON(dpu_crtc->event);
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}
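/*
 * dpu_crtc_atomic_flush - push staged plane and CRTC updates toward hardware
 *
 * Caches any pending event, restores planes that may have been power
 * collapsed since the last commit, refreshes the performance
 * (clock/bandwidth) vote, and flushes each plane. The actual kickoff is
 * triggered later from dpu_crtc_commit_kickoff().
 */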
static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	struct msm_drm_thread *event_thread;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	event_thread = &priv->event_thread[crtc->index];

	if (dpu_crtc->event) {
		DPU_DEBUG("already received dpu_crtc->event\n");
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/*
	 * For planes without a commit update, the drm framework will not add
	 * those planes to the current state since no hardware update is
	 * required. However, if those planes were power collapsed since the
	 * last commit cycle, the driver has to restore their hardware state
	 * explicitly here prior to plane flush.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc)
		dpu_plane_restore(plane);

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 * required writes/flushing before the crtc's "flush everything"
	 * call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;

	if (!crtc || !state) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DPU_DEBUG("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}
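/**
 * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
 * @crtc: Pointer to drm crtc object
 * @async: true for an asynchronous commit, in which case the frame-done
 *	   wait and frame_pending accounting below are skipped
 */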
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	int ret;

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder, async);

	if (!async) {
		/* wait for frame_event_done completion */
		DPU_ATRACE_BEGIN("wait_for_frame_done_event");
		ret = _dpu_crtc_wait_for_frame_done(crtc);
		DPU_ATRACE_END("wait_for_frame_done_event");
		if (ret) {
			DPU_ERROR("crtc%d wait for frame done failed; frame_pending %d\n",
					crtc->base.id,
					atomic_read(&dpu_crtc->frame_pending));
			goto end;
		}

		if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
			/* acquire bandwidth and other resources */
			DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
		} else
			DPU_DEBUG("crtc%d commit\n", crtc->base.id);

		dpu_crtc->play_count++;
	}

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder, async);

end:
	if (!async)
		reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	/*
	 * __drm_atomic_helper_crtc_reset() tolerates a NULL state, so hand
	 * it NULL instead of computing &cstate->base from a failed
	 * allocation.
	 */
	__drm_atomic_helper_crtc_reset(crtc, cstate ? &cstate->base : NULL);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 * @Returns: Pointer to new drm_crtc_state structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate, *old_cstate;

	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid argument(s)\n");
		return NULL;
	}

	dpu_crtc = to_dpu_crtc(crtc);
	old_cstate = to_dpu_crtc_state(crtc->state);
	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}
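/*
 * dpu_crtc_disable - atomic disable hook
 *
 * Quiesces the CRTC: turns vblank off, detaches the encoders, waits for
 * the last frame to complete, drops any remaining bandwidth vote and the
 * mixer assignment, then releases the runtime PM reference taken in
 * dpu_crtc_enable().
 */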
static void dpu_crtc_disable(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_display_mode *mode;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;
	unsigned long flags;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	mode = &cstate->base.adjusted_mode;
	priv = crtc->dev->dev_private;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, NULL);

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed; frame_pending %d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				atomic_read(&dpu_crtc->frame_pending));
		dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	priv = crtc->dev->dev_private;

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
	dpu_crtc = to_dpu_crtc(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, crtc);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};
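/*
 * dpu_crtc_atomic_check - validate a proposed CRTC state
 *
 * Collects the staged plane states, pairs planes sharing a pipe for
 * multirect validation, and enforces destination bounds, the two-plane
 * per-stage limit on each mixer half, core performance limits, and the
 * source-split ordering rules documented inline below.
 */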
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct plane_state *pstates;
	struct dpu_crtc_state *cstate;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}

	pstates = kcalloc(DPU_STAGE_MAX * 4, sizeof(*pstates), GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	if (!state->enable || !state->active) {
		DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, state->enable, state->active);
		goto end;
	}

	mode = &state->adjusted_mode;
	DPU_DEBUG("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (state->active_changed)
		state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	mixer_width = mode->hdisplay / cstate->num_mixers;

	_dpu_crtc_setup_lm_bounds(crtc, state);

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
				  DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	rc = dpu_core_perf_crtc_check(crtc, state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/*
	 * validate source split:
	 * use pstates sorted by stage to check planes on same stage
	 * we assume that all pipes are in source split so it's valid to
	 * compare without taking into account left/right mixer placement
	 */
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}
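/*
 * dpu_crtc_vblank - enable or disable vblank for this CRTC
 * @crtc: Pointer to drm crtc object
 * @en: true to enable vblank, false to disable
 *
 * See the comment in the body for why this walks every encoder on the
 * device rather than the CRTC state's encoder_mask.
 */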
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}
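/*
 * Everything from here to the matching #endif is built only with
 * CONFIG_DEBUG_FS. It exposes two per-CRTC nodes, "status" and "state",
 * in a "crtc<N>" directory under the DRM debugfs root (typically
 * /sys/kernel/debug/dri/<minor>/).
 */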
#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
			mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		if (!m->hw_lm)
			seq_printf(s, "\tmixer[%d] has no lm\n", i);
		else if (!m->lm_ctl)
			seq_printf(s, "\tmixer[%d] has no ctl\n", i);
		else
			seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
				m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
				out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst_x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, _dpu_debugfs_status_show, inode->i_private);
}
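/*
 * DEFINE_DPU_DEBUGFS_SEQ_FOPS - generate the single_open() boilerplate for
 * a seq_file show function. As a sketch, DEFINE_DPU_DEBUGFS_SEQ_FOPS(foo)
 * (with "foo" a hypothetical prefix) expands to roughly:
 *
 *	static int foo_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, foo_show, inode->i_private);
 *	}
 *	static const struct file_operations foo_fops = { ... };
 *
 * so a caller only has to supply foo_show().
 */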
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int i;

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
			i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
		seq_printf(s, "bw_ctl[%d]: %llu\n", i,
				dpu_crtc->cur_perf.bw_ctl[i]);
		seq_printf(s, "max_per_pipe_ib[%d]: %llu\n", i,
				dpu_crtc->cur_perf.max_per_pipe_ib[i]);
	}

	return 0;
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	static const struct file_operations debugfs_status_fops = {
		.open =		_dpu_debugfs_status_open,
		.read =		seq_read,
		.llseek =	seq_lseek,
		.release =	single_release,
	};

	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);
	if (!dpu_crtc->debugfs_root)
		return -ENOMEM;

	/* don't error check these */
	debugfs_create_file("status", 0400,
			dpu_crtc->debugfs_root,
			dpu_crtc, &debugfs_status_fops);
	debugfs_create_file("state", 0600,
			dpu_crtc->debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_remove_recursive(dpu_crtc->debugfs_root);
}
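/*
 * The core vtable (drm_crtc_funcs) covers state lifecycle and debugfs
 * registration; the helper vtable after it supplies the atomic
 * check/begin/flush/enable/disable hooks invoked by the atomic helpers.
 */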
static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
};

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	struct msm_drm_private *priv = NULL;
	struct dpu_kms *kms = NULL;
	int i;

	priv = dev->dev_private;
	kms = to_dpu_kms(priv->kms);

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}