/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

#define DPU_DRM_BLEND_OP_NOT_DEFINED	0
#define DPU_DRM_BLEND_OP_OPAQUE		1
#define DPU_DRM_BLEND_OP_PREMULTIPLIED	2
#define DPU_DRM_BLEND_OP_COVERAGE	3
#define DPU_DRM_BLEND_OP_MAX		4

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER	0
#define RIGHT_MIXER	1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60

static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;

	DPU_DEBUG("\n");

	if (!crtc)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	struct drm_format_name_buf format_name;

	/* default to opaque blending */
	blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
		DPU_BLEND_BG_ALPHA_BG_CONST;

	if (format->alpha_enable) {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_INV_ALPHA;
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				0xFF, 0, blend_op);

	DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
		drm_get_format_name(format->base.pixel_format, &format_name),
		format->alpha_enable, blend_op);
}

static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	dpu_crtc = to_dpu_crtc(crtc);
	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
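		/*
		 * Visible mixers are programmed left to right; the running
		 * horizontal position doubles as the right-mixer flag, so
		 * the second visible LM of a split pair is marked as the
		 * right mixer.
		 */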
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);

		DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DPU_DEBUG("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
			DPU_ERROR("invalid lm or ctl assigned to mixer\n");
			return;
		}
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * @crtc: Pointer to drm crtc structure
 *
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events requested through
 * DRM_IOCTL_MODE_PAGE_FLIP that are cached in dpu_crtc->event.
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	if (!crtc || !crtc->dev) {
		DPU_ERROR("invalid crtc\n");
		return INTF_MODE_NONE;
	}

	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;
	_dpu_crtc_complete_flip(crtc);
	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

static void dpu_crtc_release_bw_unlocked(struct drm_crtc *crtc)
{
	int ret = 0;
	struct drm_modeset_acquire_ctx ctx;

	DRM_MODESET_LOCK_ALL_BEGIN(crtc->dev, ctx, 0, ret);
	dpu_core_perf_crtc_release_bw(crtc);
	DRM_MODESET_LOCK_ALL_END(ctx, ret);
	if (ret)
		DRM_ERROR("Failed to acquire modeset locks to release bw, %d\n",
			  ret);
}

static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* this should not happen */
			DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
					crtc->base.id,
					fevent->event,
					ktime_to_ns(fevent->ts),
					atomic_read(&dpu_crtc->frame_pending));
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
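			/*
			 * frame_pending just dropped to zero: this was
			 * the last outstanding frame for this crtc.
			 */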
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_crtc_release_bw_unlocked(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
			dpu_core_perf_crtc_update(crtc, 0, false);

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
 * registers this API to encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. Encoder may invoke these
 * callbacks from different contexts - IRQ, user thread, commit_thread, etc.
 * Each event should be carefully reviewed and should be processed in the
 * proper task context to avoid scheduling delay or to properly manage the
 * irq context's bottom half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	trace_dpu_crtc_complete_commit(DRMID(crtc));
}

static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}

	drm_mode_debug_printmodeline(adj_mode);
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_encoder *encoder;
	struct drm_device *dev;
	unsigned long flags;
	struct dpu_crtc_smmu_state_data *smmu_state;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	smmu_state = &dpu_crtc->smmu_state;

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	if (dpu_crtc->event) {
		WARN_ON(dpu_crtc->event);
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}

static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	struct msm_drm_thread *event_thread;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	event_thread = &priv->event_thread[crtc->index];

	if (dpu_crtc->event) {
		DPU_DEBUG("already received dpu_crtc->event\n");
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/*
	 * For planes without commit update, drm framework will not add
	 * those planes to current state since hardware update is not
	 * required. However, if those planes were power collapsed since
	 * last commit cycle, driver has to restore the hardware state
	 * of those planes explicitly here prior to plane flush.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc)
		dpu_plane_restore(plane);

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 * required writes/flushing before the crtc's "flush everything"
	 * call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;

	if (!crtc || !state) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DPU_DEBUG("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	int ret;

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder, async);

	if (!async) {
		/* wait for frame_event_done completion */
		DPU_ATRACE_BEGIN("wait_for_frame_done_event");
		ret = _dpu_crtc_wait_for_frame_done(crtc);
		DPU_ATRACE_END("wait_for_frame_done_event");
		if (ret) {
			DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
					crtc->base.id,
					atomic_read(&dpu_crtc->frame_pending));
			goto end;
		}

		if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
			/* acquire bandwidth and other resources */
			DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
		} else
			DPU_DEBUG("crtc%d commit\n", crtc->base.id);

		dpu_crtc->play_count++;
	}

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder, async);

end:
	if (!async)
		reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate;

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	crtc->state = kzalloc(sizeof(*cstate), GFP_KERNEL);
	if (crtc->state)
		crtc->state->crtc = crtc;
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 *
 * Returns: Pointer to new drm_crtc_state structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate, *old_cstate;

	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid argument(s)\n");
		return NULL;
	}

	dpu_crtc = to_dpu_crtc(crtc);
	old_cstate = to_dpu_crtc_state(crtc->state);
	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_disable(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_display_mode *mode;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;
	unsigned long flags;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	mode = &cstate->base.adjusted_mode;
	priv = crtc->dev->dev_private;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, NULL);

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				atomic_read(&dpu_crtc->frame_pending));
		dpu_core_perf_crtc_release_bw(crtc);
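		/*
		 * The encoders are quiesced at this point, so no further
		 * frame-done events will arrive to drop the count; clear
		 * it here to keep the kickoff accounting consistent.
		 */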
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	priv = crtc->dev->dev_private;

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
	dpu_crtc = to_dpu_crtc(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, crtc);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct plane_state *pstates;
	struct dpu_crtc_state *cstate;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}

	pstates = kcalloc(DPU_STAGE_MAX * 4, sizeof(*pstates), GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	if (!state->enable || !state->active) {
		DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, state->enable, state->active);
		goto end;
	}

	mode = &state->adjusted_mode;
	DPU_DEBUG("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (state->active_changed)
		state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	mixer_width = mode->hdisplay / cstate->num_mixers;

	_dpu_crtc_setup_lm_bounds(crtc, state);

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
					DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	rc = dpu_core_perf_crtc_check(crtc, state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/* validate source split:
	 * use pstates sorted by stage to check planes on same stage
	 * we assume that all pipes are in source split so it's valid to compare
	 * without taking into account left/right mixer placement
	 */
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}

int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		if (!m->hw_lm)
			seq_printf(s, "\tmixer[%d] has no lm\n", i);
		else if (!m->lm_ctl)
			seq_printf(s, "\tmixer[%d] has no ctl\n", i);
		else
			seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
				m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
				out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
					i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
					fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
					fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, _dpu_debugfs_status_show, inode->i_private);
}

#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int i;

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
			i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
		seq_printf(s, "bw_ctl[%d]: %llu\n", i,
				dpu_crtc->cur_perf.bw_ctl[i]);
		seq_printf(s, "max_per_pipe_ib[%d]: %llu\n", i,
				dpu_crtc->cur_perf.max_per_pipe_ib[i]);
	}

	return 0;
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	static const struct file_operations debugfs_status_fops = {
		.open =		_dpu_debugfs_status_open,
		.read =		seq_read,
		.llseek =	seq_lseek,
		.release =	single_release,
	};

	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);
	if (!dpu_crtc->debugfs_root)
		return -ENOMEM;

	/* don't error check these */
	debugfs_create_file("status", 0400,
			dpu_crtc->debugfs_root,
			dpu_crtc, &debugfs_status_fops);
	debugfs_create_file("state", 0600,
			dpu_crtc->debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_remove_recursive(dpu_crtc->debugfs_root);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
};

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	struct msm_drm_private *priv = NULL;
	struct dpu_kms *kms = NULL;
	int i;

	priv = dev->dev_private;
	kms = to_dpu_kms(priv->kms);

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}