/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_rect.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_power_handle.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

#define DPU_DRM_BLEND_OP_NOT_DEFINED	0
#define DPU_DRM_BLEND_OP_OPAQUE		1
#define DPU_DRM_BLEND_OP_PREMULTIPLIED	2
#define DPU_DRM_BLEND_OP_COVERAGE	3
#define DPU_DRM_BLEND_OP_MAX		4

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER	0
#define RIGHT_MIXER	1

#define MISR_BUFF_SIZE	256

static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
		DPU_ERROR("invalid crtc\n");
		return NULL;
	}
	priv = crtc->dev->dev_private;
	if (!priv || !priv->kms) {
		DPU_ERROR("invalid kms\n");
		return NULL;
	}

	return to_dpu_kms(priv->kms);
}

static inline int _dpu_crtc_power_enable(struct dpu_crtc *dpu_crtc, bool enable)
{
	struct drm_crtc *crtc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;

	if (!dpu_crtc) {
		DPU_ERROR("invalid dpu crtc\n");
		return -EINVAL;
	}

	crtc = &dpu_crtc->base;
	if (!crtc->dev || !crtc->dev->dev_private) {
		DPU_ERROR("invalid drm device\n");
		return -EINVAL;
	}

	priv = crtc->dev->dev_private;
	if (!priv->kms) {
		DPU_ERROR("invalid kms\n");
		return -EINVAL;
	}

	dpu_kms = to_dpu_kms(priv->kms);

	if (enable)
		pm_runtime_get_sync(&dpu_kms->pdev->dev);
	else
		pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;
}

/**
 * _dpu_crtc_rp_to_crtc - get crtc from resource pool object
 * @rp: Pointer to resource pool
 * return: Pointer to drm crtc if success; null otherwise
 */
static struct drm_crtc *_dpu_crtc_rp_to_crtc(struct dpu_crtc_respool *rp)
{
	if (!rp)
		return NULL;

	return container_of(rp, struct dpu_crtc_state, rp)->base.crtc;
}

/**
 * _dpu_crtc_rp_reclaim - reclaim unused, or all if forced, resources in pool
 * @rp: Pointer to resource pool
 * @force: True to reclaim all resources; otherwise, reclaim only unused ones
 * return: None
 */
static void _dpu_crtc_rp_reclaim(struct dpu_crtc_respool *rp, bool force)
{
	struct dpu_crtc_res *res, *next;
	struct drm_crtc *crtc;

	crtc = _dpu_crtc_rp_to_crtc(rp);
	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	DPU_DEBUG("crtc%d.%u %s\n", crtc->base.id, rp->sequence_id,
			force ? "destroy" : "free_unused");

	list_for_each_entry_safe(res, next, &rp->res_list, list) {
		if (!force && !(res->flags & DPU_CRTC_RES_FLAG_FREE))
			continue;
		DPU_DEBUG("crtc%d.%u reclaim res:0x%x/0x%llx/%pK/%d\n",
				crtc->base.id, rp->sequence_id,
				res->type, res->tag, res->val,
				atomic_read(&res->refcount));
		list_del(&res->list);
		if (res->ops.put)
			res->ops.put(res->val);
		kfree(res);
	}
}

/**
 * _dpu_crtc_rp_free_unused - free unused resource in pool
 * @rp: Pointer to resource pool
 * return: none
 */
static void _dpu_crtc_rp_free_unused(struct dpu_crtc_respool *rp)
{
	mutex_lock(rp->rp_lock);
	_dpu_crtc_rp_reclaim(rp, false);
	mutex_unlock(rp->rp_lock);
}

/**
 * _dpu_crtc_rp_destroy - destroy resource pool
 * @rp: Pointer to resource pool
 * return: None
 */
static void _dpu_crtc_rp_destroy(struct dpu_crtc_respool *rp)
{
	mutex_lock(rp->rp_lock);
	list_del_init(&rp->rp_list);
	_dpu_crtc_rp_reclaim(rp, true);
	mutex_unlock(rp->rp_lock);
}

/**
 * _dpu_crtc_hw_blk_get - get callback for hardware block
 * @val: Resource handle
 * @type: Resource type
 * @tag: Search tag for given resource
 * return: Resource handle
 */
static void *_dpu_crtc_hw_blk_get(void *val, u32 type, u64 tag)
{
	DPU_DEBUG("res:%d/0x%llx/%pK\n", type, tag, val);
	return dpu_hw_blk_get(val, type, tag);
}

/**
 * _dpu_crtc_hw_blk_put - put callback for hardware block
 * @val: Resource handle
 * return: None
 */
static void _dpu_crtc_hw_blk_put(void *val)
{
	DPU_DEBUG("res://%pK\n", val);
	dpu_hw_blk_put(val);
}

/**
 * _dpu_crtc_rp_duplicate - duplicate resource pool and reset reference count
 * @rp: Pointer to original resource pool
 * @dup_rp: Pointer to duplicated resource pool
 * return: None
 */
static void _dpu_crtc_rp_duplicate(struct dpu_crtc_respool *rp,
		struct dpu_crtc_respool *dup_rp)
{
	struct dpu_crtc_res *res, *dup_res;
	struct drm_crtc *crtc;

	if (!rp || !dup_rp || !rp->rp_head) {
		DPU_ERROR("invalid resource pool\n");
		return;
	}

	crtc = _dpu_crtc_rp_to_crtc(rp);
	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	DPU_DEBUG("crtc%d.%u duplicate\n", crtc->base.id, rp->sequence_id);

	mutex_lock(rp->rp_lock);
	dup_rp->sequence_id = rp->sequence_id + 1;
	INIT_LIST_HEAD(&dup_rp->res_list);
	dup_rp->ops = rp->ops;
	list_for_each_entry(res, &rp->res_list, list) {
		dup_res = kzalloc(sizeof(struct dpu_crtc_res), GFP_KERNEL);
		if (!dup_res) {
			mutex_unlock(rp->rp_lock);
			return;
		}
		INIT_LIST_HEAD(&dup_res->list);
		atomic_set(&dup_res->refcount, 0);
		dup_res->type = res->type;
		dup_res->tag = res->tag;
		dup_res->val = res->val;
		dup_res->ops = res->ops;
		dup_res->flags = DPU_CRTC_RES_FLAG_FREE;
		DPU_DEBUG("crtc%d.%u dup res:0x%x/0x%llx/%pK/%d\n",
				crtc->base.id, dup_rp->sequence_id,
				dup_res->type, dup_res->tag, dup_res->val,
				atomic_read(&dup_res->refcount));
		list_add_tail(&dup_res->list, &dup_rp->res_list);
		if (dup_res->ops.get)
			dup_res->ops.get(dup_res->val, 0, -1);
	}

	dup_rp->rp_lock = rp->rp_lock;
	dup_rp->rp_head = rp->rp_head;
	INIT_LIST_HEAD(&dup_rp->rp_list);
	list_add_tail(&dup_rp->rp_list, rp->rp_head);
	mutex_unlock(rp->rp_lock);
}

/**
 * _dpu_crtc_rp_reset - reset resource pool after allocation
 * @rp: Pointer to original resource pool
 * @rp_lock: Pointer to serialization resource pool lock
 * @rp_head: Pointer to crtc resource pool head
 * return: None
 */
static void _dpu_crtc_rp_reset(struct dpu_crtc_respool *rp,
		struct mutex *rp_lock, struct list_head *rp_head)
{
	if (!rp || !rp_lock || !rp_head) {
		DPU_ERROR("invalid resource pool\n");
		return;
	}

	mutex_lock(rp_lock);
	rp->rp_lock = rp_lock;
	rp->rp_head = rp_head;
	INIT_LIST_HEAD(&rp->rp_list);
	rp->sequence_id = 0;
	INIT_LIST_HEAD(&rp->res_list);
	rp->ops.get = _dpu_crtc_hw_blk_get;
	rp->ops.put = _dpu_crtc_hw_blk_put;
	list_add_tail(&rp->rp_list, rp->rp_head);
	mutex_unlock(rp_lock);
}

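/*
 * Resource pool lifecycle, as implemented by the helpers above:
 * dpu_crtc_reset() installs an empty pool via _dpu_crtc_rp_reset();
 * dpu_crtc_duplicate_state() copies every entry with
 * _dpu_crtc_rp_duplicate(), bumping sequence_id, taking a hw_blk
 * reference and marking each copy DPU_CRTC_RES_FLAG_FREE;
 * dpu_crtc_atomic_check() then drops entries still flagged FREE via
 * _dpu_crtc_rp_free_unused(), and dpu_crtc_destroy_state() reclaims
 * everything with _dpu_crtc_rp_destroy(). The acquire path that clears
 * the FREE flag on a re-used resource lives outside this excerpt.
 */
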
static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	DPU_DEBUG("\n");

	if (!crtc)
		return;

	dpu_crtc->phandle = NULL;

	drm_crtc_cleanup(crtc);
	mutex_destroy(&dpu_crtc->crtc_lock);
	kfree(dpu_crtc);
}

static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;

	/* default to opaque blending */
	lm->ops.setup_blend_config(lm, pstate->stage, 0xff, 0,
				DPU_BLEND_FG_ALPHA_FG_CONST |
				DPU_BLEND_BG_ALPHA_BG_CONST);
}

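/*
 * Note: the DPU_DRM_BLEND_OP_* values defined at the top of this file
 * are not consulted here yet; every stage is programmed as opaque
 * (constant foreground/background alpha, fg = 0xff). A sketch of what
 * the other modes would select, assuming the DPU_BLEND_* flag names
 * from dpu_hw_mdss.h:
 *
 *   premultiplied: DPU_BLEND_FG_ALPHA_FG_CONST |
 *                  DPU_BLEND_BG_ALPHA_FG_PIXEL | DPU_BLEND_BG_INV_ALPHA
 *   coverage:      DPU_BLEND_FG_ALPHA_FG_PIXEL |
 *                  DPU_BLEND_BG_ALPHA_FG_PIXEL | DPU_BLEND_BG_INV_ALPHA
 */
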
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	dpu_crtc = to_dpu_crtc(crtc);
	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = dpu_crtc->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	struct dpu_hw_stage_cfg *stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;

	if (!dpu_crtc || !mixer) {
		DPU_ERROR("invalid dpu_crtc or mixer\n");
		return;
	}

	ctl = mixer->hw_ctl;
	lm = mixer->hw_lm;
	stage_cfg = &dpu_crtc->stage_cfg;
	cstate = to_dpu_crtc_state(crtc->state);

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);

		DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
		if (!format) {
			DPU_ERROR("invalid format\n");
			return;
		}

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *dpu_crtc_state;
	struct dpu_crtc_mixer *mixer;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;

	int i;

	if (!crtc)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	dpu_crtc_state = to_dpu_crtc_state(crtc->state);
	mixer = dpu_crtc->mixers;

	DPU_DEBUG("%s\n", dpu_crtc->name);

	if (dpu_crtc->num_mixers > CRTC_DUAL_MIXERS) {
		DPU_ERROR("invalid number mixers: %d\n", dpu_crtc->num_mixers);
		return;
	}

	for (i = 0; i < dpu_crtc->num_mixers; i++) {
		if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
			DPU_ERROR("invalid lm or ctl assigned to mixer\n");
			return;
		}
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].hw_ctl->ops.clear_all_blendstages)
			mixer[i].hw_ctl->ops.clear_all_blendstages(
					mixer[i].hw_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < dpu_crtc->num_mixers; i++) {
		ctl = mixer[i].hw_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events requested through
 * DRM_IOCTL_MODE_PAGE_FLIP that are cached in dpu_crtc->event.
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	if (!crtc || !crtc->dev) {
		DPU_ERROR("invalid crtc\n");
		return INTF_MODE_NONE;
	}

	drm_for_each_encoder(encoder, crtc->dev)
		if (encoder->crtc == crtc)
			return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

static void dpu_crtc_vblank_cb(void *data)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;
	_dpu_crtc_complete_flip(crtc);
	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	struct drm_crtc *crtc;
	struct dpu_crtc *dpu_crtc;
	struct dpu_kms *dpu_kms;
	unsigned long flags;
	bool frame_done = false;

	if (!work) {
		DPU_ERROR("invalid work handle\n");
		return;
	}

	fevent = container_of(work, struct dpu_crtc_frame_event, work);
	if (!fevent->crtc || !fevent->crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	crtc = fevent->crtc;
	dpu_crtc = to_dpu_crtc(crtc);

	dpu_kms = _dpu_crtc_get_kms(crtc);
	if (!dpu_kms) {
		DPU_ERROR("invalid kms handle\n");
		return;
	}
	priv = dpu_kms->dev->dev_private;
	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* this should not happen */
			DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
					crtc->base.id,
					fevent->event,
					ktime_to_ns(fevent->ts),
					atomic_read(&dpu_crtc->frame_pending));
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
			dpu_core_perf_crtc_update(crtc, 0, false);

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
 * registers this API to encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. Encoder may call different events
 * from different context - IRQ, user thread, commit_thread, etc. Each event
 * should be carefully reviewed and should be processed in proper task context
 * to avoid scheduling delay or properly manage the irq context's bottom half
 * processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
}

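/*
 * Frame events are serviced from a fixed pool (dpu_crtc->frame_events,
 * initialized in dpu_crtc_init() at the bottom of this file): the
 * callback above pops a free entry under spin_lock and queues it to the
 * crtc's event thread, and dpu_crtc_frame_event_work() returns the entry
 * to the pool once processed. If the pool is exhausted the event is
 * dropped with an "overflow" error rather than allocating from atomic
 * context.
 */
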
void dpu_crtc_complete_commit(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	trace_dpu_crtc_complete_commit(DRMID(crtc));
}

static void _dpu_crtc_setup_mixer_for_encoder(
		struct drm_crtc *crtc,
		struct drm_encoder *enc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_rm *rm = &dpu_kms->rm;
	struct dpu_crtc_mixer *mixer;
	struct dpu_hw_ctl *last_valid_ctl = NULL;
	int i;
	struct dpu_rm_hw_iter lm_iter, ctl_iter;

	dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
	dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);

	/* Set up all the mixers and ctls reserved by this encoder */
	for (i = dpu_crtc->num_mixers; i < ARRAY_SIZE(dpu_crtc->mixers); i++) {
		mixer = &dpu_crtc->mixers[i];

		if (!dpu_rm_get_hw(rm, &lm_iter))
			break;
		mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;

		/* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
		if (!dpu_rm_get_hw(rm, &ctl_iter)) {
			DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
					mixer->hw_lm->idx - LM_0);
			mixer->hw_ctl = last_valid_ctl;
		} else {
			mixer->hw_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
			last_valid_ctl = mixer->hw_ctl;
		}

		/* Shouldn't happen, mixers are always >= ctls */
		if (!mixer->hw_ctl) {
			DPU_ERROR("no valid ctls found for lm %d\n",
					mixer->hw_lm->idx - LM_0);
			return;
		}

		mixer->encoder = enc;

		dpu_crtc->num_mixers++;
		DPU_DEBUG("setup mixer %d: lm %d\n",
				i, mixer->hw_lm->idx - LM_0);
		DPU_DEBUG("setup mixer %d: ctl %d\n",
				i, mixer->hw_ctl->idx - CTL_0);
	}
}

static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	dpu_crtc->num_mixers = 0;
	dpu_crtc->mixers_swapped = false;
	memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));

	mutex_lock(&dpu_crtc->crtc_lock);
	/* Check for mixers on all encoders attached to this crtc */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		if (enc->crtc != crtc)
			continue;

		_dpu_crtc_setup_mixer_for_encoder(crtc, enc);
	}

	mutex_unlock(&dpu_crtc->crtc_lock);
}

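/*
 * Example of the LM/CTL pairing performed above: if the resource manager
 * reserved LM_0 and LM_1 for the encoder but only CTL_0, the loop in
 * _dpu_crtc_setup_mixer_for_encoder() produces
 *
 *   mixers[0]: lm = LM_0, ctl = CTL_0
 *   mixers[1]: lm = LM_1, ctl = CTL_0   (previous ctl reused)
 *
 * which is why a missing CTL is fatal only when there is no previous one
 * to fall back on.
 */
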
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_display_mode *adj_mode;
	u32 crtc_split_width;
	int i;

	if (!crtc || !state) {
		DPU_ERROR("invalid args\n");
		return;
	}

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	adj_mode = &state->adjusted_mode;
	crtc_split_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, adj_mode);

	for (i = 0; i < dpu_crtc->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}

	drm_mode_debug_printmodeline(adj_mode);
}

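/*
 * Example: a 1080x1920 adjusted mode driven through two mixers yields a
 * crtc_split_width of 540, so lm_bounds[0] = (0,0)-(540,1920) and
 * lm_bounds[1] = (540,0)-(1080,1920); a single mixer gets one rect
 * covering the full width. (This assumes dpu_crtc_get_mixer_width()
 * divides the mode width evenly across the active mixers, as its name
 * suggests.)
 */
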
static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_encoder *encoder;
	struct drm_device *dev;
	unsigned long flags;
	struct dpu_crtc_smmu_state_data *smmu_state;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	dev = crtc->dev;
	smmu_state = &dpu_crtc->smmu_state;

	if (!dpu_crtc->num_mixers) {
		_dpu_crtc_setup_mixers(crtc);
		_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
	}

	if (dpu_crtc->event) {
		WARN_ON(dpu_crtc->event);
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;

		/* encoder will trigger pending mask now */
		dpu_encoder_trigger_kickoff_pending(encoder);
	}

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!dpu_crtc->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}

static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	struct msm_drm_thread *event_thread;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	event_thread = &priv->event_thread[crtc->index];

	if (dpu_crtc->event) {
		DPU_DEBUG("already received dpu_crtc->event\n");
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!dpu_crtc->num_mixers))
		return;

	/*
	 * For planes without commit update, drm framework will not add
	 * those planes to current state since hardware update is not
	 * required. However, if those planes were power collapsed since
	 * last commit cycle, driver has to restore the hardware state
	 * of those planes explicitly here prior to plane flush.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc)
		dpu_plane_restore(plane);

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 * required writes/flushing before crtc's "flush
	 * everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;

	if (!crtc || !state) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	_dpu_crtc_rp_destroy(&cstate->rp);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	int ret, rc = 0;

	if (!crtc) {
		DPU_ERROR("invalid argument\n");
		return -EINVAL;
	}
	dpu_crtc = to_dpu_crtc(crtc);

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DPU_DEBUG("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_crtc_state *cstate;
	int ret;

	if (!crtc) {
		DPU_ERROR("invalid argument\n");
		return;
	}
	dev = crtc->dev;
	dpu_crtc = to_dpu_crtc(crtc);
	dpu_kms = _dpu_crtc_get_kms(crtc);

	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
		DPU_ERROR("invalid argument\n");
		return;
	}

	priv = dpu_kms->dev->dev_private;
	cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!dpu_crtc->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct dpu_encoder_kickoff_params params = { 0 };

		if (encoder->crtc != crtc)
			continue;

		/*
		 * Encoder will flush/start now, unless it has a tx pending.
		 * If so, it may delay and flush at an irq event (e.g. ppdone)
		 */
		dpu_encoder_prepare_for_kickoff(encoder, &params);
	}

	/* wait for frame_event_done completion */
	DPU_ATRACE_BEGIN("wait_for_frame_done_event");
	ret = _dpu_crtc_wait_for_frame_done(crtc);
	DPU_ATRACE_END("wait_for_frame_done_event");
	if (ret) {
		DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));
		goto end;
	}

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
	} else
		DPU_DEBUG("crtc%d commit\n", crtc->base.id);

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;

		dpu_encoder_kickoff(encoder);
	}

end:
	reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}

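/*
 * frame_pending bookkeeping: dpu_crtc_commit_kickoff() increments the
 * counter once per kickoff, and dpu_crtc_frame_event_work() decrements
 * it on every DONE/ERROR/PANEL_DEAD event, releasing bandwidth when it
 * reaches zero. The _dpu_crtc_wait_for_frame_done() call above therefore
 * keeps at most one frame outstanding per commit.
 */
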
/**
 * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
 * @dpu_crtc: Pointer to dpu crtc structure
 * @enable: Whether to enable/disable vblanks
 *
 * @Return: error code
 */
static int _dpu_crtc_vblank_enable_no_lock(
		struct dpu_crtc *dpu_crtc, bool enable)
{
	struct drm_device *dev;
	struct drm_crtc *crtc;
	struct drm_encoder *enc;

	if (!dpu_crtc) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}

	crtc = &dpu_crtc->base;
	dev = crtc->dev;

	if (enable) {
		int ret;

		/* drop lock since power crtc cb may try to re-acquire lock */
		mutex_unlock(&dpu_crtc->crtc_lock);
		ret = _dpu_crtc_power_enable(dpu_crtc, true);
		mutex_lock(&dpu_crtc->crtc_lock);
		if (ret)
			return ret;

		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
			if (enc->crtc != crtc)
				continue;

			trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
						     DRMID(enc), enable,
						     dpu_crtc);

			dpu_encoder_register_vblank_callback(enc,
					dpu_crtc_vblank_cb, (void *)crtc);
		}
	} else {
		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
			if (enc->crtc != crtc)
				continue;

			trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
						     DRMID(enc), enable,
						     dpu_crtc);

			dpu_encoder_register_vblank_callback(enc, NULL, NULL);
		}

		/* drop lock since power crtc cb may try to re-acquire lock */
		mutex_unlock(&dpu_crtc->crtc_lock);
		_dpu_crtc_power_enable(dpu_crtc, false);
		mutex_lock(&dpu_crtc->crtc_lock);
	}

	return 0;
}

/**
 * _dpu_crtc_set_suspend - notify crtc of suspend enable/disable
 * @crtc: Pointer to drm crtc object
 * @enable: true to enable suspend, false to indicate resume
 */
static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
{
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	int ret = 0;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;

	if (!priv->kms) {
		DPU_ERROR("invalid crtc kms\n");
		return;
	}
	dpu_kms = to_dpu_kms(priv->kms);

	DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);

	mutex_lock(&dpu_crtc->crtc_lock);

	/*
	 * If the vblank is enabled, release a power reference on suspend
	 * and take it back during resume (if it is still enabled).
	 */
	trace_dpu_crtc_set_suspend(DRMID(&dpu_crtc->base), enable, dpu_crtc);
	if (dpu_crtc->suspend == enable)
		DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
				crtc->base.id, enable);
	else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
		ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
		if (ret)
			DPU_ERROR("%s vblank enable failed: %d\n",
					dpu_crtc->name, ret);
	}

	dpu_crtc->suspend = enable;
	mutex_unlock(&dpu_crtc->crtc_lock);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 * @Returns: Pointer to new drm_crtc_state structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate, *old_cstate;

	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid argument(s)\n");
		return NULL;
	}

	dpu_crtc = to_dpu_crtc(crtc);
	old_cstate = to_dpu_crtc_state(crtc->state);
	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	_dpu_crtc_rp_duplicate(&old_cstate->rp, &cstate->rp);

	return &cstate->base;
}

/**
 * dpu_crtc_reset - reset hook for CRTCs
 * Resets the atomic state for @crtc by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 * @crtc: Pointer to drm crtc structure
 */
static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	/* revert suspend actions, if necessary */
	if (dpu_kms_is_suspend_state(crtc->dev))
		_dpu_crtc_set_suspend(crtc, false);

	/* remove previous state, if present */
	if (crtc->state) {
		dpu_crtc_destroy_state(crtc, crtc->state);
		crtc->state = NULL;
	}

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return;
	}

	_dpu_crtc_rp_reset(&cstate->rp, &dpu_crtc->rp_lock,
			&dpu_crtc->rp_head);

	cstate->base.crtc = crtc;
	crtc->state = &cstate->base;
}

static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
{
	struct drm_crtc *crtc = arg;
	struct dpu_crtc *dpu_crtc;
	struct drm_encoder *encoder;
	struct dpu_crtc_mixer *m;
	u32 i, misr_status;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	dpu_crtc = to_dpu_crtc(crtc);

	mutex_lock(&dpu_crtc->crtc_lock);

	trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);

	switch (event_type) {
	case DPU_POWER_EVENT_POST_ENABLE:
		/* restore encoder; crtc will be programmed during commit */
		drm_for_each_encoder(encoder, crtc->dev) {
			if (encoder->crtc != crtc)
				continue;

			dpu_encoder_virt_restore(encoder);
		}

		for (i = 0; i < dpu_crtc->num_mixers; ++i) {
			m = &dpu_crtc->mixers[i];
			if (!m->hw_lm || !m->hw_lm->ops.setup_misr ||
					!dpu_crtc->misr_enable)
				continue;

			m->hw_lm->ops.setup_misr(m->hw_lm, true,
					dpu_crtc->misr_frame_count);
		}
		break;
	case DPU_POWER_EVENT_PRE_DISABLE:
		for (i = 0; i < dpu_crtc->num_mixers; ++i) {
			m = &dpu_crtc->mixers[i];
			if (!m->hw_lm || !m->hw_lm->ops.collect_misr ||
					!dpu_crtc->misr_enable)
				continue;

			misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
			dpu_crtc->misr_data[i] = misr_status ? misr_status :
							dpu_crtc->misr_data[i];
		}
		break;
	case DPU_POWER_EVENT_POST_DISABLE:
		/*
		 * Nothing to do. All the planes on the CRTC will be
		 * programmed for every frame
		 */
		break;
	default:
		DPU_DEBUG("event:%d not handled\n", event_type);
		break;
	}

	mutex_unlock(&dpu_crtc->crtc_lock);
}

static void dpu_crtc_disable(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_display_mode *mode;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;
	int ret;
	unsigned long flags;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	mode = &cstate->base.adjusted_mode;
	priv = crtc->dev->dev_private;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	if (dpu_kms_is_suspend_state(crtc->dev))
		_dpu_crtc_set_suspend(crtc, true);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	mutex_lock(&dpu_crtc->crtc_lock);

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	if (dpu_crtc->enabled && !dpu_crtc->suspend &&
			dpu_crtc->vblank_requested) {
		ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
		if (ret)
			DPU_ERROR("%s vblank enable failed: %d\n",
					dpu_crtc->name, ret);
	}
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				atomic_read(&dpu_crtc->frame_pending));
		dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder(encoder, crtc->dev) {
		if (encoder->crtc != crtc)
			continue;
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
	}

	if (dpu_crtc->power_event)
		dpu_power_handle_unregister_event(dpu_crtc->phandle,
				dpu_crtc->power_event);

	memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
	dpu_crtc->num_mixers = 0;
	dpu_crtc->mixers_swapped = false;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	mutex_unlock(&dpu_crtc->crtc_lock);

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;
	int ret;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	priv = crtc->dev->dev_private;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
	dpu_crtc = to_dpu_crtc(crtc);

	drm_for_each_encoder(encoder, crtc->dev) {
		if (encoder->crtc != crtc)
			continue;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	mutex_lock(&dpu_crtc->crtc_lock);
	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
			dpu_crtc->vblank_requested) {
		ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
		if (ret)
			DPU_ERROR("%s vblank enable failed: %d\n",
					dpu_crtc->name, ret);
	}
	dpu_crtc->enabled = true;

	mutex_unlock(&dpu_crtc->crtc_lock);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);

	dpu_crtc->power_event = dpu_power_handle_register_event(
			dpu_crtc->phandle,
			DPU_POWER_EVENT_POST_ENABLE | DPU_POWER_EVENT_POST_DISABLE |
			DPU_POWER_EVENT_PRE_DISABLE,
			dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
}

struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct plane_state *pstates;
	struct dpu_crtc_state *cstate;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}

	pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	if (!state->enable || !state->active) {
		DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, state->enable, state->active);
		goto end;
	}

	mode = &state->adjusted_mode;
	DPU_DEBUG("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (state->active_changed)
		state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	mixer_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);

	_dpu_crtc_setup_lm_bounds(crtc, state);

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst) ||
		    !drm_rect_equals(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
					DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	rc = dpu_core_perf_crtc_check(crtc, state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/*
	 * validate source split:
	 * use pstates sorted by stage to check planes on same stage
	 * we assume that all pipes are in source split so its valid to compare
	 * without taking into account left/right mixer placement
	 */
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	_dpu_crtc_rp_free_unused(&cstate->rp);
	kfree(pstates);
	return rc;
}

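/*
 * Example of the source-split rule enforced at the end of
 * dpu_crtc_atomic_check(): two planes sharing stage 2 must tile
 * left-to-right with no gap or overlap, e.g. plane A at
 * (0,0)-(540,1920) next to plane B at (540,0)-(1080,1920), with the
 * left plane holding the lower drm id. A at x1=0/width 500 next to B at
 * x1=540 would be rejected as non-contiguous (-EINVAL).
 */
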
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc;
	int ret;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}
	dpu_crtc = to_dpu_crtc(crtc);

	mutex_lock(&dpu_crtc->crtc_lock);
	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
	if (dpu_crtc->enabled && !dpu_crtc->suspend) {
		ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
		if (ret)
			DPU_ERROR("%s vblank enable failed: %d\n",
					dpu_crtc->name, ret);
	}
	dpu_crtc->vblank_requested = en;
	mutex_unlock(&dpu_crtc->crtc_lock);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	if (!s || !s->private)
		return -EINVAL;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;
	cstate = to_dpu_crtc_state(crtc->state);

	mutex_lock(&dpu_crtc->crtc_lock);
	mode = &crtc->state->adjusted_mode;
	out_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < dpu_crtc->num_mixers; ++i) {
		m = &dpu_crtc->mixers[i];
		if (!m->hw_lm)
			seq_printf(s, "\tmixer[%d] has no lm\n", i);
		else if (!m->hw_ctl)
			seq_printf(s, "\tmixer[%d] has no ctl\n", i);
		else
			seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
				m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
				out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);

	mutex_unlock(&dpu_crtc->crtc_lock);

	return 0;
}

static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, _dpu_debugfs_status_show, inode->i_private);
}

static ssize_t _dpu_crtc_misr_setup(struct file *file,
		const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_mixer *m;
	int i = 0, rc;
	char buf[MISR_BUFF_SIZE + 1];
	u32 frame_count, enable;
	size_t buff_copy;

	if (!file || !file->private_data)
		return -EINVAL;

	dpu_crtc = file->private_data;
	buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
	if (copy_from_user(buf, user_buf, buff_copy)) {
		DPU_ERROR("buffer copy failed\n");
		return -EINVAL;
	}

	buf[buff_copy] = 0; /* end of string */

	if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
		return -EINVAL;

	rc = _dpu_crtc_power_enable(dpu_crtc, true);
	if (rc)
		return rc;

	mutex_lock(&dpu_crtc->crtc_lock);
	dpu_crtc->misr_enable = enable;
	dpu_crtc->misr_frame_count = frame_count;
	for (i = 0; i < dpu_crtc->num_mixers; ++i) {
		dpu_crtc->misr_data[i] = 0;
		m = &dpu_crtc->mixers[i];
		if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
			continue;

		m->hw_lm->ops.setup_misr(m->hw_lm, enable, frame_count);
	}
	mutex_unlock(&dpu_crtc->crtc_lock);
	_dpu_crtc_power_enable(dpu_crtc, false);

	return count;
}

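/*
 * Usage sketch for the MISR debugfs file (the exact path depends on the
 * DRM minor, typically /sys/kernel/debug/dri/<n>/crtc<id>/):
 *
 *   echo "1 10" > misr_data    # enable MISR, collect over 10 frames
 *   cat misr_data              # one "lm idx"/value pair per active mixer
 *   echo "0 0" > misr_data     # disable
 *
 * The two integers map onto the "%u %u" (enable, frame_count) parsed in
 * _dpu_crtc_misr_setup() above.
 */
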
static ssize_t _dpu_crtc_misr_read(struct file *file,
		char __user *user_buff, size_t count, loff_t *ppos)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_mixer *m;
	int i = 0, rc;
	u32 misr_status;
	ssize_t len = 0;
	char buf[MISR_BUFF_SIZE + 1] = {'\0'};

	if (*ppos)
		return 0;

	if (!file || !file->private_data)
		return -EINVAL;

	dpu_crtc = file->private_data;
	rc = _dpu_crtc_power_enable(dpu_crtc, true);
	if (rc)
		return rc;

	mutex_lock(&dpu_crtc->crtc_lock);
	if (!dpu_crtc->misr_enable) {
		len += snprintf(buf + len, MISR_BUFF_SIZE - len,
			"disabled\n");
		goto buff_check;
	}

	for (i = 0; i < dpu_crtc->num_mixers; ++i) {
		m = &dpu_crtc->mixers[i];
		if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
			continue;

		misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
		dpu_crtc->misr_data[i] = misr_status ? misr_status :
							dpu_crtc->misr_data[i];
		len += snprintf(buf + len, MISR_BUFF_SIZE - len, "lm idx:%d\n",
					m->hw_lm->idx - LM_0);
		len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
					dpu_crtc->misr_data[i]);
	}

buff_check:
	if (count <= len) {
		len = 0;
		goto end;
	}

	if (copy_to_user(user_buff, buf, len)) {
		len = -EFAULT;
		goto end;
	}

	*ppos += len;	/* increase offset */

end:
	mutex_unlock(&dpu_crtc->crtc_lock);
	_dpu_crtc_power_enable(dpu_crtc, false);
	return len;
}

#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_res *res;
	struct dpu_crtc_respool *rp;
	int i;

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
			i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
		seq_printf(s, "bw_ctl[%s]: %llu\n",
				dpu_power_handle_get_dbus_name(i),
				dpu_crtc->cur_perf.bw_ctl[i]);
		seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
				dpu_power_handle_get_dbus_name(i),
				dpu_crtc->cur_perf.max_per_pipe_ib[i]);
	}

	mutex_lock(&dpu_crtc->rp_lock);
	list_for_each_entry(rp, &dpu_crtc->rp_head, rp_list) {
		seq_printf(s, "rp.%d: ", rp->sequence_id);
		list_for_each_entry(res, &rp->res_list, list)
			seq_printf(s, "0x%x/0x%llx/%pK/%d ",
					res->type, res->tag, res->val,
					atomic_read(&res->refcount));
		seq_puts(s, "\n");
	}
	mutex_unlock(&dpu_crtc->rp_lock);

	return 0;
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_kms *dpu_kms;

	static const struct file_operations debugfs_status_fops = {
		.open = _dpu_debugfs_status_open,
		.read = seq_read,
		.llseek = seq_lseek,
		.release = single_release,
	};
	static const struct file_operations debugfs_misr_fops = {
		.open = simple_open,
		.read = _dpu_crtc_misr_read,
		.write = _dpu_crtc_misr_setup,
	};

	if (!crtc)
		return -EINVAL;
	dpu_crtc = to_dpu_crtc(crtc);

	dpu_kms = _dpu_crtc_get_kms(crtc);
	if (!dpu_kms)
		return -EINVAL;

	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);
	if (!dpu_crtc->debugfs_root)
		return -ENOMEM;

	/* don't error check these */
	debugfs_create_file("status", 0400,
			dpu_crtc->debugfs_root,
			dpu_crtc, &debugfs_status_fops);
	debugfs_create_file("state", 0600,
			dpu_crtc->debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);
	debugfs_create_file("misr_data", 0600, dpu_crtc->debugfs_root,
					dpu_crtc, &debugfs_misr_fops);

	return 0;
}

static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;

	if (!crtc)
		return;
	dpu_crtc = to_dpu_crtc(crtc);
	debugfs_remove_recursive(dpu_crtc->debugfs_root);
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}

static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
{
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	_dpu_crtc_destroy_debugfs(crtc);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
};

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	struct msm_drm_private *priv = NULL;
	struct dpu_kms *kms = NULL;
	int i;

	priv = dev->dev_private;
	kms = to_dpu_kms(priv->kms);

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	mutex_init(&dpu_crtc->crtc_lock);
	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	mutex_init(&dpu_crtc->rp_lock);
	INIT_LIST_HEAD(&dpu_crtc->rp_head);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
	plane->crtc = crtc;

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	dpu_crtc->phandle = &kms->phandle;

	DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}