// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>

#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_dsc.h"
#include "dpu_hw_merge3d.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
#include "dpu_trace.h"
#include "dpu_core_irq.h"
#include "disp/msm_disp_snapshot.h"

#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

/*
 * Two, to anticipate panels that can do cmd/vid dynamic switching;
 * the plan is to create all possible physical encoder types and switch
 * between them at runtime.
 */
#define NUM_PHYS_ENCODER_TYPES 2

#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)

#define MAX_CHANNELS_PER_ENC 2

#define IDLE_SHORT_TIMEOUT	1

#define MAX_HDISPLAY_SPLIT 1080

/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5

/**
 * enum dpu_enc_rc_events - events for resource control state machine
 * @DPU_ENC_RC_EVENT_KICKOFF:
 *	This event happens at NORMAL priority.
 *	Event that signals the start of the transfer. When this event is
 *	received, enable MDP/DSI core clocks. Regardless of the previous
 *	state, the resource should be in ON state at the end of this event.
 * @DPU_ENC_RC_EVENT_FRAME_DONE:
 *	This event happens at INTERRUPT level.
 *	Event signals the end of the data transfer after the PP FRAME_DONE
 *	event. At the end of this event, a delayed work is scheduled to go to
 *	IDLE_PC state after IDLE_TIMEOUT time.
 * @DPU_ENC_RC_EVENT_PRE_STOP:
 *	This event happens at NORMAL priority.
 *	This event, when received during the ON state, leaves the RC state
 *	in the PRE_OFF state. It should be followed by the STOP event as
 *	part of encoder disable.
 *	If received during IDLE or OFF states, it will do nothing.
 * @DPU_ENC_RC_EVENT_STOP:
 *	This event happens at NORMAL priority.
 *	When this event is received, disable all the MDP/DSI core clocks, and
 *	disable IRQs. It should be called from the PRE_OFF or IDLE states.
 *	IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
 *	PRE_OFF is expected when PRE_STOP was executed during the ON state.
 *	Resource state should be in OFF at the end of the event.
 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
 *	This event happens at NORMAL priority from a work item.
 *	Event signals that there were no frame updates for IDLE_TIMEOUT time.
 *	This would disable MDP/DSI core clocks and change the resource state
 *	to IDLE.
 */
enum dpu_enc_rc_events {
	DPU_ENC_RC_EVENT_KICKOFF = 1,
	DPU_ENC_RC_EVENT_FRAME_DONE,
	DPU_ENC_RC_EVENT_PRE_STOP,
	DPU_ENC_RC_EVENT_STOP,
	DPU_ENC_RC_EVENT_ENTER_IDLE
};

/*
 * enum dpu_enc_rc_states - states that the resource control maintains
 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
 */
enum dpu_enc_rc_states {
	DPU_ENC_RC_STATE_OFF,
	DPU_ENC_RC_STATE_PRE_OFF,
	DPU_ENC_RC_STATE_ON,
	DPU_ENC_RC_STATE_IDLE
};

/**
 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
 *	encoders. Virtual encoder manages one "logical" display. Physical
 *	encoders manage one intf block, tied to a specific panel/sub-panel.
 *	Virtual encoder defers as much as possible to the physical encoders.
 *	Virtual encoder registers itself with the DRM Framework as the encoder.
 * @base:		drm_encoder base class for registration with DRM
 * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
 * @enabled:		True if the encoder is active, protected by enc_lock
 * @num_phys_encs:	Actual number of physical encoders contained.
 * @phys_encs:		Container of physical encoders managed.
 * @cur_master:		Pointer to the current master in this mode. Optimization:
 *			only valid after enable, cleared at disable.
 * @cur_slave:		As above but for the slave encoder.
 * @hw_pp:		Handle to the pingpong blocks used for the display. The
 *			number of pingpong blocks can differ from num_phys_encs.
 * @hw_dsc:		Handle to the DSC blocks used for the display.
 * @dsc_mask:		Bitmask of used DSC blocks.
 * @intfs_swapped:	Whether or not the phys_enc interfaces have been swapped
 *			for partial update right-only cases, such as pingpong
 *			split where virtual pingpong does not generate IRQs
 * @crtc:		Pointer to the currently assigned crtc. Normally you
 *			would use crtc->state->encoder_mask to determine the
 *			link between encoder/crtc. However in this case we need
 *			to track crtc in the disable() hook which is called
 *			_after_ encoder_mask is cleared.
 * @connector:		If a mode is set, cached pointer to the active connector
 * @crtc_kickoff_cb:		Callback into CRTC that will flush & start
 *				all CTL paths
 * @crtc_kickoff_cb_data:	Opaque user data given to crtc_kickoff_cb
 * @debugfs_root:		Debug file system root file node
 * @enc_lock:			Lock around physical encoder
 *				create/destroy/enable/disable
 * @frame_busy_mask:		Bitmask tracking which phys_encs are still busy
 *				processing the current command.
 *				Bit0 = phys_encs[0] etc.
 * @crtc_frame_event_cb:	callback handler for frame event
 * @crtc_frame_event_cb_data:	callback handler private data
 * @frame_done_timeout_ms:	frame done timeout in ms
 * @frame_done_timer:		watchdog timer for frame done event
 * @vsync_event_timer:		vsync timer
 * @disp_info:			local copy of msm_display_info struct
 * @idle_pc_supported:		indicate if idle power collapse is supported
 * @rc_lock:			resource control mutex lock to protect
 *				virt encoder over various state changes
 * @rc_state:			resource controller state
 * @delayed_off_work:		delayed worker to schedule disabling of
 *				clks and resources after IDLE_TIMEOUT time.
 * @vsync_event_work:		worker to handle vsync event for autorefresh
 * @topology:			topology of the display
 * @idle_timeout:		idle timeout duration in milliseconds
 * @wide_bus_en:		wide bus is enabled on this interface
 * @dsc:			drm_dsc_config pointer, for DSC-enabled encoders
 */
struct dpu_encoder_virt {
	struct drm_encoder base;
	spinlock_t enc_spinlock;

	bool enabled;

	unsigned int num_phys_encs;
	struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
	struct dpu_encoder_phys *cur_master;
	struct dpu_encoder_phys *cur_slave;
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];

	unsigned int dsc_mask;

	bool intfs_swapped;

	struct drm_crtc *crtc;
	struct drm_connector *connector;

	struct dentry *debugfs_root;
	struct mutex enc_lock;
	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
	void (*crtc_frame_event_cb)(void *, u32 event);
	void *crtc_frame_event_cb_data;

	atomic_t frame_done_timeout_ms;
	struct timer_list frame_done_timer;
	struct timer_list vsync_event_timer;

	struct msm_display_info disp_info;

	bool idle_pc_supported;
	struct mutex rc_lock;
	enum dpu_enc_rc_states rc_state;
	struct delayed_work delayed_off_work;
	struct kthread_work vsync_event_work;
	struct msm_display_topology topology;

	u32 idle_timeout;

	bool wide_bus_en;

	/* DSC configuration */
	struct drm_dsc_config *dsc;
};

#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)

static u32 dither_matrix[DITHER_MATRIX_SZ] = {
	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
};

bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
{
	const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	return dpu_enc->wide_bus_en;
}

int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i, num_intf = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->hw_intf && phys->hw_intf->ops.setup_misr
				&& phys->hw_intf->ops.collect_misr)
			num_intf++;
	}

	return num_intf;
}

void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;

	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr)
			continue;

		phys->hw_intf->ops.setup_misr(phys->hw_intf, true, 1);
	}
}

int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos)
{
	struct dpu_encoder_virt *dpu_enc;

	int i, rc = 0, entries_added = 0;

	if (!drm_enc->crtc) {
		DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index);
		return -EINVAL;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr)
			continue;

		rc = phys->hw_intf->ops.collect_misr(phys->hw_intf, &crcs[pos + entries_added]);
		if (rc)
			return rc;
		entries_added++;
	}

	return entries_added;
}

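/*
 * Pingpong dither programming: 6 bpc panels get the 4x4 ordered dither
 * matrix defined above, any other depth simply disables dithering on the
 * pingpong block.
 */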
static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
{
	struct dpu_hw_dither_cfg dither_cfg = { 0 };

	if (!hw_pp->ops.setup_dither)
		return;

	switch (bpc) {
	case 6:
		dither_cfg.c0_bitdepth = 6;
		dither_cfg.c1_bitdepth = 6;
		dither_cfg.c2_bitdepth = 6;
		dither_cfg.c3_bitdepth = 6;
		dither_cfg.temporal_en = 0;
		break;
	default:
		hw_pp->ops.setup_dither(hw_pp, NULL);
		return;
	}

	memcpy(&dither_cfg.matrix, dither_matrix,
			sizeof(u32) * DITHER_MATRIX_SZ);

	hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
}

static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
{
	switch (intf_mode) {
	case INTF_MODE_VIDEO:
		return "INTF_MODE_VIDEO";
	case INTF_MODE_CMD:
		return "INTF_MODE_CMD";
	case INTF_MODE_WB_BLOCK:
		return "INTF_MODE_WB_BLOCK";
	case INTF_MODE_WB_LINE:
		return "INTF_MODE_WB_LINE";
	default:
		return "INTF_MODE_UNKNOWN";
	}
}

void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
		enum dpu_intr_idx intr_idx)
{
	DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
			DRMID(phys_enc->parent),
			dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
			phys_enc->intf_idx - INTF_0, phys_enc->wb_idx - WB_0,
			phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);

	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
				DPU_ENCODER_FRAME_EVENT_ERROR);
}

static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
		u32 irq_idx, struct dpu_encoder_wait_info *info);

int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
		int irq,
		void (*func)(void *arg, int irq_idx),
		struct dpu_encoder_wait_info *wait_info)
{
	u32 irq_status;
	int ret;

	if (!wait_info) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}
	/* note: do master / slave checking outside */

	/* return EWOULDBLOCK since we know the wait isn't necessary */
	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
		DRM_ERROR("encoder is disabled id=%u, callback=%ps, irq=%d\n",
			  DRMID(phys_enc->parent), func,
			  irq);
		return -EWOULDBLOCK;
	}

	if (irq < 0) {
		DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
			      DRMID(phys_enc->parent), func);
		return 0;
	}

	DRM_DEBUG_KMS("id=%u, callback=%ps, irq=%d, pp=%d, pending_cnt=%d\n",
		      DRMID(phys_enc->parent), func,
		      irq, phys_enc->hw_pp->idx - PINGPONG_0,
		      atomic_read(wait_info->atomic_cnt));

	ret = dpu_encoder_helper_wait_event_timeout(
			DRMID(phys_enc->parent),
			irq,
			wait_info);

	if (ret <= 0) {
		irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq);
		if (irq_status) {
			unsigned long flags;

			DRM_DEBUG_KMS("irq not triggered id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
				      DRMID(phys_enc->parent), func,
				      irq,
				      phys_enc->hw_pp->idx - PINGPONG_0,
				      atomic_read(wait_info->atomic_cnt));
			local_irq_save(flags);
			func(phys_enc, irq);
			local_irq_restore(flags);
			ret = 0;
		} else {
			ret = -ETIMEDOUT;
			DRM_DEBUG_KMS("irq timeout id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
				      DRMID(phys_enc->parent), func,
				      irq,
				      phys_enc->hw_pp->idx - PINGPONG_0,
				      atomic_read(wait_info->atomic_cnt));
		}
	} else {
		ret = 0;
		trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
			func,
			irq,
			phys_enc->hw_pp->idx - PINGPONG_0,
			atomic_read(wait_info->atomic_cnt));
	}

	return ret;
}

int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;

	return phys ? atomic_read(&phys->vsync_cnt) : 0;
}

int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	int linecount = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	phys = dpu_enc ? dpu_enc->cur_master : NULL;

	if (phys && phys->ops.get_line_count)
		linecount = phys->ops.get_line_count(phys);

	return linecount;
}

static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i = 0;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	mutex_lock(&dpu_enc->enc_lock);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.destroy) {
			phys->ops.destroy(phys);
			--dpu_enc->num_phys_encs;
			dpu_enc->phys_encs[i] = NULL;
		}
	}

	if (dpu_enc->num_phys_encs)
		DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
				dpu_enc->num_phys_encs);
	dpu_enc->num_phys_encs = 0;
	mutex_unlock(&dpu_enc->enc_lock);

	drm_encoder_cleanup(drm_enc);
	mutex_destroy(&dpu_enc->enc_lock);
}

void dpu_encoder_helper_split_config(
		struct dpu_encoder_phys *phys_enc,
		enum dpu_intf interface)
{
	struct dpu_encoder_virt *dpu_enc;
	struct split_pipe_cfg cfg = { 0 };
	struct dpu_hw_mdp *hw_mdptop;
	struct msm_display_info *disp_info;

	if (!phys_enc->hw_mdptop || !phys_enc->parent) {
		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
		return;
	}

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
	hw_mdptop = phys_enc->hw_mdptop;
	disp_info = &dpu_enc->disp_info;

	if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
		return;

	/*
	 * disable split modes since the encoder will be operating as the only
	 * encoder, either for the entire use case in the case of, for example,
	 * single DSI, or for this frame in the case of left/right only partial
	 * update.
	 */
	if (phys_enc->split_role == ENC_ROLE_SOLO) {
		if (hw_mdptop->ops.setup_split_pipe)
			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
		return;
	}

	cfg.en = true;
	cfg.mode = phys_enc->intf_mode;
	cfg.intf = interface;

	if (cfg.en && phys_enc->ops.needs_single_flush &&
			phys_enc->ops.needs_single_flush(phys_enc))
		cfg.split_flush_en = true;

	if (phys_enc->split_role == ENC_ROLE_MASTER) {
		DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);

		if (hw_mdptop->ops.setup_split_pipe)
			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
	}
}

bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int i, intf_count = 0, num_dsc = 0;

	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
		if (dpu_enc->phys_encs[i])
			intf_count++;

	/* See dpu_encoder_get_topology, we only support 2:2:1 topology */
	if (dpu_enc->dsc)
		num_dsc = 2;

	return (num_dsc > 0) && (num_dsc > intf_count);
}

static struct msm_display_topology dpu_encoder_get_topology(
			struct dpu_encoder_virt *dpu_enc,
			struct dpu_kms *dpu_kms,
			struct drm_display_mode *mode,
			struct drm_crtc_state *crtc_state)
{
	struct msm_display_topology topology = {0};
	int i, intf_count = 0;

	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
		if (dpu_enc->phys_encs[i])
			intf_count++;

	/* Datapath topology selection
	 *
	 * Dual display
	 * 2 LM, 2 INTF (split display using 2 interfaces)
	 *
	 * Single display
	 * 1 LM, 1 INTF
	 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
	 *
	 * Add dspps to the reservation requirements if ctm is requested
	 */
	if (intf_count == 2)
		topology.num_lm = 2;
	else if (!dpu_kms->catalog->caps->has_3d_merge)
		topology.num_lm = 1;
	else
		topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ?
				2 : 1;

	if (crtc_state->ctm)
		topology.num_dspp = topology.num_lm;

	topology.num_intf = intf_count;

	if (dpu_enc->dsc) {
		/*
		 * In case of Display Stream Compression (DSC), we would use
		 * 2 DSC encoders, 2 layer mixers and 1 interface;
		 * this is power optimal and can drive up to (including) 4k
		 * screens
		 */
		topology.num_dsc = 2;
		topology.num_lm = 2;
		topology.num_intf = 1;
	}

	return topology;
}

static int dpu_encoder_virt_atomic_check(
		struct drm_encoder *drm_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct drm_display_mode *adj_mode;
	struct msm_display_topology topology;
	struct dpu_global_state *global_state;
	int i = 0;
	int ret = 0;

	if (!drm_enc || !crtc_state || !conn_state) {
		DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
				drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
		return -EINVAL;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);
	adj_mode = &crtc_state->adjusted_mode;
	global_state = dpu_kms_get_global_state(crtc_state->state);
	if (IS_ERR(global_state))
		return PTR_ERR(global_state);

	trace_dpu_enc_atomic_check(DRMID(drm_enc));

	/* perform atomic check on the first physical encoder (master) */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.atomic_check)
			ret = phys->ops.atomic_check(phys, crtc_state,
					conn_state);
		if (ret) {
			DPU_ERROR_ENC(dpu_enc,
					"mode unsupported, phys idx %d\n", i);
			return ret;
		}
	}

	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state);

	/*
	 * Release and Allocate resources on every modeset
	 * Don't allocate when active is false.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
		dpu_rm_release(global_state, drm_enc);

		if (!crtc_state->active_changed || crtc_state->enable)
			ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
					drm_enc, crtc_state, topology);
	}

	trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);

	return ret;
}

static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
			struct msm_display_info *disp_info)
{
	struct dpu_vsync_source_cfg vsync_cfg = { 0 };
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_hw_mdp *hw_mdptop;
	struct drm_encoder *drm_enc;
	int i;

	if (!dpu_enc || !disp_info) {
		DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
					dpu_enc != NULL, disp_info != NULL);
		return;
	} else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
		DPU_ERROR("invalid num phys enc %d/%d\n",
				dpu_enc->num_phys_encs,
				(int) ARRAY_SIZE(dpu_enc->hw_pp));
		return;
	}

	drm_enc = &dpu_enc->base;
	/* these pointers are checked in virt_enable_helper */
	priv = drm_enc->dev->dev_private;

	dpu_kms = to_dpu_kms(priv->kms);
	hw_mdptop = dpu_kms->hw_mdp;
	if (!hw_mdptop) {
		DPU_ERROR("invalid mdptop\n");
		return;
	}

	if (hw_mdptop->ops.setup_vsync_source &&
			disp_info->is_cmd_mode) {
		for (i = 0; i < dpu_enc->num_phys_encs; i++)
			vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;

		vsync_cfg.pp_count = dpu_enc->num_phys_encs;
		if (disp_info->is_te_using_watchdog_timer)
			vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
		else
			vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;

		hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
	}
}

static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.irq_control)
			phys->ops.irq_control(phys, enable);
	}
}

static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
		bool enable)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);

	if (!dpu_enc->cur_master) {
		DPU_ERROR("encoder master not set\n");
		return;
	}

	if (enable) {
		/* enable DPU core clks */
		pm_runtime_get_sync(&dpu_kms->pdev->dev);

		/* enable all the irq */
		_dpu_encoder_irq_control(drm_enc, true);

	} else {
		/* disable all the irq */
		_dpu_encoder_irq_control(drm_enc, false);

		/* disable DPU core clks */
		pm_runtime_put_sync(&dpu_kms->pdev->dev);
	}
}

static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
		u32 sw_event)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	bool is_vid_mode = false;

	if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
		DPU_ERROR("invalid parameters\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	is_vid_mode = !dpu_enc->disp_info.is_cmd_mode;

	/*
	 * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
	 * STOP events and return early for other events (ie wb display).
	 */
	if (!dpu_enc->idle_pc_supported &&
			(sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
			sw_event != DPU_ENC_RC_EVENT_STOP &&
			sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
		return 0;

	trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
			 dpu_enc->rc_state, "begin");

	switch (sw_event) {
	case DPU_ENC_RC_EVENT_KICKOFF:
		/* cancel delayed off work, if any */
		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
					sw_event);

		mutex_lock(&dpu_enc->rc_lock);

		/* return if the resource control is already in ON state */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
			DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in ON state\n",
				      DRMID(drm_enc), sw_event);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		} else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
				dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
			DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in state %d\n",
				      DRMID(drm_enc), sw_event,
				      dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return -EINVAL;
		}

		if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
			_dpu_encoder_irq_control(drm_enc, true);
		else
			_dpu_encoder_resource_control_helper(drm_enc, true);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				"kickoff");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_FRAME_DONE:
		/*
		 * The mutex lock is not used as this event happens at interrupt
		 * context. And locking is not required as the other events
		 * like KICKOFF and STOP do a wait-for-idle before executing
		 * the resource_control.
		 */
		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
			DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
					DRMID(drm_enc), sw_event,
					dpu_enc->rc_state);
			return -EINVAL;
		}

		/*
		 * schedule off work item only when there are no
		 * frames pending
		 */
		if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
			DRM_DEBUG_KMS("id:%d skip schedule work\n",
					DRMID(drm_enc));
			return 0;
		}

		queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
				   msecs_to_jiffies(dpu_enc->idle_timeout));

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				"frame done");
		break;

	case DPU_ENC_RC_EVENT_PRE_STOP:
		/* cancel delayed off work, if any */
		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
					sw_event);

		mutex_lock(&dpu_enc->rc_lock);

		if (is_vid_mode &&
			  dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
			_dpu_encoder_irq_control(drm_enc, true);
		}
		/* skip if is already OFF or IDLE, resources are off already */
		else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
				dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
			DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
					DRMID(drm_enc), sw_event,
					dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				"pre stop");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_STOP:
		mutex_lock(&dpu_enc->rc_lock);

		/* return if the resource control is already in OFF state */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
			DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
					DRMID(drm_enc), sw_event);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		} else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
			DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
					DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return -EINVAL;
		}

		/*
		 * expect to arrive here only if in either idle state or pre-off;
		 * in IDLE state the resources are already disabled
		 */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
			_dpu_encoder_resource_control_helper(drm_enc, false);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				"stop");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_ENTER_IDLE:
		mutex_lock(&dpu_enc->rc_lock);

		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
			DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
					DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		/*
		 * if we are in ON but a frame was just kicked off,
		 * ignore the IDLE event, it's probably a stale timer event
		 */
		if (dpu_enc->frame_busy_mask[0]) {
			DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
					DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		if (is_vid_mode)
			_dpu_encoder_irq_control(drm_enc, false);
		else
			_dpu_encoder_resource_control_helper(drm_enc, false);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				"idle");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	default:
		DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
			  sw_event);
		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				"error");
		break;
	}

	trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
			dpu_enc->idle_pc_supported, dpu_enc->rc_state,
			"end");
	return 0;
}

void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
		struct drm_writeback_job *job)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.prepare_wb_job)
			phys->ops.prepare_wb_job(phys, job);
	}
}

void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
		struct drm_writeback_job *job)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.cleanup_wb_job)
			phys->ops.cleanup_wb_job(phys, job);
	}
}

static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
				     struct drm_crtc_state *crtc_state,
				     struct drm_connector_state *conn_state)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_crtc_state *cstate;
	struct dpu_global_state *global_state;
	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
	struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
	int num_lm, num_ctl, num_pp, num_dsc;
	unsigned int dsc_mask = 0;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	global_state = dpu_kms_get_existing_global_state(dpu_kms);
	if (IS_ERR_OR_NULL(global_state)) {
		DPU_ERROR("Failed to get global state");
		return;
	}

	trace_dpu_enc_mode_set(DRMID(drm_enc));

	/* Query resources that have been reserved in the atomic check step. */
	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
		ARRAY_SIZE(hw_pp));
	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
	num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
	dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
		ARRAY_SIZE(hw_dspp));

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
		dpu_enc->hw_pp[i] = i < num_pp ?
					to_dpu_hw_pingpong(hw_pp[i]) : NULL;

	if (dpu_enc->dsc) {
		num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
							drm_enc->base.id, DPU_HW_BLK_DSC,
							hw_dsc, ARRAY_SIZE(hw_dsc));
		for (i = 0; i < num_dsc; i++) {
			dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
			dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
		}
	}

	dpu_enc->dsc_mask = dsc_mask;

	cstate = to_dpu_crtc_state(crtc_state);

	for (i = 0; i < num_lm; i++) {
		int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);

		cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
		cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
		cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
	}

	cstate->num_mixers = num_lm;

	dpu_enc->connector = conn_state->connector;

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!dpu_enc->hw_pp[i]) {
			DPU_ERROR_ENC(dpu_enc,
				"no pp block assigned at idx: %d\n", i);
			return;
		}

		if (!hw_ctl[i]) {
			DPU_ERROR_ENC(dpu_enc,
				"no ctl block assigned at idx: %d\n", i);
			return;
		}

		phys->hw_pp = dpu_enc->hw_pp[i];
		phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);

		phys->cached_mode = crtc_state->adjusted_mode;
		if (phys->ops.atomic_mode_set)
			phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
	}
}

static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i;

	if (!drm_enc || !drm_enc->dev) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	if (!dpu_enc || !dpu_enc->cur_master) {
		DPU_ERROR("invalid dpu encoder/master\n");
		return;
	}

	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_TMDS &&
		dpu_enc->cur_master->hw_mdptop &&
		dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
		dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
			dpu_enc->cur_master->hw_mdptop);

	_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);

	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
			!WARN_ON(dpu_enc->num_phys_encs == 0)) {
		unsigned bpc = dpu_enc->connector->display_info.bpc;

		for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
			if (!dpu_enc->hw_pp[i])
				continue;
			_dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
		}
	}
}

void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	mutex_lock(&dpu_enc->enc_lock);

	if (!dpu_enc->enabled)
		goto out;

	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
		dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
		dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);

	_dpu_encoder_virt_enable_helper(drm_enc);

out:
	mutex_unlock(&dpu_enc->enc_lock);
}

static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
					struct drm_atomic_state *state)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int ret = 0;
	struct drm_display_mode *cur_mode = NULL;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	mutex_lock(&dpu_enc->enc_lock);
	cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;

	trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
			     cur_mode->vdisplay);

	/* always enable slave encoder before master */
	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
		dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);

	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
		dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);

	ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
	if (ret) {
		DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
				ret);
		goto out;
	}

	_dpu_encoder_virt_enable_helper(drm_enc);

	dpu_enc->enabled = true;

out:
	mutex_unlock(&dpu_enc->enc_lock);
}

static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
					struct drm_atomic_state *state)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_state = NULL;
	int i = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	crtc = drm_atomic_get_old_crtc_for_encoder(state, drm_enc);
	if (crtc)
		old_state = drm_atomic_get_old_crtc_state(state, crtc);

	/*
	 * The encoder is already disabled if self refresh mode was set earlier,
	 * in the old_state for the corresponding crtc.
	 */
	if (old_state && old_state->self_refresh_active)
		return;

	mutex_lock(&dpu_enc->enc_lock);
	dpu_enc->enabled = false;

	trace_dpu_enc_disable(DRMID(drm_enc));

	/* wait for idle */
	dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.disable)
			phys->ops.disable(phys);
	}

	/* after phys waits for frame-done, should be no more frames pending */
	if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
		DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
		del_timer_sync(&dpu_enc->frame_done_timer);
	}

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);

	dpu_enc->connector = NULL;

	DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");

	mutex_unlock(&dpu_enc->enc_lock);
}

static enum dpu_intf dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog,
		enum dpu_intf_type type, u32 controller_id)
{
	int i = 0;

	if (type == INTF_WB)
		return INTF_MAX;

	for (i = 0; i < catalog->intf_count; i++) {
		if (catalog->intf[i].type == type
		    && catalog->intf[i].controller_id == controller_id) {
			return catalog->intf[i].id;
		}
	}

	return INTF_MAX;
}

static enum dpu_wb dpu_encoder_get_wb(const struct dpu_mdss_cfg *catalog,
		enum dpu_intf_type type, u32 controller_id)
{
	int i = 0;

	if (type != INTF_WB)
		return WB_MAX;

	for (i = 0; i < catalog->wb_count; i++) {
		if (catalog->wb[i].id == controller_id)
			return catalog->wb[i].id;
	}

	return WB_MAX;
}

void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phy_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	unsigned long lock_flags;

	if (!drm_enc || !phy_enc)
		return;

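	/*
	 * Count the vsync on this physical encoder and, while holding the
	 * encoder spinlock, forward the event to the crtc currently assigned
	 * to this encoder (if any).
	 */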
	DPU_ATRACE_BEGIN("encoder_vblank_callback");
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	atomic_inc(&phy_enc->vsync_cnt);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	if (dpu_enc->crtc)
		dpu_crtc_vblank_callback(dpu_enc->crtc);
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);

	DPU_ATRACE_END("encoder_vblank_callback");
}

void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phy_enc)
{
	if (!phy_enc)
		return;

	DPU_ATRACE_BEGIN("encoder_underrun_callback");
	atomic_inc(&phy_enc->underrun_cnt);

	/* trigger dump only on the first underrun */
	if (atomic_read(&phy_enc->underrun_cnt) == 1)
		msm_disp_snapshot_state(drm_enc->dev);

	trace_dpu_enc_underrun_cb(DRMID(drm_enc),
				  atomic_read(&phy_enc->underrun_cnt));
	DPU_ATRACE_END("encoder_underrun_callback");
}

void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	/* crtc should always be cleared before re-assigning */
	WARN_ON(crtc && dpu_enc->crtc);
	dpu_enc->crtc = crtc;
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
					struct drm_crtc *crtc, bool enable)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;
	int i;

	trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	if (dpu_enc->crtc != crtc) {
		spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
		return;
	}
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.control_vblank_irq)
			phys->ops.control_vblank_irq(phys, enable);
	}
}

void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
		void (*frame_event_cb)(void *, u32 event),
		void *frame_event_cb_data)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;
	bool enable;

	enable = frame_event_cb ? true : false;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	dpu_enc->crtc_frame_event_cb = frame_event_cb;
	dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

void dpu_encoder_frame_done_callback(
		struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *ready_phys, u32 event)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned int i;

	if (event & (DPU_ENCODER_FRAME_EVENT_DONE
			| DPU_ENCODER_FRAME_EVENT_ERROR
			| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (!dpu_enc->frame_busy_mask[0]) {
			/*
			 * suppress frame_done without waiter,
			 * likely autorefresh
			 */
			trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
					dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
					ready_phys->intf_idx, ready_phys->wb_idx);
			return;
		}

		/* One of the physical encoders has become idle */
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			if (dpu_enc->phys_encs[i] == ready_phys) {
				trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
						dpu_enc->frame_busy_mask[0]);
				clear_bit(i, dpu_enc->frame_busy_mask);
			}
		}

		if (!dpu_enc->frame_busy_mask[0]) {
			atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
			del_timer(&dpu_enc->frame_done_timer);

			dpu_encoder_resource_control(drm_enc,
					DPU_ENC_RC_EVENT_FRAME_DONE);

			if (dpu_enc->crtc_frame_event_cb)
				dpu_enc->crtc_frame_event_cb(
					dpu_enc->crtc_frame_event_cb_data,
					event);
		}
	} else {
		if (dpu_enc->crtc_frame_event_cb)
			dpu_enc->crtc_frame_event_cb(
				dpu_enc->crtc_frame_event_cb_data, event);
	}
}

static void dpu_encoder_off_work(struct work_struct *work)
{
	struct dpu_encoder_virt *dpu_enc = container_of(work,
			struct dpu_encoder_virt, delayed_off_work.work);

	dpu_encoder_resource_control(&dpu_enc->base,
						DPU_ENC_RC_EVENT_ENTER_IDLE);

	dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
				DPU_ENCODER_FRAME_EVENT_IDLE);
}

/**
 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
 * @drm_enc: Pointer to drm encoder structure
 * @phys: Pointer to physical encoder structure
 * @extra_flush_bits: Additional bit mask to include in flush trigger
 */
static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
{
	struct dpu_hw_ctl *ctl;
	int pending_kickoff_cnt;
	u32 ret = UINT_MAX;

	if (!phys->hw_pp) {
		DPU_ERROR("invalid pingpong hw\n");
		return;
	}

	ctl = phys->hw_ctl;
	if (!ctl->ops.trigger_flush) {
		DPU_ERROR("missing trigger cb\n");
		return;
	}

	pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);

	if (extra_flush_bits && ctl->ops.update_pending_flush)
		ctl->ops.update_pending_flush(ctl, extra_flush_bits);

	ctl->ops.trigger_flush(ctl);

	if (ctl->ops.get_pending_flush)
		ret = ctl->ops.get_pending_flush(ctl);

	trace_dpu_enc_trigger_flush(DRMID(drm_enc),
			dpu_encoder_helper_get_intf_type(phys->intf_mode),
			phys->intf_idx, phys->wb_idx,
			pending_kickoff_cnt, ctl->idx,
			extra_flush_bits, ret);
}

/**
 * _dpu_encoder_trigger_start - trigger start for a physical encoder
 * @phys: Pointer to physical encoder structure
 */
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
	if (!phys) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	if (!phys->hw_pp) {
		DPU_ERROR("invalid pingpong hw\n");
		return;
	}

	if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
		phys->ops.trigger_start(phys);
}

void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;

	ctl = phys_enc->hw_ctl;
	if (ctl->ops.trigger_start) {
		ctl->ops.trigger_start(ctl);
		trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
	}
}

static int dpu_encoder_helper_wait_event_timeout(
		int32_t drm_id,
		u32 irq_idx,
		struct dpu_encoder_wait_info *info)
{
	int rc = 0;
	s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
	s64 jiffies = msecs_to_jiffies(info->timeout_ms);
	s64 time;

	do {
		rc = wait_event_timeout(*(info->wq),
				atomic_read(info->atomic_cnt) == 0, jiffies);
		time = ktime_to_ms(ktime_get());

		trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time,
						 expected_time,
						 atomic_read(info->atomic_cnt));
	/* If we timed out, counter is valid and time is less, wait again */
	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
			(time < expected_time));

	return rc;
}

static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_hw_ctl *ctl;
	int rc;
	struct drm_encoder *drm_enc;

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
	ctl = phys_enc->hw_ctl;
	drm_enc = phys_enc->parent;

	if (!ctl->ops.reset)
		return;

	DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
		      ctl->idx);

	rc = ctl->ops.reset(ctl);
	if (rc) {
		DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
		msm_disp_snapshot_state(drm_enc->dev);
	}

	phys_enc->enable_state = DPU_ENC_ENABLED;
}

/**
 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
 *	Iterate through the physical encoders and perform consolidated flush
 *	and/or control start triggering as needed. This is done in the virtual
 *	encoder rather than the individual physical ones in order to handle
 *	use cases that require visibility into multiple physical encoders at
 *	a time.
 * @dpu_enc: Pointer to virtual encoder structure
 */
static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
{
	struct dpu_hw_ctl *ctl;
	uint32_t i, pending_flush;
	unsigned long lock_flags;

	pending_flush = 0x0;

	/* update pending counts and trigger kickoff ctl flush atomically */
	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);

	/* don't perform flush/start operations for slave encoders */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->enable_state == DPU_ENC_DISABLED)
			continue;

		ctl = phys->hw_ctl;

		/*
		 * This is cleared in frame_done worker, which isn't invoked
		 * for async commits. So don't set this for async, since it'll
		 * roll over to the next commit.
		 */
		if (phys->split_role != ENC_ROLE_SLAVE)
			set_bit(i, dpu_enc->frame_busy_mask);

		if (!phys->ops.needs_single_flush ||
				!phys->ops.needs_single_flush(phys))
			_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
		else if (ctl->ops.get_pending_flush)
			pending_flush |= ctl->ops.get_pending_flush(ctl);
	}

	/* for split flush, combine pending flush masks and send to master */
	if (pending_flush && dpu_enc->cur_master) {
		_dpu_encoder_trigger_flush(
				&dpu_enc->base,
				dpu_enc->cur_master,
				pending_flush);
	}

	_dpu_encoder_trigger_start(dpu_enc->cur_master);

	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	unsigned int i;
	struct dpu_hw_ctl *ctl;
	struct msm_display_info *disp_info;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	disp_info = &dpu_enc->disp_info;

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];

		ctl = phys->hw_ctl;
		if (ctl->ops.clear_pending_flush)
			ctl->ops.clear_pending_flush(ctl);

		/* update only for command mode primary ctl */
		if ((phys == dpu_enc->cur_master) &&
		    disp_info->is_cmd_mode
		    && ctl->ops.trigger_pending)
			ctl->ops.trigger_pending(ctl);
	}
}

static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
			struct drm_display_mode *mode)
{
	u64 pclk_rate;
	u32 pclk_period;
	u32 line_time;

	/*
	 * For linetime calculation, only operate on master encoder.
	 */
	if (!dpu_enc->cur_master)
		return 0;

	if (!dpu_enc->cur_master->ops.get_line_count) {
		DPU_ERROR("get_line_count function not defined\n");
		return 0;
	}

	pclk_rate = mode->clock; /* pixel clock in kHz */
	if (pclk_rate == 0) {
		DPU_ERROR("pclk is 0, cannot calculate line time\n");
		return 0;
	}

	pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
	if (pclk_period == 0) {
		DPU_ERROR("pclk period is 0\n");
		return 0;
	}

	/*
	 * Line time calculation based on Pixel clock and HTOTAL.
	 * Final unit is in ns.
	 */
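	/*
	 * Worked example (illustrative only): a 1080p60 mode has
	 * mode->clock = 148500 kHz, so pclk_period = 10^9 / 148500 ~= 6735
	 * (effectively picoseconds); with htotal = 2200 this yields
	 * line_time ~= 6735 * 2200 / 1000 ~= 14817 ns per line.
	 */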
	line_time = (pclk_period * mode->htotal) / 1000;
	if (line_time == 0) {
		DPU_ERROR("line time calculation is 0\n");
		return 0;
	}

	DPU_DEBUG_ENC(dpu_enc,
			"clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
			pclk_rate, pclk_period, line_time);

	return line_time;
}

int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
{
	struct drm_display_mode *mode;
	struct dpu_encoder_virt *dpu_enc;
	u32 cur_line;
	u32 line_time;
	u32 vtotal, time_to_vsync;
	ktime_t cur_time;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	if (!drm_enc->crtc || !drm_enc->crtc->state) {
		DPU_ERROR("crtc/crtc state object is NULL\n");
		return -EINVAL;
	}
	mode = &drm_enc->crtc->state->adjusted_mode;

	line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
	if (!line_time)
		return -EINVAL;

	cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);

	vtotal = mode->vtotal;
	if (cur_line >= vtotal)
		time_to_vsync = line_time * vtotal;
	else
		time_to_vsync = line_time * (vtotal - cur_line);

	if (time_to_vsync == 0) {
		DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
				vtotal);
		return -EINVAL;
	}

	cur_time = ktime_get();
	*wakeup_time = ktime_add_ns(cur_time, time_to_vsync);

	DPU_DEBUG_ENC(dpu_enc,
			"cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
			cur_line, vtotal, time_to_vsync,
			ktime_to_ms(cur_time),
			ktime_to_ms(*wakeup_time));
	return 0;
}

static void dpu_encoder_vsync_event_handler(struct timer_list *t)
{
	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
			vsync_event_timer);
	struct drm_encoder *drm_enc = &dpu_enc->base;
	struct msm_drm_private *priv;
	struct msm_drm_thread *event_thread;

	if (!drm_enc->dev || !drm_enc->crtc) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	priv = drm_enc->dev->dev_private;

	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index\n");
		return;
	}
	event_thread = &priv->event_thread[drm_enc->crtc->index];
	if (!event_thread) {
		DPU_ERROR("event_thread not found for crtc:%d\n",
				drm_enc->crtc->index);
		return;
	}

	del_timer(&dpu_enc->vsync_event_timer);
}

static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
{
	struct dpu_encoder_virt *dpu_enc = container_of(work,
			struct dpu_encoder_virt, vsync_event_work);
	ktime_t wakeup_time;

	if (dpu_encoder_vsync_time(&dpu_enc->base, &wakeup_time))
		return;

	trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
	mod_timer(&dpu_enc->vsync_event_timer,
			nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
}

static u32
dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
				  u32 enc_ip_width)
{
	int ssm_delay, total_pixels, soft_slice_per_enc;

	soft_slice_per_enc = enc_ip_width / dsc->slice_width;

	/*
	 * minimum number of initial line pixels is a sum of:
	 * 1. sub-stream multiplexer delay (83 groups for 8bpc,
	 *    91 for 10 bpc) * 3
	 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
	 * 3. the initial xmit delay
	 * 4. total pipeline delay through the "lock step" of encoder (47)
	 * 5. 6 additional pixels as the output of the rate buffer is
	 *    48 bits wide
	 */
	ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92);
	total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
	if (soft_slice_per_enc > 1)
		total_pixels += (ssm_delay * 3);
	return DIV_ROUND_UP(total_pixels, dsc->slice_width);
}

static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
				     struct dpu_hw_pingpong *hw_pp,
				     struct drm_dsc_config *dsc,
				     u32 common_mode,
				     u32 initial_lines)
{
	if (hw_dsc->ops.dsc_config)
		hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);

	if (hw_dsc->ops.dsc_config_thresh)
		hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);

	if (hw_pp->ops.setup_dsc)
		hw_pp->ops.setup_dsc(hw_pp);

	if (hw_dsc->ops.dsc_bind_pingpong_blk)
		hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, true, hw_pp->idx);

	if (hw_pp->ops.enable_dsc)
		hw_pp->ops.enable_dsc(hw_pp);
}

static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
				 struct drm_dsc_config *dsc)
{
	/* coding only for 2LM, 2enc, 1 dsc config */
	struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	int this_frame_slices;
	int intf_ip_w, enc_ip_w;
	int dsc_common_mode;
	int pic_width;
	u32 initial_lines;
	int i;

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
		hw_pp[i] = dpu_enc->hw_pp[i];
		hw_dsc[i] = dpu_enc->hw_dsc[i];

		if (!hw_pp[i] || !hw_dsc[i]) {
			DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
			return;
		}
	}

	dsc_common_mode = 0;
	pic_width = dsc->pic_width;

	dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
	if (enc_master->intf_mode == INTF_MODE_VIDEO)
		dsc_common_mode |= DSC_MODE_VIDEO;

	this_frame_slices = pic_width / dsc->slice_width;
	intf_ip_w = this_frame_slices * dsc->slice_width;

	/*
	 * dsc merge case: when using 2 encoders for the same stream,
	 * the number of slices needs to be the same on both encoders.
1885 */ 1886 enc_ip_w = intf_ip_w / 2; 1887 initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w); 1888 1889 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) 1890 dpu_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], dsc, dsc_common_mode, initial_lines); 1891 } 1892 1893 void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc) 1894 { 1895 struct dpu_encoder_virt *dpu_enc; 1896 struct dpu_encoder_phys *phys; 1897 bool needs_hw_reset = false; 1898 unsigned int i; 1899 1900 dpu_enc = to_dpu_encoder_virt(drm_enc); 1901 1902 trace_dpu_enc_prepare_kickoff(DRMID(drm_enc)); 1903 1904 /* prepare for next kickoff, may include waiting on previous kickoff */ 1905 DPU_ATRACE_BEGIN("enc_prepare_for_kickoff"); 1906 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1907 phys = dpu_enc->phys_encs[i]; 1908 if (phys->ops.prepare_for_kickoff) 1909 phys->ops.prepare_for_kickoff(phys); 1910 if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET) 1911 needs_hw_reset = true; 1912 } 1913 DPU_ATRACE_END("enc_prepare_for_kickoff"); 1914 1915 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF); 1916 1917 /* if any phys needs reset, reset all phys, in-order */ 1918 if (needs_hw_reset) { 1919 trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc)); 1920 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1921 dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]); 1922 } 1923 } 1924 1925 if (dpu_enc->dsc) 1926 dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc); 1927 } 1928 1929 bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc) 1930 { 1931 struct dpu_encoder_virt *dpu_enc; 1932 unsigned int i; 1933 struct dpu_encoder_phys *phys; 1934 1935 dpu_enc = to_dpu_encoder_virt(drm_enc); 1936 1937 if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) { 1938 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1939 phys = dpu_enc->phys_encs[i]; 1940 if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) { 1941 DPU_DEBUG("invalid FB not kicking off\n"); 1942 return false; 1943 } 1944 } 1945 } 1946 1947 return true; 1948 } 1949 1950 void dpu_encoder_kickoff(struct drm_encoder *drm_enc) 1951 { 1952 struct dpu_encoder_virt *dpu_enc; 1953 struct dpu_encoder_phys *phys; 1954 ktime_t wakeup_time; 1955 unsigned long timeout_ms; 1956 unsigned int i; 1957 1958 DPU_ATRACE_BEGIN("encoder_kickoff"); 1959 dpu_enc = to_dpu_encoder_virt(drm_enc); 1960 1961 trace_dpu_enc_kickoff(DRMID(drm_enc)); 1962 1963 timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 / 1964 drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode); 1965 1966 atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms); 1967 mod_timer(&dpu_enc->frame_done_timer, 1968 jiffies + msecs_to_jiffies(timeout_ms)); 1969 1970 /* All phys encs are ready to go, trigger the kickoff */ 1971 _dpu_encoder_kickoff_phys(dpu_enc); 1972 1973 /* allow phys encs to handle any post-kickoff business */ 1974 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1975 phys = dpu_enc->phys_encs[i]; 1976 if (phys->ops.handle_post_kickoff) 1977 phys->ops.handle_post_kickoff(phys); 1978 } 1979 1980 if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI && 1981 !dpu_encoder_vsync_time(drm_enc, &wakeup_time)) { 1982 trace_dpu_enc_early_kickoff(DRMID(drm_enc), 1983 ktime_to_ms(wakeup_time)); 1984 mod_timer(&dpu_enc->vsync_event_timer, 1985 nsecs_to_jiffies(ktime_to_ns(wakeup_time))); 1986 } 1987 1988 DPU_ATRACE_END("encoder_kickoff"); 1989 } 1990 1991 static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc) 1992 { 1993 struct dpu_hw_mixer_cfg mixer; 1994 int i, num_lm; 
1995 struct dpu_global_state *global_state; 1996 struct dpu_hw_blk *hw_lm[2]; 1997 struct dpu_hw_mixer *hw_mixer[2]; 1998 struct dpu_hw_ctl *ctl = phys_enc->hw_ctl; 1999 2000 memset(&mixer, 0, sizeof(mixer)); 2001 2002 /* reset all mixers for this encoder */ 2003 if (phys_enc->hw_ctl->ops.clear_all_blendstages) 2004 phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl); 2005 2006 global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms); 2007 2008 num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state, 2009 phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); 2010 2011 for (i = 0; i < num_lm; i++) { 2012 hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]); 2013 if (phys_enc->hw_ctl->ops.update_pending_flush_mixer) 2014 phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx); 2015 2016 /* clear all blendstages */ 2017 if (phys_enc->hw_ctl->ops.setup_blendstage) 2018 phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL); 2019 } 2020 } 2021 2022 void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) 2023 { 2024 struct dpu_hw_ctl *ctl = phys_enc->hw_ctl; 2025 struct dpu_hw_intf_cfg intf_cfg = { 0 }; 2026 int i; 2027 struct dpu_encoder_virt *dpu_enc; 2028 2029 dpu_enc = to_dpu_encoder_virt(phys_enc->parent); 2030 2031 phys_enc->hw_ctl->ops.reset(ctl); 2032 2033 dpu_encoder_helper_reset_mixers(phys_enc); 2034 2035 /* 2036 * TODO: move the once-only operation like CTL flush/trigger 2037 * into dpu_encoder_virt_disable() and all operations which need 2038 * to be done per phys encoder into the phys_disable() op. 2039 */ 2040 if (phys_enc->hw_wb) { 2041 /* disable the PP block */ 2042 if (phys_enc->hw_wb->ops.bind_pingpong_blk) 2043 phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, false, 2044 phys_enc->hw_pp->idx); 2045 2046 /* mark WB flush as pending */ 2047 if (phys_enc->hw_ctl->ops.update_pending_flush_wb) 2048 phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx); 2049 } else { 2050 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2051 if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk) 2052 phys_enc->hw_intf->ops.bind_pingpong_blk( 2053 dpu_enc->phys_encs[i]->hw_intf, false, 2054 dpu_enc->phys_encs[i]->hw_pp->idx); 2055 2056 /* mark INTF flush as pending */ 2057 if (phys_enc->hw_ctl->ops.update_pending_flush_intf) 2058 phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl, 2059 dpu_enc->phys_encs[i]->hw_intf->idx); 2060 } 2061 } 2062 2063 /* reset the merge 3D HW block */ 2064 if (phys_enc->hw_pp->merge_3d) { 2065 phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, 2066 BLEND_3D_NONE); 2067 if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d) 2068 phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl, 2069 phys_enc->hw_pp->merge_3d->idx); 2070 } 2071 2072 intf_cfg.stream_sel = 0; /* Don't care value for video mode */ 2073 intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); 2074 2075 if (phys_enc->hw_intf) 2076 intf_cfg.intf = phys_enc->hw_intf->idx; 2077 if (phys_enc->hw_wb) 2078 intf_cfg.wb = phys_enc->hw_wb->idx; 2079 2080 if (phys_enc->hw_pp->merge_3d) 2081 intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx; 2082 2083 if (ctl->ops.reset_intf_cfg) 2084 ctl->ops.reset_intf_cfg(ctl, &intf_cfg); 2085 2086 ctl->ops.trigger_flush(ctl); 2087 ctl->ops.trigger_start(ctl); 2088 ctl->ops.clear_pending_flush(ctl); 2089 } 2090 2091 #ifdef CONFIG_DEBUG_FS 2092 static int _dpu_encoder_status_show(struct seq_file *s, void 
*data) 2093 { 2094 struct dpu_encoder_virt *dpu_enc = s->private; 2095 int i; 2096 2097 mutex_lock(&dpu_enc->enc_lock); 2098 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2099 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2100 2101 seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ", 2102 phys->intf_idx - INTF_0, phys->wb_idx - WB_0, 2103 atomic_read(&phys->vsync_cnt), 2104 atomic_read(&phys->underrun_cnt)); 2105 2106 seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode)); 2107 } 2108 mutex_unlock(&dpu_enc->enc_lock); 2109 2110 return 0; 2111 } 2112 2113 DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status); 2114 2115 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) 2116 { 2117 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 2118 int i; 2119 2120 char name[DPU_NAME_SIZE]; 2121 2122 if (!drm_enc->dev) { 2123 DPU_ERROR("invalid encoder or kms\n"); 2124 return -EINVAL; 2125 } 2126 2127 snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id); 2128 2129 /* create overall sub-directory for the encoder */ 2130 dpu_enc->debugfs_root = debugfs_create_dir(name, 2131 drm_enc->dev->primary->debugfs_root); 2132 2133 /* don't error check these */ 2134 debugfs_create_file("status", 0600, 2135 dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops); 2136 2137 for (i = 0; i < dpu_enc->num_phys_encs; i++) 2138 if (dpu_enc->phys_encs[i]->ops.late_register) 2139 dpu_enc->phys_encs[i]->ops.late_register( 2140 dpu_enc->phys_encs[i], 2141 dpu_enc->debugfs_root); 2142 2143 return 0; 2144 } 2145 #else 2146 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) 2147 { 2148 return 0; 2149 } 2150 #endif 2151 2152 static int dpu_encoder_late_register(struct drm_encoder *encoder) 2153 { 2154 return _dpu_encoder_init_debugfs(encoder); 2155 } 2156 2157 static void dpu_encoder_early_unregister(struct drm_encoder *encoder) 2158 { 2159 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder); 2160 2161 debugfs_remove_recursive(dpu_enc->debugfs_root); 2162 } 2163 2164 static int dpu_encoder_virt_add_phys_encs( 2165 struct msm_display_info *disp_info, 2166 struct dpu_encoder_virt *dpu_enc, 2167 struct dpu_enc_phys_init_params *params) 2168 { 2169 struct dpu_encoder_phys *enc = NULL; 2170 2171 DPU_DEBUG_ENC(dpu_enc, "\n"); 2172 2173 /* 2174 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types 2175 * in this function, check up-front. 
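 * (ARRAY_SIZE(dpu_enc->phys_encs) bounds how many physical encoders a single virtual encoder can hold, so bail out here rather than overflow the array.)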
2176 */ 2177 if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >= 2178 ARRAY_SIZE(dpu_enc->phys_encs)) { 2179 DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n", 2180 dpu_enc->num_phys_encs); 2181 return -EINVAL; 2182 } 2183 2184 2185 if (disp_info->intf_type == DRM_MODE_ENCODER_VIRTUAL) { 2186 enc = dpu_encoder_phys_wb_init(params); 2187 2188 if (IS_ERR(enc)) { 2189 DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n", 2190 PTR_ERR(enc)); 2191 return PTR_ERR(enc); 2192 } 2193 2194 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2195 ++dpu_enc->num_phys_encs; 2196 } else if (disp_info->is_cmd_mode) { 2197 enc = dpu_encoder_phys_cmd_init(params); 2198 2199 if (IS_ERR(enc)) { 2200 DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n", 2201 PTR_ERR(enc)); 2202 return PTR_ERR(enc); 2203 } 2204 2205 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2206 ++dpu_enc->num_phys_encs; 2207 } else { 2208 enc = dpu_encoder_phys_vid_init(params); 2209 2210 if (IS_ERR(enc)) { 2211 DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n", 2212 PTR_ERR(enc)); 2213 return PTR_ERR(enc); 2214 } 2215 2216 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2217 ++dpu_enc->num_phys_encs; 2218 } 2219 2220 if (params->split_role == ENC_ROLE_SLAVE) 2221 dpu_enc->cur_slave = enc; 2222 else 2223 dpu_enc->cur_master = enc; 2224 2225 return 0; 2226 } 2227 2228 static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, 2229 struct dpu_kms *dpu_kms, 2230 struct msm_display_info *disp_info) 2231 { 2232 int ret = 0; 2233 int i = 0; 2234 enum dpu_intf_type intf_type = INTF_NONE; 2235 struct dpu_enc_phys_init_params phys_params; 2236 2237 if (!dpu_enc) { 2238 DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL); 2239 return -EINVAL; 2240 } 2241 2242 dpu_enc->cur_master = NULL; 2243 2244 memset(&phys_params, 0, sizeof(phys_params)); 2245 phys_params.dpu_kms = dpu_kms; 2246 phys_params.parent = &dpu_enc->base; 2247 phys_params.enc_spinlock = &dpu_enc->enc_spinlock; 2248 2249 switch (disp_info->intf_type) { 2250 case DRM_MODE_ENCODER_DSI: 2251 intf_type = INTF_DSI; 2252 break; 2253 case DRM_MODE_ENCODER_TMDS: 2254 intf_type = INTF_DP; 2255 break; 2256 case DRM_MODE_ENCODER_VIRTUAL: 2257 intf_type = INTF_WB; 2258 break; 2259 } 2260 2261 WARN_ON(disp_info->num_of_h_tiles < 1); 2262 2263 DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles); 2264 2265 if (disp_info->intf_type != DRM_MODE_ENCODER_VIRTUAL) 2266 dpu_enc->idle_pc_supported = 2267 dpu_kms->catalog->caps->has_idle_pc; 2268 2269 dpu_enc->dsc = disp_info->dsc; 2270 2271 mutex_lock(&dpu_enc->enc_lock); 2272 for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) { 2273 /* 2274 * Left-most tile is at index 0, content is controller id 2275 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right 2276 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right 2277 */ 2278 u32 controller_id = disp_info->h_tile_instance[i]; 2279 2280 if (disp_info->num_of_h_tiles > 1) { 2281 if (i == 0) 2282 phys_params.split_role = ENC_ROLE_MASTER; 2283 else 2284 phys_params.split_role = ENC_ROLE_SLAVE; 2285 } else { 2286 phys_params.split_role = ENC_ROLE_SOLO; 2287 } 2288 2289 DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n", 2290 i, controller_id, phys_params.split_role); 2291 2292 phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog, 2293 intf_type, 2294 controller_id); 2295 2296 phys_params.wb_idx = dpu_encoder_get_wb(dpu_kms->catalog, 2297 intf_type, controller_id); 2298 /* 2299 * The phys_params might represent either an INTF or a 
WB unit, but not 2300 * both of them at the same time. 2301 */ 2302 if ((phys_params.intf_idx == INTF_MAX) && 2303 (phys_params.wb_idx == WB_MAX)) { 2304 DPU_ERROR_ENC(dpu_enc, "could not get intf or wb: type %d, id %d\n", 2305 intf_type, controller_id); 2306 ret = -EINVAL; 2307 } 2308 2309 if ((phys_params.intf_idx != INTF_MAX) && 2310 (phys_params.wb_idx != WB_MAX)) { 2311 DPU_ERROR_ENC(dpu_enc, "both intf and wb present: type %d, id %d\n", 2312 intf_type, controller_id); 2313 ret = -EINVAL; 2314 } 2315 2316 if (!ret) { 2317 ret = dpu_encoder_virt_add_phys_encs(disp_info, 2318 dpu_enc, &phys_params); 2319 if (ret) 2320 DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n"); 2321 } 2322 } 2323 2324 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2325 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2326 atomic_set(&phys->vsync_cnt, 0); 2327 atomic_set(&phys->underrun_cnt, 0); 2328 2329 if (phys->intf_idx >= INTF_0 && phys->intf_idx < INTF_MAX) 2330 phys->hw_intf = dpu_rm_get_intf(&dpu_kms->rm, phys->intf_idx); 2331 2332 if (phys->wb_idx >= WB_0 && phys->wb_idx < WB_MAX) 2333 phys->hw_wb = dpu_rm_get_wb(&dpu_kms->rm, phys->wb_idx); 2334 2335 if (!phys->hw_intf && !phys->hw_wb) { 2336 DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i); 2337 ret = -EINVAL; 2338 } 2339 2340 if (phys->hw_intf && phys->hw_wb) { 2341 DPU_ERROR_ENC(dpu_enc, 2342 "invalid phys both intf and wb block at idx: %d\n", i); 2343 ret = -EINVAL; 2344 } 2345 } 2346 2347 mutex_unlock(&dpu_enc->enc_lock); 2348 2349 return ret; 2350 } 2351 2352 static void dpu_encoder_frame_done_timeout(struct timer_list *t) 2353 { 2354 struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t, 2355 frame_done_timer); 2356 struct drm_encoder *drm_enc = &dpu_enc->base; 2357 u32 event; 2358 2359 if (!drm_enc->dev) { 2360 DPU_ERROR("invalid parameters\n"); 2361 return; 2362 } 2363 2364 if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) { 2365 DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n", 2366 DRMID(drm_enc), dpu_enc->frame_busy_mask[0]); 2367 return; 2368 } else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) { 2369 DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc)); 2370 return; 2371 } 2372 2373 DPU_ERROR_ENC(dpu_enc, "frame done timeout\n"); 2374 2375 event = DPU_ENCODER_FRAME_EVENT_ERROR; 2376 trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event); 2377 dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event); 2378 } 2379 2380 static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = { 2381 .atomic_mode_set = dpu_encoder_virt_atomic_mode_set, 2382 .atomic_disable = dpu_encoder_virt_atomic_disable, 2383 .atomic_enable = dpu_encoder_virt_atomic_enable, 2384 .atomic_check = dpu_encoder_virt_atomic_check, 2385 }; 2386 2387 static const struct drm_encoder_funcs dpu_encoder_funcs = { 2388 .destroy = dpu_encoder_destroy, 2389 .late_register = dpu_encoder_late_register, 2390 .early_unregister = dpu_encoder_early_unregister, 2391 }; 2392 2393 int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, 2394 struct msm_display_info *disp_info) 2395 { 2396 struct msm_drm_private *priv = dev->dev_private; 2397 struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); 2398 struct drm_encoder *drm_enc = NULL; 2399 struct dpu_encoder_virt *dpu_enc = NULL; 2400 int ret = 0; 2401 2402 dpu_enc = to_dpu_encoder_virt(enc); 2403 2404 ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info); 2405 if (ret) 2406 goto fail; 2407 2408 
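	/* The frame-done watchdog starts out disarmed (0 ms); dpu_encoder_kickoff() computes the real timeout from the current refresh rate and arms frame_done_timer before each kickoff. */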
atomic_set(&dpu_enc->frame_done_timeout_ms, 0); 2409 timer_setup(&dpu_enc->frame_done_timer, 2410 dpu_encoder_frame_done_timeout, 0); 2411 2412 if (disp_info->intf_type == DRM_MODE_ENCODER_DSI) 2413 timer_setup(&dpu_enc->vsync_event_timer, 2414 dpu_encoder_vsync_event_handler, 2415 0); 2416 else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS) 2417 dpu_enc->wide_bus_en = msm_dp_wide_bus_available( 2418 priv->dp[disp_info->h_tile_instance[0]]); 2419 2420 INIT_DELAYED_WORK(&dpu_enc->delayed_off_work, 2421 dpu_encoder_off_work); 2422 dpu_enc->idle_timeout = IDLE_TIMEOUT; 2423 2424 kthread_init_work(&dpu_enc->vsync_event_work, 2425 dpu_encoder_vsync_event_work_handler); 2426 2427 memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info)); 2428 2429 DPU_DEBUG_ENC(dpu_enc, "created\n"); 2430 2431 return ret; 2432 2433 fail: 2434 DPU_ERROR("failed to create encoder\n"); 2435 if (drm_enc) 2436 dpu_encoder_destroy(drm_enc); 2437 2438 return ret; 2439 2440 2441 } 2442 2443 struct drm_encoder *dpu_encoder_init(struct drm_device *dev, 2444 int drm_enc_mode) 2445 { 2446 struct dpu_encoder_virt *dpu_enc = NULL; 2447 int rc = 0; 2448 2449 dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL); 2450 if (!dpu_enc) 2451 return ERR_PTR(-ENOMEM); 2452 2453 2454 rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs, 2455 drm_enc_mode, NULL); 2456 if (rc) { 2457 devm_kfree(dev->dev, dpu_enc); 2458 return ERR_PTR(rc); 2459 } 2460 2461 drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs); 2462 2463 spin_lock_init(&dpu_enc->enc_spinlock); 2464 dpu_enc->enabled = false; 2465 mutex_init(&dpu_enc->enc_lock); 2466 mutex_init(&dpu_enc->rc_lock); 2467 2468 return &dpu_enc->base; 2469 } 2470 2471 int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc, 2472 enum msm_event_wait event) 2473 { 2474 int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL; 2475 struct dpu_encoder_virt *dpu_enc = NULL; 2476 int i, ret = 0; 2477 2478 if (!drm_enc) { 2479 DPU_ERROR("invalid encoder\n"); 2480 return -EINVAL; 2481 } 2482 dpu_enc = to_dpu_encoder_virt(drm_enc); 2483 DPU_DEBUG_ENC(dpu_enc, "\n"); 2484 2485 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2486 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2487 2488 switch (event) { 2489 case MSM_ENC_COMMIT_DONE: 2490 fn_wait = phys->ops.wait_for_commit_done; 2491 break; 2492 case MSM_ENC_TX_COMPLETE: 2493 fn_wait = phys->ops.wait_for_tx_complete; 2494 break; 2495 case MSM_ENC_VBLANK: 2496 fn_wait = phys->ops.wait_for_vblank; 2497 break; 2498 default: 2499 DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n", 2500 event); 2501 return -EINVAL; 2502 } 2503 2504 if (fn_wait) { 2505 DPU_ATRACE_BEGIN("wait_for_completion_event"); 2506 ret = fn_wait(phys); 2507 DPU_ATRACE_END("wait_for_completion_event"); 2508 if (ret) 2509 return ret; 2510 } 2511 } 2512 2513 return ret; 2514 } 2515 2516 enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder) 2517 { 2518 struct dpu_encoder_virt *dpu_enc = NULL; 2519 2520 if (!encoder) { 2521 DPU_ERROR("invalid encoder\n"); 2522 return INTF_MODE_NONE; 2523 } 2524 dpu_enc = to_dpu_encoder_virt(encoder); 2525 2526 if (dpu_enc->cur_master) 2527 return dpu_enc->cur_master->intf_mode; 2528 2529 if (dpu_enc->num_phys_encs) 2530 return dpu_enc->phys_encs[0]->intf_mode; 2531 2532 return INTF_MODE_NONE; 2533 } 2534 2535 unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc) 2536 { 2537 struct drm_encoder *encoder = phys_enc->parent; 2538 struct dpu_encoder_virt *dpu_enc = 
to_dpu_encoder_virt(encoder); 2539 2540 return dpu_enc->dsc_mask; 2541 } 2542
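/*
 * Usage sketch (illustrative only; the local variable names and error
 * handling below are assumptions, not taken from this file, but they show
 * the expected two-step bring-up of a virtual encoder by the KMS code):
 *
 *	struct drm_encoder *encoder;
 *	struct msm_display_info info = { .intf_type = DRM_MODE_ENCODER_DSI };
 *	int rc;
 *
 *	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
 *	if (IS_ERR(encoder))
 *		return PTR_ERR(encoder);
 *
 *	rc = dpu_encoder_setup(dev, encoder, &info);
 *	if (rc)
 *		return rc;
 */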