// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>

#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_dsc.h"
#include "dpu_hw_merge3d.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
#include "dpu_trace.h"
#include "dpu_core_irq.h"
#include "disp/msm_disp_snapshot.h"

#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_ERROR_ENC_RATELIMITED(e, fmt, ...) DPU_ERROR_RATELIMITED("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

/*
 * Two to anticipate panels that can do cmd/vid dynamic switching;
 * the plan is to create all possible physical encoder types and switch
 * between them at runtime.
 */
#define NUM_PHYS_ENCODER_TYPES 2

#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)

#define MAX_CHANNELS_PER_ENC 2

#define IDLE_SHORT_TIMEOUT	1

#define MAX_HDISPLAY_SPLIT 1080

/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5

/**
 * enum dpu_enc_rc_events - events for resource control state machine
 * @DPU_ENC_RC_EVENT_KICKOFF:
 *	This event happens at NORMAL priority.
 *	Event that signals the start of the transfer. When this event is
 *	received, enable MDP/DSI core clocks. Regardless of the previous
 *	state, the resource should be in ON state at the end of this event.
 * @DPU_ENC_RC_EVENT_FRAME_DONE:
 *	This event happens at INTERRUPT level.
 *	Event signals the end of the data transfer after the PP FRAME_DONE
 *	event. At the end of this event, a delayed work is scheduled to go to
 *	IDLE_PC state after IDLE_TIMEOUT time.
 * @DPU_ENC_RC_EVENT_PRE_STOP:
 *	This event happens at NORMAL priority.
 *	This event, when received during the ON state, leaves the RC state
 *	in the PRE_OFF state. It should be followed by the STOP event as
 *	part of encoder disable.
 *	If received during IDLE or OFF states, it will do nothing.
 * @DPU_ENC_RC_EVENT_STOP:
 *	This event happens at NORMAL priority.
 *	When this event is received, disable all the MDP/DSI core clocks, and
 *	disable IRQs. It should be called from the PRE_OFF or IDLE states.
 *	IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
 *	PRE_OFF is expected when PRE_STOP was executed during the ON state.
 *	Resource state should be in OFF at the end of the event.
 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
 *	This event happens at NORMAL priority from a work item.
 *	Event signals that there were no frame updates for IDLE_TIMEOUT time.
 *	This would disable MDP/DSI core clocks and change the resource state
 *	to IDLE.
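 *
 * Illustrative flow (a sketch, not an exhaustive list of transitions):
 * a commit raises KICKOFF (OFF/IDLE -> ON); the pingpong FRAME_DONE
 * interrupt raises FRAME_DONE, which schedules the delayed idle work;
 * if no further frame arrives within the idle timeout, that work raises
 * ENTER_IDLE (ON -> IDLE). Encoder disable raises PRE_STOP followed by
 * STOP (-> OFF). See dpu_encoder_resource_control() for the
 * authoritative handling of each event.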
 */
enum dpu_enc_rc_events {
	DPU_ENC_RC_EVENT_KICKOFF = 1,
	DPU_ENC_RC_EVENT_FRAME_DONE,
	DPU_ENC_RC_EVENT_PRE_STOP,
	DPU_ENC_RC_EVENT_STOP,
	DPU_ENC_RC_EVENT_ENTER_IDLE
};

/*
 * enum dpu_enc_rc_states - states that the resource control maintains
 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
 */
enum dpu_enc_rc_states {
	DPU_ENC_RC_STATE_OFF,
	DPU_ENC_RC_STATE_PRE_OFF,
	DPU_ENC_RC_STATE_ON,
	DPU_ENC_RC_STATE_IDLE
};

/**
 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
 *	encoders. Virtual encoder manages one "logical" display. Physical
 *	encoders manage one intf block, tied to a specific panel/sub-panel.
 *	Virtual encoder defers as much as possible to the physical encoders.
 *	Virtual encoder registers itself with the DRM Framework as the encoder.
 * @base:		drm_encoder base class for registration with DRM
 * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
 * @enabled:		True if the encoder is active, protected by enc_lock
 * @commit_done_timedout: True if there has been a timeout on commit after
 *			enabling the encoder.
 * @num_phys_encs:	Actual number of physical encoders contained.
 * @phys_encs:		Container of physical encoders managed.
 * @cur_master:		Pointer to the current master in this mode; an
 *			optimization. Only valid after enable, cleared at
 *			disable.
 * @cur_slave:		As above but for the slave encoder.
 * @hw_pp:		Handle to the pingpong blocks used for the display. The
 *			number of pingpong blocks can differ from num_phys_encs.
 * @hw_dsc:		Handle to the DSC blocks used for the display.
 * @dsc_mask:		Bitmask of used DSC blocks.
 * @intfs_swapped:	Whether or not the phys_enc interfaces have been swapped
 *			for partial update right-only cases, such as pingpong
 *			split where virtual pingpong does not generate IRQs
 * @crtc:		Pointer to the currently assigned crtc. Normally you
 *			would use crtc->state->encoder_mask to determine the
 *			link between encoder/crtc. However in this case we need
 *			to track crtc in the disable() hook which is called
 *			_after_ encoder_mask is cleared.
 * @connector:		If a mode is set, cached pointer to the active connector
 * @debugfs_root:		Debug file system root file node
 * @enc_lock:			Lock around physical encoder
 *				create/destroy/enable/disable
 * @frame_busy_mask:		Bitmask tracking which phys_encs are still
 *				busy processing the current command.
 *				Bit0 = phys_encs[0] etc.
 * @crtc_frame_event_cb:	callback handler for frame event
 * @crtc_frame_event_cb_data:	callback handler private data
 * @frame_done_timeout_ms:	frame done timeout in ms
 * @frame_done_timer:		watchdog timer for frame done event
 * @disp_info:			local copy of msm_display_info struct
 * @idle_pc_supported:		indicates whether idle power collapse is supported
 * @rc_lock:			resource control mutex lock to protect
 *				virt encoder over various state changes
 * @rc_state:			resource controller state
 * @delayed_off_work:		delayed worker to schedule disabling of
 *				clks and resources after IDLE_TIMEOUT time.
 * @topology:			topology of the display
 * @idle_timeout:		idle timeout duration in milliseconds
 * @wide_bus_en:		wide bus is enabled on this interface
 * @dsc:			drm_dsc_config pointer, for DSC-enabled encoders
 */
struct dpu_encoder_virt {
	struct drm_encoder base;
	spinlock_t enc_spinlock;

	bool enabled;
	bool commit_done_timedout;

	unsigned int num_phys_encs;
	struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
	struct dpu_encoder_phys *cur_master;
	struct dpu_encoder_phys *cur_slave;
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];

	unsigned int dsc_mask;

	bool intfs_swapped;

	struct drm_crtc *crtc;
	struct drm_connector *connector;

	struct dentry *debugfs_root;
	struct mutex enc_lock;
	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
	void (*crtc_frame_event_cb)(void *, u32 event);
	void *crtc_frame_event_cb_data;

	atomic_t frame_done_timeout_ms;
	struct timer_list frame_done_timer;

	struct msm_display_info disp_info;

	bool idle_pc_supported;
	struct mutex rc_lock;
	enum dpu_enc_rc_states rc_state;
	struct delayed_work delayed_off_work;
	struct msm_display_topology topology;

	u32 idle_timeout;

	bool wide_bus_en;

	/* DSC configuration */
	struct drm_dsc_config *dsc;
};

#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)

static u32 dither_matrix[DITHER_MATRIX_SZ] = {
	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
};


bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
{
	const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	return dpu_enc->wide_bus_en;
}

bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
{
	const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	return dpu_enc->dsc ?
true : false; 237 } 238 239 int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc) 240 { 241 struct dpu_encoder_virt *dpu_enc; 242 int i, num_intf = 0; 243 244 dpu_enc = to_dpu_encoder_virt(drm_enc); 245 246 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 247 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 248 249 if (phys->hw_intf && phys->hw_intf->ops.setup_misr 250 && phys->hw_intf->ops.collect_misr) 251 num_intf++; 252 } 253 254 return num_intf; 255 } 256 257 void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc) 258 { 259 struct dpu_encoder_virt *dpu_enc; 260 261 int i; 262 263 dpu_enc = to_dpu_encoder_virt(drm_enc); 264 265 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 266 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 267 268 if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr) 269 continue; 270 271 phys->hw_intf->ops.setup_misr(phys->hw_intf); 272 } 273 } 274 275 int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos) 276 { 277 struct dpu_encoder_virt *dpu_enc; 278 279 int i, rc = 0, entries_added = 0; 280 281 if (!drm_enc->crtc) { 282 DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index); 283 return -EINVAL; 284 } 285 286 dpu_enc = to_dpu_encoder_virt(drm_enc); 287 288 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 289 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 290 291 if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr) 292 continue; 293 294 rc = phys->hw_intf->ops.collect_misr(phys->hw_intf, &crcs[pos + entries_added]); 295 if (rc) 296 return rc; 297 entries_added++; 298 } 299 300 return entries_added; 301 } 302 303 static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc) 304 { 305 struct dpu_hw_dither_cfg dither_cfg = { 0 }; 306 307 if (!hw_pp->ops.setup_dither) 308 return; 309 310 switch (bpc) { 311 case 6: 312 dither_cfg.c0_bitdepth = 6; 313 dither_cfg.c1_bitdepth = 6; 314 dither_cfg.c2_bitdepth = 6; 315 dither_cfg.c3_bitdepth = 6; 316 dither_cfg.temporal_en = 0; 317 break; 318 default: 319 hw_pp->ops.setup_dither(hw_pp, NULL); 320 return; 321 } 322 323 memcpy(&dither_cfg.matrix, dither_matrix, 324 sizeof(u32) * DITHER_MATRIX_SZ); 325 326 hw_pp->ops.setup_dither(hw_pp, &dither_cfg); 327 } 328 329 static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode) 330 { 331 switch (intf_mode) { 332 case INTF_MODE_VIDEO: 333 return "INTF_MODE_VIDEO"; 334 case INTF_MODE_CMD: 335 return "INTF_MODE_CMD"; 336 case INTF_MODE_WB_BLOCK: 337 return "INTF_MODE_WB_BLOCK"; 338 case INTF_MODE_WB_LINE: 339 return "INTF_MODE_WB_LINE"; 340 default: 341 return "INTF_MODE_UNKNOWN"; 342 } 343 } 344 345 void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc, 346 enum dpu_intr_idx intr_idx) 347 { 348 DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n", 349 DRMID(phys_enc->parent), 350 dpu_encoder_helper_get_intf_type(phys_enc->intf_mode), 351 phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1, 352 phys_enc->hw_wb ? 
phys_enc->hw_wb->idx - WB_0 : -1, 353 phys_enc->hw_pp->idx - PINGPONG_0, intr_idx); 354 355 dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, 356 DPU_ENCODER_FRAME_EVENT_ERROR); 357 } 358 359 static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id, 360 u32 irq_idx, struct dpu_encoder_wait_info *info); 361 362 int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, 363 int irq_idx, 364 void (*func)(void *arg), 365 struct dpu_encoder_wait_info *wait_info) 366 { 367 u32 irq_status; 368 int ret; 369 370 if (!wait_info) { 371 DPU_ERROR("invalid params\n"); 372 return -EINVAL; 373 } 374 /* note: do master / slave checking outside */ 375 376 /* return EWOULDBLOCK since we know the wait isn't necessary */ 377 if (phys_enc->enable_state == DPU_ENC_DISABLED) { 378 DRM_ERROR("encoder is disabled id=%u, callback=%ps, IRQ=[%d, %d]\n", 379 DRMID(phys_enc->parent), func, 380 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx)); 381 return -EWOULDBLOCK; 382 } 383 384 if (irq_idx < 0) { 385 DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n", 386 DRMID(phys_enc->parent), func); 387 return 0; 388 } 389 390 DRM_DEBUG_KMS("id=%u, callback=%ps, IRQ=[%d, %d], pp=%d, pending_cnt=%d\n", 391 DRMID(phys_enc->parent), func, 392 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), phys_enc->hw_pp->idx - PINGPONG_0, 393 atomic_read(wait_info->atomic_cnt)); 394 395 ret = dpu_encoder_helper_wait_event_timeout( 396 DRMID(phys_enc->parent), 397 irq_idx, 398 wait_info); 399 400 if (ret <= 0) { 401 irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq_idx); 402 if (irq_status) { 403 unsigned long flags; 404 405 DRM_DEBUG_KMS("IRQ=[%d, %d] not triggered id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n", 406 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), 407 DRMID(phys_enc->parent), func, 408 phys_enc->hw_pp->idx - PINGPONG_0, 409 atomic_read(wait_info->atomic_cnt)); 410 local_irq_save(flags); 411 func(phys_enc); 412 local_irq_restore(flags); 413 ret = 0; 414 } else { 415 ret = -ETIMEDOUT; 416 DRM_DEBUG_KMS("IRQ=[%d, %d] timeout id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n", 417 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), 418 DRMID(phys_enc->parent), func, 419 phys_enc->hw_pp->idx - PINGPONG_0, 420 atomic_read(wait_info->atomic_cnt)); 421 } 422 } else { 423 ret = 0; 424 trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent), 425 func, irq_idx, 426 phys_enc->hw_pp->idx - PINGPONG_0, 427 atomic_read(wait_info->atomic_cnt)); 428 } 429 430 return ret; 431 } 432 433 int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc) 434 { 435 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 436 struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL; 437 return phys ? atomic_read(&phys->vsync_cnt) : 0; 438 } 439 440 int dpu_encoder_get_linecount(struct drm_encoder *drm_enc) 441 { 442 struct dpu_encoder_virt *dpu_enc; 443 struct dpu_encoder_phys *phys; 444 int linecount = 0; 445 446 dpu_enc = to_dpu_encoder_virt(drm_enc); 447 phys = dpu_enc ? 
dpu_enc->cur_master : NULL;

	if (phys && phys->ops.get_line_count)
		linecount = phys->ops.get_line_count(phys);

	return linecount;
}

static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i = 0;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	mutex_lock(&dpu_enc->enc_lock);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.destroy) {
			phys->ops.destroy(phys);
			--dpu_enc->num_phys_encs;
			dpu_enc->phys_encs[i] = NULL;
		}
	}

	if (dpu_enc->num_phys_encs)
		DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
				dpu_enc->num_phys_encs);
	dpu_enc->num_phys_encs = 0;
	mutex_unlock(&dpu_enc->enc_lock);

	drm_encoder_cleanup(drm_enc);
	mutex_destroy(&dpu_enc->enc_lock);
}

void dpu_encoder_helper_split_config(
		struct dpu_encoder_phys *phys_enc,
		enum dpu_intf interface)
{
	struct dpu_encoder_virt *dpu_enc;
	struct split_pipe_cfg cfg = { 0 };
	struct dpu_hw_mdp *hw_mdptop;
	struct msm_display_info *disp_info;

	if (!phys_enc->hw_mdptop || !phys_enc->parent) {
		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
		return;
	}

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
	hw_mdptop = phys_enc->hw_mdptop;
	disp_info = &dpu_enc->disp_info;

	if (disp_info->intf_type != INTF_DSI)
		return;

	/**
	 * disable split modes since the encoder will be operating as the only
	 * encoder, either for the entire use case in the case of, for example,
	 * single DSI, or for this frame in the case of left/right only partial
	 * update.
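	 *
	 * For illustration (a sketch assuming a dual-DSI split panel): the
	 * SOLO case below programs a zeroed split_pipe_cfg, while a MASTER
	 * programs cfg.en = true with the interface passed in and sets
	 * split_flush_en when needs_single_flush() reports that both pipes
	 * are flushed through a single CTL.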
516 */ 517 if (phys_enc->split_role == ENC_ROLE_SOLO) { 518 if (hw_mdptop->ops.setup_split_pipe) 519 hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg); 520 return; 521 } 522 523 cfg.en = true; 524 cfg.mode = phys_enc->intf_mode; 525 cfg.intf = interface; 526 527 if (cfg.en && phys_enc->ops.needs_single_flush && 528 phys_enc->ops.needs_single_flush(phys_enc)) 529 cfg.split_flush_en = true; 530 531 if (phys_enc->split_role == ENC_ROLE_MASTER) { 532 DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en); 533 534 if (hw_mdptop->ops.setup_split_pipe) 535 hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg); 536 } 537 } 538 539 bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc) 540 { 541 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 542 int i, intf_count = 0, num_dsc = 0; 543 544 for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++) 545 if (dpu_enc->phys_encs[i]) 546 intf_count++; 547 548 /* See dpu_encoder_get_topology, we only support 2:2:1 topology */ 549 if (dpu_enc->dsc) 550 num_dsc = 2; 551 552 return (num_dsc > 0) && (num_dsc > intf_count); 553 } 554 555 static struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc) 556 { 557 struct msm_drm_private *priv = drm_enc->dev->dev_private; 558 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 559 int index = dpu_enc->disp_info.h_tile_instance[0]; 560 561 if (dpu_enc->disp_info.intf_type == INTF_DSI) 562 return msm_dsi_get_dsc_config(priv->dsi[index]); 563 564 return NULL; 565 } 566 567 static struct msm_display_topology dpu_encoder_get_topology( 568 struct dpu_encoder_virt *dpu_enc, 569 struct dpu_kms *dpu_kms, 570 struct drm_display_mode *mode, 571 struct drm_crtc_state *crtc_state, 572 struct drm_dsc_config *dsc) 573 { 574 struct msm_display_topology topology = {0}; 575 int i, intf_count = 0; 576 577 for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++) 578 if (dpu_enc->phys_encs[i]) 579 intf_count++; 580 581 /* Datapath topology selection 582 * 583 * Dual display 584 * 2 LM, 2 INTF ( Split display using 2 interfaces) 585 * 586 * Single display 587 * 1 LM, 1 INTF 588 * 2 LM, 1 INTF (stream merge to support high resolution interfaces) 589 * 590 * Add dspps to the reservation requirements if ctm is requested 591 */ 592 if (intf_count == 2) 593 topology.num_lm = 2; 594 else if (!dpu_kms->catalog->caps->has_3d_merge) 595 topology.num_lm = 1; 596 else 597 topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 
2 : 1;

	if (crtc_state->ctm)
		topology.num_dspp = topology.num_lm;

	topology.num_intf = intf_count;

	if (dsc) {
		/*
		 * In case of Display Stream Compression (DSC), we would use
		 * 2 DSC encoders, 2 layer mixers and 1 interface.
		 * This is power-optimal and can drive up to (and including)
		 * 4k screens.
		 */
		topology.num_dsc = 2;
		topology.num_lm = 2;
		topology.num_intf = 1;
	}

	return topology;
}

static int dpu_encoder_virt_atomic_check(
		struct drm_encoder *drm_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct drm_display_mode *adj_mode;
	struct msm_display_topology topology;
	struct dpu_global_state *global_state;
	struct drm_dsc_config *dsc;
	int i = 0;
	int ret = 0;

	if (!drm_enc || !crtc_state || !conn_state) {
		DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
				drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
		return -EINVAL;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);
	adj_mode = &crtc_state->adjusted_mode;
	global_state = dpu_kms_get_global_state(crtc_state->state);
	if (IS_ERR(global_state))
		return PTR_ERR(global_state);

	trace_dpu_enc_atomic_check(DRMID(drm_enc));

	/* perform atomic check on the first physical encoder (master) */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.atomic_check)
			ret = phys->ops.atomic_check(phys, crtc_state,
					conn_state);
		if (ret) {
			DPU_ERROR_ENC(dpu_enc,
					"mode unsupported, phys idx %d\n", i);
			return ret;
		}
	}

	dsc = dpu_encoder_get_dsc_config(drm_enc);

	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);

	/*
	 * Release and allocate resources on every modeset.
	 * Don't allocate when active is false.
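	 *
	 * Illustrative sequence (assuming a modeset commit on an enabled
	 * CRTC): dpu_rm_release() below returns any blocks previously
	 * reserved for this encoder to the global pool, and dpu_rm_reserve()
	 * then claims a fresh set (LMs, CTLs, pingpongs, DSCs, ...) matching
	 * the topology computed above.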
673 */ 674 if (drm_atomic_crtc_needs_modeset(crtc_state)) { 675 dpu_rm_release(global_state, drm_enc); 676 677 if (!crtc_state->active_changed || crtc_state->enable) 678 ret = dpu_rm_reserve(&dpu_kms->rm, global_state, 679 drm_enc, crtc_state, topology); 680 } 681 682 trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags); 683 684 return ret; 685 } 686 687 static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc, 688 struct msm_display_info *disp_info) 689 { 690 struct dpu_vsync_source_cfg vsync_cfg = { 0 }; 691 struct msm_drm_private *priv; 692 struct dpu_kms *dpu_kms; 693 struct dpu_hw_mdp *hw_mdptop; 694 struct drm_encoder *drm_enc; 695 struct dpu_encoder_phys *phys_enc; 696 int i; 697 698 if (!dpu_enc || !disp_info) { 699 DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n", 700 dpu_enc != NULL, disp_info != NULL); 701 return; 702 } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) { 703 DPU_ERROR("invalid num phys enc %d/%d\n", 704 dpu_enc->num_phys_encs, 705 (int) ARRAY_SIZE(dpu_enc->hw_pp)); 706 return; 707 } 708 709 drm_enc = &dpu_enc->base; 710 /* this pointers are checked in virt_enable_helper */ 711 priv = drm_enc->dev->dev_private; 712 713 dpu_kms = to_dpu_kms(priv->kms); 714 hw_mdptop = dpu_kms->hw_mdp; 715 if (!hw_mdptop) { 716 DPU_ERROR("invalid mdptop\n"); 717 return; 718 } 719 720 if (hw_mdptop->ops.setup_vsync_source && 721 disp_info->is_cmd_mode) { 722 for (i = 0; i < dpu_enc->num_phys_encs; i++) 723 vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx; 724 725 vsync_cfg.pp_count = dpu_enc->num_phys_encs; 726 vsync_cfg.frame_rate = drm_mode_vrefresh(&dpu_enc->base.crtc->state->adjusted_mode); 727 728 if (disp_info->is_te_using_watchdog_timer) 729 vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0; 730 else 731 vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO; 732 733 hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg); 734 735 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 736 phys_enc = dpu_enc->phys_encs[i]; 737 738 if (phys_enc->has_intf_te && phys_enc->hw_intf->ops.vsync_sel) 739 phys_enc->hw_intf->ops.vsync_sel(phys_enc->hw_intf, 740 vsync_cfg.vsync_source); 741 } 742 } 743 } 744 745 static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable) 746 { 747 struct dpu_encoder_virt *dpu_enc; 748 int i; 749 750 if (!drm_enc) { 751 DPU_ERROR("invalid encoder\n"); 752 return; 753 } 754 755 dpu_enc = to_dpu_encoder_virt(drm_enc); 756 757 DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable); 758 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 759 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 760 761 if (phys->ops.irq_control) 762 phys->ops.irq_control(phys, enable); 763 } 764 765 } 766 767 static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc, 768 bool enable) 769 { 770 struct msm_drm_private *priv; 771 struct dpu_kms *dpu_kms; 772 struct dpu_encoder_virt *dpu_enc; 773 774 dpu_enc = to_dpu_encoder_virt(drm_enc); 775 priv = drm_enc->dev->dev_private; 776 dpu_kms = to_dpu_kms(priv->kms); 777 778 trace_dpu_enc_rc_helper(DRMID(drm_enc), enable); 779 780 if (!dpu_enc->cur_master) { 781 DPU_ERROR("encoder master not set\n"); 782 return; 783 } 784 785 if (enable) { 786 /* enable DPU core clks */ 787 pm_runtime_get_sync(&dpu_kms->pdev->dev); 788 789 /* enable all the irq */ 790 _dpu_encoder_irq_control(drm_enc, true); 791 792 } else { 793 /* disable all the irq */ 794 _dpu_encoder_irq_control(drm_enc, false); 795 796 /* disable DPU core clks */ 797 pm_runtime_put_sync(&dpu_kms->pdev->dev); 798 } 

}

static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
		u32 sw_event)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	bool is_vid_mode = false;

	if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
		DPU_ERROR("invalid parameters\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	is_vid_mode = !dpu_enc->disp_info.is_cmd_mode;

	/*
	 * when idle_pc is not supported, process only KICKOFF, STOP and
	 * PRE_STOP events and return early for other events (ie wb display).
	 */
	if (!dpu_enc->idle_pc_supported &&
			(sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
			sw_event != DPU_ENC_RC_EVENT_STOP &&
			sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
		return 0;

	trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
			 dpu_enc->rc_state, "begin");

	switch (sw_event) {
	case DPU_ENC_RC_EVENT_KICKOFF:
		/* cancel delayed off work, if any */
		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
					sw_event);

		mutex_lock(&dpu_enc->rc_lock);

		/* return if the resource control is already in ON state */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
			DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in ON state\n",
					DRMID(drm_enc), sw_event);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		} else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
				dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
			DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in state %d\n",
					DRMID(drm_enc), sw_event,
					dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return -EINVAL;
		}

		if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
			_dpu_encoder_irq_control(drm_enc, true);
		else
			_dpu_encoder_resource_control_helper(drm_enc, true);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				"kickoff");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_FRAME_DONE:
		/*
		 * mutex lock is not used as this event happens at interrupt
		 * context.
And locking is not required as, the other events 872 * like KICKOFF and STOP does a wait-for-idle before executing 873 * the resource_control 874 */ 875 if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) { 876 DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n", 877 DRMID(drm_enc), sw_event, 878 dpu_enc->rc_state); 879 return -EINVAL; 880 } 881 882 /* 883 * schedule off work item only when there are no 884 * frames pending 885 */ 886 if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) { 887 DRM_DEBUG_KMS("id:%d skip schedule work\n", 888 DRMID(drm_enc)); 889 return 0; 890 } 891 892 queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work, 893 msecs_to_jiffies(dpu_enc->idle_timeout)); 894 895 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 896 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 897 "frame done"); 898 break; 899 900 case DPU_ENC_RC_EVENT_PRE_STOP: 901 /* cancel delayed off work, if any */ 902 if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work)) 903 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n", 904 sw_event); 905 906 mutex_lock(&dpu_enc->rc_lock); 907 908 if (is_vid_mode && 909 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) { 910 _dpu_encoder_irq_control(drm_enc, true); 911 } 912 /* skip if is already OFF or IDLE, resources are off already */ 913 else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF || 914 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) { 915 DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n", 916 DRMID(drm_enc), sw_event, 917 dpu_enc->rc_state); 918 mutex_unlock(&dpu_enc->rc_lock); 919 return 0; 920 } 921 922 dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF; 923 924 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 925 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 926 "pre stop"); 927 928 mutex_unlock(&dpu_enc->rc_lock); 929 break; 930 931 case DPU_ENC_RC_EVENT_STOP: 932 mutex_lock(&dpu_enc->rc_lock); 933 934 /* return if the resource control is already in OFF state */ 935 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) { 936 DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n", 937 DRMID(drm_enc), sw_event); 938 mutex_unlock(&dpu_enc->rc_lock); 939 return 0; 940 } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) { 941 DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n", 942 DRMID(drm_enc), sw_event, dpu_enc->rc_state); 943 mutex_unlock(&dpu_enc->rc_lock); 944 return -EINVAL; 945 } 946 947 /** 948 * expect to arrive here only if in either idle state or pre-off 949 * and in IDLE state the resources are already disabled 950 */ 951 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF) 952 _dpu_encoder_resource_control_helper(drm_enc, false); 953 954 dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF; 955 956 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 957 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 958 "stop"); 959 960 mutex_unlock(&dpu_enc->rc_lock); 961 break; 962 963 case DPU_ENC_RC_EVENT_ENTER_IDLE: 964 mutex_lock(&dpu_enc->rc_lock); 965 966 if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) { 967 DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n", 968 DRMID(drm_enc), sw_event, dpu_enc->rc_state); 969 mutex_unlock(&dpu_enc->rc_lock); 970 return 0; 971 } 972 973 /* 974 * if we are in ON but a frame was just kicked off, 975 * ignore the IDLE event, it's probably a stale timer event 976 */ 977 if (dpu_enc->frame_busy_mask[0]) { 978 DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n", 979 DRMID(drm_enc), sw_event, dpu_enc->rc_state); 980 mutex_unlock(&dpu_enc->rc_lock); 981 return 0; 982 } 983 984 if (is_vid_mode) 985 _dpu_encoder_irq_control(drm_enc, false); 986 else 987 
_dpu_encoder_resource_control_helper(drm_enc, false); 988 989 dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE; 990 991 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 992 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 993 "idle"); 994 995 mutex_unlock(&dpu_enc->rc_lock); 996 break; 997 998 default: 999 DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc), 1000 sw_event); 1001 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 1002 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 1003 "error"); 1004 break; 1005 } 1006 1007 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 1008 dpu_enc->idle_pc_supported, dpu_enc->rc_state, 1009 "end"); 1010 return 0; 1011 } 1012 1013 void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc, 1014 struct drm_writeback_job *job) 1015 { 1016 struct dpu_encoder_virt *dpu_enc; 1017 int i; 1018 1019 dpu_enc = to_dpu_encoder_virt(drm_enc); 1020 1021 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1022 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1023 1024 if (phys->ops.prepare_wb_job) 1025 phys->ops.prepare_wb_job(phys, job); 1026 1027 } 1028 } 1029 1030 void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc, 1031 struct drm_writeback_job *job) 1032 { 1033 struct dpu_encoder_virt *dpu_enc; 1034 int i; 1035 1036 dpu_enc = to_dpu_encoder_virt(drm_enc); 1037 1038 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1039 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1040 1041 if (phys->ops.cleanup_wb_job) 1042 phys->ops.cleanup_wb_job(phys, job); 1043 1044 } 1045 } 1046 1047 static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, 1048 struct drm_crtc_state *crtc_state, 1049 struct drm_connector_state *conn_state) 1050 { 1051 struct dpu_encoder_virt *dpu_enc; 1052 struct msm_drm_private *priv; 1053 struct dpu_kms *dpu_kms; 1054 struct dpu_crtc_state *cstate; 1055 struct dpu_global_state *global_state; 1056 struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC]; 1057 struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; 1058 struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC]; 1059 struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL }; 1060 struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC]; 1061 int num_lm, num_ctl, num_pp, num_dsc; 1062 unsigned int dsc_mask = 0; 1063 int i; 1064 1065 if (!drm_enc) { 1066 DPU_ERROR("invalid encoder\n"); 1067 return; 1068 } 1069 1070 dpu_enc = to_dpu_encoder_virt(drm_enc); 1071 DPU_DEBUG_ENC(dpu_enc, "\n"); 1072 1073 priv = drm_enc->dev->dev_private; 1074 dpu_kms = to_dpu_kms(priv->kms); 1075 1076 global_state = dpu_kms_get_existing_global_state(dpu_kms); 1077 if (IS_ERR_OR_NULL(global_state)) { 1078 DPU_ERROR("Failed to get global state"); 1079 return; 1080 } 1081 1082 trace_dpu_enc_mode_set(DRMID(drm_enc)); 1083 1084 /* Query resource that have been reserved in atomic check step. */ 1085 num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1086 drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp, 1087 ARRAY_SIZE(hw_pp)); 1088 num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1089 drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); 1090 num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1091 drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); 1092 dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1093 drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp, 1094 ARRAY_SIZE(hw_dspp)); 1095 1096 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) 1097 dpu_enc->hw_pp[i] = i < num_pp ? 
to_dpu_hw_pingpong(hw_pp[i]) 1098 : NULL; 1099 1100 num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, 1101 drm_enc->base.id, DPU_HW_BLK_DSC, 1102 hw_dsc, ARRAY_SIZE(hw_dsc)); 1103 for (i = 0; i < num_dsc; i++) { 1104 dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]); 1105 dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0); 1106 } 1107 1108 dpu_enc->dsc_mask = dsc_mask; 1109 1110 cstate = to_dpu_crtc_state(crtc_state); 1111 1112 for (i = 0; i < num_lm; i++) { 1113 int ctl_idx = (i < num_ctl) ? i : (num_ctl-1); 1114 1115 cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]); 1116 cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]); 1117 cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]); 1118 } 1119 1120 cstate->num_mixers = num_lm; 1121 1122 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1123 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1124 1125 if (!dpu_enc->hw_pp[i]) { 1126 DPU_ERROR_ENC(dpu_enc, 1127 "no pp block assigned at idx: %d\n", i); 1128 return; 1129 } 1130 1131 if (!hw_ctl[i]) { 1132 DPU_ERROR_ENC(dpu_enc, 1133 "no ctl block assigned at idx: %d\n", i); 1134 return; 1135 } 1136 1137 phys->hw_pp = dpu_enc->hw_pp[i]; 1138 phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]); 1139 1140 phys->cached_mode = crtc_state->adjusted_mode; 1141 if (phys->ops.atomic_mode_set) 1142 phys->ops.atomic_mode_set(phys, crtc_state, conn_state); 1143 } 1144 } 1145 1146 static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) 1147 { 1148 struct dpu_encoder_virt *dpu_enc = NULL; 1149 int i; 1150 1151 if (!drm_enc || !drm_enc->dev) { 1152 DPU_ERROR("invalid parameters\n"); 1153 return; 1154 } 1155 1156 dpu_enc = to_dpu_encoder_virt(drm_enc); 1157 if (!dpu_enc || !dpu_enc->cur_master) { 1158 DPU_ERROR("invalid dpu encoder/master\n"); 1159 return; 1160 } 1161 1162 1163 if (dpu_enc->disp_info.intf_type == INTF_DP && 1164 dpu_enc->cur_master->hw_mdptop && 1165 dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select) 1166 dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select( 1167 dpu_enc->cur_master->hw_mdptop); 1168 1169 _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info); 1170 1171 if (dpu_enc->disp_info.intf_type == INTF_DSI && 1172 !WARN_ON(dpu_enc->num_phys_encs == 0)) { 1173 unsigned bpc = dpu_enc->connector->display_info.bpc; 1174 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { 1175 if (!dpu_enc->hw_pp[i]) 1176 continue; 1177 _dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc); 1178 } 1179 } 1180 } 1181 1182 void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc) 1183 { 1184 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 1185 1186 mutex_lock(&dpu_enc->enc_lock); 1187 1188 if (!dpu_enc->enabled) 1189 goto out; 1190 1191 if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore) 1192 dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave); 1193 if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore) 1194 dpu_enc->cur_master->ops.restore(dpu_enc->cur_master); 1195 1196 _dpu_encoder_virt_enable_helper(drm_enc); 1197 1198 out: 1199 mutex_unlock(&dpu_enc->enc_lock); 1200 } 1201 1202 static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc, 1203 struct drm_atomic_state *state) 1204 { 1205 struct dpu_encoder_virt *dpu_enc = NULL; 1206 int ret = 0; 1207 struct drm_display_mode *cur_mode = NULL; 1208 1209 dpu_enc = to_dpu_encoder_virt(drm_enc); 1210 1211 dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc); 1212 1213 mutex_lock(&dpu_enc->enc_lock); 1214 1215 dpu_enc->commit_done_timedout = false; 1216 1217 dpu_enc->connector = 
drm_atomic_get_new_connector_for_encoder(state, drm_enc); 1218 1219 cur_mode = &dpu_enc->base.crtc->state->adjusted_mode; 1220 1221 trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay, 1222 cur_mode->vdisplay); 1223 1224 /* always enable slave encoder before master */ 1225 if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable) 1226 dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave); 1227 1228 if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable) 1229 dpu_enc->cur_master->ops.enable(dpu_enc->cur_master); 1230 1231 ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF); 1232 if (ret) { 1233 DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n", 1234 ret); 1235 goto out; 1236 } 1237 1238 _dpu_encoder_virt_enable_helper(drm_enc); 1239 1240 dpu_enc->enabled = true; 1241 1242 out: 1243 mutex_unlock(&dpu_enc->enc_lock); 1244 } 1245 1246 static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc, 1247 struct drm_atomic_state *state) 1248 { 1249 struct dpu_encoder_virt *dpu_enc = NULL; 1250 struct drm_crtc *crtc; 1251 struct drm_crtc_state *old_state = NULL; 1252 int i = 0; 1253 1254 dpu_enc = to_dpu_encoder_virt(drm_enc); 1255 DPU_DEBUG_ENC(dpu_enc, "\n"); 1256 1257 crtc = drm_atomic_get_old_crtc_for_encoder(state, drm_enc); 1258 if (crtc) 1259 old_state = drm_atomic_get_old_crtc_state(state, crtc); 1260 1261 /* 1262 * The encoder is already disabled if self refresh mode was set earlier, 1263 * in the old_state for the corresponding crtc. 1264 */ 1265 if (old_state && old_state->self_refresh_active) 1266 return; 1267 1268 mutex_lock(&dpu_enc->enc_lock); 1269 dpu_enc->enabled = false; 1270 1271 trace_dpu_enc_disable(DRMID(drm_enc)); 1272 1273 /* wait for idle */ 1274 dpu_encoder_wait_for_tx_complete(drm_enc); 1275 1276 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP); 1277 1278 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1279 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1280 1281 if (phys->ops.disable) 1282 phys->ops.disable(phys); 1283 } 1284 1285 1286 /* after phys waits for frame-done, should be no more frames pending */ 1287 if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) { 1288 DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id); 1289 del_timer_sync(&dpu_enc->frame_done_timer); 1290 } 1291 1292 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP); 1293 1294 dpu_enc->connector = NULL; 1295 1296 DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); 1297 1298 mutex_unlock(&dpu_enc->enc_lock); 1299 } 1300 1301 static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog, 1302 struct dpu_rm *dpu_rm, 1303 enum dpu_intf_type type, u32 controller_id) 1304 { 1305 int i = 0; 1306 1307 if (type == INTF_WB) 1308 return NULL; 1309 1310 for (i = 0; i < catalog->intf_count; i++) { 1311 if (catalog->intf[i].type == type 1312 && catalog->intf[i].controller_id == controller_id) { 1313 return dpu_rm_get_intf(dpu_rm, catalog->intf[i].id); 1314 } 1315 } 1316 1317 return NULL; 1318 } 1319 1320 void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc, 1321 struct dpu_encoder_phys *phy_enc) 1322 { 1323 struct dpu_encoder_virt *dpu_enc = NULL; 1324 unsigned long lock_flags; 1325 1326 if (!drm_enc || !phy_enc) 1327 return; 1328 1329 DPU_ATRACE_BEGIN("encoder_vblank_callback"); 1330 dpu_enc = to_dpu_encoder_virt(drm_enc); 1331 1332 atomic_inc(&phy_enc->vsync_cnt); 1333 1334 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); 1335 if (dpu_enc->crtc) 1336 dpu_crtc_vblank_callback(dpu_enc->crtc); 1337 
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); 1338 1339 DPU_ATRACE_END("encoder_vblank_callback"); 1340 } 1341 1342 void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc, 1343 struct dpu_encoder_phys *phy_enc) 1344 { 1345 if (!phy_enc) 1346 return; 1347 1348 DPU_ATRACE_BEGIN("encoder_underrun_callback"); 1349 atomic_inc(&phy_enc->underrun_cnt); 1350 1351 /* trigger dump only on the first underrun */ 1352 if (atomic_read(&phy_enc->underrun_cnt) == 1) 1353 msm_disp_snapshot_state(drm_enc->dev); 1354 1355 trace_dpu_enc_underrun_cb(DRMID(drm_enc), 1356 atomic_read(&phy_enc->underrun_cnt)); 1357 DPU_ATRACE_END("encoder_underrun_callback"); 1358 } 1359 1360 void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc) 1361 { 1362 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 1363 unsigned long lock_flags; 1364 1365 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); 1366 /* crtc should always be cleared before re-assigning */ 1367 WARN_ON(crtc && dpu_enc->crtc); 1368 dpu_enc->crtc = crtc; 1369 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); 1370 } 1371 1372 void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc, 1373 struct drm_crtc *crtc, bool enable) 1374 { 1375 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 1376 unsigned long lock_flags; 1377 int i; 1378 1379 trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable); 1380 1381 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); 1382 if (dpu_enc->crtc != crtc) { 1383 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); 1384 return; 1385 } 1386 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); 1387 1388 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1389 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1390 1391 if (phys->ops.control_vblank_irq) 1392 phys->ops.control_vblank_irq(phys, enable); 1393 } 1394 } 1395 1396 void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc, 1397 void (*frame_event_cb)(void *, u32 event), 1398 void *frame_event_cb_data) 1399 { 1400 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 1401 unsigned long lock_flags; 1402 bool enable; 1403 1404 enable = frame_event_cb ? true : false; 1405 1406 if (!drm_enc) { 1407 DPU_ERROR("invalid encoder\n"); 1408 return; 1409 } 1410 trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable); 1411 1412 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); 1413 dpu_enc->crtc_frame_event_cb = frame_event_cb; 1414 dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data; 1415 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); 1416 } 1417 1418 void dpu_encoder_frame_done_callback( 1419 struct drm_encoder *drm_enc, 1420 struct dpu_encoder_phys *ready_phys, u32 event) 1421 { 1422 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 1423 unsigned int i; 1424 1425 if (event & (DPU_ENCODER_FRAME_EVENT_DONE 1426 | DPU_ENCODER_FRAME_EVENT_ERROR 1427 | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) { 1428 1429 if (!dpu_enc->frame_busy_mask[0]) { 1430 /** 1431 * suppress frame_done without waiter, 1432 * likely autorefresh 1433 */ 1434 trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event, 1435 dpu_encoder_helper_get_intf_type(ready_phys->intf_mode), 1436 ready_phys->hw_intf ? ready_phys->hw_intf->idx : -1, 1437 ready_phys->hw_wb ? 
ready_phys->hw_wb->idx : -1); 1438 return; 1439 } 1440 1441 /* One of the physical encoders has become idle */ 1442 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1443 if (dpu_enc->phys_encs[i] == ready_phys) { 1444 trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i, 1445 dpu_enc->frame_busy_mask[0]); 1446 clear_bit(i, dpu_enc->frame_busy_mask); 1447 } 1448 } 1449 1450 if (!dpu_enc->frame_busy_mask[0]) { 1451 atomic_set(&dpu_enc->frame_done_timeout_ms, 0); 1452 del_timer(&dpu_enc->frame_done_timer); 1453 1454 dpu_encoder_resource_control(drm_enc, 1455 DPU_ENC_RC_EVENT_FRAME_DONE); 1456 1457 if (dpu_enc->crtc_frame_event_cb) 1458 dpu_enc->crtc_frame_event_cb( 1459 dpu_enc->crtc_frame_event_cb_data, 1460 event); 1461 } 1462 } else { 1463 if (dpu_enc->crtc_frame_event_cb) 1464 dpu_enc->crtc_frame_event_cb( 1465 dpu_enc->crtc_frame_event_cb_data, event); 1466 } 1467 } 1468 1469 static void dpu_encoder_off_work(struct work_struct *work) 1470 { 1471 struct dpu_encoder_virt *dpu_enc = container_of(work, 1472 struct dpu_encoder_virt, delayed_off_work.work); 1473 1474 dpu_encoder_resource_control(&dpu_enc->base, 1475 DPU_ENC_RC_EVENT_ENTER_IDLE); 1476 1477 dpu_encoder_frame_done_callback(&dpu_enc->base, NULL, 1478 DPU_ENCODER_FRAME_EVENT_IDLE); 1479 } 1480 1481 /** 1482 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder 1483 * @drm_enc: Pointer to drm encoder structure 1484 * @phys: Pointer to physical encoder structure 1485 * @extra_flush_bits: Additional bit mask to include in flush trigger 1486 */ 1487 static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc, 1488 struct dpu_encoder_phys *phys, uint32_t extra_flush_bits) 1489 { 1490 struct dpu_hw_ctl *ctl; 1491 int pending_kickoff_cnt; 1492 u32 ret = UINT_MAX; 1493 1494 if (!phys->hw_pp) { 1495 DPU_ERROR("invalid pingpong hw\n"); 1496 return; 1497 } 1498 1499 ctl = phys->hw_ctl; 1500 if (!ctl->ops.trigger_flush) { 1501 DPU_ERROR("missing trigger cb\n"); 1502 return; 1503 } 1504 1505 pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys); 1506 1507 if (extra_flush_bits && ctl->ops.update_pending_flush) 1508 ctl->ops.update_pending_flush(ctl, extra_flush_bits); 1509 1510 ctl->ops.trigger_flush(ctl); 1511 1512 if (ctl->ops.get_pending_flush) 1513 ret = ctl->ops.get_pending_flush(ctl); 1514 1515 trace_dpu_enc_trigger_flush(DRMID(drm_enc), 1516 dpu_encoder_helper_get_intf_type(phys->intf_mode), 1517 phys->hw_intf ? phys->hw_intf->idx : -1, 1518 phys->hw_wb ? 
phys->hw_wb->idx : -1, 1519 pending_kickoff_cnt, ctl->idx, 1520 extra_flush_bits, ret); 1521 } 1522 1523 /** 1524 * _dpu_encoder_trigger_start - trigger start for a physical encoder 1525 * @phys: Pointer to physical encoder structure 1526 */ 1527 static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys) 1528 { 1529 if (!phys) { 1530 DPU_ERROR("invalid argument(s)\n"); 1531 return; 1532 } 1533 1534 if (!phys->hw_pp) { 1535 DPU_ERROR("invalid pingpong hw\n"); 1536 return; 1537 } 1538 1539 if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED) 1540 phys->ops.trigger_start(phys); 1541 } 1542 1543 void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc) 1544 { 1545 struct dpu_hw_ctl *ctl; 1546 1547 ctl = phys_enc->hw_ctl; 1548 if (ctl->ops.trigger_start) { 1549 ctl->ops.trigger_start(ctl); 1550 trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx); 1551 } 1552 } 1553 1554 static int dpu_encoder_helper_wait_event_timeout( 1555 int32_t drm_id, 1556 u32 irq_idx, 1557 struct dpu_encoder_wait_info *info) 1558 { 1559 int rc = 0; 1560 s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms; 1561 s64 jiffies = msecs_to_jiffies(info->timeout_ms); 1562 s64 time; 1563 1564 do { 1565 rc = wait_event_timeout(*(info->wq), 1566 atomic_read(info->atomic_cnt) == 0, jiffies); 1567 time = ktime_to_ms(ktime_get()); 1568 1569 trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time, 1570 expected_time, 1571 atomic_read(info->atomic_cnt)); 1572 /* If we timed out, counter is valid and time is less, wait again */ 1573 } while (atomic_read(info->atomic_cnt) && (rc == 0) && 1574 (time < expected_time)); 1575 1576 return rc; 1577 } 1578 1579 static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc) 1580 { 1581 struct dpu_encoder_virt *dpu_enc; 1582 struct dpu_hw_ctl *ctl; 1583 int rc; 1584 struct drm_encoder *drm_enc; 1585 1586 dpu_enc = to_dpu_encoder_virt(phys_enc->parent); 1587 ctl = phys_enc->hw_ctl; 1588 drm_enc = phys_enc->parent; 1589 1590 if (!ctl->ops.reset) 1591 return; 1592 1593 DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc), 1594 ctl->idx); 1595 1596 rc = ctl->ops.reset(ctl); 1597 if (rc) { 1598 DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx); 1599 msm_disp_snapshot_state(drm_enc->dev); 1600 } 1601 1602 phys_enc->enable_state = DPU_ENC_ENABLED; 1603 } 1604 1605 /** 1606 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff 1607 * Iterate through the physical encoders and perform consolidated flush 1608 * and/or control start triggering as needed. This is done in the virtual 1609 * encoder rather than the individual physical ones in order to handle 1610 * use cases that require visibility into multiple physical encoders at 1611 * a time. 
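 *
 * Illustrative example (a sketch assuming a two-pipe split where the
 * slave reports needs_single_flush()): the slave's pending flush bits
 * are collected with get_pending_flush() and ORed into pending_flush,
 * a single _dpu_encoder_trigger_flush() on the master then kicks both
 * pipes, and one trigger_start() on the master starts the frame.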
1612 * @dpu_enc: Pointer to virtual encoder structure 1613 */ 1614 static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc) 1615 { 1616 struct dpu_hw_ctl *ctl; 1617 uint32_t i, pending_flush; 1618 unsigned long lock_flags; 1619 1620 pending_flush = 0x0; 1621 1622 /* update pending counts and trigger kickoff ctl flush atomically */ 1623 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); 1624 1625 /* don't perform flush/start operations for slave encoders */ 1626 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1627 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 1628 1629 if (phys->enable_state == DPU_ENC_DISABLED) 1630 continue; 1631 1632 ctl = phys->hw_ctl; 1633 1634 /* 1635 * This is cleared in frame_done worker, which isn't invoked 1636 * for async commits. So don't set this for async, since it'll 1637 * roll over to the next commit. 1638 */ 1639 if (phys->split_role != ENC_ROLE_SLAVE) 1640 set_bit(i, dpu_enc->frame_busy_mask); 1641 1642 if (!phys->ops.needs_single_flush || 1643 !phys->ops.needs_single_flush(phys)) 1644 _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0); 1645 else if (ctl->ops.get_pending_flush) 1646 pending_flush |= ctl->ops.get_pending_flush(ctl); 1647 } 1648 1649 /* for split flush, combine pending flush masks and send to master */ 1650 if (pending_flush && dpu_enc->cur_master) { 1651 _dpu_encoder_trigger_flush( 1652 &dpu_enc->base, 1653 dpu_enc->cur_master, 1654 pending_flush); 1655 } 1656 1657 _dpu_encoder_trigger_start(dpu_enc->cur_master); 1658 1659 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); 1660 } 1661 1662 void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc) 1663 { 1664 struct dpu_encoder_virt *dpu_enc; 1665 struct dpu_encoder_phys *phys; 1666 unsigned int i; 1667 struct dpu_hw_ctl *ctl; 1668 struct msm_display_info *disp_info; 1669 1670 if (!drm_enc) { 1671 DPU_ERROR("invalid encoder\n"); 1672 return; 1673 } 1674 dpu_enc = to_dpu_encoder_virt(drm_enc); 1675 disp_info = &dpu_enc->disp_info; 1676 1677 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1678 phys = dpu_enc->phys_encs[i]; 1679 1680 ctl = phys->hw_ctl; 1681 ctl->ops.clear_pending_flush(ctl); 1682 1683 /* update only for command mode primary ctl */ 1684 if ((phys == dpu_enc->cur_master) && 1685 disp_info->is_cmd_mode 1686 && ctl->ops.trigger_pending) 1687 ctl->ops.trigger_pending(ctl); 1688 } 1689 } 1690 1691 static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc, 1692 struct drm_display_mode *mode) 1693 { 1694 u64 pclk_rate; 1695 u32 pclk_period; 1696 u32 line_time; 1697 1698 /* 1699 * For linetime calculation, only operate on master encoder. 1700 */ 1701 if (!dpu_enc->cur_master) 1702 return 0; 1703 1704 if (!dpu_enc->cur_master->ops.get_line_count) { 1705 DPU_ERROR("get_line_count function not defined\n"); 1706 return 0; 1707 } 1708 1709 pclk_rate = mode->clock; /* pixel clock in kHz */ 1710 if (pclk_rate == 0) { 1711 DPU_ERROR("pclk is 0, cannot calculate line time\n"); 1712 return 0; 1713 } 1714 1715 pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate); 1716 if (pclk_period == 0) { 1717 DPU_ERROR("pclk period is 0\n"); 1718 return 0; 1719 } 1720 1721 /* 1722 * Line time calculation based on Pixel clock and HTOTAL. 1723 * Final unit is in ns. 
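	 *
	 * Worked example (hypothetical 1920x1080@60 timing, not taken from a
	 * real panel): mode->clock = 148500 kHz gives
	 * pclk_period = DIV_ROUND_UP(1000000000, 148500) = 6735 (ps), and
	 * with mode->htotal = 2200 the line time is 6735 * 2200 / 1000
	 * ~= 14817 ns, i.e. roughly 14.8 us per line.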
1724 */ 1725 line_time = (pclk_period * mode->htotal) / 1000; 1726 if (line_time == 0) { 1727 DPU_ERROR("line time calculation is 0\n"); 1728 return 0; 1729 } 1730 1731 DPU_DEBUG_ENC(dpu_enc, 1732 "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n", 1733 pclk_rate, pclk_period, line_time); 1734 1735 return line_time; 1736 } 1737 1738 int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time) 1739 { 1740 struct drm_display_mode *mode; 1741 struct dpu_encoder_virt *dpu_enc; 1742 u32 cur_line; 1743 u32 line_time; 1744 u32 vtotal, time_to_vsync; 1745 ktime_t cur_time; 1746 1747 dpu_enc = to_dpu_encoder_virt(drm_enc); 1748 1749 if (!drm_enc->crtc || !drm_enc->crtc->state) { 1750 DPU_ERROR("crtc/crtc state object is NULL\n"); 1751 return -EINVAL; 1752 } 1753 mode = &drm_enc->crtc->state->adjusted_mode; 1754 1755 line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode); 1756 if (!line_time) 1757 return -EINVAL; 1758 1759 cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master); 1760 1761 vtotal = mode->vtotal; 1762 if (cur_line >= vtotal) 1763 time_to_vsync = line_time * vtotal; 1764 else 1765 time_to_vsync = line_time * (vtotal - cur_line); 1766 1767 if (time_to_vsync == 0) { 1768 DPU_ERROR("time to vsync should not be zero, vtotal=%d\n", 1769 vtotal); 1770 return -EINVAL; 1771 } 1772 1773 cur_time = ktime_get(); 1774 *wakeup_time = ktime_add_ns(cur_time, time_to_vsync); 1775 1776 DPU_DEBUG_ENC(dpu_enc, 1777 "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n", 1778 cur_line, vtotal, time_to_vsync, 1779 ktime_to_ms(cur_time), 1780 ktime_to_ms(*wakeup_time)); 1781 return 0; 1782 } 1783 1784 static u32 1785 dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc, 1786 u32 enc_ip_width) 1787 { 1788 int ssm_delay, total_pixels, soft_slice_per_enc; 1789 1790 soft_slice_per_enc = enc_ip_width / dsc->slice_width; 1791 1792 /* 1793 * minimum number of initial line pixels is a sum of: 1794 * 1. sub-stream multiplexer delay (83 groups for 8bpc, 1795 * 91 for 10 bpc) * 3 1796 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3 1797 * 3. the initial xmit delay 1798 * 4. total pipeline delay through the "lock step" of encoder (47) 1799 * 5. 6 additional pixels as the output of the rate buffer is 1800 * 48 bits wide 1801 */ 1802 ssm_delay = ((dsc->bits_per_component < 10) ? 
84 : 92);
	total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
	if (soft_slice_per_enc > 1)
		total_pixels += (ssm_delay * 3);
	return DIV_ROUND_UP(total_pixels, dsc->slice_width);
}

static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl,
				     struct dpu_hw_dsc *hw_dsc,
				     struct dpu_hw_pingpong *hw_pp,
				     struct drm_dsc_config *dsc,
				     u32 common_mode,
				     u32 initial_lines)
{
	if (hw_dsc->ops.dsc_config)
		hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);

	if (hw_dsc->ops.dsc_config_thresh)
		hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);

	if (hw_pp->ops.setup_dsc)
		hw_pp->ops.setup_dsc(hw_pp);

	if (hw_dsc->ops.dsc_bind_pingpong_blk)
		hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, hw_pp->idx);

	if (hw_pp->ops.enable_dsc)
		hw_pp->ops.enable_dsc(hw_pp);

	if (ctl->ops.update_pending_flush_dsc)
		ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
}

static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
				 struct drm_dsc_config *dsc)
{
	/* coding only for 2LM, 2enc, 1 dsc config */
	struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
	struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	int this_frame_slices;
	int intf_ip_w, enc_ip_w;
	int dsc_common_mode;
	int pic_width;
	u32 initial_lines;
	int i;

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
		hw_pp[i] = dpu_enc->hw_pp[i];
		hw_dsc[i] = dpu_enc->hw_dsc[i];

		if (!hw_pp[i] || !hw_dsc[i]) {
			DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
			return;
		}
	}

	dsc_common_mode = 0;
	pic_width = dsc->pic_width;

	dsc_common_mode = DSC_MODE_SPLIT_PANEL;
	if (dpu_encoder_use_dsc_merge(enc_master->parent))
		dsc_common_mode |= DSC_MODE_MULTIPLEX;
	if (enc_master->intf_mode == INTF_MODE_VIDEO)
		dsc_common_mode |= DSC_MODE_VIDEO;

	this_frame_slices = pic_width / dsc->slice_width;
	intf_ip_w = this_frame_slices * dsc->slice_width;

	/*
	 * dsc merge case: when using 2 encoders for the same stream,
	 * the number of slices needs to be the same on both encoders.
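	 *
	 * Worked example (hypothetical numbers, not from a real panel): with
	 * pic_width = 2160 and slice_width = 540, this_frame_slices = 4 and
	 * intf_ip_w = 2160, so each of the two DSC encoders is handed
	 * enc_ip_w = 1080 pixels below when computing its initial_lines.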
1875 */ 1876 enc_ip_w = intf_ip_w / 2; 1877 initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w); 1878 1879 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) 1880 dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i], 1881 dsc, dsc_common_mode, initial_lines); 1882 } 1883 1884 void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc) 1885 { 1886 struct dpu_encoder_virt *dpu_enc; 1887 struct dpu_encoder_phys *phys; 1888 bool needs_hw_reset = false; 1889 unsigned int i; 1890 1891 dpu_enc = to_dpu_encoder_virt(drm_enc); 1892 1893 trace_dpu_enc_prepare_kickoff(DRMID(drm_enc)); 1894 1895 /* prepare for next kickoff, may include waiting on previous kickoff */ 1896 DPU_ATRACE_BEGIN("enc_prepare_for_kickoff"); 1897 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1898 phys = dpu_enc->phys_encs[i]; 1899 if (phys->ops.prepare_for_kickoff) 1900 phys->ops.prepare_for_kickoff(phys); 1901 if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET) 1902 needs_hw_reset = true; 1903 } 1904 DPU_ATRACE_END("enc_prepare_for_kickoff"); 1905 1906 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF); 1907 1908 /* if any phys needs reset, reset all phys, in-order */ 1909 if (needs_hw_reset) { 1910 trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc)); 1911 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1912 dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]); 1913 } 1914 } 1915 1916 if (dpu_enc->dsc) 1917 dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc); 1918 } 1919 1920 bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc) 1921 { 1922 struct dpu_encoder_virt *dpu_enc; 1923 unsigned int i; 1924 struct dpu_encoder_phys *phys; 1925 1926 dpu_enc = to_dpu_encoder_virt(drm_enc); 1927 1928 if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) { 1929 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1930 phys = dpu_enc->phys_encs[i]; 1931 if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) { 1932 DPU_DEBUG("invalid FB not kicking off\n"); 1933 return false; 1934 } 1935 } 1936 } 1937 1938 return true; 1939 } 1940 1941 void dpu_encoder_kickoff(struct drm_encoder *drm_enc) 1942 { 1943 struct dpu_encoder_virt *dpu_enc; 1944 struct dpu_encoder_phys *phys; 1945 unsigned long timeout_ms; 1946 unsigned int i; 1947 1948 DPU_ATRACE_BEGIN("encoder_kickoff"); 1949 dpu_enc = to_dpu_encoder_virt(drm_enc); 1950 1951 trace_dpu_enc_kickoff(DRMID(drm_enc)); 1952 1953 timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 / 1954 drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode); 1955 1956 atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms); 1957 mod_timer(&dpu_enc->frame_done_timer, 1958 jiffies + msecs_to_jiffies(timeout_ms)); 1959 1960 /* All phys encs are ready to go, trigger the kickoff */ 1961 _dpu_encoder_kickoff_phys(dpu_enc); 1962 1963 /* allow phys encs to handle any post-kickoff business */ 1964 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1965 phys = dpu_enc->phys_encs[i]; 1966 if (phys->ops.handle_post_kickoff) 1967 phys->ops.handle_post_kickoff(phys); 1968 } 1969 1970 DPU_ATRACE_END("encoder_kickoff"); 1971 } 1972 1973 static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc) 1974 { 1975 struct dpu_hw_mixer_cfg mixer; 1976 int i, num_lm; 1977 struct dpu_global_state *global_state; 1978 struct dpu_hw_blk *hw_lm[2]; 1979 struct dpu_hw_mixer *hw_mixer[2]; 1980 struct dpu_hw_ctl *ctl = phys_enc->hw_ctl; 1981 1982 memset(&mixer, 0, sizeof(mixer)); 1983 1984 /* reset all mixers for this encoder */ 1985 if (phys_enc->hw_ctl->ops.clear_all_blendstages) 1986 
phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl); 1987 1988 global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms); 1989 1990 num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state, 1991 phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); 1992 1993 for (i = 0; i < num_lm; i++) { 1994 hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]); 1995 if (phys_enc->hw_ctl->ops.update_pending_flush_mixer) 1996 phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx); 1997 1998 /* clear all blendstages */ 1999 if (phys_enc->hw_ctl->ops.setup_blendstage) 2000 phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL); 2001 } 2002 } 2003 2004 static void dpu_encoder_dsc_pipe_clr(struct dpu_hw_ctl *ctl, 2005 struct dpu_hw_dsc *hw_dsc, 2006 struct dpu_hw_pingpong *hw_pp) 2007 { 2008 if (hw_dsc->ops.dsc_disable) 2009 hw_dsc->ops.dsc_disable(hw_dsc); 2010 2011 if (hw_pp->ops.disable_dsc) 2012 hw_pp->ops.disable_dsc(hw_pp); 2013 2014 if (hw_dsc->ops.dsc_bind_pingpong_blk) 2015 hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, PINGPONG_NONE); 2016 2017 if (ctl->ops.update_pending_flush_dsc) 2018 ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx); 2019 } 2020 2021 static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc) 2022 { 2023 /* coding only for 2LM, 2enc, 1 dsc config */ 2024 struct dpu_encoder_phys *enc_master = dpu_enc->cur_master; 2025 struct dpu_hw_ctl *ctl = enc_master->hw_ctl; 2026 struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC]; 2027 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC]; 2028 int i; 2029 2030 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { 2031 hw_pp[i] = dpu_enc->hw_pp[i]; 2032 hw_dsc[i] = dpu_enc->hw_dsc[i]; 2033 2034 if (hw_pp[i] && hw_dsc[i]) 2035 dpu_encoder_dsc_pipe_clr(ctl, hw_dsc[i], hw_pp[i]); 2036 } 2037 } 2038 2039 void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) 2040 { 2041 struct dpu_hw_ctl *ctl = phys_enc->hw_ctl; 2042 struct dpu_hw_intf_cfg intf_cfg = { 0 }; 2043 int i; 2044 struct dpu_encoder_virt *dpu_enc; 2045 2046 dpu_enc = to_dpu_encoder_virt(phys_enc->parent); 2047 2048 phys_enc->hw_ctl->ops.reset(ctl); 2049 2050 dpu_encoder_helper_reset_mixers(phys_enc); 2051 2052 /* 2053 * TODO: move the once-only operation like CTL flush/trigger 2054 * into dpu_encoder_virt_disable() and all operations which need 2055 * to be done per phys encoder into the phys_disable() op. 
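 *
 * The per-resource teardown below currently runs in this order:
 * unbind the pingpong from the WB or INTF block(s) and mark the
 * matching flush bits pending, drop merge-3D back to BLEND_3D_NONE,
 * tear down any active DSC pipes, clear the CTL interface
 * configuration, and finish with a single flush/start pair before
 * clearing the pending-flush bookkeeping.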
2056 */ 2057 if (phys_enc->hw_wb) { 2058 /* disable the PP block */ 2059 if (phys_enc->hw_wb->ops.bind_pingpong_blk) 2060 phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE); 2061 2062 /* mark WB flush as pending */ 2063 if (phys_enc->hw_ctl->ops.update_pending_flush_wb) 2064 phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx); 2065 } else { 2066 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2067 if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk) 2068 phys_enc->hw_intf->ops.bind_pingpong_blk( 2069 dpu_enc->phys_encs[i]->hw_intf, 2070 PINGPONG_NONE); 2071 2072 /* mark INTF flush as pending */ 2073 if (phys_enc->hw_ctl->ops.update_pending_flush_intf) 2074 phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl, 2075 dpu_enc->phys_encs[i]->hw_intf->idx); 2076 } 2077 } 2078 2079 /* reset the merge 3D HW block */ 2080 if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) { 2081 phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, 2082 BLEND_3D_NONE); 2083 if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d) 2084 phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl, 2085 phys_enc->hw_pp->merge_3d->idx); 2086 } 2087 2088 if (dpu_enc->dsc) { 2089 dpu_encoder_unprep_dsc(dpu_enc); 2090 dpu_enc->dsc = NULL; 2091 } 2092 2093 intf_cfg.stream_sel = 0; /* Don't care value for video mode */ 2094 intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); 2095 intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc); 2096 2097 if (phys_enc->hw_intf) 2098 intf_cfg.intf = phys_enc->hw_intf->idx; 2099 if (phys_enc->hw_wb) 2100 intf_cfg.wb = phys_enc->hw_wb->idx; 2101 2102 if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) 2103 intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx; 2104 2105 if (ctl->ops.reset_intf_cfg) 2106 ctl->ops.reset_intf_cfg(ctl, &intf_cfg); 2107 2108 ctl->ops.trigger_flush(ctl); 2109 ctl->ops.trigger_start(ctl); 2110 ctl->ops.clear_pending_flush(ctl); 2111 } 2112 2113 #ifdef CONFIG_DEBUG_FS 2114 static int _dpu_encoder_status_show(struct seq_file *s, void *data) 2115 { 2116 struct dpu_encoder_virt *dpu_enc = s->private; 2117 int i; 2118 2119 mutex_lock(&dpu_enc->enc_lock); 2120 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2121 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2122 2123 seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ", 2124 phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1, 2125 phys->hw_wb ? 
phys->hw_wb->idx - WB_0 : -1, 2126 atomic_read(&phys->vsync_cnt), 2127 atomic_read(&phys->underrun_cnt)); 2128 2129 seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode)); 2130 } 2131 mutex_unlock(&dpu_enc->enc_lock); 2132 2133 return 0; 2134 } 2135 2136 DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status); 2137 2138 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) 2139 { 2140 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 2141 2142 char name[12]; 2143 2144 if (!drm_enc->dev) { 2145 DPU_ERROR("invalid encoder or kms\n"); 2146 return -EINVAL; 2147 } 2148 2149 snprintf(name, sizeof(name), "encoder%u", drm_enc->base.id); 2150 2151 /* create overall sub-directory for the encoder */ 2152 dpu_enc->debugfs_root = debugfs_create_dir(name, 2153 drm_enc->dev->primary->debugfs_root); 2154 2155 /* don't error check these */ 2156 debugfs_create_file("status", 0600, 2157 dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops); 2158 2159 return 0; 2160 } 2161 #else 2162 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) 2163 { 2164 return 0; 2165 } 2166 #endif 2167 2168 static int dpu_encoder_late_register(struct drm_encoder *encoder) 2169 { 2170 return _dpu_encoder_init_debugfs(encoder); 2171 } 2172 2173 static void dpu_encoder_early_unregister(struct drm_encoder *encoder) 2174 { 2175 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder); 2176 2177 debugfs_remove_recursive(dpu_enc->debugfs_root); 2178 } 2179 2180 static int dpu_encoder_virt_add_phys_encs( 2181 struct drm_device *dev, 2182 struct msm_display_info *disp_info, 2183 struct dpu_encoder_virt *dpu_enc, 2184 struct dpu_enc_phys_init_params *params) 2185 { 2186 struct dpu_encoder_phys *enc = NULL; 2187 2188 DPU_DEBUG_ENC(dpu_enc, "\n"); 2189 2190 /* 2191 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types 2192 * in this function, check up-front. 
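 *
 * In practice a single physical encoder is created per call: a
 * writeback encoder for INTF_WB displays, a command-mode encoder
 * when the panel runs in command mode, and a video-mode encoder
 * otherwise. The new encoder becomes cur_slave when this tile has
 * the slave split role and cur_master in every other case.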
2193 */ 2194 if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >= 2195 ARRAY_SIZE(dpu_enc->phys_encs)) { 2196 DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n", 2197 dpu_enc->num_phys_encs); 2198 return -EINVAL; 2199 } 2200 2201 2202 if (disp_info->intf_type == INTF_WB) { 2203 enc = dpu_encoder_phys_wb_init(dev, params); 2204 2205 if (IS_ERR(enc)) { 2206 DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n", 2207 PTR_ERR(enc)); 2208 return PTR_ERR(enc); 2209 } 2210 2211 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2212 ++dpu_enc->num_phys_encs; 2213 } else if (disp_info->is_cmd_mode) { 2214 enc = dpu_encoder_phys_cmd_init(dev, params); 2215 2216 if (IS_ERR(enc)) { 2217 DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n", 2218 PTR_ERR(enc)); 2219 return PTR_ERR(enc); 2220 } 2221 2222 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2223 ++dpu_enc->num_phys_encs; 2224 } else { 2225 enc = dpu_encoder_phys_vid_init(dev, params); 2226 2227 if (IS_ERR(enc)) { 2228 DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n", 2229 PTR_ERR(enc)); 2230 return PTR_ERR(enc); 2231 } 2232 2233 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2234 ++dpu_enc->num_phys_encs; 2235 } 2236 2237 if (params->split_role == ENC_ROLE_SLAVE) 2238 dpu_enc->cur_slave = enc; 2239 else 2240 dpu_enc->cur_master = enc; 2241 2242 return 0; 2243 } 2244 2245 static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, 2246 struct dpu_kms *dpu_kms, 2247 struct msm_display_info *disp_info) 2248 { 2249 int ret = 0; 2250 int i = 0; 2251 struct dpu_enc_phys_init_params phys_params; 2252 2253 if (!dpu_enc) { 2254 DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL); 2255 return -EINVAL; 2256 } 2257 2258 dpu_enc->cur_master = NULL; 2259 2260 memset(&phys_params, 0, sizeof(phys_params)); 2261 phys_params.dpu_kms = dpu_kms; 2262 phys_params.parent = &dpu_enc->base; 2263 phys_params.enc_spinlock = &dpu_enc->enc_spinlock; 2264 2265 WARN_ON(disp_info->num_of_h_tiles < 1); 2266 2267 DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles); 2268 2269 if (disp_info->intf_type != INTF_WB) 2270 dpu_enc->idle_pc_supported = 2271 dpu_kms->catalog->caps->has_idle_pc; 2272 2273 mutex_lock(&dpu_enc->enc_lock); 2274 for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) { 2275 /* 2276 * Left-most tile is at index 0, content is controller id 2277 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right 2278 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right 2279 */ 2280 u32 controller_id = disp_info->h_tile_instance[i]; 2281 2282 if (disp_info->num_of_h_tiles > 1) { 2283 if (i == 0) 2284 phys_params.split_role = ENC_ROLE_MASTER; 2285 else 2286 phys_params.split_role = ENC_ROLE_SLAVE; 2287 } else { 2288 phys_params.split_role = ENC_ROLE_SOLO; 2289 } 2290 2291 DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n", 2292 i, controller_id, phys_params.split_role); 2293 2294 phys_params.hw_intf = dpu_encoder_get_intf(dpu_kms->catalog, &dpu_kms->rm, 2295 disp_info->intf_type, 2296 controller_id); 2297 2298 if (disp_info->intf_type == INTF_WB && controller_id < WB_MAX) 2299 phys_params.hw_wb = dpu_rm_get_wb(&dpu_kms->rm, controller_id); 2300 2301 if (!phys_params.hw_intf && !phys_params.hw_wb) { 2302 DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i); 2303 ret = -EINVAL; 2304 break; 2305 } 2306 2307 if (phys_params.hw_intf && phys_params.hw_wb) { 2308 DPU_ERROR_ENC(dpu_enc, 2309 "invalid phys both intf and wb block at idx: %d\n", i); 2310 ret = -EINVAL; 2311 break; 2312 } 2313 2314 
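		/*
		 * Exactly one of hw_intf/hw_wb is populated at this point;
		 * pass it down so the matching physical encoder (vid, cmd
		 * or wb) can be created for this tile.
		 */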
ret = dpu_encoder_virt_add_phys_encs(dpu_kms->dev, disp_info, 2315 dpu_enc, &phys_params); 2316 if (ret) { 2317 DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n"); 2318 break; 2319 } 2320 } 2321 2322 mutex_unlock(&dpu_enc->enc_lock); 2323 2324 return ret; 2325 } 2326 2327 static void dpu_encoder_frame_done_timeout(struct timer_list *t) 2328 { 2329 struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t, 2330 frame_done_timer); 2331 struct drm_encoder *drm_enc = &dpu_enc->base; 2332 u32 event; 2333 2334 if (!drm_enc->dev) { 2335 DPU_ERROR("invalid parameters\n"); 2336 return; 2337 } 2338 2339 if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) { 2340 DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n", 2341 DRMID(drm_enc), dpu_enc->frame_busy_mask[0]); 2342 return; 2343 } else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) { 2344 DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc)); 2345 return; 2346 } 2347 2348 DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n"); 2349 2350 event = DPU_ENCODER_FRAME_EVENT_ERROR; 2351 trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event); 2352 dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event); 2353 } 2354 2355 static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = { 2356 .atomic_mode_set = dpu_encoder_virt_atomic_mode_set, 2357 .atomic_disable = dpu_encoder_virt_atomic_disable, 2358 .atomic_enable = dpu_encoder_virt_atomic_enable, 2359 .atomic_check = dpu_encoder_virt_atomic_check, 2360 }; 2361 2362 static const struct drm_encoder_funcs dpu_encoder_funcs = { 2363 .destroy = dpu_encoder_destroy, 2364 .late_register = dpu_encoder_late_register, 2365 .early_unregister = dpu_encoder_early_unregister, 2366 }; 2367 2368 struct drm_encoder *dpu_encoder_init(struct drm_device *dev, 2369 int drm_enc_mode, 2370 struct msm_display_info *disp_info) 2371 { 2372 struct msm_drm_private *priv = dev->dev_private; 2373 struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); 2374 struct drm_encoder *drm_enc = NULL; 2375 struct dpu_encoder_virt *dpu_enc = NULL; 2376 int ret = 0; 2377 2378 dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL); 2379 if (!dpu_enc) 2380 return ERR_PTR(-ENOMEM); 2381 2382 ret = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs, 2383 drm_enc_mode, NULL); 2384 if (ret) { 2385 devm_kfree(dev->dev, dpu_enc); 2386 return ERR_PTR(ret); 2387 } 2388 2389 drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs); 2390 2391 spin_lock_init(&dpu_enc->enc_spinlock); 2392 dpu_enc->enabled = false; 2393 mutex_init(&dpu_enc->enc_lock); 2394 mutex_init(&dpu_enc->rc_lock); 2395 2396 ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info); 2397 if (ret) 2398 goto fail; 2399 2400 atomic_set(&dpu_enc->frame_done_timeout_ms, 0); 2401 timer_setup(&dpu_enc->frame_done_timer, 2402 dpu_encoder_frame_done_timeout, 0); 2403 2404 if (disp_info->intf_type == INTF_DP) 2405 dpu_enc->wide_bus_en = msm_dp_wide_bus_available( 2406 priv->dp[disp_info->h_tile_instance[0]]); 2407 2408 INIT_DELAYED_WORK(&dpu_enc->delayed_off_work, 2409 dpu_encoder_off_work); 2410 dpu_enc->idle_timeout = IDLE_TIMEOUT; 2411 2412 memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info)); 2413 2414 DPU_DEBUG_ENC(dpu_enc, "created\n"); 2415 2416 return &dpu_enc->base; 2417 2418 fail: 2419 DPU_ERROR("failed to create encoder\n"); 2420 if (drm_enc) 2421 dpu_encoder_destroy(drm_enc); 2422 2423 return ERR_PTR(ret); 2424 } 2425 2426 /** 2427 * dpu_encoder_wait_for_commit_done() - Wait for encoder to flush 
pending state 2428 * @drm_enc: encoder pointer 2429 * 2430 * Wait for hardware to have flushed the current pending changes to hardware at 2431 * a vblank or CTL_START. Physical encoders will map this differently depending 2432 * on the type: vid mode -> vsync_irq, cmd mode -> CTL_START. 2433 * 2434 * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise 2435 */ 2436 int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_enc) 2437 { 2438 struct dpu_encoder_virt *dpu_enc = NULL; 2439 int i, ret = 0; 2440 2441 if (!drm_enc) { 2442 DPU_ERROR("invalid encoder\n"); 2443 return -EINVAL; 2444 } 2445 dpu_enc = to_dpu_encoder_virt(drm_enc); 2446 DPU_DEBUG_ENC(dpu_enc, "\n"); 2447 2448 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2449 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2450 2451 if (phys->ops.wait_for_commit_done) { 2452 DPU_ATRACE_BEGIN("wait_for_commit_done"); 2453 ret = phys->ops.wait_for_commit_done(phys); 2454 DPU_ATRACE_END("wait_for_commit_done"); 2455 if (ret == -ETIMEDOUT && !dpu_enc->commit_done_timedout) { 2456 dpu_enc->commit_done_timedout = true; 2457 msm_disp_snapshot_state(drm_enc->dev); 2458 } 2459 if (ret) 2460 return ret; 2461 } 2462 } 2463 2464 return ret; 2465 } 2466 2467 /** 2468 * dpu_encoder_wait_for_tx_complete() - Wait for encoder to transfer pixels to panel 2469 * @drm_enc: encoder pointer 2470 * 2471 * Wait for the hardware to transfer all the pixels to the panel. Physical 2472 * encoders will map this differently depending on the type: vid mode -> vsync_irq, 2473 * cmd mode -> pp_done. 2474 * 2475 * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise 2476 */ 2477 int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_enc) 2478 { 2479 struct dpu_encoder_virt *dpu_enc = NULL; 2480 int i, ret = 0; 2481 2482 if (!drm_enc) { 2483 DPU_ERROR("invalid encoder\n"); 2484 return -EINVAL; 2485 } 2486 dpu_enc = to_dpu_encoder_virt(drm_enc); 2487 DPU_DEBUG_ENC(dpu_enc, "\n"); 2488 2489 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2490 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2491 2492 if (phys->ops.wait_for_tx_complete) { 2493 DPU_ATRACE_BEGIN("wait_for_tx_complete"); 2494 ret = phys->ops.wait_for_tx_complete(phys); 2495 DPU_ATRACE_END("wait_for_tx_complete"); 2496 if (ret) 2497 return ret; 2498 } 2499 } 2500 2501 return ret; 2502 } 2503 2504 enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder) 2505 { 2506 struct dpu_encoder_virt *dpu_enc = NULL; 2507 2508 if (!encoder) { 2509 DPU_ERROR("invalid encoder\n"); 2510 return INTF_MODE_NONE; 2511 } 2512 dpu_enc = to_dpu_encoder_virt(encoder); 2513 2514 if (dpu_enc->cur_master) 2515 return dpu_enc->cur_master->intf_mode; 2516 2517 if (dpu_enc->num_phys_encs) 2518 return dpu_enc->phys_encs[0]->intf_mode; 2519 2520 return INTF_MODE_NONE; 2521 } 2522 2523 unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc) 2524 { 2525 struct drm_encoder *encoder = phys_enc->parent; 2526 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder); 2527 2528 return dpu_enc->dsc_mask; 2529 } 2530 2531 void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc, 2532 struct dpu_enc_phys_init_params *p) 2533 { 2534 int i; 2535 2536 phys_enc->hw_mdptop = p->dpu_kms->hw_mdp; 2537 phys_enc->hw_intf = p->hw_intf; 2538 phys_enc->hw_wb = p->hw_wb; 2539 phys_enc->parent = p->parent; 2540 phys_enc->dpu_kms = p->dpu_kms; 2541 phys_enc->split_role = p->split_role; 2542 phys_enc->enc_spinlock = p->enc_spinlock; 2543 phys_enc->enable_state = 
DPU_ENC_DISABLED; 2544 2545 for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++) 2546 phys_enc->irq[i] = -EINVAL; 2547 2548 atomic_set(&phys_enc->vblank_refcount, 0); 2549 atomic_set(&phys_enc->pending_kickoff_cnt, 0); 2550 atomic_set(&phys_enc->pending_ctlstart_cnt, 0); 2551 2552 atomic_set(&phys_enc->vsync_cnt, 0); 2553 atomic_set(&phys_enc->underrun_cnt, 0); 2554 2555 init_waitqueue_head(&phys_enc->pending_kickoff_wq); 2556 } 2557
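
/*
 * Worked examples (hypothetical timing, for illustration only):
 *
 * dpu_encoder_vsync_time(): for a 1080p60 mode with a 148.5 MHz pixel
 * clock, htotal = 2200 and vtotal = 1125, one line takes roughly
 * 2200 / 148.5 MHz ~= 14.8 us. If get_line_count() reports line 0,
 * time_to_vsync ~= 14.8 us * 1125 ~= 16.7 ms (one full frame) and the
 * returned wakeup_time is the current ktime plus that delta.
 *
 * dpu_encoder_kickoff(): at the same 60 Hz refresh rate the frame-done
 * timer is armed for DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 / 60
 * = 83 ms; if it fires, dpu_encoder_frame_done_timeout() reports
 * DPU_ENCODER_FRAME_EVENT_ERROR through the CRTC frame-event callback.
 */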