// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>

#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_dsc.h"
#include "dpu_hw_merge3d.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
#include "dpu_trace.h"
#include "dpu_core_irq.h"
#include "disp/msm_disp_snapshot.h"

#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

/*
 * Two to anticipate panels that can do cmd/vid dynamic switching
 * plan is to create all possible physical encoder types, and switch between
 * them at runtime
 */
#define NUM_PHYS_ENCODER_TYPES 2

#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)

#define MAX_CHANNELS_PER_ENC 2

#define IDLE_SHORT_TIMEOUT	1

#define MAX_HDISPLAY_SPLIT 1080

/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5

/**
 * enum dpu_enc_rc_events - events for resource control state machine
 * @DPU_ENC_RC_EVENT_KICKOFF:
 *	This event happens at NORMAL priority.
 *	Event that signals the start of the transfer. When this event is
 *	received, enable MDP/DSI core clocks. Regardless of the previous
 *	state, the resource should be in ON state at the end of this event.
 * @DPU_ENC_RC_EVENT_FRAME_DONE:
 *	This event happens at INTERRUPT level.
 *	Event signals the end of the data transfer after the PP FRAME_DONE
 *	event. At the end of this event, a delayed work is scheduled to go to
 *	IDLE_PC state after IDLE_TIMEOUT time.
 * @DPU_ENC_RC_EVENT_PRE_STOP:
 *	This event happens at NORMAL priority.
 *	This event, when received during the ON state, leaves the RC STATE
 *	in the PRE_OFF state. It should be followed by the STOP event as
 *	part of encoder disable.
 *	If received during IDLE or OFF states, it will do nothing.
 * @DPU_ENC_RC_EVENT_STOP:
 *	This event happens at NORMAL priority.
 *	When this event is received, disable all the MDP/DSI core clocks, and
 *	disable IRQs. It should be called from the PRE_OFF or IDLE states.
 *	IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
 *	PRE_OFF is expected when PRE_STOP was executed during the ON state.
 *	Resource state should be in OFF at the end of the event.
 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
 *	This event happens at NORMAL priority from a work item.
 *	Event signals that there were no frame updates for IDLE_TIMEOUT time.
 *	This would disable MDP/DSI core clocks and change the resource state
 *	to IDLE.
 */
enum dpu_enc_rc_events {
	DPU_ENC_RC_EVENT_KICKOFF = 1,
	DPU_ENC_RC_EVENT_FRAME_DONE,
	DPU_ENC_RC_EVENT_PRE_STOP,
	DPU_ENC_RC_EVENT_STOP,
	DPU_ENC_RC_EVENT_ENTER_IDLE
};
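
/*
 * Illustrative sequencing only, derived from the event descriptions above and
 * from dpu_encoder_resource_control(); this is not an additional state
 * machine. A typical command-mode frame goes KICKOFF -> FRAME_DONE, where
 * FRAME_DONE arms the delayed-off work; if no further kickoff arrives within
 * idle_timeout the work posts ENTER_IDLE. Encoder disable issues PRE_STOP
 * followed by STOP, which returns the resources to the OFF state.
 */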

/*
 * enum dpu_enc_rc_states - states that the resource control maintains
 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
 */
enum dpu_enc_rc_states {
	DPU_ENC_RC_STATE_OFF,
	DPU_ENC_RC_STATE_PRE_OFF,
	DPU_ENC_RC_STATE_ON,
	DPU_ENC_RC_STATE_IDLE
};

/**
 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
 *	encoders. Virtual encoder manages one "logical" display. Physical
 *	encoders manage one intf block, tied to a specific panel/sub-panel.
 *	Virtual encoder defers as much as possible to the physical encoders.
 *	Virtual encoder registers itself with the DRM Framework as the encoder.
 * @base:		drm_encoder base class for registration with DRM
 * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
 * @enabled:		True if the encoder is active, protected by enc_lock
 * @num_phys_encs:	Actual number of physical encoders contained.
 * @phys_encs:		Container of physical encoders managed.
 * @cur_master:		Pointer to the current master in this mode. Optimization:
 *			only valid after enable, cleared at disable.
 * @cur_slave:		As above but for the slave encoder.
 * @hw_pp:		Handle to the pingpong blocks used for the display. The
 *			number of pingpong blocks can differ from num_phys_encs.
 * @hw_dsc:		Handle to the DSC blocks used for the display.
 * @dsc_mask:		Bitmask of used DSC blocks.
 * @intfs_swapped:	Whether or not the phys_enc interfaces have been swapped
 *			for partial update right-only cases, such as pingpong
 *			split where virtual pingpong does not generate IRQs
 * @crtc:		Pointer to the currently assigned crtc. Normally you
 *			would use crtc->state->encoder_mask to determine the
 *			link between encoder/crtc. However in this case we need
 *			to track crtc in the disable() hook which is called
 *			_after_ encoder_mask is cleared.
 * @connector:		If a mode is set, cached pointer to the active connector
 * @crtc_kickoff_cb:		Callback into CRTC that will flush & start
 *				all CTL paths
 * @crtc_kickoff_cb_data:	Opaque user data given to crtc_kickoff_cb
 * @debugfs_root:		Debug file system root file node
 * @enc_lock:			Lock around physical encoder
 *				create/destroy/enable/disable
 * @frame_busy_mask:		Bitmask tracking which phys_encs are still
 *				busy processing the current command.
 *				Bit0 = phys_encs[0] etc.
 * @crtc_frame_event_cb:	callback handler for frame event
 * @crtc_frame_event_cb_data:	callback handler private data
 * @frame_done_timeout_ms:	frame done timeout in ms
 * @frame_done_timer:		watchdog timer for frame done event
 * @vsync_event_timer:		vsync timer
 * @disp_info:			local copy of msm_display_info struct
 * @idle_pc_supported:		indicate if idle power collapse is supported
 * @rc_lock:			resource control mutex lock to protect
 *				virt encoder over various state changes
 * @rc_state:			resource controller state
 * @delayed_off_work:		delayed worker to schedule disabling of
 *				clks and resources after IDLE_TIMEOUT time.
 * @vsync_event_work:		worker to handle vsync event for autorefresh
 * @topology:			topology of the display
 * @idle_timeout:		idle timeout duration in milliseconds
 * @wide_bus_en:		wide bus is enabled on this interface
 * @dsc:			drm_dsc_config pointer, for DSC-enabled encoders
 */
struct dpu_encoder_virt {
	struct drm_encoder base;
	spinlock_t enc_spinlock;

	bool enabled;

	unsigned int num_phys_encs;
	struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
	struct dpu_encoder_phys *cur_master;
	struct dpu_encoder_phys *cur_slave;
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];

	unsigned int dsc_mask;

	bool intfs_swapped;

	struct drm_crtc *crtc;
	struct drm_connector *connector;

	struct dentry *debugfs_root;
	struct mutex enc_lock;
	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
	void (*crtc_frame_event_cb)(void *, u32 event);
	void *crtc_frame_event_cb_data;

	atomic_t frame_done_timeout_ms;
	struct timer_list frame_done_timer;
	struct timer_list vsync_event_timer;

	struct msm_display_info disp_info;

	bool idle_pc_supported;
	struct mutex rc_lock;
	enum dpu_enc_rc_states rc_state;
	struct delayed_work delayed_off_work;
	struct kthread_work vsync_event_work;
	struct msm_display_topology topology;

	u32 idle_timeout;

	bool wide_bus_en;

	/* DSC configuration */
	struct drm_dsc_config *dsc;
};

#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)

static u32 dither_matrix[DITHER_MATRIX_SZ] = {
	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
};


bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
{
	const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	return dpu_enc->wide_bus_en;
}

int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i, num_intf = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->hw_intf && phys->hw_intf->ops.setup_misr
				&& phys->hw_intf->ops.collect_misr)
			num_intf++;
	}

	return num_intf;
}

void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;

	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr)
			continue;

		phys->hw_intf->ops.setup_misr(phys->hw_intf, true, 1);
	}
}

int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos)
{
	struct dpu_encoder_virt *dpu_enc;

	int i, rc = 0, entries_added = 0;

	if (!drm_enc->crtc) {
		DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index);
		return -EINVAL;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr)
			continue;

		rc = phys->hw_intf->ops.collect_misr(phys->hw_intf, &crcs[pos + entries_added]);
		if (rc)
			return rc;
		entries_added++;
	}

	return entries_added;
}
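
/*
 * Note (illustrative summary of the helpers above, not new behaviour): the
 * MISR helpers back CRC collection for this encoder.
 * dpu_encoder_get_crc_values_cnt() reports how many values a capture will
 * produce (one per interface implementing both setup_misr and collect_misr),
 * dpu_encoder_setup_misr() arms the counters, and dpu_encoder_get_crc()
 * appends one CRC per interface into @crcs starting at @pos.
 */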

static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
{
	struct dpu_hw_dither_cfg dither_cfg = { 0 };

	if (!hw_pp->ops.setup_dither)
		return;

	switch (bpc) {
	case 6:
		dither_cfg.c0_bitdepth = 6;
		dither_cfg.c1_bitdepth = 6;
		dither_cfg.c2_bitdepth = 6;
		dither_cfg.c3_bitdepth = 6;
		dither_cfg.temporal_en = 0;
		break;
	default:
		hw_pp->ops.setup_dither(hw_pp, NULL);
		return;
	}

	memcpy(&dither_cfg.matrix, dither_matrix,
			sizeof(u32) * DITHER_MATRIX_SZ);

	hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
}

static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
{
	switch (intf_mode) {
	case INTF_MODE_VIDEO:
		return "INTF_MODE_VIDEO";
	case INTF_MODE_CMD:
		return "INTF_MODE_CMD";
	case INTF_MODE_WB_BLOCK:
		return "INTF_MODE_WB_BLOCK";
	case INTF_MODE_WB_LINE:
		return "INTF_MODE_WB_LINE";
	default:
		return "INTF_MODE_UNKNOWN";
	}
}

void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
		enum dpu_intr_idx intr_idx)
{
	DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
			DRMID(phys_enc->parent),
			dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
			phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1,
			phys_enc->hw_wb ? phys_enc->hw_wb->idx - WB_0 : -1,
			phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);

	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
			DPU_ENCODER_FRAME_EVENT_ERROR);
}

static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
		u32 irq_idx, struct dpu_encoder_wait_info *info);

int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
		int irq,
		void (*func)(void *arg, int irq_idx),
		struct dpu_encoder_wait_info *wait_info)
{
	u32 irq_status;
	int ret;

	if (!wait_info) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}
	/* note: do master / slave checking outside */

	/* return EWOULDBLOCK since we know the wait isn't necessary */
	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
		DRM_ERROR("encoder is disabled id=%u, callback=%ps, irq=%d\n",
			  DRMID(phys_enc->parent), func,
			  irq);
		return -EWOULDBLOCK;
	}

	if (irq < 0) {
		DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
			      DRMID(phys_enc->parent), func);
		return 0;
	}

	DRM_DEBUG_KMS("id=%u, callback=%ps, irq=%d, pp=%d, pending_cnt=%d\n",
		      DRMID(phys_enc->parent), func,
		      irq, phys_enc->hw_pp->idx - PINGPONG_0,
		      atomic_read(wait_info->atomic_cnt));

	ret = dpu_encoder_helper_wait_event_timeout(
			DRMID(phys_enc->parent),
			irq,
			wait_info);

	if (ret <= 0) {
		irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq);
		if (irq_status) {
			unsigned long flags;

			DRM_DEBUG_KMS("irq not triggered id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
				      DRMID(phys_enc->parent), func,
				      irq,
				      phys_enc->hw_pp->idx - PINGPONG_0,
				      atomic_read(wait_info->atomic_cnt));
			local_irq_save(flags);
			func(phys_enc, irq);
			local_irq_restore(flags);
			ret = 0;
		} else {
			ret = -ETIMEDOUT;
			DRM_DEBUG_KMS("irq timeout id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
				      DRMID(phys_enc->parent), func,
				      irq,
				      phys_enc->hw_pp->idx - PINGPONG_0,
				      atomic_read(wait_info->atomic_cnt));
		}
	} else {
		ret = 0;
		trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
			func, irq,
			phys_enc->hw_pp->idx - PINGPONG_0,
			atomic_read(wait_info->atomic_cnt));
	}

	return ret;
}

int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;

	return phys ? atomic_read(&phys->vsync_cnt) : 0;
}

int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	int linecount = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	phys = dpu_enc ? dpu_enc->cur_master : NULL;

	if (phys && phys->ops.get_line_count)
		linecount = phys->ops.get_line_count(phys);

	return linecount;
}

static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i = 0;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	mutex_lock(&dpu_enc->enc_lock);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.destroy) {
			phys->ops.destroy(phys);
			--dpu_enc->num_phys_encs;
			dpu_enc->phys_encs[i] = NULL;
		}
	}

	if (dpu_enc->num_phys_encs)
		DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
				dpu_enc->num_phys_encs);
	dpu_enc->num_phys_encs = 0;
	mutex_unlock(&dpu_enc->enc_lock);

	drm_encoder_cleanup(drm_enc);
	mutex_destroy(&dpu_enc->enc_lock);
}

void dpu_encoder_helper_split_config(
		struct dpu_encoder_phys *phys_enc,
		enum dpu_intf interface)
{
	struct dpu_encoder_virt *dpu_enc;
	struct split_pipe_cfg cfg = { 0 };
	struct dpu_hw_mdp *hw_mdptop;
	struct msm_display_info *disp_info;

	if (!phys_enc->hw_mdptop || !phys_enc->parent) {
		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
		return;
	}

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
	hw_mdptop = phys_enc->hw_mdptop;
	disp_info = &dpu_enc->disp_info;

	if (disp_info->intf_type != INTF_DSI)
		return;

	/**
	 * disable split modes since encoder will be operating as the only
	 * encoder, either for the entire use case in the case of, for example,
	 * single DSI, or for this frame in the case of left/right only partial
	 * update.
	 */
	if (phys_enc->split_role == ENC_ROLE_SOLO) {
		if (hw_mdptop->ops.setup_split_pipe)
			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
		return;
	}

	cfg.en = true;
	cfg.mode = phys_enc->intf_mode;
	cfg.intf = interface;

	if (cfg.en && phys_enc->ops.needs_single_flush &&
			phys_enc->ops.needs_single_flush(phys_enc))
		cfg.split_flush_en = true;

	if (phys_enc->split_role == ENC_ROLE_MASTER) {
		DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);

		if (hw_mdptop->ops.setup_split_pipe)
			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
	}
}

bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int i, intf_count = 0, num_dsc = 0;

	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
		if (dpu_enc->phys_encs[i])
			intf_count++;

	/* See dpu_encoder_get_topology, we only support 2:2:1 topology */
	if (dpu_enc->dsc)
		num_dsc = 2;

	return (num_dsc > 0) && (num_dsc > intf_count);
}

static struct msm_display_topology dpu_encoder_get_topology(
			struct dpu_encoder_virt *dpu_enc,
			struct dpu_kms *dpu_kms,
			struct drm_display_mode *mode,
			struct drm_crtc_state *crtc_state)
{
	struct msm_display_topology topology = {0};
	int i, intf_count = 0;

	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
		if (dpu_enc->phys_encs[i])
			intf_count++;

	/* Datapath topology selection
	 *
	 * Dual display
	 * 2 LM, 2 INTF ( Split display using 2 interfaces)
	 *
	 * Single display
	 * 1 LM, 1 INTF
	 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
	 *
	 * Add dspps to the reservation requirements if ctm is requested
	 */
	if (intf_count == 2)
		topology.num_lm = 2;
	else if (!dpu_kms->catalog->caps->has_3d_merge)
		topology.num_lm = 1;
	else
		topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ?
			2 : 1;

	if (crtc_state->ctm)
		topology.num_dspp = topology.num_lm;

	topology.num_intf = intf_count;

	if (dpu_enc->dsc) {
		/*
		 * In case of Display Stream Compression (DSC), we would use
		 * 2 DSC encoders, 2 layer mixers and 1 interface
		 * this is power optimal and can drive up to (including) 4k
		 * screens
		 */
		topology.num_dsc = 2;
		topology.num_lm = 2;
		topology.num_intf = 1;
	}

	return topology;
}
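
/*
 * Worked example for the selection above (illustrative mode values only): a
 * single-DSI 1440x3200 panel on hardware with 3D merge reserves 2 LM + 1 INTF
 * because hdisplay exceeds MAX_HDISPLAY_SPLIT (1080); a 1080x2400 panel stays
 * at 1 LM + 1 INTF; requesting CTM adds a matching number of DSPPs, and
 * enabling DSC forces the fixed 2 DSC : 2 LM : 1 INTF configuration.
 */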

static int dpu_encoder_virt_atomic_check(
		struct drm_encoder *drm_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct drm_display_mode *adj_mode;
	struct msm_display_topology topology;
	struct dpu_global_state *global_state;
	int i = 0;
	int ret = 0;

	if (!drm_enc || !crtc_state || !conn_state) {
		DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
				drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
		return -EINVAL;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);
	adj_mode = &crtc_state->adjusted_mode;
	global_state = dpu_kms_get_global_state(crtc_state->state);
	if (IS_ERR(global_state))
		return PTR_ERR(global_state);

	trace_dpu_enc_atomic_check(DRMID(drm_enc));

	/* perform atomic check on the first physical encoder (master) */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.atomic_check)
			ret = phys->ops.atomic_check(phys, crtc_state,
					conn_state);
		if (ret) {
			DPU_ERROR_ENC(dpu_enc,
					"mode unsupported, phys idx %d\n", i);
			return ret;
		}
	}

	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state);

	/*
	 * Release and Allocate resources on every modeset
	 * Don't allocate when active is false.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
		dpu_rm_release(global_state, drm_enc);

		if (!crtc_state->active_changed || crtc_state->enable)
			ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
					drm_enc, crtc_state, topology);
	}

	trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);

	return ret;
}

static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
			struct msm_display_info *disp_info)
{
	struct dpu_vsync_source_cfg vsync_cfg = { 0 };
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_hw_mdp *hw_mdptop;
	struct drm_encoder *drm_enc;
	struct dpu_encoder_phys *phys_enc;
	int i;

	if (!dpu_enc || !disp_info) {
		DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
					dpu_enc != NULL, disp_info != NULL);
		return;
	} else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
		DPU_ERROR("invalid num phys enc %d/%d\n",
				dpu_enc->num_phys_encs,
				(int) ARRAY_SIZE(dpu_enc->hw_pp));
		return;
	}

	drm_enc = &dpu_enc->base;
	/* these pointers are checked in virt_enable_helper */
	priv = drm_enc->dev->dev_private;

	dpu_kms = to_dpu_kms(priv->kms);
	hw_mdptop = dpu_kms->hw_mdp;
	if (!hw_mdptop) {
		DPU_ERROR("invalid mdptop\n");
		return;
	}

	if (hw_mdptop->ops.setup_vsync_source &&
			disp_info->is_cmd_mode) {
		for (i = 0; i < dpu_enc->num_phys_encs; i++)
			vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;

		vsync_cfg.pp_count = dpu_enc->num_phys_encs;
		vsync_cfg.frame_rate = drm_mode_vrefresh(&dpu_enc->base.crtc->state->adjusted_mode);

		if (disp_info->is_te_using_watchdog_timer)
			vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
		else
			vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;

		hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);

		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			phys_enc = dpu_enc->phys_encs[i];

			if (phys_enc->has_intf_te && phys_enc->hw_intf->ops.vsync_sel)
				phys_enc->hw_intf->ops.vsync_sel(phys_enc->hw_intf,
						vsync_cfg.vsync_source);
		}
	}
}

static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.irq_control)
			phys->ops.irq_control(phys, enable);
	}

}

static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
		bool enable)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);

	if (!dpu_enc->cur_master) {
		DPU_ERROR("encoder master not set\n");
		return;
	}

	if (enable) {
		/* enable DPU core clks */
		pm_runtime_get_sync(&dpu_kms->pdev->dev);

		/* enable all the irq */
		_dpu_encoder_irq_control(drm_enc, true);

	} else {
		/* disable all the irq */
		_dpu_encoder_irq_control(drm_enc, false);

		/* disable DPU core clks */
		pm_runtime_put_sync(&dpu_kms->pdev->dev);
	}

}

static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
		u32 sw_event)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	bool is_vid_mode = false;

	if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
		DPU_ERROR("invalid parameters\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	is_vid_mode = !dpu_enc->disp_info.is_cmd_mode;

	/*
	 * when idle_pc is not supported, process only KICKOFF, STOP and
	 * PRE_STOP events and return early for other events (ie wb display).
	 */
	if (!dpu_enc->idle_pc_supported &&
			(sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
			sw_event != DPU_ENC_RC_EVENT_STOP &&
			sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
		return 0;

	trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
			 dpu_enc->rc_state, "begin");

	switch (sw_event) {
	case DPU_ENC_RC_EVENT_KICKOFF:
		/* cancel delayed off work, if any */
		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
					sw_event);

		mutex_lock(&dpu_enc->rc_lock);

		/* return if the resource control is already in ON state */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
			DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in ON state\n",
				      DRMID(drm_enc), sw_event);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		} else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
				dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
			DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in state %d\n",
				      DRMID(drm_enc), sw_event,
				      dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return -EINVAL;
		}

		if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
			_dpu_encoder_irq_control(drm_enc, true);
		else
			_dpu_encoder_resource_control_helper(drm_enc, true);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "kickoff");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_FRAME_DONE:
		/*
		 * mutex lock is not used as this event happens at interrupt
		 * context. And locking is not required as, the other events
		 * like KICKOFF and STOP do a wait-for-idle before executing
		 * the resource_control
		 */
		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
			DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
				      DRMID(drm_enc), sw_event,
				      dpu_enc->rc_state);
			return -EINVAL;
		}

		/*
		 * schedule off work item only when there are no
		 * frames pending
		 */
		if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
			DRM_DEBUG_KMS("id:%d skip schedule work\n",
				      DRMID(drm_enc));
			return 0;
		}

		queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
				   msecs_to_jiffies(dpu_enc->idle_timeout));

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "frame done");
		break;

	case DPU_ENC_RC_EVENT_PRE_STOP:
		/* cancel delayed off work, if any */
		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
					sw_event);

		mutex_lock(&dpu_enc->rc_lock);

		if (is_vid_mode &&
			  dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
			_dpu_encoder_irq_control(drm_enc, true);
		}
		/* skip if is already OFF or IDLE, resources are off already */
		else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
				dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
			DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
				      DRMID(drm_enc), sw_event,
				      dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "pre stop");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_STOP:
		mutex_lock(&dpu_enc->rc_lock);

		/* return if the resource control is already in OFF state */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
			DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
				      DRMID(drm_enc), sw_event);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		} else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
			DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return -EINVAL;
		}

		/**
		 * expect to arrive here only if in either idle state or pre-off
		 * and in IDLE state the resources are already disabled
		 */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
			_dpu_encoder_resource_control_helper(drm_enc, false);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "stop");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_ENTER_IDLE:
		mutex_lock(&dpu_enc->rc_lock);

		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
			DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		/*
		 * if we are in ON but a frame was just kicked off,
		 * ignore the IDLE event, it's probably a stale timer event
		 */
		if (dpu_enc->frame_busy_mask[0]) {
			DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		if (is_vid_mode)
			_dpu_encoder_irq_control(drm_enc, false);
		else
			_dpu_encoder_resource_control_helper(drm_enc, false);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "idle");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	default:
		DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
			  sw_event);
		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "error");
		break;
	}

	trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
			 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
			 "end");
	return 0;
}

void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
		struct drm_writeback_job *job)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.prepare_wb_job)
			phys->ops.prepare_wb_job(phys, job);

	}
}

void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
		struct drm_writeback_job *job)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.cleanup_wb_job)
			phys->ops.cleanup_wb_job(phys, job);

	}
}

static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
					     struct drm_crtc_state *crtc_state,
					     struct drm_connector_state *conn_state)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_crtc_state *cstate;
	struct dpu_global_state *global_state;
	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
	struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
	int num_lm, num_ctl, num_pp, num_dsc;
	unsigned int dsc_mask = 0;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	global_state = dpu_kms_get_existing_global_state(dpu_kms);
	if (IS_ERR_OR_NULL(global_state)) {
		DPU_ERROR("Failed to get global state");
		return;
	}

	trace_dpu_enc_mode_set(DRMID(drm_enc));

	/* Query resources that have been reserved in the atomic check step. */
	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
		ARRAY_SIZE(hw_pp));
	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
	num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
	dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
		ARRAY_SIZE(hw_dspp));

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
		dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
						: NULL;

	if (dpu_enc->dsc) {
		num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
							drm_enc->base.id, DPU_HW_BLK_DSC,
							hw_dsc, ARRAY_SIZE(hw_dsc));
		for (i = 0; i < num_dsc; i++) {
			dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
			dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
		}
	}

	dpu_enc->dsc_mask = dsc_mask;

	cstate = to_dpu_crtc_state(crtc_state);

	for (i = 0; i < num_lm; i++) {
		int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);

		cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
		cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
		cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
	}

	cstate->num_mixers = num_lm;

	dpu_enc->connector = conn_state->connector;

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!dpu_enc->hw_pp[i]) {
			DPU_ERROR_ENC(dpu_enc,
				"no pp block assigned at idx: %d\n", i);
			return;
		}

		if (!hw_ctl[i]) {
			DPU_ERROR_ENC(dpu_enc,
				"no ctl block assigned at idx: %d\n", i);
			return;
		}

		phys->hw_pp = dpu_enc->hw_pp[i];
		phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);

		phys->cached_mode = crtc_state->adjusted_mode;
		if (phys->ops.atomic_mode_set)
			phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
	}
}

static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i;

	if (!drm_enc || !drm_enc->dev) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	if (!dpu_enc || !dpu_enc->cur_master) {
		DPU_ERROR("invalid dpu encoder/master\n");
		return;
	}


	if (dpu_enc->disp_info.intf_type == INTF_DP &&
		dpu_enc->cur_master->hw_mdptop &&
		dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
		dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
			dpu_enc->cur_master->hw_mdptop);

	_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);

	if (dpu_enc->disp_info.intf_type == INTF_DSI &&
			!WARN_ON(dpu_enc->num_phys_encs == 0)) {
		unsigned bpc = dpu_enc->connector->display_info.bpc;
		for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
			if (!dpu_enc->hw_pp[i])
				continue;
			_dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
		}
	}
}

void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	mutex_lock(&dpu_enc->enc_lock);

	if (!dpu_enc->enabled)
		goto out;

	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
		dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
		dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);

	_dpu_encoder_virt_enable_helper(drm_enc);

out:
	mutex_unlock(&dpu_enc->enc_lock);
}

static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
					struct drm_atomic_state *state)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int ret = 0;
	struct drm_display_mode *cur_mode = NULL;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	mutex_lock(&dpu_enc->enc_lock);
	cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;

	trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
			     cur_mode->vdisplay);

	/* always enable slave encoder before master */
	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
		dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);

	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
		dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);

	ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
	if (ret) {
		DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
				ret);
		goto out;
	}

	_dpu_encoder_virt_enable_helper(drm_enc);

	dpu_enc->enabled = true;

out:
	mutex_unlock(&dpu_enc->enc_lock);
}

static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
					struct drm_atomic_state *state)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_state = NULL;
	int i = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	crtc = drm_atomic_get_old_crtc_for_encoder(state, drm_enc);
	if (crtc)
		old_state = drm_atomic_get_old_crtc_state(state, crtc);

	/*
	 * The encoder is already disabled if self refresh mode was set earlier,
	 * in the old_state for the corresponding crtc.
	 */
	if (old_state && old_state->self_refresh_active)
		return;

	mutex_lock(&dpu_enc->enc_lock);
	dpu_enc->enabled = false;

	trace_dpu_enc_disable(DRMID(drm_enc));

	/* wait for idle */
	dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.disable)
			phys->ops.disable(phys);
	}


	/* after phys waits for frame-done, should be no more frames pending */
	if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
		DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
		del_timer_sync(&dpu_enc->frame_done_timer);
	}

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);

	dpu_enc->connector = NULL;

	DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");

	mutex_unlock(&dpu_enc->enc_lock);
}

static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog,
		struct dpu_rm *dpu_rm,
		enum dpu_intf_type type, u32 controller_id)
{
	int i = 0;

	if (type == INTF_WB)
		return NULL;

	for (i = 0; i < catalog->intf_count; i++) {
		if (catalog->intf[i].type == type
		    && catalog->intf[i].controller_id == controller_id) {
			return dpu_rm_get_intf(dpu_rm, catalog->intf[i].id);
		}
	}

	return NULL;
}

void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phy_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	unsigned long lock_flags;

	if (!drm_enc || !phy_enc)
		return;

	DPU_ATRACE_BEGIN("encoder_vblank_callback");
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	atomic_inc(&phy_enc->vsync_cnt);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	if (dpu_enc->crtc)
		dpu_crtc_vblank_callback(dpu_enc->crtc);
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);

	DPU_ATRACE_END("encoder_vblank_callback");
}

void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phy_enc)
{
	if (!phy_enc)
		return;

	DPU_ATRACE_BEGIN("encoder_underrun_callback");
	atomic_inc(&phy_enc->underrun_cnt);

	/* trigger dump only on the first underrun */
	if (atomic_read(&phy_enc->underrun_cnt) == 1)
		msm_disp_snapshot_state(drm_enc->dev);

	trace_dpu_enc_underrun_cb(DRMID(drm_enc),
				  atomic_read(&phy_enc->underrun_cnt));
	DPU_ATRACE_END("encoder_underrun_callback");
}

void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	/* crtc should always be cleared before re-assigning */
	WARN_ON(crtc && dpu_enc->crtc);
	dpu_enc->crtc = crtc;
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
					struct drm_crtc *crtc, bool enable)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;
	int i;

	trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	if (dpu_enc->crtc != crtc) {
		spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
		return;
	}
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.control_vblank_irq)
			phys->ops.control_vblank_irq(phys, enable);
	}
}

void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
		void (*frame_event_cb)(void *, u32 event),
		void *frame_event_cb_data)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;
	bool enable;

	enable = frame_event_cb ? true : false;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	dpu_enc->crtc_frame_event_cb = frame_event_cb;
	dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

void dpu_encoder_frame_done_callback(
		struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *ready_phys, u32 event)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned int i;

	if (event & (DPU_ENCODER_FRAME_EVENT_DONE
			| DPU_ENCODER_FRAME_EVENT_ERROR
			| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (!dpu_enc->frame_busy_mask[0]) {
			/**
			 * suppress frame_done without waiter,
			 * likely autorefresh
			 */
			trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
					dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
					ready_phys->hw_intf ? ready_phys->hw_intf->idx : -1,
					ready_phys->hw_wb ? ready_phys->hw_wb->idx : -1);
			return;
		}

		/* One of the physical encoders has become idle */
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			if (dpu_enc->phys_encs[i] == ready_phys) {
				trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
						dpu_enc->frame_busy_mask[0]);
				clear_bit(i, dpu_enc->frame_busy_mask);
			}
		}

		if (!dpu_enc->frame_busy_mask[0]) {
			atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
			del_timer(&dpu_enc->frame_done_timer);

			dpu_encoder_resource_control(drm_enc,
					DPU_ENC_RC_EVENT_FRAME_DONE);

			if (dpu_enc->crtc_frame_event_cb)
				dpu_enc->crtc_frame_event_cb(
					dpu_enc->crtc_frame_event_cb_data,
					event);
		}
	} else {
		if (dpu_enc->crtc_frame_event_cb)
			dpu_enc->crtc_frame_event_cb(
				dpu_enc->crtc_frame_event_cb_data, event);
	}
}

static void dpu_encoder_off_work(struct work_struct *work)
{
	struct dpu_encoder_virt *dpu_enc = container_of(work,
			struct dpu_encoder_virt, delayed_off_work.work);

	dpu_encoder_resource_control(&dpu_enc->base,
						DPU_ENC_RC_EVENT_ENTER_IDLE);

	dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
				DPU_ENCODER_FRAME_EVENT_IDLE);
}

/**
 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
 * @drm_enc: Pointer to drm encoder structure
 * @phys: Pointer to physical encoder structure
 * @extra_flush_bits: Additional bit mask to include in flush trigger
 */
static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
{
	struct dpu_hw_ctl *ctl;
	int pending_kickoff_cnt;
	u32 ret = UINT_MAX;

	if (!phys->hw_pp) {
		DPU_ERROR("invalid pingpong hw\n");
		return;
	}

	ctl = phys->hw_ctl;
	if (!ctl->ops.trigger_flush) {
		DPU_ERROR("missing trigger cb\n");
		return;
	}

	pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);

	if (extra_flush_bits && ctl->ops.update_pending_flush)
		ctl->ops.update_pending_flush(ctl, extra_flush_bits);

	ctl->ops.trigger_flush(ctl);

	if (ctl->ops.get_pending_flush)
		ret = ctl->ops.get_pending_flush(ctl);

	trace_dpu_enc_trigger_flush(DRMID(drm_enc),
			dpu_encoder_helper_get_intf_type(phys->intf_mode),
			phys->hw_intf ? phys->hw_intf->idx : -1,
			phys->hw_wb ? phys->hw_wb->idx : -1,
			pending_kickoff_cnt, ctl->idx,
			extra_flush_bits, ret);
}

/**
 * _dpu_encoder_trigger_start - trigger start for a physical encoder
 * @phys: Pointer to physical encoder structure
 */
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
	if (!phys) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	if (!phys->hw_pp) {
		DPU_ERROR("invalid pingpong hw\n");
		return;
	}

	if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
		phys->ops.trigger_start(phys);
}

void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;

	ctl = phys_enc->hw_ctl;
	if (ctl->ops.trigger_start) {
		ctl->ops.trigger_start(ctl);
		trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
	}
}

static int dpu_encoder_helper_wait_event_timeout(
		int32_t drm_id,
		u32 irq_idx,
		struct dpu_encoder_wait_info *info)
{
	int rc = 0;
	s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
	s64 jiffies = msecs_to_jiffies(info->timeout_ms);
	s64 time;

	do {
		rc = wait_event_timeout(*(info->wq),
				atomic_read(info->atomic_cnt) == 0, jiffies);
		time = ktime_to_ms(ktime_get());

		trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time,
						 expected_time,
						 atomic_read(info->atomic_cnt));
	/* If we timed out, counter is valid and time is less, wait again */
	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
			(time < expected_time));

	return rc;
}

static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_hw_ctl *ctl;
	int rc;
	struct drm_encoder *drm_enc;

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
	ctl = phys_enc->hw_ctl;
	drm_enc = phys_enc->parent;

	if (!ctl->ops.reset)
		return;

	DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
		      ctl->idx);

	rc = ctl->ops.reset(ctl);
	if (rc) {
		DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
		msm_disp_snapshot_state(drm_enc->dev);
	}

	phys_enc->enable_state = DPU_ENC_ENABLED;
}

/**
 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
 *	Iterate through the physical encoders and perform consolidated flush
 *	and/or control start triggering as needed. This is done in the virtual
 *	encoder rather than the individual physical ones in order to handle
 *	use cases that require visibility into multiple physical encoders at
 *	a time.
 * @dpu_enc: Pointer to virtual encoder structure
 */
static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
{
	struct dpu_hw_ctl *ctl;
	uint32_t i, pending_flush;
	unsigned long lock_flags;

	pending_flush = 0x0;

	/* update pending counts and trigger kickoff ctl flush atomically */
	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);

	/* don't perform flush/start operations for slave encoders */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->enable_state == DPU_ENC_DISABLED)
			continue;

		ctl = phys->hw_ctl;

		/*
		 * This is cleared in frame_done worker, which isn't invoked
		 * for async commits. So don't set this for async, since it'll
		 * roll over to the next commit.
		 */
		if (phys->split_role != ENC_ROLE_SLAVE)
			set_bit(i, dpu_enc->frame_busy_mask);

		if (!phys->ops.needs_single_flush ||
				!phys->ops.needs_single_flush(phys))
			_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
		else if (ctl->ops.get_pending_flush)
			pending_flush |= ctl->ops.get_pending_flush(ctl);
	}

	/* for split flush, combine pending flush masks and send to master */
	if (pending_flush && dpu_enc->cur_master) {
		_dpu_encoder_trigger_flush(
				&dpu_enc->base,
				dpu_enc->cur_master,
				pending_flush);
	}

	_dpu_encoder_trigger_start(dpu_enc->cur_master);

	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	unsigned int i;
	struct dpu_hw_ctl *ctl;
	struct msm_display_info *disp_info;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	disp_info = &dpu_enc->disp_info;

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];

		ctl = phys->hw_ctl;
		if (ctl->ops.clear_pending_flush)
			ctl->ops.clear_pending_flush(ctl);

		/* update only for command mode primary ctl */
		if ((phys == dpu_enc->cur_master) &&
		    disp_info->is_cmd_mode
		    && ctl->ops.trigger_pending)
			ctl->ops.trigger_pending(ctl);
	}
}

static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
		struct drm_display_mode *mode)
{
	u64 pclk_rate;
	u32 pclk_period;
	u32 line_time;

	/*
	 * For linetime calculation, only operate on master encoder.
	 */
	if (!dpu_enc->cur_master)
		return 0;

	if (!dpu_enc->cur_master->ops.get_line_count) {
		DPU_ERROR("get_line_count function not defined\n");
		return 0;
	}

	pclk_rate = mode->clock; /* pixel clock in kHz */
	if (pclk_rate == 0) {
		DPU_ERROR("pclk is 0, cannot calculate line time\n");
		return 0;
	}

	pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
	if (pclk_period == 0) {
		DPU_ERROR("pclk period is 0\n");
		return 0;
	}

	/*
	 * Line time calculation based on Pixel clock and HTOTAL.
	 * Final unit is in ns.
	 */
	line_time = (pclk_period * mode->htotal) / 1000;
	if (line_time == 0) {
		DPU_ERROR("line time calculation is 0\n");
		return 0;
	}

	DPU_DEBUG_ENC(dpu_enc,
			"clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
			pclk_rate, pclk_period, line_time);

	return line_time;
}
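
/*
 * Worked example for the linetime math above (illustrative numbers only, a
 * standard 1080p60 timing): with mode->clock = 148500 kHz and htotal = 2200,
 * pclk_period = DIV_ROUND_UP(10^9, 148500) = 6735 (the pixel period expressed
 * in picoseconds), so line_time = 6735 * 2200 / 1000 ~= 14817 ns per line.
 */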

int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
{
	struct drm_display_mode *mode;
	struct dpu_encoder_virt *dpu_enc;
	u32 cur_line;
	u32 line_time;
	u32 vtotal, time_to_vsync;
	ktime_t cur_time;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	if (!drm_enc->crtc || !drm_enc->crtc->state) {
		DPU_ERROR("crtc/crtc state object is NULL\n");
		return -EINVAL;
	}
	mode = &drm_enc->crtc->state->adjusted_mode;

	line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
	if (!line_time)
		return -EINVAL;

	cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);

	vtotal = mode->vtotal;
	if (cur_line >= vtotal)
		time_to_vsync = line_time * vtotal;
	else
		time_to_vsync = line_time * (vtotal - cur_line);

	if (time_to_vsync == 0) {
		DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
				vtotal);
		return -EINVAL;
	}

	cur_time = ktime_get();
	*wakeup_time = ktime_add_ns(cur_time, time_to_vsync);

	DPU_DEBUG_ENC(dpu_enc,
			"cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
			cur_line, vtotal, time_to_vsync,
			ktime_to_ms(cur_time),
			ktime_to_ms(*wakeup_time));
	return 0;
}

static void dpu_encoder_vsync_event_handler(struct timer_list *t)
{
	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
			vsync_event_timer);
	struct drm_encoder *drm_enc = &dpu_enc->base;
	struct msm_drm_private *priv;
	struct msm_drm_thread *event_thread;

	if (!drm_enc->dev || !drm_enc->crtc) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	priv = drm_enc->dev->dev_private;

	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index\n");
		return;
	}
	event_thread = &priv->event_thread[drm_enc->crtc->index];
	if (!event_thread) {
		DPU_ERROR("event_thread not found for crtc:%d\n",
				drm_enc->crtc->index);
		return;
	}

	del_timer(&dpu_enc->vsync_event_timer);
}

static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
{
	struct dpu_encoder_virt *dpu_enc = container_of(work,
			struct dpu_encoder_virt, vsync_event_work);
	ktime_t wakeup_time;

	if (dpu_encoder_vsync_time(&dpu_enc->base, &wakeup_time))
		return;

	trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
	mod_timer(&dpu_enc->vsync_event_timer,
			nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
}

static u32
dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
				  u32 enc_ip_width)
{
	int ssm_delay, total_pixels, soft_slice_per_enc;

	soft_slice_per_enc = enc_ip_width / dsc->slice_width;

	/*
	 * minimum number of initial line pixels is a sum of:
	 * 1. sub-stream multiplexer delay (83 groups for 8bpc,
	 *    91 for 10 bpc) * 3
	 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
	 * 3. the initial xmit delay
	 * 4. total pipeline delay through the "lock step" of encoder (47)
	 * 5. 6 additional pixels as the output of the rate buffer is
	 *    48 bits wide
	 */
	ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92);
	total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
	if (soft_slice_per_enc > 1)
		total_pixels += (ssm_delay * 3);
	return DIV_ROUND_UP(total_pixels, dsc->slice_width);
}
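
/*
 * Illustrative arithmetic for the calculation above (assumed, typical-looking
 * DSC parameters, not taken from any specific panel): with 8 bpc, one soft
 * slice per encoder, slice_width = 540 and initial_xmit_delay = 512,
 * total_pixels = 84 * 3 + 512 + 47 = 811, so the function returns
 * DIV_ROUND_UP(811, 540) = 2 initial lines.
 */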

static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl,
				     struct dpu_hw_dsc *hw_dsc,
				     struct dpu_hw_pingpong *hw_pp,
				     struct drm_dsc_config *dsc,
				     u32 common_mode,
				     u32 initial_lines)
{
	if (hw_dsc->ops.dsc_config)
		hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);

	if (hw_dsc->ops.dsc_config_thresh)
		hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);

	if (hw_pp->ops.setup_dsc)
		hw_pp->ops.setup_dsc(hw_pp);

	if (hw_dsc->ops.dsc_bind_pingpong_blk)
		hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, hw_pp->idx);

	if (hw_pp->ops.enable_dsc)
		hw_pp->ops.enable_dsc(hw_pp);

	if (ctl->ops.update_pending_flush_dsc)
		ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
}

static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
				 struct drm_dsc_config *dsc)
{
	/* coding only for 2LM, 2enc, 1 dsc config */
	struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
	struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	int this_frame_slices;
	int intf_ip_w, enc_ip_w;
	int dsc_common_mode;
	int pic_width;
	u32 initial_lines;
	int i;

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
		hw_pp[i] = dpu_enc->hw_pp[i];
		hw_dsc[i] = dpu_enc->hw_dsc[i];

		if (!hw_pp[i] || !hw_dsc[i]) {
			DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
			return;
		}
	}

	dsc_common_mode = 0;
	pic_width = dsc->pic_width;

	dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
	if (enc_master->intf_mode == INTF_MODE_VIDEO)
		dsc_common_mode |= DSC_MODE_VIDEO;

	this_frame_slices = pic_width / dsc->slice_width;
	intf_ip_w = this_frame_slices * dsc->slice_width;

	/*
	 * dsc merge case: when using 2 encoders for the same stream,
	 * the number of slices needs to be the same on both encoders.
1889 */ 1890 enc_ip_w = intf_ip_w / 2; 1891 initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w); 1892 1893 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) 1894 dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i], 1895 dsc, dsc_common_mode, initial_lines); 1896 } 1897 1898 void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc) 1899 { 1900 struct dpu_encoder_virt *dpu_enc; 1901 struct dpu_encoder_phys *phys; 1902 bool needs_hw_reset = false; 1903 unsigned int i; 1904 1905 dpu_enc = to_dpu_encoder_virt(drm_enc); 1906 1907 trace_dpu_enc_prepare_kickoff(DRMID(drm_enc)); 1908 1909 /* prepare for next kickoff, may include waiting on previous kickoff */ 1910 DPU_ATRACE_BEGIN("enc_prepare_for_kickoff"); 1911 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1912 phys = dpu_enc->phys_encs[i]; 1913 if (phys->ops.prepare_for_kickoff) 1914 phys->ops.prepare_for_kickoff(phys); 1915 if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET) 1916 needs_hw_reset = true; 1917 } 1918 DPU_ATRACE_END("enc_prepare_for_kickoff"); 1919 1920 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF); 1921 1922 /* if any phys needs reset, reset all phys, in-order */ 1923 if (needs_hw_reset) { 1924 trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc)); 1925 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1926 dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]); 1927 } 1928 } 1929 1930 if (dpu_enc->dsc) 1931 dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc); 1932 } 1933 1934 bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc) 1935 { 1936 struct dpu_encoder_virt *dpu_enc; 1937 unsigned int i; 1938 struct dpu_encoder_phys *phys; 1939 1940 dpu_enc = to_dpu_encoder_virt(drm_enc); 1941 1942 if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) { 1943 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1944 phys = dpu_enc->phys_encs[i]; 1945 if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) { 1946 DPU_DEBUG("invalid FB not kicking off\n"); 1947 return false; 1948 } 1949 } 1950 } 1951 1952 return true; 1953 } 1954 1955 void dpu_encoder_kickoff(struct drm_encoder *drm_enc) 1956 { 1957 struct dpu_encoder_virt *dpu_enc; 1958 struct dpu_encoder_phys *phys; 1959 ktime_t wakeup_time; 1960 unsigned long timeout_ms; 1961 unsigned int i; 1962 1963 DPU_ATRACE_BEGIN("encoder_kickoff"); 1964 dpu_enc = to_dpu_encoder_virt(drm_enc); 1965 1966 trace_dpu_enc_kickoff(DRMID(drm_enc)); 1967 1968 timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 / 1969 drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode); 1970 1971 atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms); 1972 mod_timer(&dpu_enc->frame_done_timer, 1973 jiffies + msecs_to_jiffies(timeout_ms)); 1974 1975 /* All phys encs are ready to go, trigger the kickoff */ 1976 _dpu_encoder_kickoff_phys(dpu_enc); 1977 1978 /* allow phys encs to handle any post-kickoff business */ 1979 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 1980 phys = dpu_enc->phys_encs[i]; 1981 if (phys->ops.handle_post_kickoff) 1982 phys->ops.handle_post_kickoff(phys); 1983 } 1984 1985 if (dpu_enc->disp_info.intf_type == INTF_DSI && 1986 !dpu_encoder_vsync_time(drm_enc, &wakeup_time)) { 1987 trace_dpu_enc_early_kickoff(DRMID(drm_enc), 1988 ktime_to_ms(wakeup_time)); 1989 mod_timer(&dpu_enc->vsync_event_timer, 1990 nsecs_to_jiffies(ktime_to_ns(wakeup_time))); 1991 } 1992 1993 DPU_ATRACE_END("encoder_kickoff"); 1994 } 1995 1996 static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc) 1997 { 1998 struct dpu_hw_mixer_cfg mixer; 1999 int i, num_lm; 
2000 struct dpu_global_state *global_state; 2001 struct dpu_hw_blk *hw_lm[2]; 2002 struct dpu_hw_mixer *hw_mixer[2]; 2003 struct dpu_hw_ctl *ctl = phys_enc->hw_ctl; 2004 2005 memset(&mixer, 0, sizeof(mixer)); 2006 2007 /* reset all mixers for this encoder */ 2008 if (phys_enc->hw_ctl->ops.clear_all_blendstages) 2009 phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl); 2010 2011 global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms); 2012 2013 num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state, 2014 phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); 2015 2016 for (i = 0; i < num_lm; i++) { 2017 hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]); 2018 if (phys_enc->hw_ctl->ops.update_pending_flush_mixer) 2019 phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx); 2020 2021 /* clear all blendstages */ 2022 if (phys_enc->hw_ctl->ops.setup_blendstage) 2023 phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL); 2024 } 2025 } 2026 2027 static void dpu_encoder_dsc_pipe_clr(struct dpu_hw_ctl *ctl, 2028 struct dpu_hw_dsc *hw_dsc, 2029 struct dpu_hw_pingpong *hw_pp) 2030 { 2031 if (hw_dsc->ops.dsc_disable) 2032 hw_dsc->ops.dsc_disable(hw_dsc); 2033 2034 if (hw_pp->ops.disable_dsc) 2035 hw_pp->ops.disable_dsc(hw_pp); 2036 2037 if (hw_dsc->ops.dsc_bind_pingpong_blk) 2038 hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, PINGPONG_NONE); 2039 2040 if (ctl->ops.update_pending_flush_dsc) 2041 ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx); 2042 } 2043 2044 static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc) 2045 { 2046 /* coding only for 2LM, 2enc, 1 dsc config */ 2047 struct dpu_encoder_phys *enc_master = dpu_enc->cur_master; 2048 struct dpu_hw_ctl *ctl = enc_master->hw_ctl; 2049 struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC]; 2050 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC]; 2051 int i; 2052 2053 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { 2054 hw_pp[i] = dpu_enc->hw_pp[i]; 2055 hw_dsc[i] = dpu_enc->hw_dsc[i]; 2056 2057 if (hw_pp[i] && hw_dsc[i]) 2058 dpu_encoder_dsc_pipe_clr(ctl, hw_dsc[i], hw_pp[i]); 2059 } 2060 } 2061 2062 void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) 2063 { 2064 struct dpu_hw_ctl *ctl = phys_enc->hw_ctl; 2065 struct dpu_hw_intf_cfg intf_cfg = { 0 }; 2066 int i; 2067 struct dpu_encoder_virt *dpu_enc; 2068 2069 dpu_enc = to_dpu_encoder_virt(phys_enc->parent); 2070 2071 phys_enc->hw_ctl->ops.reset(ctl); 2072 2073 dpu_encoder_helper_reset_mixers(phys_enc); 2074 2075 /* 2076 * TODO: move the once-only operation like CTL flush/trigger 2077 * into dpu_encoder_virt_disable() and all operations which need 2078 * to be done per phys encoder into the phys_disable() op. 
2079 */ 2080 if (phys_enc->hw_wb) { 2081 /* disable the PP block */ 2082 if (phys_enc->hw_wb->ops.bind_pingpong_blk) 2083 phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE); 2084 2085 /* mark WB flush as pending */ 2086 if (phys_enc->hw_ctl->ops.update_pending_flush_wb) 2087 phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx); 2088 } else { 2089 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2090 if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk) 2091 phys_enc->hw_intf->ops.bind_pingpong_blk( 2092 dpu_enc->phys_encs[i]->hw_intf, 2093 PINGPONG_NONE); 2094 2095 /* mark INTF flush as pending */ 2096 if (phys_enc->hw_ctl->ops.update_pending_flush_intf) 2097 phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl, 2098 dpu_enc->phys_encs[i]->hw_intf->idx); 2099 } 2100 } 2101 2102 /* reset the merge 3D HW block */ 2103 if (phys_enc->hw_pp->merge_3d) { 2104 phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, 2105 BLEND_3D_NONE); 2106 if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d) 2107 phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl, 2108 phys_enc->hw_pp->merge_3d->idx); 2109 } 2110 2111 if (dpu_enc->dsc) 2112 dpu_encoder_unprep_dsc(dpu_enc); 2113 2114 intf_cfg.stream_sel = 0; /* Don't care value for video mode */ 2115 intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); 2116 intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc); 2117 2118 if (phys_enc->hw_intf) 2119 intf_cfg.intf = phys_enc->hw_intf->idx; 2120 if (phys_enc->hw_wb) 2121 intf_cfg.wb = phys_enc->hw_wb->idx; 2122 2123 if (phys_enc->hw_pp->merge_3d) 2124 intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx; 2125 2126 if (ctl->ops.reset_intf_cfg) 2127 ctl->ops.reset_intf_cfg(ctl, &intf_cfg); 2128 2129 ctl->ops.trigger_flush(ctl); 2130 ctl->ops.trigger_start(ctl); 2131 ctl->ops.clear_pending_flush(ctl); 2132 } 2133 2134 #ifdef CONFIG_DEBUG_FS 2135 static int _dpu_encoder_status_show(struct seq_file *s, void *data) 2136 { 2137 struct dpu_encoder_virt *dpu_enc = s->private; 2138 int i; 2139 2140 mutex_lock(&dpu_enc->enc_lock); 2141 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2142 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2143 2144 seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ", 2145 phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1, 2146 phys->hw_wb ? 
phys->hw_wb->idx - WB_0 : -1, 2147 atomic_read(&phys->vsync_cnt), 2148 atomic_read(&phys->underrun_cnt)); 2149 2150 seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode)); 2151 } 2152 mutex_unlock(&dpu_enc->enc_lock); 2153 2154 return 0; 2155 } 2156 2157 DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status); 2158 2159 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) 2160 { 2161 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); 2162 2163 char name[12]; 2164 2165 if (!drm_enc->dev) { 2166 DPU_ERROR("invalid encoder or kms\n"); 2167 return -EINVAL; 2168 } 2169 2170 snprintf(name, sizeof(name), "encoder%u", drm_enc->base.id); 2171 2172 /* create overall sub-directory for the encoder */ 2173 dpu_enc->debugfs_root = debugfs_create_dir(name, 2174 drm_enc->dev->primary->debugfs_root); 2175 2176 /* don't error check these */ 2177 debugfs_create_file("status", 0600, 2178 dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops); 2179 2180 return 0; 2181 } 2182 #else 2183 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) 2184 { 2185 return 0; 2186 } 2187 #endif 2188 2189 static int dpu_encoder_late_register(struct drm_encoder *encoder) 2190 { 2191 return _dpu_encoder_init_debugfs(encoder); 2192 } 2193 2194 static void dpu_encoder_early_unregister(struct drm_encoder *encoder) 2195 { 2196 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder); 2197 2198 debugfs_remove_recursive(dpu_enc->debugfs_root); 2199 } 2200 2201 static int dpu_encoder_virt_add_phys_encs( 2202 struct msm_display_info *disp_info, 2203 struct dpu_encoder_virt *dpu_enc, 2204 struct dpu_enc_phys_init_params *params) 2205 { 2206 struct dpu_encoder_phys *enc = NULL; 2207 2208 DPU_DEBUG_ENC(dpu_enc, "\n"); 2209 2210 /* 2211 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types 2212 * in this function, check up-front. 
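 * (This function adds at most one physical encoder per call, either a WB,
 * command or video one; the bounds check below is conservative and keeps
 * headroom for a full set of encoder types in phys_encs[].)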
2213 */ 2214 if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >= 2215 ARRAY_SIZE(dpu_enc->phys_encs)) { 2216 DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n", 2217 dpu_enc->num_phys_encs); 2218 return -EINVAL; 2219 } 2220 2221 2222 if (disp_info->intf_type == INTF_WB) { 2223 enc = dpu_encoder_phys_wb_init(params); 2224 2225 if (IS_ERR(enc)) { 2226 DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n", 2227 PTR_ERR(enc)); 2228 return PTR_ERR(enc); 2229 } 2230 2231 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2232 ++dpu_enc->num_phys_encs; 2233 } else if (disp_info->is_cmd_mode) { 2234 enc = dpu_encoder_phys_cmd_init(params); 2235 2236 if (IS_ERR(enc)) { 2237 DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n", 2238 PTR_ERR(enc)); 2239 return PTR_ERR(enc); 2240 } 2241 2242 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2243 ++dpu_enc->num_phys_encs; 2244 } else { 2245 enc = dpu_encoder_phys_vid_init(params); 2246 2247 if (IS_ERR(enc)) { 2248 DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n", 2249 PTR_ERR(enc)); 2250 return PTR_ERR(enc); 2251 } 2252 2253 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; 2254 ++dpu_enc->num_phys_encs; 2255 } 2256 2257 if (params->split_role == ENC_ROLE_SLAVE) 2258 dpu_enc->cur_slave = enc; 2259 else 2260 dpu_enc->cur_master = enc; 2261 2262 return 0; 2263 } 2264 2265 static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, 2266 struct dpu_kms *dpu_kms, 2267 struct msm_display_info *disp_info) 2268 { 2269 int ret = 0; 2270 int i = 0; 2271 struct dpu_enc_phys_init_params phys_params; 2272 2273 if (!dpu_enc) { 2274 DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL); 2275 return -EINVAL; 2276 } 2277 2278 dpu_enc->cur_master = NULL; 2279 2280 memset(&phys_params, 0, sizeof(phys_params)); 2281 phys_params.dpu_kms = dpu_kms; 2282 phys_params.parent = &dpu_enc->base; 2283 phys_params.enc_spinlock = &dpu_enc->enc_spinlock; 2284 2285 WARN_ON(disp_info->num_of_h_tiles < 1); 2286 2287 DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles); 2288 2289 if (disp_info->intf_type != INTF_WB) 2290 dpu_enc->idle_pc_supported = 2291 dpu_kms->catalog->caps->has_idle_pc; 2292 2293 dpu_enc->dsc = disp_info->dsc; 2294 2295 mutex_lock(&dpu_enc->enc_lock); 2296 for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) { 2297 /* 2298 * Left-most tile is at index 0, content is controller id 2299 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right 2300 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right 2301 */ 2302 u32 controller_id = disp_info->h_tile_instance[i]; 2303 2304 if (disp_info->num_of_h_tiles > 1) { 2305 if (i == 0) 2306 phys_params.split_role = ENC_ROLE_MASTER; 2307 else 2308 phys_params.split_role = ENC_ROLE_SLAVE; 2309 } else { 2310 phys_params.split_role = ENC_ROLE_SOLO; 2311 } 2312 2313 DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n", 2314 i, controller_id, phys_params.split_role); 2315 2316 phys_params.hw_intf = dpu_encoder_get_intf(dpu_kms->catalog, &dpu_kms->rm, 2317 disp_info->intf_type, 2318 controller_id); 2319 2320 if (disp_info->intf_type == INTF_WB && controller_id < WB_MAX) 2321 phys_params.hw_wb = dpu_rm_get_wb(&dpu_kms->rm, controller_id); 2322 2323 if (!phys_params.hw_intf && !phys_params.hw_wb) { 2324 DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i); 2325 ret = -EINVAL; 2326 break; 2327 } 2328 2329 if (phys_params.hw_intf && phys_params.hw_wb) { 2330 DPU_ERROR_ENC(dpu_enc, 2331 "invalid phys both intf and wb block at idx: %d\n", i); 2332 ret = -EINVAL; 2333 
break; 2334 } 2335 2336 ret = dpu_encoder_virt_add_phys_encs(disp_info, 2337 dpu_enc, &phys_params); 2338 if (ret) { 2339 DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n"); 2340 break; 2341 } 2342 } 2343 2344 mutex_unlock(&dpu_enc->enc_lock); 2345 2346 return ret; 2347 } 2348 2349 static void dpu_encoder_frame_done_timeout(struct timer_list *t) 2350 { 2351 struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t, 2352 frame_done_timer); 2353 struct drm_encoder *drm_enc = &dpu_enc->base; 2354 u32 event; 2355 2356 if (!drm_enc->dev) { 2357 DPU_ERROR("invalid parameters\n"); 2358 return; 2359 } 2360 2361 if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) { 2362 DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n", 2363 DRMID(drm_enc), dpu_enc->frame_busy_mask[0]); 2364 return; 2365 } else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) { 2366 DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc)); 2367 return; 2368 } 2369 2370 DPU_ERROR_ENC(dpu_enc, "frame done timeout\n"); 2371 2372 event = DPU_ENCODER_FRAME_EVENT_ERROR; 2373 trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event); 2374 dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event); 2375 } 2376 2377 static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = { 2378 .atomic_mode_set = dpu_encoder_virt_atomic_mode_set, 2379 .atomic_disable = dpu_encoder_virt_atomic_disable, 2380 .atomic_enable = dpu_encoder_virt_atomic_enable, 2381 .atomic_check = dpu_encoder_virt_atomic_check, 2382 }; 2383 2384 static const struct drm_encoder_funcs dpu_encoder_funcs = { 2385 .destroy = dpu_encoder_destroy, 2386 .late_register = dpu_encoder_late_register, 2387 .early_unregister = dpu_encoder_early_unregister, 2388 }; 2389 2390 struct drm_encoder *dpu_encoder_init(struct drm_device *dev, 2391 int drm_enc_mode, 2392 struct msm_display_info *disp_info) 2393 { 2394 struct msm_drm_private *priv = dev->dev_private; 2395 struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); 2396 struct drm_encoder *drm_enc = NULL; 2397 struct dpu_encoder_virt *dpu_enc = NULL; 2398 int ret = 0; 2399 2400 dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL); 2401 if (!dpu_enc) 2402 return ERR_PTR(-ENOMEM); 2403 2404 ret = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs, 2405 drm_enc_mode, NULL); 2406 if (ret) { 2407 devm_kfree(dev->dev, dpu_enc); 2408 return ERR_PTR(ret); 2409 } 2410 2411 drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs); 2412 2413 spin_lock_init(&dpu_enc->enc_spinlock); 2414 dpu_enc->enabled = false; 2415 mutex_init(&dpu_enc->enc_lock); 2416 mutex_init(&dpu_enc->rc_lock); 2417 2418 ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info); 2419 if (ret) 2420 goto fail; 2421 2422 atomic_set(&dpu_enc->frame_done_timeout_ms, 0); 2423 timer_setup(&dpu_enc->frame_done_timer, 2424 dpu_encoder_frame_done_timeout, 0); 2425 2426 if (disp_info->intf_type == INTF_DSI) 2427 timer_setup(&dpu_enc->vsync_event_timer, 2428 dpu_encoder_vsync_event_handler, 2429 0); 2430 else if (disp_info->intf_type == INTF_DP) 2431 dpu_enc->wide_bus_en = msm_dp_wide_bus_available( 2432 priv->dp[disp_info->h_tile_instance[0]]); 2433 2434 INIT_DELAYED_WORK(&dpu_enc->delayed_off_work, 2435 dpu_encoder_off_work); 2436 dpu_enc->idle_timeout = IDLE_TIMEOUT; 2437 2438 kthread_init_work(&dpu_enc->vsync_event_work, 2439 dpu_encoder_vsync_event_work_handler); 2440 2441 memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info)); 2442 2443 DPU_DEBUG_ENC(dpu_enc, "created\n"); 2444 2445 return &dpu_enc->base; 
2446 2447 fail: 2448 DPU_ERROR("failed to create encoder\n"); 2449 if (drm_enc) 2450 dpu_encoder_destroy(drm_enc); 2451 2452 return ERR_PTR(ret); 2453 } 2454 2455 int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc, 2456 enum msm_event_wait event) 2457 { 2458 int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL; 2459 struct dpu_encoder_virt *dpu_enc = NULL; 2460 int i, ret = 0; 2461 2462 if (!drm_enc) { 2463 DPU_ERROR("invalid encoder\n"); 2464 return -EINVAL; 2465 } 2466 dpu_enc = to_dpu_encoder_virt(drm_enc); 2467 DPU_DEBUG_ENC(dpu_enc, "\n"); 2468 2469 for (i = 0; i < dpu_enc->num_phys_encs; i++) { 2470 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; 2471 2472 switch (event) { 2473 case MSM_ENC_COMMIT_DONE: 2474 fn_wait = phys->ops.wait_for_commit_done; 2475 break; 2476 case MSM_ENC_TX_COMPLETE: 2477 fn_wait = phys->ops.wait_for_tx_complete; 2478 break; 2479 case MSM_ENC_VBLANK: 2480 fn_wait = phys->ops.wait_for_vblank; 2481 break; 2482 default: 2483 DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n", 2484 event); 2485 return -EINVAL; 2486 } 2487 2488 if (fn_wait) { 2489 DPU_ATRACE_BEGIN("wait_for_completion_event"); 2490 ret = fn_wait(phys); 2491 DPU_ATRACE_END("wait_for_completion_event"); 2492 if (ret) 2493 return ret; 2494 } 2495 } 2496 2497 return ret; 2498 } 2499 2500 enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder) 2501 { 2502 struct dpu_encoder_virt *dpu_enc = NULL; 2503 2504 if (!encoder) { 2505 DPU_ERROR("invalid encoder\n"); 2506 return INTF_MODE_NONE; 2507 } 2508 dpu_enc = to_dpu_encoder_virt(encoder); 2509 2510 if (dpu_enc->cur_master) 2511 return dpu_enc->cur_master->intf_mode; 2512 2513 if (dpu_enc->num_phys_encs) 2514 return dpu_enc->phys_encs[0]->intf_mode; 2515 2516 return INTF_MODE_NONE; 2517 } 2518 2519 unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc) 2520 { 2521 struct drm_encoder *encoder = phys_enc->parent; 2522 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder); 2523 2524 return dpu_enc->dsc_mask; 2525 } 2526 2527 void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc, 2528 struct dpu_enc_phys_init_params *p) 2529 { 2530 int i; 2531 2532 phys_enc->hw_mdptop = p->dpu_kms->hw_mdp; 2533 phys_enc->hw_intf = p->hw_intf; 2534 phys_enc->hw_wb = p->hw_wb; 2535 phys_enc->parent = p->parent; 2536 phys_enc->dpu_kms = p->dpu_kms; 2537 phys_enc->split_role = p->split_role; 2538 phys_enc->enc_spinlock = p->enc_spinlock; 2539 phys_enc->enable_state = DPU_ENC_DISABLED; 2540 2541 for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++) 2542 phys_enc->irq[i] = -EINVAL; 2543 2544 atomic_set(&phys_enc->vblank_refcount, 0); 2545 atomic_set(&phys_enc->pending_kickoff_cnt, 0); 2546 atomic_set(&phys_enc->pending_ctlstart_cnt, 0); 2547 2548 atomic_set(&phys_enc->vsync_cnt, 0); 2549 atomic_set(&phys_enc->underrun_cnt, 0); 2550 2551 init_waitqueue_head(&phys_enc->pending_kickoff_wq); 2552 } 2553
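/*
 * Usage sketch (assumption based on how the phys encoder backends are
 * structured): a backend init such as dpu_encoder_phys_vid_init() is expected
 * to call dpu_encoder_phys_init() first to fill in the common fields above,
 * then install its own ops and intf_mode before handing the physical encoder
 * back to dpu_encoder_virt_add_phys_encs().
 */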