1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2013 Red Hat
4 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
5 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
6 *
7 * Author: Rob Clark <robdclark@gmail.com>
8 */
9
10 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
11 #include <linux/debugfs.h>
12 #include <linux/kthread.h>
13 #include <linux/seq_file.h>
14
15 #include <drm/drm_atomic.h>
16 #include <drm/drm_crtc.h>
17 #include <drm/drm_file.h>
18 #include <drm/drm_probe_helper.h>
19
20 #include "msm_drv.h"
21 #include "dpu_kms.h"
22 #include "dpu_hwio.h"
23 #include "dpu_hw_catalog.h"
24 #include "dpu_hw_intf.h"
25 #include "dpu_hw_ctl.h"
26 #include "dpu_hw_dspp.h"
27 #include "dpu_hw_dsc.h"
28 #include "dpu_hw_merge3d.h"
29 #include "dpu_formats.h"
30 #include "dpu_encoder_phys.h"
31 #include "dpu_crtc.h"
32 #include "dpu_trace.h"
33 #include "dpu_core_irq.h"
34 #include "disp/msm_disp_snapshot.h"
35
36 #define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
37 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
38
39 #define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
40 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
41
42 #define DPU_ERROR_ENC_RATELIMITED(e, fmt, ...) DPU_ERROR_RATELIMITED("enc%d " fmt,\
43 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
44
45 /*
46 * Two to anticipate panels that can do cmd/vid dynamic switching;
47 * the plan is to create all possible physical encoder types and switch
48 * between them at runtime.
49 */
50 #define NUM_PHYS_ENCODER_TYPES 2
51
52 #define MAX_PHYS_ENCODERS_PER_VIRTUAL \
53 (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
54
55 #define MAX_CHANNELS_PER_ENC 2
56
57 #define IDLE_SHORT_TIMEOUT 1
58
59 #define MAX_HDISPLAY_SPLIT 1080
60
61 /* timeout in frames waiting for frame done */
62 #define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
63
64 /**
65 * enum dpu_enc_rc_events - events for resource control state machine
66 * @DPU_ENC_RC_EVENT_KICKOFF:
67 * This event happens at NORMAL priority.
68 * Event that signals the start of the transfer. When this event is
69 * received, enable MDP/DSI core clocks. Regardless of the previous
70 * state, the resource should be in ON state at the end of this event.
71 * @DPU_ENC_RC_EVENT_FRAME_DONE:
72 * This event happens at INTERRUPT level.
73 * Event signals the end of the data transfer after the PP FRAME_DONE
74 * event. At the end of this event, a delayed work is scheduled to go to
75 * IDLE_PC state after IDLE_TIMEOUT time.
76 * @DPU_ENC_RC_EVENT_PRE_STOP:
77 * This event happens at NORMAL priority.
78 * This event, when received during the ON state, leaves the RC state
79 * in the PRE_OFF state. It should be followed by the STOP event as
80 * part of encoder disable.
81 * If received during IDLE or OFF states, it will do nothing.
82 * @DPU_ENC_RC_EVENT_STOP:
83 * This event happens at NORMAL priority.
84 * When this event is received, disable all the MDP/DSI core clocks, and
85 * disable IRQs. It should be called from the PRE_OFF or IDLE states.
86 * IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
87 * PRE_OFF is expected when PRE_STOP was executed during the ON state.
88 * Resource state should be in OFF at the end of the event.
89 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
90 * This event happens at NORMAL priority from a work item.
91 * Event signals that there were no frame updates for IDLE_TIMEOUT time.
92 * This would disable MDP/DSI core clocks and change the resource state
93 * to IDLE.
94 */
95 enum dpu_enc_rc_events {
96 DPU_ENC_RC_EVENT_KICKOFF = 1,
97 DPU_ENC_RC_EVENT_FRAME_DONE,
98 DPU_ENC_RC_EVENT_PRE_STOP,
99 DPU_ENC_RC_EVENT_STOP,
100 DPU_ENC_RC_EVENT_ENTER_IDLE
101 };
102
103 /*
104 * enum dpu_enc_rc_states - states that the resource control maintains
105 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
106 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
107 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
109 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
110 */
111 enum dpu_enc_rc_states {
112 DPU_ENC_RC_STATE_OFF,
113 DPU_ENC_RC_STATE_PRE_OFF,
114 DPU_ENC_RC_STATE_ON,
115 DPU_ENC_RC_STATE_IDLE
116 };
117
118 /**
119 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
120 * encoders. Virtual encoder manages one "logical" display. Physical
121 * encoders manage one intf block, tied to a specific panel/sub-panel.
122 * Virtual encoder defers as much as possible to the physical encoders.
123 * Virtual encoder registers itself with the DRM Framework as the encoder.
124 * @base: drm_encoder base class for registration with DRM
125 * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
126 * @enabled: True if the encoder is active, protected by enc_lock
127 * @commit_done_timedout: True if there has been a timeout on commit after
128 * enabling the encoder.
129 * @num_phys_encs: Actual number of physical encoders contained.
130 * @phys_encs: Container of physical encoders managed.
131 * @cur_master: Pointer to the current master in this mode. Optimization:
132 * only valid after enable; cleared at disable.
133 * @cur_slave: As above but for the slave encoder.
134 * @hw_pp: Handle to the pingpong blocks used for the display. The number
135 * of pingpong blocks can differ from num_phys_encs.
136 * @hw_dsc: Handle to the DSC blocks used for the display.
137 * @dsc_mask: Bitmask of used DSC blocks.
138 * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
139 * for partial update right-only cases, such as pingpong
140 * split where virtual pingpong does not generate IRQs
141 * @crtc: Pointer to the currently assigned crtc. Normally you
142 * would use crtc->state->encoder_mask to determine the
143 * link between encoder/crtc. However in this case we need
144 * to track crtc in the disable() hook which is called
145 * _after_ encoder_mask is cleared.
146 * @connector: If a mode is set, cached pointer to the active connector
147 * @crtc_kickoff_cb: Callback into CRTC that will flush & start
148 * all CTL paths
149 * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
150 * @debugfs_root: Debug file system root file node
151 * @enc_lock: Lock around physical encoder
152 * create/destroy/enable/disable
153 * @frame_busy_mask: Bitmask tracking which phys_encs are still busy
154 * processing the current command.
155 * Bit0 = phys_encs[0] etc.
156 * @crtc_frame_event_cb: callback handler for frame event
157 * @crtc_frame_event_cb_data: callback handler private data
158 * @frame_done_timeout_ms: frame done timeout in ms
159 * @frame_done_timer: watchdog timer for frame done event
160 * @disp_info: local copy of msm_display_info struct
161 * @idle_pc_supported: indicates if idle power collapse is supported
162 * @rc_lock: resource control mutex lock to protect
163 * virt encoder over various state changes
164 * @rc_state: resource controller state
165 * @delayed_off_work: delayed worker to schedule disabling of
166 * clks and resources after IDLE_TIMEOUT time.
167 * @topology: topology of the display
168 * @idle_timeout: idle timeout duration in milliseconds
169 * @wide_bus_en: wide bus is enabled on this interface
170 * @dsc: drm_dsc_config pointer, for DSC-enabled encoders
171 */
172 struct dpu_encoder_virt {
173 struct drm_encoder base;
174 spinlock_t enc_spinlock;
175
176 bool enabled;
177 bool commit_done_timedout;
178
179 unsigned int num_phys_encs;
180 struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
181 struct dpu_encoder_phys *cur_master;
182 struct dpu_encoder_phys *cur_slave;
183 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
184 struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
185
186 unsigned int dsc_mask;
187
188 bool intfs_swapped;
189
190 struct drm_crtc *crtc;
191 struct drm_connector *connector;
192
193 struct dentry *debugfs_root;
194 struct mutex enc_lock;
195 DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
196 void (*crtc_frame_event_cb)(void *, u32 event);
197 void *crtc_frame_event_cb_data;
198
199 atomic_t frame_done_timeout_ms;
200 struct timer_list frame_done_timer;
201
202 struct msm_display_info disp_info;
203
204 bool idle_pc_supported;
205 struct mutex rc_lock;
206 enum dpu_enc_rc_states rc_state;
207 struct delayed_work delayed_off_work;
208 struct msm_display_topology topology;
209
210 u32 idle_timeout;
211
212 bool wide_bus_en;
213
214 /* DSC configuration */
215 struct drm_dsc_config *dsc;
216 };
217
218 #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
219
220 static u32 dither_matrix[DITHER_MATRIX_SZ] = {
221 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
222 };
223
224
225 bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
226 {
227 const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
228
229 return dpu_enc->wide_bus_en;
230 }
231
232 bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
233 {
234 const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
235
236 return dpu_enc->dsc ? true : false;
237 }
238
239 int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
240 {
241 struct dpu_encoder_virt *dpu_enc;
242 int i, num_intf = 0;
243
244 dpu_enc = to_dpu_encoder_virt(drm_enc);
245
246 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
247 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
248
249 if (phys->hw_intf && phys->hw_intf->ops.setup_misr
250 && phys->hw_intf->ops.collect_misr)
251 num_intf++;
252 }
253
254 return num_intf;
255 }
256
257 void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
258 {
259 struct dpu_encoder_virt *dpu_enc;
260
261 int i;
262
263 dpu_enc = to_dpu_encoder_virt(drm_enc);
264
265 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
266 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
267
268 if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr)
269 continue;
270
271 phys->hw_intf->ops.setup_misr(phys->hw_intf);
272 }
273 }
274
275 int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos)
276 {
277 struct dpu_encoder_virt *dpu_enc;
278
279 int i, rc = 0, entries_added = 0;
280
281 if (!drm_enc->crtc) {
282 DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index);
283 return -EINVAL;
284 }
285
286 dpu_enc = to_dpu_encoder_virt(drm_enc);
287
288 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
289 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
290
291 if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr)
292 continue;
293
294 rc = phys->hw_intf->ops.collect_misr(phys->hw_intf, &crcs[pos + entries_added]);
295 if (rc)
296 return rc;
297 entries_added++;
298 }
299
300 return entries_added;
301 }
302
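/*
 * Configure pingpong dithering: program the 6bpc dither matrix for
 * 18bpp panels, and disable dithering for any other bit depth.
 */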
303 static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
304 {
305 struct dpu_hw_dither_cfg dither_cfg = { 0 };
306
307 if (!hw_pp->ops.setup_dither)
308 return;
309
310 switch (bpc) {
311 case 6:
312 dither_cfg.c0_bitdepth = 6;
313 dither_cfg.c1_bitdepth = 6;
314 dither_cfg.c2_bitdepth = 6;
315 dither_cfg.c3_bitdepth = 6;
316 dither_cfg.temporal_en = 0;
317 break;
318 default:
319 hw_pp->ops.setup_dither(hw_pp, NULL);
320 return;
321 }
322
323 memcpy(&dither_cfg.matrix, dither_matrix,
324 sizeof(u32) * DITHER_MATRIX_SZ);
325
326 hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
327 }
328
329 static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
330 {
331 switch (intf_mode) {
332 case INTF_MODE_VIDEO:
333 return "INTF_MODE_VIDEO";
334 case INTF_MODE_CMD:
335 return "INTF_MODE_CMD";
336 case INTF_MODE_WB_BLOCK:
337 return "INTF_MODE_WB_BLOCK";
338 case INTF_MODE_WB_LINE:
339 return "INTF_MODE_WB_LINE";
340 default:
341 return "INTF_MODE_UNKNOWN";
342 }
343 }
344
345 void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
346 enum dpu_intr_idx intr_idx)
347 {
348 DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
349 DRMID(phys_enc->parent),
350 dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
351 phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1,
352 phys_enc->hw_wb ? phys_enc->hw_wb->idx - WB_0 : -1,
353 phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
354
355 dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
356 DPU_ENCODER_FRAME_EVENT_ERROR);
357 }
358
359 static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
360 u32 irq_idx, struct dpu_encoder_wait_info *info);
361
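/*
 * Wait for the given interrupt with a timeout. If the wait times out but
 * the interrupt status register shows the IRQ actually fired, invoke the
 * handler manually and treat the wait as successful.
 */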
362 int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
363 int irq_idx,
364 void (*func)(void *arg),
365 struct dpu_encoder_wait_info *wait_info)
366 {
367 u32 irq_status;
368 int ret;
369
370 if (!wait_info) {
371 DPU_ERROR("invalid params\n");
372 return -EINVAL;
373 }
374 /* note: do master / slave checking outside */
375
376 /* return EWOULDBLOCK since we know the wait isn't necessary */
377 if (phys_enc->enable_state == DPU_ENC_DISABLED) {
378 DRM_ERROR("encoder is disabled id=%u, callback=%ps, IRQ=[%d, %d]\n",
379 DRMID(phys_enc->parent), func,
380 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
381 return -EWOULDBLOCK;
382 }
383
384 if (irq_idx < 0) {
385 DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
386 DRMID(phys_enc->parent), func);
387 return 0;
388 }
389
390 DRM_DEBUG_KMS("id=%u, callback=%ps, IRQ=[%d, %d], pp=%d, pending_cnt=%d\n",
391 DRMID(phys_enc->parent), func,
392 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), phys_enc->hw_pp->idx - PINGPONG_0,
393 atomic_read(wait_info->atomic_cnt));
394
395 ret = dpu_encoder_helper_wait_event_timeout(
396 DRMID(phys_enc->parent),
397 irq_idx,
398 wait_info);
399
400 if (ret <= 0) {
401 irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq_idx);
402 if (irq_status) {
403 unsigned long flags;
404
405 DRM_DEBUG_KMS("IRQ=[%d, %d] not triggered id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n",
406 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
407 DRMID(phys_enc->parent), func,
408 phys_enc->hw_pp->idx - PINGPONG_0,
409 atomic_read(wait_info->atomic_cnt));
410 local_irq_save(flags);
411 func(phys_enc);
412 local_irq_restore(flags);
413 ret = 0;
414 } else {
415 ret = -ETIMEDOUT;
416 DRM_DEBUG_KMS("IRQ=[%d, %d] timeout id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n",
417 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
418 DRMID(phys_enc->parent), func,
419 phys_enc->hw_pp->idx - PINGPONG_0,
420 atomic_read(wait_info->atomic_cnt));
421 }
422 } else {
423 ret = 0;
424 trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
425 func, irq_idx,
426 phys_enc->hw_pp->idx - PINGPONG_0,
427 atomic_read(wait_info->atomic_cnt));
428 }
429
430 return ret;
431 }
432
433 int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
434 {
435 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
436 struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;
437 return phys ? atomic_read(&phys->vsync_cnt) : 0;
438 }
439
440 int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
441 {
442 struct dpu_encoder_virt *dpu_enc;
443 struct dpu_encoder_phys *phys;
444 int linecount = 0;
445
446 dpu_enc = to_dpu_encoder_virt(drm_enc);
447 phys = dpu_enc ? dpu_enc->cur_master : NULL;
448
449 if (phys && phys->ops.get_line_count)
450 linecount = phys->ops.get_line_count(phys);
451
452 return linecount;
453 }
454
455 static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
456 {
457 struct dpu_encoder_virt *dpu_enc = NULL;
458 int i = 0;
459
460 if (!drm_enc) {
461 DPU_ERROR("invalid encoder\n");
462 return;
463 }
464
465 dpu_enc = to_dpu_encoder_virt(drm_enc);
466 DPU_DEBUG_ENC(dpu_enc, "\n");
467
468 mutex_lock(&dpu_enc->enc_lock);
469
470 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
471 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
472
473 if (phys->ops.destroy) {
474 phys->ops.destroy(phys);
475 --dpu_enc->num_phys_encs;
476 dpu_enc->phys_encs[i] = NULL;
477 }
478 }
479
480 if (dpu_enc->num_phys_encs)
481 DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
482 dpu_enc->num_phys_encs);
483 dpu_enc->num_phys_encs = 0;
484 mutex_unlock(&dpu_enc->enc_lock);
485
486 drm_encoder_cleanup(drm_enc);
487 mutex_destroy(&dpu_enc->enc_lock);
488 }
489
490 void dpu_encoder_helper_split_config(
491 struct dpu_encoder_phys *phys_enc,
492 enum dpu_intf interface)
493 {
494 struct dpu_encoder_virt *dpu_enc;
495 struct split_pipe_cfg cfg = { 0 };
496 struct dpu_hw_mdp *hw_mdptop;
497 struct msm_display_info *disp_info;
498
499 if (!phys_enc->hw_mdptop || !phys_enc->parent) {
500 DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
501 return;
502 }
503
504 dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
505 hw_mdptop = phys_enc->hw_mdptop;
506 disp_info = &dpu_enc->disp_info;
507
508 if (disp_info->intf_type != INTF_DSI)
509 return;
510
511 /*
512 * disable split modes since the encoder will be operating as the only
513 * encoder, either for the entire use case in the case of, for example,
514 * single DSI, or for this frame in the case of left/right only partial
515 * update.
516 */
517 if (phys_enc->split_role == ENC_ROLE_SOLO) {
518 if (hw_mdptop->ops.setup_split_pipe)
519 hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
520 return;
521 }
522
523 cfg.en = true;
524 cfg.mode = phys_enc->intf_mode;
525 cfg.intf = interface;
526
527 if (cfg.en && phys_enc->ops.needs_single_flush &&
528 phys_enc->ops.needs_single_flush(phys_enc))
529 cfg.split_flush_en = true;
530
531 if (phys_enc->split_role == ENC_ROLE_MASTER) {
532 DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
533
534 if (hw_mdptop->ops.setup_split_pipe)
535 hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
536 }
537 }
538
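/*
 * DSC merge is needed when more DSC encoders than interfaces are in use,
 * i.e. the 2 DSC : 2 LM : 1 INTF topology selected for DSC panels.
 */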
539 bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
540 {
541 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
542 int i, intf_count = 0, num_dsc = 0;
543
544 for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
545 if (dpu_enc->phys_encs[i])
546 intf_count++;
547
548 /* See dpu_encoder_get_topology, we only support 2:2:1 topology */
549 if (dpu_enc->dsc)
550 num_dsc = 2;
551
552 return (num_dsc > 0) && (num_dsc > intf_count);
553 }
554
555 static struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
556 {
557 struct msm_drm_private *priv = drm_enc->dev->dev_private;
558 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
559 int index = dpu_enc->disp_info.h_tile_instance[0];
560
561 if (dpu_enc->disp_info.intf_type == INTF_DSI)
562 return msm_dsi_get_dsc_config(priv->dsi[index]);
563
564 return NULL;
565 }
566
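/*
 * Derive the number of LM, INTF, DSC and DSPP blocks needed for this
 * display from the interface count, mode width, CTM state and DSC config.
 */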
567 static struct msm_display_topology dpu_encoder_get_topology(
568 struct dpu_encoder_virt *dpu_enc,
569 struct dpu_kms *dpu_kms,
570 struct drm_display_mode *mode,
571 struct drm_crtc_state *crtc_state,
572 struct drm_dsc_config *dsc)
573 {
574 struct msm_display_topology topology = {0};
575 int i, intf_count = 0;
576
577 for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
578 if (dpu_enc->phys_encs[i])
579 intf_count++;
580
581 /* Datapath topology selection
582 *
583 * Dual display
584 * 2 LM, 2 INTF ( Split display using 2 interfaces)
585 *
586 * Single display
587 * 1 LM, 1 INTF
588 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
589 *
590 * Add dspps to the reservation requirements if ctm is requested
591 */
592 if (intf_count == 2)
593 topology.num_lm = 2;
594 else if (!dpu_kms->catalog->caps->has_3d_merge)
595 topology.num_lm = 1;
596 else
597 topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
598
599 if (crtc_state->ctm)
600 topology.num_dspp = topology.num_lm;
601
602 topology.num_intf = intf_count;
603
604 if (dsc) {
605 /*
606 * In case of Display Stream Compression (DSC), we would use
607 * 2 DSC encoders, 2 layer mixers and 1 interface
608 * this is power optimal and can drive up to (including) 4k
609 * screens
610 */
611 topology.num_dsc = 2;
612 topology.num_lm = 2;
613 topology.num_intf = 1;
614 }
615
616 return topology;
617 }
618
619 static int dpu_encoder_virt_atomic_check(
620 struct drm_encoder *drm_enc,
621 struct drm_crtc_state *crtc_state,
622 struct drm_connector_state *conn_state)
623 {
624 struct dpu_encoder_virt *dpu_enc;
625 struct msm_drm_private *priv;
626 struct dpu_kms *dpu_kms;
627 struct drm_display_mode *adj_mode;
628 struct msm_display_topology topology;
629 struct dpu_global_state *global_state;
630 struct drm_dsc_config *dsc;
631 int i = 0;
632 int ret = 0;
633
634 if (!drm_enc || !crtc_state || !conn_state) {
635 DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
636 drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
637 return -EINVAL;
638 }
639
640 dpu_enc = to_dpu_encoder_virt(drm_enc);
641 DPU_DEBUG_ENC(dpu_enc, "\n");
642
643 priv = drm_enc->dev->dev_private;
644 dpu_kms = to_dpu_kms(priv->kms);
645 adj_mode = &crtc_state->adjusted_mode;
646 global_state = dpu_kms_get_global_state(crtc_state->state);
647 if (IS_ERR(global_state))
648 return PTR_ERR(global_state);
649
650 trace_dpu_enc_atomic_check(DRMID(drm_enc));
651
652 /* perform atomic check on the first physical encoder (master) */
653 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
654 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
655
656 if (phys->ops.atomic_check)
657 ret = phys->ops.atomic_check(phys, crtc_state,
658 conn_state);
659 if (ret) {
660 DPU_ERROR_ENC(dpu_enc,
661 "mode unsupported, phys idx %d\n", i);
662 return ret;
663 }
664 }
665
666 dsc = dpu_encoder_get_dsc_config(drm_enc);
667
668 topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);
669
670 /*
671 * Release and allocate resources on every modeset.
672 * Don't allocate when active is false.
673 */
674 if (drm_atomic_crtc_needs_modeset(crtc_state)) {
675 dpu_rm_release(global_state, drm_enc);
676
677 if (!crtc_state->active_changed || crtc_state->enable)
678 ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
679 drm_enc, crtc_state, topology);
680 }
681
682 trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
683
684 return ret;
685 }
686
687 static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
688 struct msm_display_info *disp_info)
689 {
690 struct dpu_vsync_source_cfg vsync_cfg = { 0 };
691 struct msm_drm_private *priv;
692 struct dpu_kms *dpu_kms;
693 struct dpu_hw_mdp *hw_mdptop;
694 struct drm_encoder *drm_enc;
695 struct dpu_encoder_phys *phys_enc;
696 int i;
697
698 if (!dpu_enc || !disp_info) {
699 DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
700 dpu_enc != NULL, disp_info != NULL);
701 return;
702 } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
703 DPU_ERROR("invalid num phys enc %d/%d\n",
704 dpu_enc->num_phys_encs,
705 (int) ARRAY_SIZE(dpu_enc->hw_pp));
706 return;
707 }
708
709 drm_enc = &dpu_enc->base;
710 /* these pointers are checked in virt_enable_helper */
711 priv = drm_enc->dev->dev_private;
712
713 dpu_kms = to_dpu_kms(priv->kms);
714 hw_mdptop = dpu_kms->hw_mdp;
715 if (!hw_mdptop) {
716 DPU_ERROR("invalid mdptop\n");
717 return;
718 }
719
720 if (hw_mdptop->ops.setup_vsync_source &&
721 disp_info->is_cmd_mode) {
722 for (i = 0; i < dpu_enc->num_phys_encs; i++)
723 vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
724
725 vsync_cfg.pp_count = dpu_enc->num_phys_encs;
726 vsync_cfg.frame_rate = drm_mode_vrefresh(&dpu_enc->base.crtc->state->adjusted_mode);
727
728 if (disp_info->is_te_using_watchdog_timer)
729 vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
730 else
731 vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
732
733 hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
734
735 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
736 phys_enc = dpu_enc->phys_encs[i];
737
738 if (phys_enc->has_intf_te && phys_enc->hw_intf->ops.vsync_sel)
739 phys_enc->hw_intf->ops.vsync_sel(phys_enc->hw_intf,
740 vsync_cfg.vsync_source);
741 }
742 }
743 }
744
745 static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
746 {
747 struct dpu_encoder_virt *dpu_enc;
748 int i;
749
750 if (!drm_enc) {
751 DPU_ERROR("invalid encoder\n");
752 return;
753 }
754
755 dpu_enc = to_dpu_encoder_virt(drm_enc);
756
757 DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
758 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
759 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
760
761 if (phys->ops.irq_control)
762 phys->ops.irq_control(phys, enable);
763 }
764
765 }
766
767 static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
768 bool enable)
769 {
770 struct msm_drm_private *priv;
771 struct dpu_kms *dpu_kms;
772 struct dpu_encoder_virt *dpu_enc;
773
774 dpu_enc = to_dpu_encoder_virt(drm_enc);
775 priv = drm_enc->dev->dev_private;
776 dpu_kms = to_dpu_kms(priv->kms);
777
778 trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
779
780 if (!dpu_enc->cur_master) {
781 DPU_ERROR("encoder master not set\n");
782 return;
783 }
784
785 if (enable) {
786 /* enable DPU core clks */
787 pm_runtime_get_sync(&dpu_kms->pdev->dev);
788
789 /* enable all the irq */
790 _dpu_encoder_irq_control(drm_enc, true);
791
792 } else {
793 /* disable all the irq */
794 _dpu_encoder_irq_control(drm_enc, false);
795
796 /* disable DPU core clks */
797 pm_runtime_put_sync(&dpu_kms->pdev->dev);
798 }
799
800 }
801
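/*
 * Resource control state machine: KICKOFF powers the encoder ON,
 * FRAME_DONE schedules the delayed idle work, ENTER_IDLE drops clocks
 * and IRQs into IDLE, and PRE_STOP/STOP walk the encoder down to OFF.
 */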
802 static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
803 u32 sw_event)
804 {
805 struct dpu_encoder_virt *dpu_enc;
806 struct msm_drm_private *priv;
807 bool is_vid_mode = false;
808
809 if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
810 DPU_ERROR("invalid parameters\n");
811 return -EINVAL;
812 }
813 dpu_enc = to_dpu_encoder_virt(drm_enc);
814 priv = drm_enc->dev->dev_private;
815 is_vid_mode = !dpu_enc->disp_info.is_cmd_mode;
816
817 /*
818 * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
819 * STOP events and return early for other events (i.e. wb display).
820 */
821 if (!dpu_enc->idle_pc_supported &&
822 (sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
823 sw_event != DPU_ENC_RC_EVENT_STOP &&
824 sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
825 return 0;
826
827 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
828 dpu_enc->rc_state, "begin");
829
830 switch (sw_event) {
831 case DPU_ENC_RC_EVENT_KICKOFF:
832 /* cancel delayed off work, if any */
833 if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
834 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
835 sw_event);
836
837 mutex_lock(&dpu_enc->rc_lock);
838
839 /* return if the resource control is already in ON state */
840 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
841 DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in ON state\n",
842 DRMID(drm_enc), sw_event);
843 mutex_unlock(&dpu_enc->rc_lock);
844 return 0;
845 } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
846 dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
847 DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in state %d\n",
848 DRMID(drm_enc), sw_event,
849 dpu_enc->rc_state);
850 mutex_unlock(&dpu_enc->rc_lock);
851 return -EINVAL;
852 }
853
854 if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
855 _dpu_encoder_irq_control(drm_enc, true);
856 else
857 _dpu_encoder_resource_control_helper(drm_enc, true);
858
859 dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
860
861 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
862 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
863 "kickoff");
864
865 mutex_unlock(&dpu_enc->rc_lock);
866 break;
867
868 case DPU_ENC_RC_EVENT_FRAME_DONE:
869 /*
870 * the mutex lock is not taken as this event happens in interrupt
871 * context. Locking is not required because the other events,
872 * like KICKOFF and STOP, do a wait-for-idle before executing
873 * the resource_control
874 */
875 if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
876 DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
877 DRMID(drm_enc), sw_event,
878 dpu_enc->rc_state);
879 return -EINVAL;
880 }
881
882 /*
883 * schedule off work item only when there are no
884 * frames pending
885 */
886 if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
887 DRM_DEBUG_KMS("id:%d skip schedule work\n",
888 DRMID(drm_enc));
889 return 0;
890 }
891
892 queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
893 msecs_to_jiffies(dpu_enc->idle_timeout));
894
895 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
896 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
897 "frame done");
898 break;
899
900 case DPU_ENC_RC_EVENT_PRE_STOP:
901 /* cancel delayed off work, if any */
902 if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
903 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
904 sw_event);
905
906 mutex_lock(&dpu_enc->rc_lock);
907
908 if (is_vid_mode &&
909 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
910 _dpu_encoder_irq_control(drm_enc, true);
911 }
912 /* skip if is already OFF or IDLE, resources are off already */
913 else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
914 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
915 DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
916 DRMID(drm_enc), sw_event,
917 dpu_enc->rc_state);
918 mutex_unlock(&dpu_enc->rc_lock);
919 return 0;
920 }
921
922 dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
923
924 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
925 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
926 "pre stop");
927
928 mutex_unlock(&dpu_enc->rc_lock);
929 break;
930
931 case DPU_ENC_RC_EVENT_STOP:
932 mutex_lock(&dpu_enc->rc_lock);
933
934 /* return if the resource control is already in OFF state */
935 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
936 DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
937 DRMID(drm_enc), sw_event);
938 mutex_unlock(&dpu_enc->rc_lock);
939 return 0;
940 } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
941 DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
942 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
943 mutex_unlock(&dpu_enc->rc_lock);
944 return -EINVAL;
945 }
946
947 /*
948 * expect to arrive here only from the IDLE or PRE_OFF state;
949 * in the IDLE state the resources are already disabled
950 */
951 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
952 _dpu_encoder_resource_control_helper(drm_enc, false);
953
954 dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
955
956 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
957 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
958 "stop");
959
960 mutex_unlock(&dpu_enc->rc_lock);
961 break;
962
963 case DPU_ENC_RC_EVENT_ENTER_IDLE:
964 mutex_lock(&dpu_enc->rc_lock);
965
966 if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
967 DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
968 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
969 mutex_unlock(&dpu_enc->rc_lock);
970 return 0;
971 }
972
973 /*
974 * if we are in ON but a frame was just kicked off,
975 * ignore the IDLE event, it's probably a stale timer event
976 */
977 if (dpu_enc->frame_busy_mask[0]) {
978 DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
979 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
980 mutex_unlock(&dpu_enc->rc_lock);
981 return 0;
982 }
983
984 if (is_vid_mode)
985 _dpu_encoder_irq_control(drm_enc, false);
986 else
987 _dpu_encoder_resource_control_helper(drm_enc, false);
988
989 dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
990
991 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
992 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
993 "idle");
994
995 mutex_unlock(&dpu_enc->rc_lock);
996 break;
997
998 default:
999 DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
1000 sw_event);
1001 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
1002 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
1003 "error");
1004 break;
1005 }
1006
1007 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
1008 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
1009 "end");
1010 return 0;
1011 }
1012
1013 void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
1014 struct drm_writeback_job *job)
1015 {
1016 struct dpu_encoder_virt *dpu_enc;
1017 int i;
1018
1019 dpu_enc = to_dpu_encoder_virt(drm_enc);
1020
1021 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1022 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1023
1024 if (phys->ops.prepare_wb_job)
1025 phys->ops.prepare_wb_job(phys, job);
1026
1027 }
1028 }
1029
1030 void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
1031 struct drm_writeback_job *job)
1032 {
1033 struct dpu_encoder_virt *dpu_enc;
1034 int i;
1035
1036 dpu_enc = to_dpu_encoder_virt(drm_enc);
1037
1038 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1039 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1040
1041 if (phys->ops.cleanup_wb_job)
1042 phys->ops.cleanup_wb_job(phys, job);
1043
1044 }
1045 }
1046
1047 static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
1048 struct drm_crtc_state *crtc_state,
1049 struct drm_connector_state *conn_state)
1050 {
1051 struct dpu_encoder_virt *dpu_enc;
1052 struct msm_drm_private *priv;
1053 struct dpu_kms *dpu_kms;
1054 struct dpu_crtc_state *cstate;
1055 struct dpu_global_state *global_state;
1056 struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
1057 struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
1058 struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
1059 struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
1060 struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
1061 int num_lm, num_ctl, num_pp, num_dsc;
1062 unsigned int dsc_mask = 0;
1063 int i;
1064
1065 if (!drm_enc) {
1066 DPU_ERROR("invalid encoder\n");
1067 return;
1068 }
1069
1070 dpu_enc = to_dpu_encoder_virt(drm_enc);
1071 DPU_DEBUG_ENC(dpu_enc, "\n");
1072
1073 priv = drm_enc->dev->dev_private;
1074 dpu_kms = to_dpu_kms(priv->kms);
1075
1076 global_state = dpu_kms_get_existing_global_state(dpu_kms);
1077 if (IS_ERR_OR_NULL(global_state)) {
1078 DPU_ERROR("Failed to get global state");
1079 return;
1080 }
1081
1082 trace_dpu_enc_mode_set(DRMID(drm_enc));
1083
1084 /* Query resource that have been reserved in atomic check step. */
1085 num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1086 drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
1087 ARRAY_SIZE(hw_pp));
1088 num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1089 drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
1090 num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1091 drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
1092 dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1093 drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
1094 ARRAY_SIZE(hw_dspp));
1095
1096 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
1097 dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
1098 : NULL;
1099
1100 num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1101 drm_enc->base.id, DPU_HW_BLK_DSC,
1102 hw_dsc, ARRAY_SIZE(hw_dsc));
1103 for (i = 0; i < num_dsc; i++) {
1104 dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
1105 dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
1106 }
1107
1108 dpu_enc->dsc_mask = dsc_mask;
1109
1110 cstate = to_dpu_crtc_state(crtc_state);
1111
1112 for (i = 0; i < num_lm; i++) {
1113 int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
1114
1115 cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
1116 cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
1117 cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
1118 }
1119
1120 cstate->num_mixers = num_lm;
1121
1122 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1123 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1124
1125 phys->hw_pp = dpu_enc->hw_pp[i];
1126 if (!phys->hw_pp) {
1127 DPU_ERROR_ENC(dpu_enc,
1128 "no pp block assigned at idx: %d\n", i);
1129 return;
1130 }
1131
1132 phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL;
1133 if (!phys->hw_ctl) {
1134 DPU_ERROR_ENC(dpu_enc,
1135 "no ctl block assigned at idx: %d\n", i);
1136 return;
1137 }
1138
1139 phys->cached_mode = crtc_state->adjusted_mode;
1140 if (phys->ops.atomic_mode_set)
1141 phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
1142 }
1143 }
1144
1145 static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
1146 {
1147 struct dpu_encoder_virt *dpu_enc = NULL;
1148 int i;
1149
1150 if (!drm_enc || !drm_enc->dev) {
1151 DPU_ERROR("invalid parameters\n");
1152 return;
1153 }
1154
1155 dpu_enc = to_dpu_encoder_virt(drm_enc);
1156 if (!dpu_enc || !dpu_enc->cur_master) {
1157 DPU_ERROR("invalid dpu encoder/master\n");
1158 return;
1159 }
1160
1161
1162 if (dpu_enc->disp_info.intf_type == INTF_DP &&
1163 dpu_enc->cur_master->hw_mdptop &&
1164 dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
1165 dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
1166 dpu_enc->cur_master->hw_mdptop);
1167
1168 _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
1169
1170 if (dpu_enc->disp_info.intf_type == INTF_DSI &&
1171 !WARN_ON(dpu_enc->num_phys_encs == 0)) {
1172 unsigned bpc = dpu_enc->connector->display_info.bpc;
1173 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
1174 if (!dpu_enc->hw_pp[i])
1175 continue;
1176 _dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
1177 }
1178 }
1179 }
1180
1181 void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
1182 {
1183 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1184
1185 mutex_lock(&dpu_enc->enc_lock);
1186
1187 if (!dpu_enc->enabled)
1188 goto out;
1189
1190 if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
1191 dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
1192 if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
1193 dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
1194
1195 _dpu_encoder_virt_enable_helper(drm_enc);
1196
1197 out:
1198 mutex_unlock(&dpu_enc->enc_lock);
1199 }
1200
1201 static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
1202 struct drm_atomic_state *state)
1203 {
1204 struct dpu_encoder_virt *dpu_enc = NULL;
1205 int ret = 0;
1206 struct drm_display_mode *cur_mode = NULL;
1207
1208 dpu_enc = to_dpu_encoder_virt(drm_enc);
1209
1210 dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);
1211
1212 mutex_lock(&dpu_enc->enc_lock);
1213
1214 dpu_enc->commit_done_timedout = false;
1215
1216 dpu_enc->connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
1217
1218 cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
1219
1220 trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
1221 cur_mode->vdisplay);
1222
1223 /* always enable slave encoder before master */
1224 if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
1225 dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);
1226
1227 if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
1228 dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
1229
1230 ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
1231 if (ret) {
1232 DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
1233 ret);
1234 goto out;
1235 }
1236
1237 _dpu_encoder_virt_enable_helper(drm_enc);
1238
1239 dpu_enc->enabled = true;
1240
1241 out:
1242 mutex_unlock(&dpu_enc->enc_lock);
1243 }
1244
1245 static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
1246 struct drm_atomic_state *state)
1247 {
1248 struct dpu_encoder_virt *dpu_enc = NULL;
1249 struct drm_crtc *crtc;
1250 struct drm_crtc_state *old_state = NULL;
1251 int i = 0;
1252
1253 dpu_enc = to_dpu_encoder_virt(drm_enc);
1254 DPU_DEBUG_ENC(dpu_enc, "\n");
1255
1256 crtc = drm_atomic_get_old_crtc_for_encoder(state, drm_enc);
1257 if (crtc)
1258 old_state = drm_atomic_get_old_crtc_state(state, crtc);
1259
1260 /*
1261 * The encoder is already disabled if self refresh mode was set earlier
1262 * in the old_state of the corresponding crtc.
1263 */
1264 if (old_state && old_state->self_refresh_active)
1265 return;
1266
1267 mutex_lock(&dpu_enc->enc_lock);
1268 dpu_enc->enabled = false;
1269
1270 trace_dpu_enc_disable(DRMID(drm_enc));
1271
1272 /* wait for idle */
1273 dpu_encoder_wait_for_tx_complete(drm_enc);
1274
1275 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
1276
1277 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1278 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1279
1280 if (phys->ops.disable)
1281 phys->ops.disable(phys);
1282 }
1283
1284
1285 /* after phys waits for frame-done, should be no more frames pending */
1286 if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
1287 DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
1288 del_timer_sync(&dpu_enc->frame_done_timer);
1289 }
1290
1291 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
1292
1293 dpu_enc->connector = NULL;
1294
1295 DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
1296
1297 mutex_unlock(&dpu_enc->enc_lock);
1298 }
1299
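/*
 * Look up the hardware interface block matching the requested type and
 * controller id; writeback encoders have no INTF, so return NULL for them.
 */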
1300 static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog,
1301 struct dpu_rm *dpu_rm,
1302 enum dpu_intf_type type, u32 controller_id)
1303 {
1304 int i = 0;
1305
1306 if (type == INTF_WB)
1307 return NULL;
1308
1309 for (i = 0; i < catalog->intf_count; i++) {
1310 if (catalog->intf[i].type == type
1311 && catalog->intf[i].controller_id == controller_id) {
1312 return dpu_rm_get_intf(dpu_rm, catalog->intf[i].id);
1313 }
1314 }
1315
1316 return NULL;
1317 }
1318
1319 void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
1320 struct dpu_encoder_phys *phy_enc)
1321 {
1322 struct dpu_encoder_virt *dpu_enc = NULL;
1323 unsigned long lock_flags;
1324
1325 if (!drm_enc || !phy_enc)
1326 return;
1327
1328 DPU_ATRACE_BEGIN("encoder_vblank_callback");
1329 dpu_enc = to_dpu_encoder_virt(drm_enc);
1330
1331 atomic_inc(&phy_enc->vsync_cnt);
1332
1333 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1334 if (dpu_enc->crtc)
1335 dpu_crtc_vblank_callback(dpu_enc->crtc);
1336 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1337
1338 DPU_ATRACE_END("encoder_vblank_callback");
1339 }
1340
1341 void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
1342 struct dpu_encoder_phys *phy_enc)
1343 {
1344 if (!phy_enc)
1345 return;
1346
1347 DPU_ATRACE_BEGIN("encoder_underrun_callback");
1348 atomic_inc(&phy_enc->underrun_cnt);
1349
1350 /* trigger dump only on the first underrun */
1351 if (atomic_read(&phy_enc->underrun_cnt) == 1)
1352 msm_disp_snapshot_state(drm_enc->dev);
1353
1354 trace_dpu_enc_underrun_cb(DRMID(drm_enc),
1355 atomic_read(&phy_enc->underrun_cnt));
1356 DPU_ATRACE_END("encoder_underrun_callback");
1357 }
1358
1359 void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
1360 {
1361 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1362 unsigned long lock_flags;
1363
1364 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1365 /* crtc should always be cleared before re-assigning */
1366 WARN_ON(crtc && dpu_enc->crtc);
1367 dpu_enc->crtc = crtc;
1368 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1369 }
1370
1371 void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
1372 struct drm_crtc *crtc, bool enable)
1373 {
1374 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1375 unsigned long lock_flags;
1376 int i;
1377
1378 trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
1379
1380 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1381 if (dpu_enc->crtc != crtc) {
1382 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1383 return;
1384 }
1385 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1386
1387 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1388 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1389
1390 if (phys->ops.control_vblank_irq)
1391 phys->ops.control_vblank_irq(phys, enable);
1392 }
1393 }
1394
1395 void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
1396 void (*frame_event_cb)(void *, u32 event),
1397 void *frame_event_cb_data)
1398 {
1399 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1400 unsigned long lock_flags;
1401 bool enable;
1402
1403 enable = frame_event_cb ? true : false;
1404
1405 if (!drm_enc) {
1406 DPU_ERROR("invalid encoder\n");
1407 return;
1408 }
1409 trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
1410
1411 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1412 dpu_enc->crtc_frame_event_cb = frame_event_cb;
1413 dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
1414 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1415 }
1416
1417 void dpu_encoder_frame_done_callback(
1418 struct drm_encoder *drm_enc,
1419 struct dpu_encoder_phys *ready_phys, u32 event)
1420 {
1421 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1422 unsigned int i;
1423
1424 if (event & (DPU_ENCODER_FRAME_EVENT_DONE
1425 | DPU_ENCODER_FRAME_EVENT_ERROR
1426 | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
1427
1428 if (!dpu_enc->frame_busy_mask[0]) {
1429 /*
1430 * suppress frame_done without waiter,
1431 * likely autorefresh
1432 */
1433 trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
1434 dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
1435 ready_phys->hw_intf ? ready_phys->hw_intf->idx : -1,
1436 ready_phys->hw_wb ? ready_phys->hw_wb->idx : -1);
1437 return;
1438 }
1439
1440 /* One of the physical encoders has become idle */
1441 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1442 if (dpu_enc->phys_encs[i] == ready_phys) {
1443 trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
1444 dpu_enc->frame_busy_mask[0]);
1445 clear_bit(i, dpu_enc->frame_busy_mask);
1446 }
1447 }
1448
1449 if (!dpu_enc->frame_busy_mask[0]) {
1450 atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
1451 del_timer(&dpu_enc->frame_done_timer);
1452
1453 dpu_encoder_resource_control(drm_enc,
1454 DPU_ENC_RC_EVENT_FRAME_DONE);
1455
1456 if (dpu_enc->crtc_frame_event_cb)
1457 dpu_enc->crtc_frame_event_cb(
1458 dpu_enc->crtc_frame_event_cb_data,
1459 event);
1460 }
1461 } else {
1462 if (dpu_enc->crtc_frame_event_cb)
1463 dpu_enc->crtc_frame_event_cb(
1464 dpu_enc->crtc_frame_event_cb_data, event);
1465 }
1466 }
1467
1468 static void dpu_encoder_off_work(struct work_struct *work)
1469 {
1470 struct dpu_encoder_virt *dpu_enc = container_of(work,
1471 struct dpu_encoder_virt, delayed_off_work.work);
1472
1473 dpu_encoder_resource_control(&dpu_enc->base,
1474 DPU_ENC_RC_EVENT_ENTER_IDLE);
1475
1476 dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
1477 DPU_ENCODER_FRAME_EVENT_IDLE);
1478 }
1479
1480 /**
1481 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
1482 * @drm_enc: Pointer to drm encoder structure
1483 * @phys: Pointer to physical encoder structure
1484 * @extra_flush_bits: Additional bit mask to include in flush trigger
1485 */
1486 static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
1487 struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
1488 {
1489 struct dpu_hw_ctl *ctl;
1490 int pending_kickoff_cnt;
1491 u32 ret = UINT_MAX;
1492
1493 if (!phys->hw_pp) {
1494 DPU_ERROR("invalid pingpong hw\n");
1495 return;
1496 }
1497
1498 ctl = phys->hw_ctl;
1499 if (!ctl->ops.trigger_flush) {
1500 DPU_ERROR("missing trigger cb\n");
1501 return;
1502 }
1503
1504 pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
1505
1506 if (extra_flush_bits && ctl->ops.update_pending_flush)
1507 ctl->ops.update_pending_flush(ctl, extra_flush_bits);
1508
1509 ctl->ops.trigger_flush(ctl);
1510
1511 if (ctl->ops.get_pending_flush)
1512 ret = ctl->ops.get_pending_flush(ctl);
1513
1514 trace_dpu_enc_trigger_flush(DRMID(drm_enc),
1515 dpu_encoder_helper_get_intf_type(phys->intf_mode),
1516 phys->hw_intf ? phys->hw_intf->idx : -1,
1517 phys->hw_wb ? phys->hw_wb->idx : -1,
1518 pending_kickoff_cnt, ctl->idx,
1519 extra_flush_bits, ret);
1520 }
1521
1522 /**
1523 * _dpu_encoder_trigger_start - trigger start for a physical encoder
1524 * @phys: Pointer to physical encoder structure
1525 */
1526 static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
1527 {
1528 if (!phys) {
1529 DPU_ERROR("invalid argument(s)\n");
1530 return;
1531 }
1532
1533 if (!phys->hw_pp) {
1534 DPU_ERROR("invalid pingpong hw\n");
1535 return;
1536 }
1537
1538 if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
1539 phys->ops.trigger_start(phys);
1540 }
1541
1542 void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
1543 {
1544 struct dpu_hw_ctl *ctl;
1545
1546 ctl = phys_enc->hw_ctl;
1547 if (ctl->ops.trigger_start) {
1548 ctl->ops.trigger_start(ctl);
1549 trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
1550 }
1551 }
1552
1553 static int dpu_encoder_helper_wait_event_timeout(
1554 int32_t drm_id,
1555 u32 irq_idx,
1556 struct dpu_encoder_wait_info *info)
1557 {
1558 int rc = 0;
1559 s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
1560 s64 jiffies = msecs_to_jiffies(info->timeout_ms);
1561 s64 time;
1562
1563 do {
1564 rc = wait_event_timeout(*(info->wq),
1565 atomic_read(info->atomic_cnt) == 0, jiffies);
1566 time = ktime_to_ms(ktime_get());
1567
1568 trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time,
1569 expected_time,
1570 atomic_read(info->atomic_cnt));
1571 /* If we timed out, the count is still pending and time remains, so wait again */
1572 } while (atomic_read(info->atomic_cnt) && (rc == 0) &&
1573 (time < expected_time));
1574
1575 return rc;
1576 }
1577
1578 static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
1579 {
1580 struct dpu_encoder_virt *dpu_enc;
1581 struct dpu_hw_ctl *ctl;
1582 int rc;
1583 struct drm_encoder *drm_enc;
1584
1585 dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
1586 ctl = phys_enc->hw_ctl;
1587 drm_enc = phys_enc->parent;
1588
1589 if (!ctl->ops.reset)
1590 return;
1591
1592 DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
1593 ctl->idx);
1594
1595 rc = ctl->ops.reset(ctl);
1596 if (rc) {
1597 DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
1598 msm_disp_snapshot_state(drm_enc->dev);
1599 }
1600
1601 phys_enc->enable_state = DPU_ENC_ENABLED;
1602 }
1603
1604 /**
1605 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
1606 * Iterate through the physical encoders and perform consolidated flush
1607 * and/or control start triggering as needed. This is done in the virtual
1608 * encoder rather than the individual physical ones in order to handle
1609 * use cases that require visibility into multiple physical encoders at
1610 * a time.
1611 * @dpu_enc: Pointer to virtual encoder structure
1612 */
1613 static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
1614 {
1615 struct dpu_hw_ctl *ctl;
1616 uint32_t i, pending_flush;
1617 unsigned long lock_flags;
1618
1619 pending_flush = 0x0;
1620
1621 /* update pending counts and trigger kickoff ctl flush atomically */
1622 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1623
1624 /* don't perform flush/start operations for slave encoders */
1625 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1626 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1627
1628 if (phys->enable_state == DPU_ENC_DISABLED)
1629 continue;
1630
1631 ctl = phys->hw_ctl;
1632
1633 /*
1634 * This is cleared in frame_done worker, which isn't invoked
1635 * for async commits. So don't set this for async, since it'll
1636 * roll over to the next commit.
1637 */
1638 if (phys->split_role != ENC_ROLE_SLAVE)
1639 set_bit(i, dpu_enc->frame_busy_mask);
1640
1641 if (!phys->ops.needs_single_flush ||
1642 !phys->ops.needs_single_flush(phys))
1643 _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
1644 else if (ctl->ops.get_pending_flush)
1645 pending_flush |= ctl->ops.get_pending_flush(ctl);
1646 }
1647
1648 /* for split flush, combine pending flush masks and send to master */
1649 if (pending_flush && dpu_enc->cur_master) {
1650 _dpu_encoder_trigger_flush(
1651 &dpu_enc->base,
1652 dpu_enc->cur_master,
1653 pending_flush);
1654 }
1655
1656 _dpu_encoder_trigger_start(dpu_enc->cur_master);
1657
1658 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1659 }
1660
1661 void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
1662 {
1663 struct dpu_encoder_virt *dpu_enc;
1664 struct dpu_encoder_phys *phys;
1665 unsigned int i;
1666 struct dpu_hw_ctl *ctl;
1667 struct msm_display_info *disp_info;
1668
1669 if (!drm_enc) {
1670 DPU_ERROR("invalid encoder\n");
1671 return;
1672 }
1673 dpu_enc = to_dpu_encoder_virt(drm_enc);
1674 disp_info = &dpu_enc->disp_info;
1675
1676 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1677 phys = dpu_enc->phys_encs[i];
1678
1679 ctl = phys->hw_ctl;
1680 ctl->ops.clear_pending_flush(ctl);
1681
1682 /* update only for command mode primary ctl */
1683 if ((phys == dpu_enc->cur_master) &&
1684 disp_info->is_cmd_mode
1685 && ctl->ops.trigger_pending)
1686 ctl->ops.trigger_pending(ctl);
1687 }
1688 }
1689
1690 static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
1691 struct drm_display_mode *mode)
1692 {
1693 u64 pclk_rate;
1694 u32 pclk_period;
1695 u32 line_time;
1696
1697 /*
1698 * For linetime calculation, only operate on master encoder.
1699 */
1700 if (!dpu_enc->cur_master)
1701 return 0;
1702
1703 if (!dpu_enc->cur_master->ops.get_line_count) {
1704 DPU_ERROR("get_line_count function not defined\n");
1705 return 0;
1706 }
1707
1708 pclk_rate = mode->clock; /* pixel clock in kHz */
1709 if (pclk_rate == 0) {
1710 DPU_ERROR("pclk is 0, cannot calculate line time\n");
1711 return 0;
1712 }
1713
1714 pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
1715 if (pclk_period == 0) {
1716 DPU_ERROR("pclk period is 0\n");
1717 return 0;
1718 }
1719
1720 /*
1721 * Line time calculation based on Pixel clock and HTOTAL.
1722 * Final unit is in ns.
1723 */
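/*
 * Worked example (illustrative mode, not taken from a real panel):
 * clock = 148500 kHz and htotal = 2200 give
 * pclk_period = DIV_ROUND_UP(10^9, 148500) = 6735 (effectively picoseconds,
 * since pclk_rate is in kHz), so
 * line_time = 6735 * 2200 / 1000 = 14817 ns per line.
 */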
1724 line_time = (pclk_period * mode->htotal) / 1000;
1725 if (line_time == 0) {
1726 DPU_ERROR("line time calculation is 0\n");
1727 return 0;
1728 }
1729
1730 DPU_DEBUG_ENC(dpu_enc,
1731 "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
1732 pclk_rate, pclk_period, line_time);
1733
1734 return line_time;
1735 }
1736
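/*
 * Estimate the next vsync wakeup time for the master encoder: read the
 * current scanline, convert the remaining lines (or a full frame if the
 * counter is already past vtotal) to nanoseconds using the per-line time,
 * and add that to the current ktime.
 */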
1737 int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
1738 {
1739 struct drm_display_mode *mode;
1740 struct dpu_encoder_virt *dpu_enc;
1741 u32 cur_line;
1742 u32 line_time;
1743 u32 vtotal, time_to_vsync;
1744 ktime_t cur_time;
1745
1746 dpu_enc = to_dpu_encoder_virt(drm_enc);
1747
1748 if (!drm_enc->crtc || !drm_enc->crtc->state) {
1749 DPU_ERROR("crtc/crtc state object is NULL\n");
1750 return -EINVAL;
1751 }
1752 mode = &drm_enc->crtc->state->adjusted_mode;
1753
1754 line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
1755 if (!line_time)
1756 return -EINVAL;
1757
1758 cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
1759
1760 vtotal = mode->vtotal;
1761 if (cur_line >= vtotal)
1762 time_to_vsync = line_time * vtotal;
1763 else
1764 time_to_vsync = line_time * (vtotal - cur_line);
1765
1766 if (time_to_vsync == 0) {
1767 DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
1768 vtotal);
1769 return -EINVAL;
1770 }
1771
1772 cur_time = ktime_get();
1773 *wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
1774
1775 DPU_DEBUG_ENC(dpu_enc,
1776 "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
1777 cur_line, vtotal, time_to_vsync,
1778 ktime_to_ms(cur_time),
1779 ktime_to_ms(*wakeup_time));
1780 return 0;
1781 }
1782
1783 static u32
1784 dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
1785 u32 enc_ip_width)
1786 {
1787 int ssm_delay, total_pixels, soft_slice_per_enc;
1788
1789 soft_slice_per_enc = enc_ip_width / dsc->slice_width;
1790
1791 /*
1792 * minimum number of initial line pixels is a sum of:
1793 * 1. sub-stream multiplexer delay (83 groups for 8bpc,
1794 * 91 for 10 bpc) * 3
1795 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
1796 * 3. the initial xmit delay
1797 * 4. total pipeline delay through the "lock step" of encoder (47)
1798 * 5. 6 additional pixels as the output of the rate buffer is
1799 * 48 bits wide
1800 */
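/*
 * Example with illustrative DSC parameters (not from a specific config):
 * 8 bpc gives ssm_delay = 84; with initial_xmit_delay = 512,
 * slice_width = 540 and one soft slice per encoder,
 * total_pixels = 84 * 3 + 512 + 47 = 811 and
 * initial_lines = DIV_ROUND_UP(811, 540) = 2.
 */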
1801 ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92);
1802 total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
1803 if (soft_slice_per_enc > 1)
1804 total_pixels += (ssm_delay * 3);
1805 return DIV_ROUND_UP(total_pixels, dsc->slice_width);
1806 }
1807
1808 static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl,
1809 struct dpu_hw_dsc *hw_dsc,
1810 struct dpu_hw_pingpong *hw_pp,
1811 struct drm_dsc_config *dsc,
1812 u32 common_mode,
1813 u32 initial_lines)
1814 {
1815 if (hw_dsc->ops.dsc_config)
1816 hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);
1817
1818 if (hw_dsc->ops.dsc_config_thresh)
1819 hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);
1820
1821 if (hw_pp->ops.setup_dsc)
1822 hw_pp->ops.setup_dsc(hw_pp);
1823
1824 if (hw_dsc->ops.dsc_bind_pingpong_blk)
1825 hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, hw_pp->idx);
1826
1827 if (hw_pp->ops.enable_dsc)
1828 hw_pp->ops.enable_dsc(hw_pp);
1829
1830 if (ctl->ops.update_pending_flush_dsc)
1831 ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
1832 }
1833
1834 static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
1835 struct drm_dsc_config *dsc)
1836 {
1837 	/* currently coded only for the 2 LM, 2 encoder, 1 DSC configuration */
1838 struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
1839 struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
1840 struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
1841 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
1842 int this_frame_slices;
1843 int intf_ip_w, enc_ip_w;
1844 int dsc_common_mode;
1845 int pic_width;
1846 u32 initial_lines;
1847 int i;
1848
1849 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
1850 hw_pp[i] = dpu_enc->hw_pp[i];
1851 hw_dsc[i] = dpu_enc->hw_dsc[i];
1852
1853 if (!hw_pp[i] || !hw_dsc[i]) {
1854 DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
1855 return;
1856 }
1857 }
1858
1859 dsc_common_mode = 0;
1860 pic_width = dsc->pic_width;
1861
1862 dsc_common_mode = DSC_MODE_SPLIT_PANEL;
1863 if (dpu_encoder_use_dsc_merge(enc_master->parent))
1864 dsc_common_mode |= DSC_MODE_MULTIPLEX;
1865 if (enc_master->intf_mode == INTF_MODE_VIDEO)
1866 dsc_common_mode |= DSC_MODE_VIDEO;
1867
1868 this_frame_slices = pic_width / dsc->slice_width;
1869 intf_ip_w = this_frame_slices * dsc->slice_width;
1870
1871 /*
1872 	 * DSC merge case: when two encoders are used for the same stream,
1873 	 * the number of slices must be the same on both encoders.
1874 */
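/*
 * e.g. (illustrative numbers) pic_width = 1080 with slice_width = 540 gives
 * this_frame_slices = 2 and intf_ip_w = 1080, so each of the two encoders
 * compresses enc_ip_w = 540 pixels per line.
 */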
1875 enc_ip_w = intf_ip_w / 2;
1876 initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
1877
1878 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
1879 dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i],
1880 dsc, dsc_common_mode, initial_lines);
1881 }
1882
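/*
 * Let each physical encoder prepare for the upcoming kickoff (possibly
 * waiting for the previous one to finish), raise the KICKOFF resource-control
 * event, reset every phys encoder if any of them flagged a needed HW reset,
 * and program DSC when a DSC config is attached.
 */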
1883 void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
1884 {
1885 struct dpu_encoder_virt *dpu_enc;
1886 struct dpu_encoder_phys *phys;
1887 bool needs_hw_reset = false;
1888 unsigned int i;
1889
1890 dpu_enc = to_dpu_encoder_virt(drm_enc);
1891
1892 trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
1893
1894 /* prepare for next kickoff, may include waiting on previous kickoff */
1895 DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
1896 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1897 phys = dpu_enc->phys_encs[i];
1898 if (phys->ops.prepare_for_kickoff)
1899 phys->ops.prepare_for_kickoff(phys);
1900 if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
1901 needs_hw_reset = true;
1902 }
1903 DPU_ATRACE_END("enc_prepare_for_kickoff");
1904
1905 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
1906
1907 /* if any phys needs reset, reset all phys, in-order */
1908 if (needs_hw_reset) {
1909 trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
1910 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1911 dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
1912 }
1913 }
1914
1915 if (dpu_enc->dsc)
1916 dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
1917 }
1918
1919 bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
1920 {
1921 struct dpu_encoder_virt *dpu_enc;
1922 unsigned int i;
1923 struct dpu_encoder_phys *phys;
1924
1925 dpu_enc = to_dpu_encoder_virt(drm_enc);
1926
1927 if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
1928 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1929 phys = dpu_enc->phys_encs[i];
1930 if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) {
1931 DPU_DEBUG("invalid FB not kicking off\n");
1932 return false;
1933 }
1934 }
1935 }
1936
1937 return true;
1938 }
1939
1940 void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
1941 {
1942 struct dpu_encoder_virt *dpu_enc;
1943 struct dpu_encoder_phys *phys;
1944 unsigned long timeout_ms;
1945 unsigned int i;
1946
1947 DPU_ATRACE_BEGIN("encoder_kickoff");
1948 dpu_enc = to_dpu_encoder_virt(drm_enc);
1949
1950 trace_dpu_enc_kickoff(DRMID(drm_enc));
1951
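	/*
	 * Arm the frame-done watchdog: the budget is a fixed number of frames
	 * converted to milliseconds, e.g. roughly 16.7 ms per frame at 60 Hz.
	 */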
1952 timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
1953 drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
1954
1955 atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
1956 mod_timer(&dpu_enc->frame_done_timer,
1957 jiffies + msecs_to_jiffies(timeout_ms));
1958
1959 /* All phys encs are ready to go, trigger the kickoff */
1960 _dpu_encoder_kickoff_phys(dpu_enc);
1961
1962 /* allow phys encs to handle any post-kickoff business */
1963 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1964 phys = dpu_enc->phys_encs[i];
1965 if (phys->ops.handle_post_kickoff)
1966 phys->ops.handle_post_kickoff(phys);
1967 }
1968
1969 DPU_ATRACE_END("encoder_kickoff");
1970 }
1971
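/*
 * Clear every blend stage on the mixers assigned to this encoder and mark
 * those mixers for flush, so the next commit starts from an empty layer-mixer
 * configuration.
 */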
1972 static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
1973 {
1974 struct dpu_hw_mixer_cfg mixer;
1975 int i, num_lm;
1976 struct dpu_global_state *global_state;
1977 struct dpu_hw_blk *hw_lm[2];
1978 struct dpu_hw_mixer *hw_mixer[2];
1979 struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
1980
1981 memset(&mixer, 0, sizeof(mixer));
1982
1983 /* reset all mixers for this encoder */
1984 if (phys_enc->hw_ctl->ops.clear_all_blendstages)
1985 phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
1986
1987 global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);
1988
1989 num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
1990 phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
1991
1992 for (i = 0; i < num_lm; i++) {
1993 hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
1994 if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
1995 phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
1996
1997 /* clear all blendstages */
1998 if (phys_enc->hw_ctl->ops.setup_blendstage)
1999 phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
2000 }
2001 }
2002
2003 static void dpu_encoder_dsc_pipe_clr(struct dpu_hw_ctl *ctl,
2004 struct dpu_hw_dsc *hw_dsc,
2005 struct dpu_hw_pingpong *hw_pp)
2006 {
2007 if (hw_dsc->ops.dsc_disable)
2008 hw_dsc->ops.dsc_disable(hw_dsc);
2009
2010 if (hw_pp->ops.disable_dsc)
2011 hw_pp->ops.disable_dsc(hw_pp);
2012
2013 if (hw_dsc->ops.dsc_bind_pingpong_blk)
2014 hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, PINGPONG_NONE);
2015
2016 if (ctl->ops.update_pending_flush_dsc)
2017 ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
2018 }
2019
2020 static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc)
2021 {
2022 	/* currently coded only for the 2 LM, 2 encoder, 1 DSC configuration */
2023 struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
2024 struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
2025 struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
2026 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
2027 int i;
2028
2029 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
2030 hw_pp[i] = dpu_enc->hw_pp[i];
2031 hw_dsc[i] = dpu_enc->hw_dsc[i];
2032
2033 if (hw_pp[i] && hw_dsc[i])
2034 dpu_encoder_dsc_pipe_clr(ctl, hw_dsc[i], hw_pp[i]);
2035 }
2036 }
2037
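/*
 * Tear down the datapath owned by this physical encoder: reset the CTL,
 * clear the mixers, detach WB or INTF blocks from their pingpong, undo any
 * merge-3D and DSC setup, reset the interface configuration and issue a
 * final flush/start so the hardware ends up in a clean state.
 */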
2038 void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
2039 {
2040 struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
2041 struct dpu_hw_intf_cfg intf_cfg = { 0 };
2042 int i;
2043 struct dpu_encoder_virt *dpu_enc;
2044
2045 dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
2046
2047 phys_enc->hw_ctl->ops.reset(ctl);
2048
2049 dpu_encoder_helper_reset_mixers(phys_enc);
2050
2051 /*
2052 * TODO: move the once-only operation like CTL flush/trigger
2053 * into dpu_encoder_virt_disable() and all operations which need
2054 * to be done per phys encoder into the phys_disable() op.
2055 */
2056 if (phys_enc->hw_wb) {
2057 /* disable the PP block */
2058 if (phys_enc->hw_wb->ops.bind_pingpong_blk)
2059 phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE);
2060
2061 /* mark WB flush as pending */
2062 if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
2063 phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
2064 } else {
2065 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2066 if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
2067 phys_enc->hw_intf->ops.bind_pingpong_blk(
2068 dpu_enc->phys_encs[i]->hw_intf,
2069 PINGPONG_NONE);
2070
2071 /* mark INTF flush as pending */
2072 if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
2073 phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
2074 dpu_enc->phys_encs[i]->hw_intf->idx);
2075 }
2076 }
2077
2078 /* reset the merge 3D HW block */
2079 if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
2080 phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
2081 BLEND_3D_NONE);
2082 if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
2083 phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
2084 phys_enc->hw_pp->merge_3d->idx);
2085 }
2086
2087 if (dpu_enc->dsc) {
2088 dpu_encoder_unprep_dsc(dpu_enc);
2089 dpu_enc->dsc = NULL;
2090 }
2091
2092 intf_cfg.stream_sel = 0; /* Don't care value for video mode */
2093 intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
2094 intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
2095
2096 if (phys_enc->hw_intf)
2097 intf_cfg.intf = phys_enc->hw_intf->idx;
2098 if (phys_enc->hw_wb)
2099 intf_cfg.wb = phys_enc->hw_wb->idx;
2100
2101 if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d)
2102 intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
2103
2104 if (ctl->ops.reset_intf_cfg)
2105 ctl->ops.reset_intf_cfg(ctl, &intf_cfg);
2106
2107 ctl->ops.trigger_flush(ctl);
2108 ctl->ops.trigger_start(ctl);
2109 ctl->ops.clear_pending_flush(ctl);
2110 }
2111
2112 #ifdef CONFIG_DEBUG_FS
2113 static int _dpu_encoder_status_show(struct seq_file *s, void *data)
2114 {
2115 struct dpu_encoder_virt *dpu_enc = s->private;
2116 int i;
2117
2118 mutex_lock(&dpu_enc->enc_lock);
2119 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2120 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
2121
2122 seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ",
2123 phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1,
2124 phys->hw_wb ? phys->hw_wb->idx - WB_0 : -1,
2125 atomic_read(&phys->vsync_cnt),
2126 atomic_read(&phys->underrun_cnt));
2127
2128 seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
2129 }
2130 mutex_unlock(&dpu_enc->enc_lock);
2131
2132 return 0;
2133 }
2134
2135 DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);
2136
2137 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
2138 {
2139 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
2140
2141 char name[12];
2142
2143 if (!drm_enc->dev) {
2144 DPU_ERROR("invalid encoder or kms\n");
2145 return -EINVAL;
2146 }
2147
2148 snprintf(name, sizeof(name), "encoder%u", drm_enc->base.id);
2149
2150 /* create overall sub-directory for the encoder */
2151 dpu_enc->debugfs_root = debugfs_create_dir(name,
2152 drm_enc->dev->primary->debugfs_root);
2153
2154 /* don't error check these */
2155 debugfs_create_file("status", 0600,
2156 dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);
2157
2158 return 0;
2159 }
2160 #else
2161 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
2162 {
2163 return 0;
2164 }
2165 #endif
2166
2167 static int dpu_encoder_late_register(struct drm_encoder *encoder)
2168 {
2169 return _dpu_encoder_init_debugfs(encoder);
2170 }
2171
2172 static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
2173 {
2174 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
2175
2176 debugfs_remove_recursive(dpu_enc->debugfs_root);
2177 }
2178
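/*
 * Create the physical encoder backing one hardware tile: a writeback,
 * command-mode or video-mode phys encoder depending on the display info,
 * and record it as the current master or slave according to its split role.
 */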
2179 static int dpu_encoder_virt_add_phys_encs(
2180 struct drm_device *dev,
2181 struct msm_display_info *disp_info,
2182 struct dpu_encoder_virt *dpu_enc,
2183 struct dpu_enc_phys_init_params *params)
2184 {
2185 struct dpu_encoder_phys *enc = NULL;
2186
2187 DPU_DEBUG_ENC(dpu_enc, "\n");
2188
2189 /*
2190 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
2191 * in this function, check up-front.
2192 */
2193 if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
2194 ARRAY_SIZE(dpu_enc->phys_encs)) {
2195 DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
2196 dpu_enc->num_phys_encs);
2197 return -EINVAL;
2198 }
2199
2200
2201 if (disp_info->intf_type == INTF_WB) {
2202 enc = dpu_encoder_phys_wb_init(dev, params);
2203
2204 if (IS_ERR(enc)) {
2205 DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
2206 PTR_ERR(enc));
2207 return PTR_ERR(enc);
2208 }
2209
2210 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
2211 ++dpu_enc->num_phys_encs;
2212 } else if (disp_info->is_cmd_mode) {
2213 enc = dpu_encoder_phys_cmd_init(dev, params);
2214
2215 if (IS_ERR(enc)) {
2216 DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
2217 PTR_ERR(enc));
2218 return PTR_ERR(enc);
2219 }
2220
2221 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
2222 ++dpu_enc->num_phys_encs;
2223 } else {
2224 enc = dpu_encoder_phys_vid_init(dev, params);
2225
2226 if (IS_ERR(enc)) {
2227 DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
2228 PTR_ERR(enc));
2229 return PTR_ERR(enc);
2230 }
2231
2232 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
2233 ++dpu_enc->num_phys_encs;
2234 }
2235
2236 if (params->split_role == ENC_ROLE_SLAVE)
2237 dpu_enc->cur_slave = enc;
2238 else
2239 dpu_enc->cur_master = enc;
2240
2241 return 0;
2242 }
2243
2244 static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
2245 struct dpu_kms *dpu_kms,
2246 struct msm_display_info *disp_info)
2247 {
2248 int ret = 0;
2249 int i = 0;
2250 struct dpu_enc_phys_init_params phys_params;
2251
2252 if (!dpu_enc) {
2253 DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
2254 return -EINVAL;
2255 }
2256
2257 dpu_enc->cur_master = NULL;
2258
2259 memset(&phys_params, 0, sizeof(phys_params));
2260 phys_params.dpu_kms = dpu_kms;
2261 phys_params.parent = &dpu_enc->base;
2262 phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
2263
2264 WARN_ON(disp_info->num_of_h_tiles < 1);
2265
2266 DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
2267
2268 if (disp_info->intf_type != INTF_WB)
2269 dpu_enc->idle_pc_supported =
2270 dpu_kms->catalog->caps->has_idle_pc;
2271
2272 mutex_lock(&dpu_enc->enc_lock);
2273 for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
2274 /*
2275 * Left-most tile is at index 0, content is controller id
2276 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
2277 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
2278 */
2279 u32 controller_id = disp_info->h_tile_instance[i];
2280
2281 if (disp_info->num_of_h_tiles > 1) {
2282 if (i == 0)
2283 phys_params.split_role = ENC_ROLE_MASTER;
2284 else
2285 phys_params.split_role = ENC_ROLE_SLAVE;
2286 } else {
2287 phys_params.split_role = ENC_ROLE_SOLO;
2288 }
2289
2290 DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
2291 i, controller_id, phys_params.split_role);
2292
2293 phys_params.hw_intf = dpu_encoder_get_intf(dpu_kms->catalog, &dpu_kms->rm,
2294 disp_info->intf_type,
2295 controller_id);
2296
2297 if (disp_info->intf_type == INTF_WB && controller_id < WB_MAX)
2298 phys_params.hw_wb = dpu_rm_get_wb(&dpu_kms->rm, controller_id);
2299
2300 if (!phys_params.hw_intf && !phys_params.hw_wb) {
2301 DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i);
2302 ret = -EINVAL;
2303 break;
2304 }
2305
2306 if (phys_params.hw_intf && phys_params.hw_wb) {
2307 DPU_ERROR_ENC(dpu_enc,
2308 "invalid phys both intf and wb block at idx: %d\n", i);
2309 ret = -EINVAL;
2310 break;
2311 }
2312
2313 ret = dpu_encoder_virt_add_phys_encs(dpu_kms->dev, disp_info,
2314 dpu_enc, &phys_params);
2315 if (ret) {
2316 DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
2317 break;
2318 }
2319 }
2320
2321 mutex_unlock(&dpu_enc->enc_lock);
2322
2323 return ret;
2324 }
2325
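/*
 * Frame-done watchdog callback: if a frame is still outstanding and the
 * timeout value has not already been cleared, report a frame-event error to
 * the CRTC so it can recover.
 */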
2326 static void dpu_encoder_frame_done_timeout(struct timer_list *t)
2327 {
2328 struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
2329 frame_done_timer);
2330 struct drm_encoder *drm_enc = &dpu_enc->base;
2331 u32 event;
2332
2333 if (!drm_enc->dev) {
2334 DPU_ERROR("invalid parameters\n");
2335 return;
2336 }
2337
2338 if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
2339 DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
2340 DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
2341 return;
2342 } else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
2343 DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
2344 return;
2345 }
2346
2347 DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n");
2348
2349 event = DPU_ENCODER_FRAME_EVENT_ERROR;
2350 trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
2351 dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
2352 }
2353
2354 static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
2355 .atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
2356 .atomic_disable = dpu_encoder_virt_atomic_disable,
2357 .atomic_enable = dpu_encoder_virt_atomic_enable,
2358 .atomic_check = dpu_encoder_virt_atomic_check,
2359 };
2360
2361 static const struct drm_encoder_funcs dpu_encoder_funcs = {
2362 .destroy = dpu_encoder_destroy,
2363 .late_register = dpu_encoder_late_register,
2364 .early_unregister = dpu_encoder_early_unregister,
2365 };
2366
2367 struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
2368 int drm_enc_mode,
2369 struct msm_display_info *disp_info)
2370 {
2371 struct msm_drm_private *priv = dev->dev_private;
2372 struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
2373 struct drm_encoder *drm_enc = NULL;
2374 struct dpu_encoder_virt *dpu_enc = NULL;
2375 int ret = 0;
2376
2377 dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
2378 if (!dpu_enc)
2379 return ERR_PTR(-ENOMEM);
2380
2381 ret = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
2382 drm_enc_mode, NULL);
2383 if (ret) {
2384 devm_kfree(dev->dev, dpu_enc);
2385 return ERR_PTR(ret);
2386 }
2387
2388 drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
2389
2390 spin_lock_init(&dpu_enc->enc_spinlock);
2391 dpu_enc->enabled = false;
2392 mutex_init(&dpu_enc->enc_lock);
2393 mutex_init(&dpu_enc->rc_lock);
2394
2395 ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
2396 if (ret)
2397 goto fail;
2398
2399 atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
2400 timer_setup(&dpu_enc->frame_done_timer,
2401 dpu_encoder_frame_done_timeout, 0);
2402
2403 if (disp_info->intf_type == INTF_DP)
2404 dpu_enc->wide_bus_en = msm_dp_wide_bus_available(
2405 priv->dp[disp_info->h_tile_instance[0]]);
2406
2407 INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
2408 dpu_encoder_off_work);
2409 dpu_enc->idle_timeout = IDLE_TIMEOUT;
2410
2411 memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
2412
2413 DPU_DEBUG_ENC(dpu_enc, "created\n");
2414
2415 return &dpu_enc->base;
2416
2417 fail:
2418 DPU_ERROR("failed to create encoder\n");
2419 if (drm_enc)
2420 dpu_encoder_destroy(drm_enc);
2421
2422 return ERR_PTR(ret);
2423 }
2424
2425 /**
2426 * dpu_encoder_wait_for_commit_done() - Wait for encoder to flush pending state
2427 * @drm_enc: encoder pointer
2428 *
2429 * Wait for the hardware to have flushed the current pending changes at a
2430 * vblank or CTL_START. Physical encoders will map this differently depending
2431 * on the type: vid mode -> vsync_irq, cmd mode -> CTL_START.
2432 *
2433 * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
2434 */
2435 int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_enc)
2436 {
2437 struct dpu_encoder_virt *dpu_enc = NULL;
2438 int i, ret = 0;
2439
2440 if (!drm_enc) {
2441 DPU_ERROR("invalid encoder\n");
2442 return -EINVAL;
2443 }
2444 dpu_enc = to_dpu_encoder_virt(drm_enc);
2445 DPU_DEBUG_ENC(dpu_enc, "\n");
2446
2447 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2448 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
2449
2450 if (phys->ops.wait_for_commit_done) {
2451 DPU_ATRACE_BEGIN("wait_for_commit_done");
2452 ret = phys->ops.wait_for_commit_done(phys);
2453 DPU_ATRACE_END("wait_for_commit_done");
2454 if (ret == -ETIMEDOUT && !dpu_enc->commit_done_timedout) {
2455 dpu_enc->commit_done_timedout = true;
2456 msm_disp_snapshot_state(drm_enc->dev);
2457 }
2458 if (ret)
2459 return ret;
2460 }
2461 }
2462
2463 return ret;
2464 }
2465
2466 /**
2467 * dpu_encoder_wait_for_tx_complete() - Wait for encoder to transfer pixels to panel
2468 * @drm_enc: encoder pointer
2469 *
2470 * Wait for the hardware to transfer all the pixels to the panel. Physical
2471 * encoders will map this differently depending on the type: vid mode -> vsync_irq,
2472 * cmd mode -> pp_done.
2473 *
2474 * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
2475 */
2476 int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_enc)
2477 {
2478 struct dpu_encoder_virt *dpu_enc = NULL;
2479 int i, ret = 0;
2480
2481 if (!drm_enc) {
2482 DPU_ERROR("invalid encoder\n");
2483 return -EINVAL;
2484 }
2485 dpu_enc = to_dpu_encoder_virt(drm_enc);
2486 DPU_DEBUG_ENC(dpu_enc, "\n");
2487
2488 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2489 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
2490
2491 if (phys->ops.wait_for_tx_complete) {
2492 DPU_ATRACE_BEGIN("wait_for_tx_complete");
2493 ret = phys->ops.wait_for_tx_complete(phys);
2494 DPU_ATRACE_END("wait_for_tx_complete");
2495 if (ret)
2496 return ret;
2497 }
2498 }
2499
2500 return ret;
2501 }
2502
2503 enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
2504 {
2505 struct dpu_encoder_virt *dpu_enc = NULL;
2506
2507 if (!encoder) {
2508 DPU_ERROR("invalid encoder\n");
2509 return INTF_MODE_NONE;
2510 }
2511 dpu_enc = to_dpu_encoder_virt(encoder);
2512
2513 if (dpu_enc->cur_master)
2514 return dpu_enc->cur_master->intf_mode;
2515
2516 if (dpu_enc->num_phys_encs)
2517 return dpu_enc->phys_encs[0]->intf_mode;
2518
2519 return INTF_MODE_NONE;
2520 }
2521
2522 unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
2523 {
2524 struct drm_encoder *encoder = phys_enc->parent;
2525 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
2526
2527 return dpu_enc->dsc_mask;
2528 }
2529
2530 void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc,
2531 struct dpu_enc_phys_init_params *p)
2532 {
2533 int i;
2534
2535 phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
2536 phys_enc->hw_intf = p->hw_intf;
2537 phys_enc->hw_wb = p->hw_wb;
2538 phys_enc->parent = p->parent;
2539 phys_enc->dpu_kms = p->dpu_kms;
2540 phys_enc->split_role = p->split_role;
2541 phys_enc->enc_spinlock = p->enc_spinlock;
2542 phys_enc->enable_state = DPU_ENC_DISABLED;
2543
2544 for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
2545 phys_enc->irq[i] = -EINVAL;
2546
2547 atomic_set(&phys_enc->vblank_refcount, 0);
2548 atomic_set(&phys_enc->pending_kickoff_cnt, 0);
2549 atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
2550
2551 atomic_set(&phys_enc->vsync_cnt, 0);
2552 atomic_set(&phys_enc->underrun_cnt, 0);
2553
2554 init_waitqueue_head(&phys_enc->pending_kickoff_wq);
2555 }
2556