// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>

#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>

#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
#include "dpu_trace.h"
#include "dpu_core_irq.h"
#include "disp/msm_disp_snapshot.h"

#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_DEBUG_PHYS(p, fmt, ...) DRM_DEBUG_ATOMIC("enc%d intf%d pp%d " fmt,\
		(p) ? (p)->parent->base.id : -1, \
		(p) ? (p)->intf_idx - INTF_0 : -1, \
		(p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
		##__VA_ARGS__)

#define DPU_ERROR_PHYS(p, fmt, ...) DPU_ERROR("enc%d intf%d pp%d " fmt,\
		(p) ? (p)->parent->base.id : -1, \
		(p) ? (p)->intf_idx - INTF_0 : -1, \
		(p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
		##__VA_ARGS__)

/*
 * Two to anticipate panels that can do cmd/vid dynamic switching.
 * The plan is to create all possible physical encoder types and switch
 * between them at runtime.
 */
#define NUM_PHYS_ENCODER_TYPES 2

#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)

#define MAX_CHANNELS_PER_ENC 2

#define IDLE_SHORT_TIMEOUT	1

#define MAX_HDISPLAY_SPLIT 1080

/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5

/**
 * enum dpu_enc_rc_events - events for resource control state machine
 * @DPU_ENC_RC_EVENT_KICKOFF:
 *	This event happens at NORMAL priority.
 *	Event that signals the start of the transfer. When this event is
 *	received, enable MDP/DSI core clocks. Regardless of the previous
 *	state, the resource should be in ON state at the end of this event.
 * @DPU_ENC_RC_EVENT_FRAME_DONE:
 *	This event happens at INTERRUPT level.
 *	Event signals the end of the data transfer after the PP FRAME_DONE
 *	event. At the end of this event, a delayed work is scheduled to go to
 *	IDLE_PC state after IDLE_TIMEOUT time.
 * @DPU_ENC_RC_EVENT_PRE_STOP:
 *	This event happens at NORMAL priority.
 *	This event, when received during the ON state, leaves the RC state
 *	in the PRE_OFF state. It should be followed by the STOP event as
 *	part of encoder disable.
 *	If received during IDLE or OFF states, it will do nothing.
 * @DPU_ENC_RC_EVENT_STOP:
 *	This event happens at NORMAL priority.
 *	When this event is received, disable all the MDP/DSI core clocks, and
 *	disable IRQs. It should be called from the PRE_OFF or IDLE states.
 *	IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
 *	PRE_OFF is expected when PRE_STOP was executed during the ON state.
 *	Resource state should be in OFF at the end of the event.
 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
 *	This event happens at NORMAL priority from a work item.
 *	Event signals that there were no frame updates for IDLE_TIMEOUT time.
 *	This would disable MDP/DSI core clocks and change the resource state
 *	to IDLE.
 */
enum dpu_enc_rc_events {
	DPU_ENC_RC_EVENT_KICKOFF = 1,
	DPU_ENC_RC_EVENT_FRAME_DONE,
	DPU_ENC_RC_EVENT_PRE_STOP,
	DPU_ENC_RC_EVENT_STOP,
	DPU_ENC_RC_EVENT_ENTER_IDLE
};
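
/*
 * Typical sequence, for illustration: a commit raises KICKOFF (resources go
 * ON), the pingpong FRAME_DONE interrupt raises FRAME_DONE (which schedules
 * the delayed-off work), and if no new frame arrives within idle_timeout the
 * work raises ENTER_IDLE. Encoder disable raises PRE_STOP followed by STOP.
 */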

/*
 * enum dpu_enc_rc_states - states that the resource control maintains
 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
 */
enum dpu_enc_rc_states {
	DPU_ENC_RC_STATE_OFF,
	DPU_ENC_RC_STATE_PRE_OFF,
	DPU_ENC_RC_STATE_ON,
	DPU_ENC_RC_STATE_IDLE
};

/**
 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
 *	encoders. Virtual encoder manages one "logical" display. Physical
 *	encoders manage one intf block, tied to a specific panel/sub-panel.
 *	Virtual encoder defers as much as possible to the physical encoders.
 *	Virtual encoder registers itself with the DRM Framework as the encoder.
 * @base:		drm_encoder base class for registration with DRM
 * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
 * @bus_scaling_client:	Client handle to the bus scaling interface
 * @enabled:		True if the encoder is active, protected by enc_lock
 * @num_phys_encs:	Actual number of physical encoders contained.
 * @phys_encs:		Container of physical encoders managed.
 * @cur_master:		Pointer to the current master in this mode. Optimization:
 *			only valid after enable, cleared at disable.
 * @cur_slave:		As above but for the slave encoder.
 * @hw_pp:		Handle to the pingpong blocks used for the display. The
 *			number of pingpong blocks can differ from num_phys_encs.
 * @intfs_swapped:	Whether or not the phys_enc interfaces have been swapped
 *			for partial update right-only cases, such as pingpong
 *			split where virtual pingpong does not generate IRQs
 * @crtc:		Pointer to the currently assigned crtc. Normally you
 *			would use crtc->state->encoder_mask to determine the
 *			link between encoder/crtc. However in this case we need
 *			to track crtc in the disable() hook which is called
 *			_after_ encoder_mask is cleared.
 * @crtc_kickoff_cb:		Callback into CRTC that will flush & start
 *				all CTL paths
 * @crtc_kickoff_cb_data:	Opaque user data given to crtc_kickoff_cb
 * @debugfs_root:		Debug file system root file node
 * @enc_lock:			Lock around physical encoder
 *				create/destroy/enable/disable
 * @frame_busy_mask:		Bitmask tracking which phys_enc we are still
 *				busy processing current command.
 *				Bit0 = phys_encs[0] etc.
 * @crtc_frame_event_cb:	callback handler for frame event
 * @crtc_frame_event_cb_data:	callback handler private data
 * @frame_done_timeout_ms:	frame done timeout in ms
 * @frame_done_timer:		watchdog timer for frame done event
 * @vsync_event_timer:		vsync timer
 * @disp_info:			local copy of msm_display_info struct
 * @idle_pc_supported:		indicate if idle power collapse is supported
 * @rc_lock:			resource control mutex lock to protect
 *				virt encoder over various state changes
 * @rc_state:			resource controller state
 * @delayed_off_work:		delayed worker to schedule disabling of
 *				clks and resources after IDLE_TIMEOUT time.
 * @vsync_event_work:		worker to handle vsync event for autorefresh
 * @topology:                   topology of the display
 * @idle_timeout:		idle timeout duration in milliseconds
 * @dp:				msm_dp pointer, for DP encoders
 */
struct dpu_encoder_virt {
	struct drm_encoder base;
	spinlock_t enc_spinlock;
	uint32_t bus_scaling_client;

	bool enabled;

	unsigned int num_phys_encs;
	struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
	struct dpu_encoder_phys *cur_master;
	struct dpu_encoder_phys *cur_slave;
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];

	bool intfs_swapped;

	struct drm_crtc *crtc;

	struct dentry *debugfs_root;
	struct mutex enc_lock;
	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
	void (*crtc_frame_event_cb)(void *, u32 event);
	void *crtc_frame_event_cb_data;

	atomic_t frame_done_timeout_ms;
	struct timer_list frame_done_timer;
	struct timer_list vsync_event_timer;

	struct msm_display_info disp_info;

	bool idle_pc_supported;
	struct mutex rc_lock;
	enum dpu_enc_rc_states rc_state;
	struct delayed_work delayed_off_work;
	struct kthread_work vsync_event_work;
	struct msm_display_topology topology;

	u32 idle_timeout;

	struct msm_dp *dp;
};

#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)

static u32 dither_matrix[DITHER_MATRIX_SZ] = {
	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
};

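/*
 * Program the ordered-dither matrix on a pingpong block. Dithering is only
 * enabled for 6 bpc panels; for any other depth it is explicitly disabled
 * by passing a NULL config to the setup_dither op.
 */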
static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
{
	struct dpu_hw_dither_cfg dither_cfg = { 0 };

	if (!hw_pp->ops.setup_dither)
		return;

	switch (bpc) {
	case 6:
		dither_cfg.c0_bitdepth = 6;
		dither_cfg.c1_bitdepth = 6;
		dither_cfg.c2_bitdepth = 6;
		dither_cfg.c3_bitdepth = 6;
		dither_cfg.temporal_en = 0;
		break;
	default:
		hw_pp->ops.setup_dither(hw_pp, NULL);
		return;
	}

	memcpy(&dither_cfg.matrix, dither_matrix,
			sizeof(u32) * DITHER_MATRIX_SZ);

	hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
}

void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
		enum dpu_intr_idx intr_idx)
{
	DRM_ERROR("irq timeout id=%u, intf=%d, pp=%d, intr=%d\n",
		  DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
		  phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);

	if (phys_enc->parent_ops->handle_frame_done)
		phys_enc->parent_ops->handle_frame_done(
				phys_enc->parent, phys_enc,
				DPU_ENCODER_FRAME_EVENT_ERROR);
}

static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
		u32 irq_idx, struct dpu_encoder_wait_info *info);

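/*
 * Wait for the interrupt tracked by @wait_info to signal. Returns 0 on
 * success (including the recovery path where the IRQ status shows the
 * interrupt fired but the callback was missed, in which case the callback
 * is invoked by hand), -EINVAL on bad parameters, -EWOULDBLOCK if the
 * encoder is disabled, and -ETIMEDOUT if the interrupt never arrived.
 */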
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
		enum dpu_intr_idx intr_idx,
		struct dpu_encoder_wait_info *wait_info)
{
	struct dpu_encoder_irq *irq;
	u32 irq_status;
	int ret;

	if (!wait_info || intr_idx >= INTR_IDX_MAX) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}
	irq = &phys_enc->irq[intr_idx];

	/* note: do master / slave checking outside */

	/* return EWOULDBLOCK since we know the wait isn't necessary */
	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
		DRM_ERROR("encoder is disabled id=%u, intr=%d, irq=%d\n",
			  DRMID(phys_enc->parent), intr_idx,
			  irq->irq_idx);
		return -EWOULDBLOCK;
	}

	if (irq->irq_idx < 0) {
		DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, irq=%s\n",
			      DRMID(phys_enc->parent), intr_idx,
			      irq->name);
		return 0;
	}

	DRM_DEBUG_KMS("id=%u, intr=%d, irq=%d, pp=%d, pending_cnt=%d\n",
		      DRMID(phys_enc->parent), intr_idx,
		      irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
		      atomic_read(wait_info->atomic_cnt));

	ret = dpu_encoder_helper_wait_event_timeout(
			DRMID(phys_enc->parent),
			irq->irq_idx,
			wait_info);

	if (ret <= 0) {
		irq_status = dpu_core_irq_read(phys_enc->dpu_kms,
				irq->irq_idx, true);
		if (irq_status) {
			unsigned long flags;

			DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d\n",
				      DRMID(phys_enc->parent), intr_idx,
				      irq->irq_idx,
				      phys_enc->hw_pp->idx - PINGPONG_0,
				      atomic_read(wait_info->atomic_cnt));
			local_irq_save(flags);
			irq->cb.func(phys_enc, irq->irq_idx);
			local_irq_restore(flags);
			ret = 0;
		} else {
			ret = -ETIMEDOUT;
			DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d\n",
				      DRMID(phys_enc->parent), intr_idx,
				      irq->irq_idx,
				      phys_enc->hw_pp->idx - PINGPONG_0,
				      atomic_read(wait_info->atomic_cnt));
		}
	} else {
		ret = 0;
		trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
			intr_idx, irq->irq_idx,
			phys_enc->hw_pp->idx - PINGPONG_0,
			atomic_read(wait_info->atomic_cnt));
	}

	return ret;
}

int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
		enum dpu_intr_idx intr_idx)
{
	struct dpu_encoder_irq *irq;
	int ret = 0;

	if (intr_idx >= INTR_IDX_MAX) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}
	irq = &phys_enc->irq[intr_idx];

	if (irq->irq_idx < 0) {
		DPU_ERROR_PHYS(phys_enc,
			"invalid IRQ index:%d\n", irq->irq_idx);
		return -EINVAL;
	}

	ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, irq->irq_idx,
			&irq->cb);
	if (ret) {
		DPU_ERROR_PHYS(phys_enc,
			"failed to register IRQ callback for %s\n",
			irq->name);
		irq->irq_idx = -EINVAL;
		return ret;
	}

	trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
				irq->irq_idx);

	return ret;
}

int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
		enum dpu_intr_idx intr_idx)
{
	struct dpu_encoder_irq *irq;
	int ret;

	irq = &phys_enc->irq[intr_idx];

	/* silently skip irqs that weren't registered */
	if (irq->irq_idx < 0) {
		DRM_ERROR("duplicate unregister id=%u, intr=%d, irq=%d",
			  DRMID(phys_enc->parent), intr_idx,
			  irq->irq_idx);
		return 0;
	}

	ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
			&irq->cb);
	if (ret) {
		DRM_ERROR("unreg cb fail id=%u, intr=%d, irq=%d ret=%d",
			  DRMID(phys_enc->parent), intr_idx,
			  irq->irq_idx, ret);
	}

	trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx,
					     irq->irq_idx);

	return 0;
}

int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;

	return phys ? atomic_read(&phys->vsync_cnt) : 0;
}

int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	int linecount = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	phys = dpu_enc ? dpu_enc->cur_master : NULL;

	if (phys && phys->ops.get_line_count)
		linecount = phys->ops.get_line_count(phys);

	return linecount;
}

void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
				  struct dpu_encoder_hw_resources *hw_res)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	/* Query resources used by phys encs, expected to be without overlap */
	memset(hw_res, 0, sizeof(*hw_res));

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.get_hw_resources)
			phys->ops.get_hw_resources(phys, hw_res);
	}
}

static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i = 0;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	mutex_lock(&dpu_enc->enc_lock);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.destroy) {
			phys->ops.destroy(phys);
			--dpu_enc->num_phys_encs;
			dpu_enc->phys_encs[i] = NULL;
		}
	}

	if (dpu_enc->num_phys_encs)
		DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
				dpu_enc->num_phys_encs);
	dpu_enc->num_phys_encs = 0;
	mutex_unlock(&dpu_enc->enc_lock);

	drm_encoder_cleanup(drm_enc);
	mutex_destroy(&dpu_enc->enc_lock);
}

void dpu_encoder_helper_split_config(
		struct dpu_encoder_phys *phys_enc,
		enum dpu_intf interface)
{
	struct dpu_encoder_virt *dpu_enc;
	struct split_pipe_cfg cfg = { 0 };
	struct dpu_hw_mdp *hw_mdptop;
	struct msm_display_info *disp_info;

	if (!phys_enc->hw_mdptop || !phys_enc->parent) {
		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
		return;
	}

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
	hw_mdptop = phys_enc->hw_mdptop;
	disp_info = &dpu_enc->disp_info;

	if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
		return;

	/*
	 * Disable split modes since the encoder will be operating as the only
	 * encoder, either for the entire use case (for example, single DSI),
	 * or for this frame in the case of left/right-only partial update.
	 */
	if (phys_enc->split_role == ENC_ROLE_SOLO) {
		if (hw_mdptop->ops.setup_split_pipe)
			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
		return;
	}

	cfg.en = true;
	cfg.mode = phys_enc->intf_mode;
	cfg.intf = interface;

	if (cfg.en && phys_enc->ops.needs_single_flush &&
			phys_enc->ops.needs_single_flush(phys_enc))
		cfg.split_flush_en = true;

	if (phys_enc->split_role == ENC_ROLE_MASTER) {
		DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);

		if (hw_mdptop->ops.setup_split_pipe)
			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
	}
}

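/*
 * Derive the datapath topology for the given mode: dual-interface displays
 * get 2 LMs; otherwise a second LM is added for stream merge only when the
 * catalog supports 3D merge and hdisplay exceeds MAX_HDISPLAY_SPLIT. DSPP
 * blocks are requested for DSI only, and only if enough of them exist.
 */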
static struct msm_display_topology dpu_encoder_get_topology(
			struct dpu_encoder_virt *dpu_enc,
			struct dpu_kms *dpu_kms,
			struct drm_display_mode *mode)
{
	struct msm_display_topology topology = {0};
	int i, intf_count = 0;

	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
		if (dpu_enc->phys_encs[i])
			intf_count++;

	/* Datapath topology selection
	 *
	 * Dual display
	 * 2 LM, 2 INTF ( Split display using 2 interfaces)
	 *
	 * Single display
	 * 1 LM, 1 INTF
	 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
	 *
	 * Adding color blocks only to primary interface if available in
	 * sufficient number
	 */
	if (intf_count == 2)
		topology.num_lm = 2;
	else if (!dpu_kms->catalog->caps->has_3d_merge)
		topology.num_lm = 1;
	else
		topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;

	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) {
		if (dpu_kms->catalog->dspp &&
			(dpu_kms->catalog->dspp_count >= topology.num_lm))
			topology.num_dspp = topology.num_lm;
	}

	topology.num_enc = 0;
	topology.num_intf = intf_count;

	return topology;
}

static int dpu_encoder_virt_atomic_check(
		struct drm_encoder *drm_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	const struct drm_display_mode *mode;
	struct drm_display_mode *adj_mode;
	struct msm_display_topology topology;
	struct dpu_global_state *global_state;
	int i = 0;
	int ret = 0;

	if (!drm_enc || !crtc_state || !conn_state) {
		DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
				drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
		return -EINVAL;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);
	mode = &crtc_state->mode;
	adj_mode = &crtc_state->adjusted_mode;
	global_state = dpu_kms_get_global_state(crtc_state->state);
	if (IS_ERR(global_state))
		return PTR_ERR(global_state);

	trace_dpu_enc_atomic_check(DRMID(drm_enc));

	/* perform atomic check on the first physical encoder (master) */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.atomic_check)
			ret = phys->ops.atomic_check(phys, crtc_state,
					conn_state);
		else if (phys->ops.mode_fixup)
			if (!phys->ops.mode_fixup(phys, mode, adj_mode))
				ret = -EINVAL;

		if (ret) {
			DPU_ERROR_ENC(dpu_enc,
					"mode unsupported, phys idx %d\n", i);
			break;
		}
	}

	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);

	/* Reserve dynamic resources now. */
	if (!ret) {
		/*
		 * Release and Allocate resources on every modeset
		 * Don't allocate when active is false.
		 */
		if (drm_atomic_crtc_needs_modeset(crtc_state)) {
			dpu_rm_release(global_state, drm_enc);

			if (!crtc_state->active_changed || crtc_state->active)
				ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
						drm_enc, crtc_state, topology);
		}
	}

	trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);

	return ret;
}

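/*
 * Point the TE/vsync source for command-mode panels at either the watchdog
 * timer or the default vsync GPIO, and tell the MDP top block which
 * pingpong blocks consume it. Video-mode interfaces do not need this.
 */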
static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
			struct msm_display_info *disp_info)
{
	struct dpu_vsync_source_cfg vsync_cfg = { 0 };
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_hw_mdp *hw_mdptop;
	struct drm_encoder *drm_enc;
	int i;

	if (!dpu_enc || !disp_info) {
		DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
					dpu_enc != NULL, disp_info != NULL);
		return;
	} else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
		DPU_ERROR("invalid num phys enc %d/%d\n",
				dpu_enc->num_phys_encs,
				(int) ARRAY_SIZE(dpu_enc->hw_pp));
		return;
	}

	drm_enc = &dpu_enc->base;
	/* these pointers are checked in virt_enable_helper */
	priv = drm_enc->dev->dev_private;

	dpu_kms = to_dpu_kms(priv->kms);
	hw_mdptop = dpu_kms->hw_mdp;
	if (!hw_mdptop) {
		DPU_ERROR("invalid mdptop\n");
		return;
	}

	if (hw_mdptop->ops.setup_vsync_source &&
			disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
		for (i = 0; i < dpu_enc->num_phys_encs; i++)
			vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;

		vsync_cfg.pp_count = dpu_enc->num_phys_encs;
		if (disp_info->is_te_using_watchdog_timer)
			vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
		else
			vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;

		hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
	}
}

static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.irq_control)
			phys->ops.irq_control(phys, enable);
	}
}

static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
		bool enable)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);

	if (!dpu_enc->cur_master) {
		DPU_ERROR("encoder master not set\n");
		return;
	}

	if (enable) {
		/* enable DPU core clks */
		pm_runtime_get_sync(&dpu_kms->pdev->dev);

		/* enable all the irq */
		_dpu_encoder_irq_control(drm_enc, true);

	} else {
		/* disable all the irq */
		_dpu_encoder_irq_control(drm_enc, false);

		/* disable DPU core clks */
		pm_runtime_put_sync(&dpu_kms->pdev->dev);
	}
}

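/*
 * Resource control state machine entry point. Callers hand in one of the
 * DPU_ENC_RC_EVENT_* values; state transitions and clock/IRQ bookkeeping
 * are serialized under rc_lock (except FRAME_DONE, which runs at interrupt
 * context and relies on the wait-for-idle done by KICKOFF/STOP instead).
 */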
static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
		u32 sw_event)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	bool is_vid_mode = false;

	if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
		DPU_ERROR("invalid parameters\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	is_vid_mode = dpu_enc->disp_info.capabilities &
						MSM_DISPLAY_CAP_VID_MODE;

	/*
	 * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
	 * STOP events and return early for other events (i.e. wb display).
	 */
	if (!dpu_enc->idle_pc_supported &&
			(sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
			sw_event != DPU_ENC_RC_EVENT_STOP &&
			sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
		return 0;

	trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
			 dpu_enc->rc_state, "begin");

	switch (sw_event) {
	case DPU_ENC_RC_EVENT_KICKOFF:
		/* cancel delayed off work, if any */
		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
					sw_event);

		mutex_lock(&dpu_enc->rc_lock);

		/* return if the resource control is already in ON state */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
			DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in ON state\n",
				      DRMID(drm_enc), sw_event);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		} else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
				dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
			DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in state %d\n",
				      DRMID(drm_enc), sw_event,
				      dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return -EINVAL;
		}

		if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
			_dpu_encoder_irq_control(drm_enc, true);
		else
			_dpu_encoder_resource_control_helper(drm_enc, true);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "kickoff");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_FRAME_DONE:
		/*
		 * The mutex lock is not used here as this event happens at
		 * interrupt context, and locking is not required as the other
		 * events like KICKOFF and STOP do a wait-for-idle before
		 * executing the resource control.
		 */
		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
			DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
				      DRMID(drm_enc), sw_event,
				      dpu_enc->rc_state);
			return -EINVAL;
		}

		/*
		 * schedule off work item only when there are no
		 * frames pending
		 */
		if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
			DRM_DEBUG_KMS("id:%d skip schedule work\n",
				      DRMID(drm_enc));
			return 0;
		}

		queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
				   msecs_to_jiffies(dpu_enc->idle_timeout));

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "frame done");
		break;

	case DPU_ENC_RC_EVENT_PRE_STOP:
		/* cancel delayed off work, if any */
		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
					sw_event);

		mutex_lock(&dpu_enc->rc_lock);

		if (is_vid_mode &&
			  dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
			_dpu_encoder_irq_control(drm_enc, true);
		}
		/* skip if is already OFF or IDLE, resources are off already */
		else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
				dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
			DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
				      DRMID(drm_enc), sw_event,
				      dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "pre stop");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_STOP:
		mutex_lock(&dpu_enc->rc_lock);

		/* return if the resource control is already in OFF state */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
			DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
				      DRMID(drm_enc), sw_event);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		} else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
			DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return -EINVAL;
		}

		/*
		 * Expect to arrive here only in the PRE_OFF or IDLE state;
		 * in the IDLE state the resources are already disabled.
		 */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
			_dpu_encoder_resource_control_helper(drm_enc, false);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "stop");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_ENTER_IDLE:
		mutex_lock(&dpu_enc->rc_lock);

		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
			DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		/*
		 * if we are in ON but a frame was just kicked off,
		 * ignore the IDLE event, it's probably a stale timer event
		 */
		if (dpu_enc->frame_busy_mask[0]) {
			DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		if (is_vid_mode)
			_dpu_encoder_irq_control(drm_enc, false);
		else
			_dpu_encoder_resource_control_helper(drm_enc, false);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "idle");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	default:
		DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
			  sw_event);
		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "error");
		break;
	}

	trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
			 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
			 "end");
	return 0;
}

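/*
 * Bind the hardware blocks (pingpong, CTL, mixer, DSPP, interface) that the
 * resource manager reserved during atomic check to this encoder's CRTC state
 * and physical encoders, then let each physical encoder run its own mode_set.
 */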
static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
				      struct drm_display_mode *mode,
				      struct drm_display_mode *adj_mode)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct list_head *connector_list;
	struct drm_connector *conn = NULL, *conn_iter;
	struct drm_crtc *drm_crtc;
	struct dpu_crtc_state *cstate;
	struct dpu_global_state *global_state;
	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
	int num_lm, num_ctl, num_pp;
	int i, j;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);
	connector_list = &dpu_kms->dev->mode_config.connector_list;

	global_state = dpu_kms_get_existing_global_state(dpu_kms);
	if (IS_ERR_OR_NULL(global_state)) {
		DPU_ERROR("Failed to get global state");
		return;
	}

	trace_dpu_enc_mode_set(DRMID(drm_enc));

	list_for_each_entry(conn_iter, connector_list, head)
		if (conn_iter->encoder == drm_enc)
			conn = conn_iter;

	if (!conn) {
		DPU_ERROR_ENC(dpu_enc, "failed to find attached connector\n");
		return;
	} else if (!conn->state) {
		DPU_ERROR_ENC(dpu_enc, "invalid connector state\n");
		return;
	}

	drm_for_each_crtc(drm_crtc, drm_enc->dev)
		if (drm_crtc->state->encoder_mask & drm_encoder_mask(drm_enc))
			break;

	/* Query resources that have been reserved in the atomic check step. */
	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
		ARRAY_SIZE(hw_pp));
	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
	num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
	dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
		ARRAY_SIZE(hw_dspp));

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
		dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
						: NULL;

	cstate = to_dpu_crtc_state(drm_crtc->state);

	for (i = 0; i < num_lm; i++) {
		int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);

		cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
		cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
		cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
	}

	cstate->num_mixers = num_lm;

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		int num_blk;
		struct dpu_hw_blk *hw_blk[MAX_CHANNELS_PER_ENC];
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!dpu_enc->hw_pp[i]) {
			DPU_ERROR_ENC(dpu_enc,
				"no pp block assigned at idx: %d\n", i);
			return;
		}

		if (!hw_ctl[i]) {
			DPU_ERROR_ENC(dpu_enc,
				"no ctl block assigned at idx: %d\n", i);
			return;
		}

		phys->hw_pp = dpu_enc->hw_pp[i];
		phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);

		num_blk = dpu_rm_get_assigned_resources(&dpu_kms->rm,
			global_state, drm_enc->base.id, DPU_HW_BLK_INTF,
			hw_blk, ARRAY_SIZE(hw_blk));
		for (j = 0; j < num_blk; j++) {
			struct dpu_hw_intf *hw_intf;

			hw_intf = to_dpu_hw_intf(hw_blk[j]);
			if (hw_intf->idx == phys->intf_idx)
				phys->hw_intf = hw_intf;
		}

		if (!phys->hw_intf) {
			DPU_ERROR_ENC(dpu_enc,
				      "no intf block assigned at idx: %d\n", i);
			return;
		}

		phys->connector = conn->state->connector;
		if (phys->ops.mode_set)
			phys->ops.mode_set(phys, mode, adj_mode);
	}
}

static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i;

	if (!drm_enc || !drm_enc->dev) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	if (!dpu_enc || !dpu_enc->cur_master) {
		DPU_ERROR("invalid dpu encoder/master\n");
		return;
	}

	if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
		dpu_enc->cur_master->hw_mdptop &&
		dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
		dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
			dpu_enc->cur_master->hw_mdptop);

	_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);

	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
			!WARN_ON(dpu_enc->num_phys_encs == 0)) {
		unsigned bpc = dpu_enc->phys_encs[0]->connector->display_info.bpc;

		for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
			if (!dpu_enc->hw_pp[i])
				continue;
			_dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
		}
	}
}

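/*
 * Runtime-PM resume hook: if the encoder was enabled when the device was
 * suspended, restore the slave and master physical encoders (slave first,
 * matching the enable order) and re-run the enable helper.
 */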
void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	mutex_lock(&dpu_enc->enc_lock);

	if (!dpu_enc->enabled)
		goto out;

	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
		dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
		dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);

	_dpu_encoder_virt_enable_helper(drm_enc);

out:
	mutex_unlock(&dpu_enc->enc_lock);
}

static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int ret = 0;
	struct msm_drm_private *priv;
	struct drm_display_mode *cur_mode = NULL;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	mutex_lock(&dpu_enc->enc_lock);
	cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
	priv = drm_enc->dev->dev_private;

	trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
			     cur_mode->vdisplay);

	/* always enable slave encoder before master */
	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
		dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);

	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
		dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);

	ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
	if (ret) {
		DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
				ret);
		goto out;
	}

	_dpu_encoder_virt_enable_helper(drm_enc);

	dpu_enc->enabled = true;

out:
	mutex_unlock(&dpu_enc->enc_lock);
}

static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	struct msm_drm_private *priv;
	int i = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	mutex_lock(&dpu_enc->enc_lock);
	dpu_enc->enabled = false;

	priv = drm_enc->dev->dev_private;

	trace_dpu_enc_disable(DRMID(drm_enc));

	/* wait for idle */
	dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.disable)
			phys->ops.disable(phys);
	}

	/* after phys waits for frame-done, should be no more frames pending */
	if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
		DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
		del_timer_sync(&dpu_enc->frame_done_timer);
	}

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		dpu_enc->phys_encs[i]->connector = NULL;
	}

	DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");

	mutex_unlock(&dpu_enc->enc_lock);
}

static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
		enum dpu_intf_type type, u32 controller_id)
{
	int i = 0;

	for (i = 0; i < catalog->intf_count; i++) {
		if (catalog->intf[i].type == type
		    && catalog->intf[i].controller_id == controller_id) {
			return catalog->intf[i].id;
		}
	}

	return INTF_MAX;
}

static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phy_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	unsigned long lock_flags;

	if (!drm_enc || !phy_enc)
		return;

	DPU_ATRACE_BEGIN("encoder_vblank_callback");
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	if (dpu_enc->crtc)
		dpu_crtc_vblank_callback(dpu_enc->crtc);
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);

	atomic_inc(&phy_enc->vsync_cnt);
	DPU_ATRACE_END("encoder_vblank_callback");
}

static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phy_enc)
{
	if (!phy_enc)
		return;

	DPU_ATRACE_BEGIN("encoder_underrun_callback");
	atomic_inc(&phy_enc->underrun_cnt);

	/* trigger dump only on the first underrun */
	if (atomic_read(&phy_enc->underrun_cnt) == 1)
		msm_disp_snapshot_state(drm_enc->dev);

	trace_dpu_enc_underrun_cb(DRMID(drm_enc),
				  atomic_read(&phy_enc->underrun_cnt));
	DPU_ATRACE_END("encoder_underrun_callback");
}

void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	/* crtc should always be cleared before re-assigning */
	WARN_ON(crtc && dpu_enc->crtc);
	dpu_enc->crtc = crtc;
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
					struct drm_crtc *crtc, bool enable)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;
	int i;

	trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	if (dpu_enc->crtc != crtc) {
		spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
		return;
	}
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.control_vblank_irq)
			phys->ops.control_vblank_irq(phys, enable);
	}
}

void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
		void (*frame_event_cb)(void *, u32 event),
		void *frame_event_cb_data)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;
	bool enable;

	enable = frame_event_cb ? true : false;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	dpu_enc->crtc_frame_event_cb = frame_event_cb;
	dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

static void dpu_encoder_frame_done_callback(
		struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *ready_phys, u32 event)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned int i;

	if (event & (DPU_ENCODER_FRAME_EVENT_DONE
			| DPU_ENCODER_FRAME_EVENT_ERROR
			| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (!dpu_enc->frame_busy_mask[0]) {
			/*
			 * suppress frame_done without waiter,
			 * likely autorefresh
			 */
			trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc),
					event, ready_phys->intf_idx);
			return;
		}

		/* One of the physical encoders has become idle */
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			if (dpu_enc->phys_encs[i] == ready_phys) {
				trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
						dpu_enc->frame_busy_mask[0]);
				clear_bit(i, dpu_enc->frame_busy_mask);
			}
		}

		if (!dpu_enc->frame_busy_mask[0]) {
			atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
			del_timer(&dpu_enc->frame_done_timer);

			dpu_encoder_resource_control(drm_enc,
					DPU_ENC_RC_EVENT_FRAME_DONE);

			if (dpu_enc->crtc_frame_event_cb)
				dpu_enc->crtc_frame_event_cb(
					dpu_enc->crtc_frame_event_cb_data,
					event);
		}
	} else {
		if (dpu_enc->crtc_frame_event_cb)
			dpu_enc->crtc_frame_event_cb(
				dpu_enc->crtc_frame_event_cb_data, event);
	}
}

static void dpu_encoder_off_work(struct work_struct *work)
{
	struct dpu_encoder_virt *dpu_enc = container_of(work,
			struct dpu_encoder_virt, delayed_off_work.work);

	dpu_encoder_resource_control(&dpu_enc->base,
						DPU_ENC_RC_EVENT_ENTER_IDLE);

	dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
				DPU_ENCODER_FRAME_EVENT_IDLE);
}

/**
 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
 * @drm_enc: Pointer to drm encoder structure
 * @phys: Pointer to physical encoder structure
 * @extra_flush_bits: Additional bit mask to include in flush trigger
 */
static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
{
	struct dpu_hw_ctl *ctl;
	int pending_kickoff_cnt;
	u32 ret = UINT_MAX;

	if (!phys->hw_pp) {
		DPU_ERROR("invalid pingpong hw\n");
		return;
	}

	ctl = phys->hw_ctl;
	if (!ctl->ops.trigger_flush) {
		DPU_ERROR("missing trigger cb\n");
		return;
	}

	pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);

	if (extra_flush_bits && ctl->ops.update_pending_flush)
		ctl->ops.update_pending_flush(ctl, extra_flush_bits);

	ctl->ops.trigger_flush(ctl);

	if (ctl->ops.get_pending_flush)
		ret = ctl->ops.get_pending_flush(ctl);

	trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
				    pending_kickoff_cnt, ctl->idx,
				    extra_flush_bits, ret);
}

/**
 * _dpu_encoder_trigger_start - trigger start for a physical encoder
 * @phys: Pointer to physical encoder structure
 */
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
	if (!phys) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	if (!phys->hw_pp) {
		DPU_ERROR("invalid pingpong hw\n");
		return;
	}

	if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
		phys->ops.trigger_start(phys);
}

void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;

	ctl = phys_enc->hw_ctl;
	if (ctl->ops.trigger_start) {
		ctl->ops.trigger_start(ctl);
		trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
	}
}

static int dpu_encoder_helper_wait_event_timeout(
		int32_t drm_id,
		u32 irq_idx,
		struct dpu_encoder_wait_info *info)
{
	int rc = 0;
	s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
	s64 jiffies = msecs_to_jiffies(info->timeout_ms);
	s64 time;

	do {
		rc = wait_event_timeout(*(info->wq),
				atomic_read(info->atomic_cnt) == 0, jiffies);
		time = ktime_to_ms(ktime_get());

		trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time,
						 expected_time,
						 atomic_read(info->atomic_cnt));
	/* If we timed out, counter is valid and time is less, wait again */
	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
			(time < expected_time));

	return rc;
}

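/*
 * Reset the CTL path for a physical encoder after an error. A failed reset
 * captures a display snapshot for debugging; on return the encoder is
 * unconditionally marked enabled again.
 */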
static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_hw_ctl *ctl;
	int rc;
	struct drm_encoder *drm_enc;

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
	ctl = phys_enc->hw_ctl;
	drm_enc = phys_enc->parent;

	if (!ctl->ops.reset)
		return;

	DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
		      ctl->idx);

	rc = ctl->ops.reset(ctl);
	if (rc) {
		DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
		msm_disp_snapshot_state(drm_enc->dev);
	}

	phys_enc->enable_state = DPU_ENC_ENABLED;
}

/**
 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
 *	Iterate through the physical encoders and perform consolidated flush
 *	and/or control start triggering as needed. This is done in the virtual
 *	encoder rather than the individual physical ones in order to handle
 *	use cases that require visibility into multiple physical encoders at
 *	a time.
 * @dpu_enc: Pointer to virtual encoder structure
 */
static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
{
	struct dpu_hw_ctl *ctl;
	uint32_t i, pending_flush;
	unsigned long lock_flags;

	pending_flush = 0x0;

	/* update pending counts and trigger kickoff ctl flush atomically */
	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);

	/* don't perform flush/start operations for slave encoders */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->enable_state == DPU_ENC_DISABLED)
			continue;

		ctl = phys->hw_ctl;

		/*
		 * This is cleared in frame_done worker, which isn't invoked
		 * for async commits. So don't set this for async, since it'll
		 * roll over to the next commit.
		 */
		if (phys->split_role != ENC_ROLE_SLAVE)
			set_bit(i, dpu_enc->frame_busy_mask);

		if (!phys->ops.needs_single_flush ||
				!phys->ops.needs_single_flush(phys))
			_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
		else if (ctl->ops.get_pending_flush)
			pending_flush |= ctl->ops.get_pending_flush(ctl);
	}

	/* for split flush, combine pending flush masks and send to master */
	if (pending_flush && dpu_enc->cur_master) {
		_dpu_encoder_trigger_flush(
				&dpu_enc->base,
				dpu_enc->cur_master,
				pending_flush);
	}

	_dpu_encoder_trigger_start(dpu_enc->cur_master);

	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	unsigned int i;
	struct dpu_hw_ctl *ctl;
	struct msm_display_info *disp_info;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	disp_info = &dpu_enc->disp_info;

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];

		ctl = phys->hw_ctl;
		if (ctl->ops.clear_pending_flush)
			ctl->ops.clear_pending_flush(ctl);

		/* update only for command mode primary ctl */
		if ((phys == dpu_enc->cur_master) &&
		   (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
		    && ctl->ops.trigger_pending)
			ctl->ops.trigger_pending(ctl);
	}
}

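/*
 * Worked example: a 1920x1080@60 mode with a 148500 kHz pixel clock and an
 * htotal of 2200 gives pclk_period = DIV_ROUND_UP(10^9, 148500) = 6735 ps,
 * so line_time = 6735 * 2200 / 1000 = 14817 ns, within rounding of the
 * exact 2200 / 148.5 MHz = 14.815 us.
 */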
static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
		struct drm_display_mode *mode)
{
	u64 pclk_rate;
	u32 pclk_period;
	u32 line_time;

	/*
	 * For linetime calculation, only operate on master encoder.
	 */
	if (!dpu_enc->cur_master)
		return 0;

	if (!dpu_enc->cur_master->ops.get_line_count) {
		DPU_ERROR("get_line_count function not defined\n");
		return 0;
	}

	pclk_rate = mode->clock; /* pixel clock in kHz */
	if (pclk_rate == 0) {
		DPU_ERROR("pclk is 0, cannot calculate line time\n");
		return 0;
	}

	pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
	if (pclk_period == 0) {
		DPU_ERROR("pclk period is 0\n");
		return 0;
	}

	/*
	 * Line time calculation based on Pixel clock and HTOTAL.
	 * Final unit is in ns.
	 */
	line_time = (pclk_period * mode->htotal) / 1000;
	if (line_time == 0) {
		DPU_ERROR("line time calculation is 0\n");
		return 0;
	}

	DPU_DEBUG_ENC(dpu_enc,
			"clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
			pclk_rate, pclk_period, line_time);

	return line_time;
}

int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
{
	struct drm_display_mode *mode;
	struct dpu_encoder_virt *dpu_enc;
	u32 cur_line;
	u32 line_time;
	u32 vtotal, time_to_vsync;
	ktime_t cur_time;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	if (!drm_enc->crtc || !drm_enc->crtc->state) {
		DPU_ERROR("crtc/crtc state object is NULL\n");
		return -EINVAL;
	}
	mode = &drm_enc->crtc->state->adjusted_mode;

	line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
	if (!line_time)
		return -EINVAL;

	cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);

	vtotal = mode->vtotal;
	if (cur_line >= vtotal)
		time_to_vsync = line_time * vtotal;
	else
		time_to_vsync = line_time * (vtotal - cur_line);

	if (time_to_vsync == 0) {
		DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
				vtotal);
		return -EINVAL;
	}

	cur_time = ktime_get();
	*wakeup_time = ktime_add_ns(cur_time, time_to_vsync);

	DPU_DEBUG_ENC(dpu_enc,
			"cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
			cur_line, vtotal, time_to_vsync,
			ktime_to_ms(cur_time),
			ktime_to_ms(*wakeup_time));
	return 0;
}

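/*
 * Timer callback for the autorefresh wakeup: validate that the encoder
 * still has a device, crtc and event thread, then disarm the timer. The
 * timer is re-armed from the vsync event work with the next computed
 * wakeup time.
 */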
1699 static void dpu_encoder_vsync_event_handler(struct timer_list *t)
1700 {
1701 	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
1702 			vsync_event_timer);
1703 	struct drm_encoder *drm_enc = &dpu_enc->base;
1704 	struct msm_drm_private *priv;
1705 	struct msm_drm_thread *event_thread;
1706 
1707 	if (!drm_enc->dev || !drm_enc->crtc) {
1708 		DPU_ERROR("invalid parameters\n");
1709 		return;
1710 	}
1711 
1712 	priv = drm_enc->dev->dev_private;
1713 
1714 	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
1715 		DPU_ERROR("invalid crtc index\n");
1716 		return;
1717 	}
1718 	event_thread = &priv->event_thread[drm_enc->crtc->index];
1719 	if (!event_thread) {
1720 		DPU_ERROR("event_thread not found for crtc:%d\n",
1721 				drm_enc->crtc->index);
1722 		return;
1723 	}
1724 
1725 	del_timer(&dpu_enc->vsync_event_timer);
1726 }
1727 
1728 static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
1729 {
1730 	struct dpu_encoder_virt *dpu_enc = container_of(work,
1731 			struct dpu_encoder_virt, vsync_event_work);
1732 	ktime_t wakeup_time;
1733 
1734 	if (dpu_encoder_vsync_time(&dpu_enc->base, &wakeup_time))
1735 		return;
1736 
1737 	trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
1738 	mod_timer(&dpu_enc->vsync_event_timer,
1739 			nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
1740 }
1741 
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	bool needs_hw_reset = false;
	unsigned int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));

	/* prepare for next kickoff, may include waiting on previous kickoff */
	DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys->ops.prepare_for_kickoff)
			phys->ops.prepare_for_kickoff(phys);
		if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
			needs_hw_reset = true;
	}
	DPU_ATRACE_END("enc_prepare_for_kickoff");

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);

	/* if any phys needs reset, reset all phys, in-order */
	if (needs_hw_reset) {
		trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
		for (i = 0; i < dpu_enc->num_phys_encs; i++)
			dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
	}
}

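/**
 * dpu_encoder_kickoff - trigger the display update for this frame
 * @drm_enc: encoder pointer
 *
 * Arms the frame-done watchdog for DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES
 * frames at the current refresh rate, triggers the kickoff on all physical
 * encoders, runs their post-kickoff handlers and, for DSI, re-arms the
 * vsync event timer for the estimated next vsync.
 */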
void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	ktime_t wakeup_time;
	unsigned long timeout_ms;
	unsigned int i;

	DPU_ATRACE_BEGIN("encoder_kickoff");
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	trace_dpu_enc_kickoff(DRMID(drm_enc));

	timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
			drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);

	atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
	mod_timer(&dpu_enc->frame_done_timer,
			jiffies + msecs_to_jiffies(timeout_ms));

	/* All phys encs are ready to go, trigger the kickoff */
	_dpu_encoder_kickoff_phys(dpu_enc);

	/* allow phys encs to handle any post-kickoff business */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys->ops.handle_post_kickoff)
			phys->ops.handle_post_kickoff(phys);
	}

	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
			!dpu_encoder_vsync_time(drm_enc, &wakeup_time)) {
		trace_dpu_enc_early_kickoff(DRMID(drm_enc),
					    ktime_to_ms(wakeup_time));
		mod_timer(&dpu_enc->vsync_event_timer,
				nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
	}

	DPU_ATRACE_END("encoder_kickoff");
}

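/**
 * dpu_encoder_prepare_commit - run the encoder's prepare_commit hooks
 * @drm_enc: encoder pointer
 *
 * Gives each physical encoder a chance to do per-commit preparation before
 * the atomic commit is applied.
 */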
void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys->ops.prepare_commit)
			phys->ops.prepare_commit(phys);
	}
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_encoder_status_show(struct seq_file *s, void *data)
{
	struct dpu_encoder_virt *dpu_enc = s->private;
	int i;

	mutex_lock(&dpu_enc->enc_lock);
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		seq_printf(s, "intf:%d    vsync:%8d     underrun:%8d    ",
				phys->intf_idx - INTF_0,
				atomic_read(&phys->vsync_cnt),
				atomic_read(&phys->underrun_cnt));

		switch (phys->intf_mode) {
		case INTF_MODE_VIDEO:
			seq_puts(s, "mode: video\n");
			break;
		case INTF_MODE_CMD:
			seq_puts(s, "mode: command\n");
			break;
		default:
			seq_puts(s, "mode: ???\n");
			break;
		}
	}
	mutex_unlock(&dpu_enc->enc_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);

static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	char name[DPU_NAME_SIZE];
	int i;

	if (!drm_enc->dev) {
		DPU_ERROR("invalid encoder device\n");
		return -EINVAL;
	}

	snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);

	/* create overall sub-directory for the encoder */
	dpu_enc->debugfs_root = debugfs_create_dir(name,
			drm_enc->dev->primary->debugfs_root);

	/* don't error check these */
	debugfs_create_file("status", 0600,
		dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);

	for (i = 0; i < dpu_enc->num_phys_encs; i++)
		if (dpu_enc->phys_encs[i]->ops.late_register)
			dpu_enc->phys_encs[i]->ops.late_register(
					dpu_enc->phys_encs[i],
					dpu_enc->debugfs_root);

	return 0;
}
#else
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
	return 0;
}
#endif

static int dpu_encoder_late_register(struct drm_encoder *encoder)
{
	return _dpu_encoder_init_debugfs(encoder);
}

static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);

	debugfs_remove_recursive(dpu_enc->debugfs_root);
}

1917 
1918 static int dpu_encoder_virt_add_phys_encs(
1919 		u32 display_caps,
1920 		struct dpu_encoder_virt *dpu_enc,
1921 		struct dpu_enc_phys_init_params *params)
1922 {
1923 	struct dpu_encoder_phys *enc = NULL;
1924 
1925 	DPU_DEBUG_ENC(dpu_enc, "\n");
1926 
1927 	/*
1928 	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
1929 	 * in this function, check up-front.
1930 	 */
1931 	if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
1932 			ARRAY_SIZE(dpu_enc->phys_encs)) {
1933 		DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
1934 			  dpu_enc->num_phys_encs);
1935 		return -EINVAL;
1936 	}
1937 
1938 	if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
1939 		enc = dpu_encoder_phys_vid_init(params);
1940 
1941 		if (IS_ERR_OR_NULL(enc)) {
1942 			DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
1943 				PTR_ERR(enc));
1944 			return enc == NULL ? -EINVAL : PTR_ERR(enc);
1945 		}
1946 
1947 		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
1948 		++dpu_enc->num_phys_encs;
1949 	}
1950 
1951 	if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
1952 		enc = dpu_encoder_phys_cmd_init(params);
1953 
1954 		if (IS_ERR_OR_NULL(enc)) {
1955 			DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
1956 				PTR_ERR(enc));
1957 			return enc == NULL ? -EINVAL : PTR_ERR(enc);
1958 		}
1959 
1960 		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
1961 		++dpu_enc->num_phys_encs;
1962 	}
1963 
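	/*
	 * The last physical encoder created above represents this h-tile;
	 * record it as the virtual encoder's master or slave depending on
	 * the tile's split role.
	 */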
	if (params->split_role == ENC_ROLE_SLAVE)
		dpu_enc->cur_slave = enc;
	else
		dpu_enc->cur_master = enc;

	return 0;
}

static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
	.handle_vblank_virt = dpu_encoder_vblank_callback,
	.handle_underrun_virt = dpu_encoder_underrun_callback,
	.handle_frame_done = dpu_encoder_frame_done_callback,
};

1977 
1978 static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
1979 				 struct dpu_kms *dpu_kms,
1980 				 struct msm_display_info *disp_info)
1981 {
1982 	int ret = 0;
1983 	int i = 0;
1984 	enum dpu_intf_type intf_type = INTF_NONE;
1985 	struct dpu_enc_phys_init_params phys_params;
1986 
1987 	if (!dpu_enc) {
1988 		DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
1989 		return -EINVAL;
1990 	}
1991 
1992 	dpu_enc->cur_master = NULL;
1993 
1994 	memset(&phys_params, 0, sizeof(phys_params));
1995 	phys_params.dpu_kms = dpu_kms;
1996 	phys_params.parent = &dpu_enc->base;
1997 	phys_params.parent_ops = &dpu_encoder_parent_ops;
1998 	phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
1999 
2000 	switch (disp_info->intf_type) {
2001 	case DRM_MODE_ENCODER_DSI:
2002 		intf_type = INTF_DSI;
2003 		break;
2004 	case DRM_MODE_ENCODER_TMDS:
2005 		intf_type = INTF_DP;
2006 		break;
2007 	}
2008 
2009 	WARN_ON(disp_info->num_of_h_tiles < 1);
2010 
2011 	DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
2012 
2013 	if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
2014 	    (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
2015 		dpu_enc->idle_pc_supported =
2016 				dpu_kms->catalog->caps->has_idle_pc;
2017 
2018 	mutex_lock(&dpu_enc->enc_lock);
2019 	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
2020 		/*
2021 		 * Left-most tile is at index 0, content is controller id
2022 		 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
2023 		 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
2024 		 */
2025 		u32 controller_id = disp_info->h_tile_instance[i];
2026 
2027 		if (disp_info->num_of_h_tiles > 1) {
2028 			if (i == 0)
2029 				phys_params.split_role = ENC_ROLE_MASTER;
2030 			else
2031 				phys_params.split_role = ENC_ROLE_SLAVE;
2032 		} else {
2033 			phys_params.split_role = ENC_ROLE_SOLO;
2034 		}
2035 
2036 		DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
2037 				i, controller_id, phys_params.split_role);
2038 
2039 		phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
2040 													intf_type,
2041 													controller_id);
2042 		if (phys_params.intf_idx == INTF_MAX) {
2043 			DPU_ERROR_ENC(dpu_enc, "could not get intf: type %d, id %d\n",
2044 						  intf_type, controller_id);
2045 			ret = -EINVAL;
2046 		}
2047 
2048 		if (!ret) {
2049 			ret = dpu_encoder_virt_add_phys_encs(disp_info->capabilities,
2050 												 dpu_enc,
2051 												 &phys_params);
2052 			if (ret)
2053 				DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
2054 		}
2055 	}
2056 
2057 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2058 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
2059 		atomic_set(&phys->vsync_cnt, 0);
2060 		atomic_set(&phys->underrun_cnt, 0);
2061 	}
2062 	mutex_unlock(&dpu_enc->enc_lock);
2063 
2064 	return ret;
2065 }
2066 
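/*
 * Watchdog armed by dpu_encoder_kickoff(); if it fires while a frame is
 * still marked busy and the timeout has not already been cleared, report
 * DPU_ENCODER_FRAME_EVENT_ERROR to the crtc frame-event callback so the
 * commit machinery can recover.
 */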
static void dpu_encoder_frame_done_timeout(struct timer_list *t)
{
	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
			frame_done_timer);
	struct drm_encoder *drm_enc = &dpu_enc->base;
	u32 event;

	if (!drm_enc->dev) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
		DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
			      DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
		return;
	} else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
		DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
		return;
	}

	DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");

	event = DPU_ENCODER_FRAME_EVENT_ERROR;
	trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
	dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
}

static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
	.mode_set = dpu_encoder_virt_mode_set,
	.disable = dpu_encoder_virt_disable,
	.enable = dpu_encoder_virt_enable,
	.atomic_check = dpu_encoder_virt_atomic_check,
};

static const struct drm_encoder_funcs dpu_encoder_funcs = {
	.destroy = dpu_encoder_destroy,
	.late_register = dpu_encoder_late_register,
	.early_unregister = dpu_encoder_early_unregister,
};

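/**
 * dpu_encoder_setup - second-half setup of a previously initialized encoder
 * @dev: drm device pointer
 * @enc: encoder created by dpu_encoder_init()
 * @disp_info: display information for this encoder
 *
 * Creates the physical encoders for each horizontal tile and sets up the
 * frame-done timer, the delayed-off work and, for DSI, the vsync event
 * timer and work.
 *
 * Return: 0 on success, negative errno on failure.
 */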
int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
		struct msm_display_info *disp_info)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct dpu_encoder_virt *dpu_enc;
	int ret;

	dpu_enc = to_dpu_encoder_virt(enc);

	ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
	if (ret) {
		DPU_ERROR("failed to create encoder\n");
		return ret;
	}

	atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
	timer_setup(&dpu_enc->frame_done_timer,
			dpu_encoder_frame_done_timeout, 0);

	if (disp_info->intf_type == DRM_MODE_ENCODER_DSI)
		timer_setup(&dpu_enc->vsync_event_timer,
				dpu_encoder_vsync_event_handler,
				0);
	else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS)
		dpu_enc->dp = priv->dp[disp_info->h_tile_instance[0]];

	INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
			dpu_encoder_off_work);
	dpu_enc->idle_timeout = IDLE_TIMEOUT;

	kthread_init_work(&dpu_enc->vsync_event_work,
			dpu_encoder_vsync_event_work_handler);

	memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));

	DPU_DEBUG_ENC(dpu_enc, "created\n");

	return 0;
}

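/**
 * dpu_encoder_init - allocate and initialize a virtual encoder
 * @dev: drm device pointer
 * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* type
 *
 * Allocates the dpu_encoder_virt wrapper, registers it with the DRM core
 * and initializes its locks; the display-specific setup is done later by
 * dpu_encoder_setup().
 *
 * Return: the embedded drm_encoder on success, ERR_PTR() on failure.
 */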
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
		int drm_enc_mode)
{
	struct dpu_encoder_virt *dpu_enc;
	int rc;

	dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
	if (!dpu_enc)
		return ERR_PTR(-ENOMEM);

	rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
			drm_enc_mode, NULL);
	if (rc) {
		devm_kfree(dev->dev, dpu_enc);
		return ERR_PTR(rc);
	}

	drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);

	spin_lock_init(&dpu_enc->enc_spinlock);
	dpu_enc->enabled = false;
	mutex_init(&dpu_enc->enc_lock);
	mutex_init(&dpu_enc->rc_lock);

	return &dpu_enc->base;
}

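/**
 * dpu_encoder_wait_for_event - wait for an event on all physical encoders
 * @drm_enc: encoder pointer
 * @event: event to wait for: commit done, tx complete or vblank
 *
 * Dispatches to the matching wait_for_* op on each physical encoder and
 * stops at the first failure.
 *
 * Return: 0 on success, negative errno on failure or unknown event.
 */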
int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
	enum msm_event_wait event)
{
	int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
	struct dpu_encoder_virt *dpu_enc;
	int i, ret = 0;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		switch (event) {
		case MSM_ENC_COMMIT_DONE:
			fn_wait = phys->ops.wait_for_commit_done;
			break;
		case MSM_ENC_TX_COMPLETE:
			fn_wait = phys->ops.wait_for_tx_complete;
			break;
		case MSM_ENC_VBLANK:
			fn_wait = phys->ops.wait_for_vblank;
			break;
		default:
			DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
					event);
			return -EINVAL;
		}

		if (fn_wait) {
			DPU_ATRACE_BEGIN("wait_for_completion_event");
			ret = fn_wait(phys);
			DPU_ATRACE_END("wait_for_completion_event");
			if (ret)
				return ret;
		}
	}

	return ret;
}

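/**
 * dpu_encoder_get_intf_mode - get the interface mode of the encoder
 * @encoder: encoder pointer
 *
 * Return: the intf_mode of the current master, of the first physical
 * encoder if no master is set, or INTF_MODE_NONE.
 */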
enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
{
	struct dpu_encoder_virt *dpu_enc;

	if (!encoder) {
		DPU_ERROR("invalid encoder\n");
		return INTF_MODE_NONE;
	}
	dpu_enc = to_dpu_encoder_virt(encoder);

	if (dpu_enc->cur_master)
		return dpu_enc->cur_master->intf_mode;

	if (dpu_enc->num_phys_encs)
		return dpu_enc->phys_encs[0]->intf_mode;

	return INTF_MODE_NONE;
}
