// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>

#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>

#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_dsc.h"
#include "dpu_hw_merge3d.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
#include "dpu_trace.h"
#include "dpu_core_irq.h"
#include "disp/msm_disp_snapshot.h"

#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

/*
 * Two, to anticipate panels that can do cmd/vid dynamic switching;
 * the plan is to create all possible physical encoder types and
 * switch between them at runtime.
 */
#define NUM_PHYS_ENCODER_TYPES 2

#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)

#define MAX_CHANNELS_PER_ENC 2

#define IDLE_SHORT_TIMEOUT	1

#define MAX_HDISPLAY_SPLIT 1080

/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
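
/*
 * Illustrative arithmetic: at 60 fps, a 5-frame budget corresponds to
 * roughly 5 * 1000 / 60 ~= 83 ms before the frame-done watchdog timer is
 * treated as having expired.
 */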

/**
 * enum dpu_enc_rc_events - events for resource control state machine
 * @DPU_ENC_RC_EVENT_KICKOFF:
 *	This event happens at NORMAL priority.
 *	Event that signals the start of the transfer. When this event is
 *	received, enable MDP/DSI core clocks. Regardless of the previous
 *	state, the resource should be in ON state at the end of this event.
 * @DPU_ENC_RC_EVENT_FRAME_DONE:
 *	This event happens at INTERRUPT level.
 *	Event signals the end of the data transfer after the PP FRAME_DONE
 *	event. At the end of this event, a delayed work is scheduled to go to
 *	IDLE_PC state after IDLE_TIMEOUT time.
 * @DPU_ENC_RC_EVENT_PRE_STOP:
 *	This event happens at NORMAL priority.
 *	This event, when received during the ON state, leaves the RC state
 *	in the PRE_OFF state. It should be followed by the STOP event as
 *	part of encoder disable.
 *	If received during IDLE or OFF states, it will do nothing.
 * @DPU_ENC_RC_EVENT_STOP:
 *	This event happens at NORMAL priority.
 *	When this event is received, disable all the MDP/DSI core clocks, and
 *	disable IRQs. It should be called from the PRE_OFF or IDLE states.
 *	IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
 *	PRE_OFF is expected when PRE_STOP was executed during the ON state.
 *	Resource state should be in OFF at the end of the event.
 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
 *	This event happens at NORMAL priority from a work item.
 *	Event signals that there were no frame updates for IDLE_TIMEOUT time.
 *	This would disable MDP/DSI core clocks and change the resource state
 *	to IDLE.
 */
enum dpu_enc_rc_events {
	DPU_ENC_RC_EVENT_KICKOFF = 1,
	DPU_ENC_RC_EVENT_FRAME_DONE,
	DPU_ENC_RC_EVENT_PRE_STOP,
	DPU_ENC_RC_EVENT_STOP,
	DPU_ENC_RC_EVENT_ENTER_IDLE
};
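
/*
 * Typical event flow through the state machine (illustrative):
 *
 *   OFF --KICKOFF--> ON --PRE_STOP--> PRE_OFF --STOP--> OFF
 *   ON --ENTER_IDLE--> IDLE --KICKOFF--> ON
 *
 * FRAME_DONE never changes the state by itself; it only schedules the
 * delayed work that later raises ENTER_IDLE. When idle power collapse is
 * not supported, only the KICKOFF/PRE_STOP/STOP path is taken.
 */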

/**
 * enum dpu_enc_rc_states - states that the resource control maintains
 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
 */
enum dpu_enc_rc_states {
	DPU_ENC_RC_STATE_OFF,
	DPU_ENC_RC_STATE_PRE_OFF,
	DPU_ENC_RC_STATE_ON,
	DPU_ENC_RC_STATE_IDLE
};

/**
 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
 *	encoders. Virtual encoder manages one "logical" display. Physical
 *	encoders manage one intf block, tied to a specific panel/sub-panel.
 *	Virtual encoder defers as much as possible to the physical encoders.
 *	Virtual encoder registers itself with the DRM Framework as the encoder.
 * @base:		drm_encoder base class for registration with DRM
 * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
 * @enabled:		True if the encoder is active, protected by enc_lock
 * @num_phys_encs:	Actual number of physical encoders contained.
 * @phys_encs:		Container of physical encoders managed.
 * @cur_master:		Pointer to the current master in this mode. Optimization:
 *			only valid after enable; cleared at disable.
 * @cur_slave:		As above but for the slave encoder.
 * @hw_pp:		Handle to the pingpong blocks used for the display. The
 *			number of pingpong blocks can differ from num_phys_encs.
 * @hw_dsc:		Handle to the DSC blocks used for the display.
 * @dsc_mask:		Bitmask of used DSC blocks.
 * @intfs_swapped:	Whether or not the phys_enc interfaces have been swapped
 *			for partial update right-only cases, such as pingpong
 *			split where virtual pingpong does not generate IRQs
 * @crtc:		Pointer to the currently assigned crtc. Normally you
 *			would use crtc->state->encoder_mask to determine the
 *			link between encoder/crtc. However in this case we need
 *			to track crtc in the disable() hook which is called
 *			_after_ encoder_mask is cleared.
 * @connector:		If a mode is set, cached pointer to the active connector
 * @debugfs_root:		Debug file system root file node
 * @enc_lock:			Lock around physical encoder
 *				create/destroy/enable/disable
 * @frame_busy_mask:		Bitmask tracking which phys_enc we are still
 *				busy processing current command.
 *				Bit0 = phys_encs[0] etc.
 * @crtc_frame_event_cb:	callback handler for frame event
 * @crtc_frame_event_cb_data:	callback handler private data
 * @frame_done_timeout_ms:	frame done timeout in ms
 * @frame_done_timer:		watchdog timer for frame done event
 * @vsync_event_timer:		vsync timer
 * @disp_info:			local copy of msm_display_info struct
 * @idle_pc_supported:		indicate if idle power collapse is supported
 * @rc_lock:			resource control mutex lock to protect
 *				virt encoder over various state changes
 * @rc_state:			resource controller state
 * @delayed_off_work:		delayed worker to schedule disabling of
 *				clks and resources after IDLE_TIMEOUT time.
 * @vsync_event_work:		worker to handle vsync event for autorefresh
 * @topology:                   topology of the display
 * @idle_timeout:		idle timeout duration in milliseconds
 * @wide_bus_en:		indicates whether the wide bus is enabled
 * @dsc:			msm_display_dsc_config pointer, for DSC-enabled encoders
 */
struct dpu_encoder_virt {
	struct drm_encoder base;
	spinlock_t enc_spinlock;

	bool enabled;

	unsigned int num_phys_encs;
	struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
	struct dpu_encoder_phys *cur_master;
	struct dpu_encoder_phys *cur_slave;
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];

	unsigned int dsc_mask;

	bool intfs_swapped;

	struct drm_crtc *crtc;
	struct drm_connector *connector;

	struct dentry *debugfs_root;
	struct mutex enc_lock;
	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
	void (*crtc_frame_event_cb)(void *, u32 event);
	void *crtc_frame_event_cb_data;

	atomic_t frame_done_timeout_ms;
	struct timer_list frame_done_timer;
	struct timer_list vsync_event_timer;

	struct msm_display_info disp_info;

	bool idle_pc_supported;
	struct mutex rc_lock;
	enum dpu_enc_rc_states rc_state;
	struct delayed_work delayed_off_work;
	struct kthread_work vsync_event_work;
	struct msm_display_topology topology;

	u32 idle_timeout;

	bool wide_bus_en;

	/* DSC configuration */
	struct msm_display_dsc_config *dsc;
};

#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)

static const u32 dither_matrix[DITHER_MATRIX_SZ] = {
	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
};
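
/*
 * For readability, the table above viewed as a 4x4 dither threshold
 * matrix (row-major; illustrative layout only):
 *
 *	15  7 13  5
 *	 3 11  1  9
 *	12  4 14  6
 *	 0  8  2 10
 */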


bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
{
	const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	return dpu_enc->wide_bus_en;
}

static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
{
	struct dpu_hw_dither_cfg dither_cfg = { 0 };

	if (!hw_pp->ops.setup_dither)
		return;

	switch (bpc) {
	case 6:
		dither_cfg.c0_bitdepth = 6;
		dither_cfg.c1_bitdepth = 6;
		dither_cfg.c2_bitdepth = 6;
		dither_cfg.c3_bitdepth = 6;
		dither_cfg.temporal_en = 0;
		break;
	default:
		hw_pp->ops.setup_dither(hw_pp, NULL);
		return;
	}

	memcpy(&dither_cfg.matrix, dither_matrix,
			sizeof(u32) * DITHER_MATRIX_SZ);

	hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
}

static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
{
	switch (intf_mode) {
	case INTF_MODE_VIDEO:
		return "INTF_MODE_VIDEO";
	case INTF_MODE_CMD:
		return "INTF_MODE_CMD";
	case INTF_MODE_WB_BLOCK:
		return "INTF_MODE_WB_BLOCK";
	case INTF_MODE_WB_LINE:
		return "INTF_MODE_WB_LINE";
	default:
		return "INTF_MODE_UNKNOWN";
	}
}

void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
		enum dpu_intr_idx intr_idx)
{
	DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
			DRMID(phys_enc->parent),
			dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
			phys_enc->intf_idx - INTF_0, phys_enc->wb_idx - WB_0,
			phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);

	if (phys_enc->parent_ops->handle_frame_done)
		phys_enc->parent_ops->handle_frame_done(
				phys_enc->parent, phys_enc,
				DPU_ENCODER_FRAME_EVENT_ERROR);
}

static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
		u32 irq_idx, struct dpu_encoder_wait_info *info);

int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
		int irq,
		void (*func)(void *arg, int irq_idx),
		struct dpu_encoder_wait_info *wait_info)
{
	u32 irq_status;
	int ret;

	if (!wait_info) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}
	/* note: do master / slave checking outside */

	/* return EWOULDBLOCK since we know the wait isn't necessary */
	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
		DRM_ERROR("encoder is disabled id=%u, callback=%ps, irq=%d\n",
			  DRMID(phys_enc->parent), func,
			  irq);
		return -EWOULDBLOCK;
	}

	if (irq < 0) {
		DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
			      DRMID(phys_enc->parent), func);
		return 0;
	}

	DRM_DEBUG_KMS("id=%u, callback=%ps, irq=%d, pp=%d, pending_cnt=%d\n",
		      DRMID(phys_enc->parent), func,
		      irq, phys_enc->hw_pp->idx - PINGPONG_0,
		      atomic_read(wait_info->atomic_cnt));

	ret = dpu_encoder_helper_wait_event_timeout(
			DRMID(phys_enc->parent),
			irq,
			wait_info);

	if (ret <= 0) {
		irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq);
		if (irq_status) {
			unsigned long flags;

			DRM_DEBUG_KMS("irq not triggered id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
				      DRMID(phys_enc->parent), func,
				      irq,
				      phys_enc->hw_pp->idx - PINGPONG_0,
				      atomic_read(wait_info->atomic_cnt));
			local_irq_save(flags);
			func(phys_enc, irq);
			local_irq_restore(flags);
			ret = 0;
		} else {
			ret = -ETIMEDOUT;
			DRM_DEBUG_KMS("irq timeout id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
				      DRMID(phys_enc->parent), func,
				      irq,
				      phys_enc->hw_pp->idx - PINGPONG_0,
				      atomic_read(wait_info->atomic_cnt));
		}
	} else {
		ret = 0;
		trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
			func, irq,
			phys_enc->hw_pp->idx - PINGPONG_0,
			atomic_read(wait_info->atomic_cnt));
	}

	return ret;
}
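
/*
 * Usage sketch (hypothetical caller, names for illustration only): a
 * physical encoder typically wraps this helper with its own wait queue
 * and pending counter, e.g.
 *
 *	struct dpu_encoder_wait_info info = {
 *		.wq = &phys_enc->pending_kickoff_wq,
 *		.atomic_cnt = &phys_enc->pending_kickoff_cnt,
 *		.timeout_ms = KICKOFF_TIMEOUT_MS,
 *	};
 *
 *	ret = dpu_encoder_helper_wait_for_irq(phys_enc, irq, irq_cb, &info);
 *
 * The irq callback decrements atomic_cnt and wakes wq. On a timeout, the
 * helper re-reads the raw irq status and, if it is set, runs the callback
 * inline with interrupts disabled instead of returning -ETIMEDOUT.
 */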

int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;
	return phys ? atomic_read(&phys->vsync_cnt) : 0;
}

int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	int linecount = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	phys = dpu_enc ? dpu_enc->cur_master : NULL;

	if (phys && phys->ops.get_line_count)
		linecount = phys->ops.get_line_count(phys);

	return linecount;
}

static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i = 0;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	mutex_lock(&dpu_enc->enc_lock);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.destroy) {
			phys->ops.destroy(phys);
			--dpu_enc->num_phys_encs;
			dpu_enc->phys_encs[i] = NULL;
		}
	}

	if (dpu_enc->num_phys_encs)
		DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
				dpu_enc->num_phys_encs);
	dpu_enc->num_phys_encs = 0;
	mutex_unlock(&dpu_enc->enc_lock);

	drm_encoder_cleanup(drm_enc);
	mutex_destroy(&dpu_enc->enc_lock);
}

void dpu_encoder_helper_split_config(
		struct dpu_encoder_phys *phys_enc,
		enum dpu_intf interface)
{
	struct dpu_encoder_virt *dpu_enc;
	struct split_pipe_cfg cfg = { 0 };
	struct dpu_hw_mdp *hw_mdptop;
	struct msm_display_info *disp_info;

	if (!phys_enc->hw_mdptop || !phys_enc->parent) {
		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
		return;
	}

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
	hw_mdptop = phys_enc->hw_mdptop;
	disp_info = &dpu_enc->disp_info;

	if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
		return;

	/*
	 * disable split modes since encoder will be operating as the only
	 * encoder, either for the entire use case in the case of, for example,
	 * single DSI, or for this frame in the case of left/right only partial
	 * update.
	 */
	if (phys_enc->split_role == ENC_ROLE_SOLO) {
		if (hw_mdptop->ops.setup_split_pipe)
			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
		return;
	}

	cfg.en = true;
	cfg.mode = phys_enc->intf_mode;
	cfg.intf = interface;

	if (cfg.en && phys_enc->ops.needs_single_flush &&
			phys_enc->ops.needs_single_flush(phys_enc))
		cfg.split_flush_en = true;

	if (phys_enc->split_role == ENC_ROLE_MASTER) {
		DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);

		if (hw_mdptop->ops.setup_split_pipe)
			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
	}
}

bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int i, intf_count = 0, num_dsc = 0;

	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
		if (dpu_enc->phys_encs[i])
			intf_count++;

	/* See dpu_encoder_get_topology, we only support 2:2:1 topology */
	if (dpu_enc->dsc)
		num_dsc = 2;

	return (num_dsc > 0) && (num_dsc > intf_count);
}
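
/*
 * Worked example: with the fixed 2:2:1 DSC topology selected in
 * dpu_encoder_get_topology() below, a single-interface panel gives
 * num_dsc = 2 and intf_count = 1, so DSC merge is used. A dual-interface
 * split panel gives intf_count = 2, and no merge is needed.
 */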

static struct msm_display_topology dpu_encoder_get_topology(
			struct dpu_encoder_virt *dpu_enc,
			struct dpu_kms *dpu_kms,
			struct drm_display_mode *mode)
{
	struct msm_display_topology topology = {0};
	int i, intf_count = 0;

	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
		if (dpu_enc->phys_encs[i])
			intf_count++;

	/* Datapath topology selection
	 *
	 * Dual display
	 * 2 LM, 2 INTF (split display using 2 interfaces)
	 *
	 * Single display
	 * 1 LM, 1 INTF
	 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
	 *
	 * Adding color blocks only to primary interface if available in
	 * sufficient number
	 */
	if (intf_count == 2)
		topology.num_lm = 2;
	else if (!dpu_kms->catalog->caps->has_3d_merge)
		topology.num_lm = 1;
	else
		topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;

	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) {
		if (dpu_kms->catalog->dspp &&
			(dpu_kms->catalog->dspp_count >= topology.num_lm))
			topology.num_dspp = topology.num_lm;
	}

	topology.num_enc = 0;
	topology.num_intf = intf_count;

	if (dpu_enc->dsc) {
		/* In case of Display Stream Compression (DSC), we would use
		 * 2 encoders, 2 layer mixers and 1 interface; this is power
		 * optimal and can drive up to (including) 4k screens
		 */
		topology.num_enc = 2;
		topology.num_dsc = 2;
		topology.num_intf = 1;
		topology.num_lm = 2;
	}

	return topology;
}
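
/*
 * Worked examples (illustrative):
 * - dual-interface split display: 2 LM, 2 INTF regardless of mode;
 * - single 1080x2400 DSI panel: hdisplay 1080 <= MAX_HDISPLAY_SPLIT,
 *   so 1 LM, 1 INTF;
 * - single 1440x2560 panel on 3d-merge capable hardware: 1440 > 1080,
 *   so 2 LM, 1 INTF with stream merge;
 * - any DSC panel: forced to 2 enc, 2 DSC, 2 LM, 1 INTF.
 */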

static int dpu_encoder_virt_atomic_check(
		struct drm_encoder *drm_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct drm_display_mode *adj_mode;
	struct msm_display_topology topology;
	struct dpu_global_state *global_state;
	int i = 0;
	int ret = 0;

	if (!drm_enc || !crtc_state || !conn_state) {
		DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
				drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
		return -EINVAL;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);
	adj_mode = &crtc_state->adjusted_mode;
	global_state = dpu_kms_get_global_state(crtc_state->state);
	if (IS_ERR(global_state))
		return PTR_ERR(global_state);

	trace_dpu_enc_atomic_check(DRMID(drm_enc));

	/* perform atomic check on all physical encoders */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.atomic_check)
			ret = phys->ops.atomic_check(phys, crtc_state,
					conn_state);
		if (ret) {
			DPU_ERROR_ENC(dpu_enc,
					"mode unsupported, phys idx %d\n", i);
			break;
		}
	}

	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);

	/* Reserve dynamic resources now. */
	if (!ret) {
		/*
		 * Release and allocate resources on every modeset.
		 * Don't allocate when active is false.
		 */
		if (drm_atomic_crtc_needs_modeset(crtc_state)) {
			dpu_rm_release(global_state, drm_enc);

			if (!crtc_state->active_changed || crtc_state->active)
				ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
						drm_enc, crtc_state, topology);
		}
	}

	trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);

	return ret;
}

static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
			struct msm_display_info *disp_info)
{
	struct dpu_vsync_source_cfg vsync_cfg = { 0 };
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_hw_mdp *hw_mdptop;
	struct drm_encoder *drm_enc;
	int i;

	if (!dpu_enc || !disp_info) {
		DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
					dpu_enc != NULL, disp_info != NULL);
		return;
	} else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
		DPU_ERROR("invalid num phys enc %d/%d\n",
				dpu_enc->num_phys_encs,
				(int) ARRAY_SIZE(dpu_enc->hw_pp));
		return;
	}

	drm_enc = &dpu_enc->base;
	/* these pointers are checked in virt_enable_helper */
	priv = drm_enc->dev->dev_private;

	dpu_kms = to_dpu_kms(priv->kms);
	hw_mdptop = dpu_kms->hw_mdp;
	if (!hw_mdptop) {
		DPU_ERROR("invalid mdptop\n");
		return;
	}

	if (hw_mdptop->ops.setup_vsync_source &&
			disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
		for (i = 0; i < dpu_enc->num_phys_encs; i++)
			vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;

		vsync_cfg.pp_count = dpu_enc->num_phys_encs;
		if (disp_info->is_te_using_watchdog_timer)
			vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
		else
			vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;

		hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
	}
}

static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.irq_control)
			phys->ops.irq_control(phys, enable);
	}
}

static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
		bool enable)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);

	if (!dpu_enc->cur_master) {
		DPU_ERROR("encoder master not set\n");
		return;
	}

	if (enable) {
		/* enable DPU core clks */
		pm_runtime_get_sync(&dpu_kms->pdev->dev);

		/* enable all the irq */
		_dpu_encoder_irq_control(drm_enc, true);

	} else {
		/* disable all the irq */
		_dpu_encoder_irq_control(drm_enc, false);

		/* disable DPU core clks */
		pm_runtime_put_sync(&dpu_kms->pdev->dev);
	}
}

static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
		u32 sw_event)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	bool is_vid_mode = false;

	if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
		DPU_ERROR("invalid parameters\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	is_vid_mode = dpu_enc->disp_info.capabilities &
						MSM_DISPLAY_CAP_VID_MODE;

	/*
	 * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
	 * STOP events and return early for other events (i.e. wb display).
	 */
	if (!dpu_enc->idle_pc_supported &&
			(sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
			sw_event != DPU_ENC_RC_EVENT_STOP &&
			sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
		return 0;

	trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
			 dpu_enc->rc_state, "begin");

	switch (sw_event) {
	case DPU_ENC_RC_EVENT_KICKOFF:
		/* cancel delayed off work, if any */
		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
					sw_event);

		mutex_lock(&dpu_enc->rc_lock);

		/* return if the resource control is already in ON state */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
			DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in ON state\n",
				      DRMID(drm_enc), sw_event);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		} else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
				dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
			DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in state %d\n",
				      DRMID(drm_enc), sw_event,
				      dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return -EINVAL;
		}

		if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
			_dpu_encoder_irq_control(drm_enc, true);
		else
			_dpu_encoder_resource_control_helper(drm_enc, true);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "kickoff");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_FRAME_DONE:
		/*
		 * mutex lock is not used as this event happens at interrupt
		 * context, and locking is not required as the other events
		 * like KICKOFF and STOP do a wait-for-idle before executing
		 * the resource_control
		 */
		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
			DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
				      DRMID(drm_enc), sw_event,
				      dpu_enc->rc_state);
			return -EINVAL;
		}

		/*
		 * schedule off work item only when there are no
		 * frames pending
		 */
		if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
			DRM_DEBUG_KMS("id:%d skip schedule work\n",
				      DRMID(drm_enc));
			return 0;
		}

		queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
				   msecs_to_jiffies(dpu_enc->idle_timeout));

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "frame done");
		break;

	case DPU_ENC_RC_EVENT_PRE_STOP:
		/* cancel delayed off work, if any */
		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
					sw_event);

		mutex_lock(&dpu_enc->rc_lock);

		if (is_vid_mode &&
			  dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
			_dpu_encoder_irq_control(drm_enc, true);
		}
		/* skip if is already OFF or IDLE, resources are off already */
		else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
				dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
			DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
				      DRMID(drm_enc), sw_event,
				      dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "pre stop");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_STOP:
		mutex_lock(&dpu_enc->rc_lock);

		/* return if the resource control is already in OFF state */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
			DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
				      DRMID(drm_enc), sw_event);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		} else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
			DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return -EINVAL;
		}

		/*
		 * expect to arrive here only if in either idle state or pre-off
		 * and in IDLE state the resources are already disabled
		 */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
			_dpu_encoder_resource_control_helper(drm_enc, false);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "stop");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_ENTER_IDLE:
		mutex_lock(&dpu_enc->rc_lock);

		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
			DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		/*
		 * if we are in ON but a frame was just kicked off,
		 * ignore the IDLE event, it's probably a stale timer event
		 */
		if (dpu_enc->frame_busy_mask[0]) {
			DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		if (is_vid_mode)
			_dpu_encoder_irq_control(drm_enc, false);
		else
			_dpu_encoder_resource_control_helper(drm_enc, false);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "idle");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	default:
		DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
			  sw_event);
		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "error");
		break;
	}

	trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
			 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
			 "end");
	return 0;
}
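
/*
 * Call-site sketch (all within this file): dpu_encoder_virt_enable() sends
 * KICKOFF, dpu_encoder_virt_disable() sends PRE_STOP followed by STOP,
 * dpu_encoder_frame_done_callback() sends FRAME_DONE once all physical
 * encoders are idle, and dpu_encoder_off_work() sends ENTER_IDLE when the
 * delayed off work fires.
 */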

void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
		struct drm_writeback_job *job)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.prepare_wb_job)
			phys->ops.prepare_wb_job(phys, job);
	}
}

void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
		struct drm_writeback_job *job)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.cleanup_wb_job)
			phys->ops.cleanup_wb_job(phys, job);
	}
}

static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
					     struct drm_crtc_state *crtc_state,
					     struct drm_connector_state *conn_state)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_crtc_state *cstate;
	struct dpu_global_state *global_state;
	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
	struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
	int num_lm, num_ctl, num_pp, num_dsc;
	unsigned int dsc_mask = 0;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	global_state = dpu_kms_get_existing_global_state(dpu_kms);
	if (IS_ERR_OR_NULL(global_state)) {
		DPU_ERROR("Failed to get global state");
		return;
	}

	trace_dpu_enc_mode_set(DRMID(drm_enc));

	/* Query resources that have been reserved in the atomic check step. */
	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
		ARRAY_SIZE(hw_pp));
	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
	num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
	dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
		ARRAY_SIZE(hw_dspp));

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
		dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
						: NULL;

	if (dpu_enc->dsc) {
		num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
							drm_enc->base.id, DPU_HW_BLK_DSC,
							hw_dsc, ARRAY_SIZE(hw_dsc));
		for (i = 0; i < num_dsc; i++) {
			dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
			dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
		}
	}

	dpu_enc->dsc_mask = dsc_mask;

	cstate = to_dpu_crtc_state(crtc_state);

	for (i = 0; i < num_lm; i++) {
		int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);

		cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
		cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
		cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
	}

	cstate->num_mixers = num_lm;

	dpu_enc->connector = conn_state->connector;

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!dpu_enc->hw_pp[i]) {
			DPU_ERROR_ENC(dpu_enc,
				"no pp block assigned at idx: %d\n", i);
			return;
		}

		if (!hw_ctl[i]) {
			DPU_ERROR_ENC(dpu_enc,
				"no ctl block assigned at idx: %d\n", i);
			return;
		}

		phys->hw_pp = dpu_enc->hw_pp[i];
		phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);

		if (phys->intf_idx >= INTF_0 && phys->intf_idx < INTF_MAX)
			phys->hw_intf = dpu_rm_get_intf(&dpu_kms->rm, phys->intf_idx);

		if (phys->wb_idx >= WB_0 && phys->wb_idx < WB_MAX)
			phys->hw_wb = dpu_rm_get_wb(&dpu_kms->rm, phys->wb_idx);

		if (!phys->hw_intf && !phys->hw_wb) {
			DPU_ERROR_ENC(dpu_enc,
				      "no intf or wb block assigned at idx: %d\n", i);
			return;
		}

		if (phys->hw_intf && phys->hw_wb) {
			DPU_ERROR_ENC(dpu_enc,
					"invalid phys both intf and wb block at idx: %d\n", i);
			return;
		}

		phys->cached_mode = crtc_state->adjusted_mode;
		if (phys->ops.atomic_mode_set)
			phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
	}
}

static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i;

	if (!drm_enc || !drm_enc->dev) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	if (!dpu_enc || !dpu_enc->cur_master) {
		DPU_ERROR("invalid dpu encoder/master\n");
		return;
	}

	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_TMDS &&
		dpu_enc->cur_master->hw_mdptop &&
		dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
		dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
			dpu_enc->cur_master->hw_mdptop);

	_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);

	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
			!WARN_ON(dpu_enc->num_phys_encs == 0)) {
		unsigned bpc = dpu_enc->connector->display_info.bpc;
		for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
			if (!dpu_enc->hw_pp[i])
				continue;
			_dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
		}
	}
}

void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	mutex_lock(&dpu_enc->enc_lock);

	if (!dpu_enc->enabled)
		goto out;

	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
		dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
		dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);

	_dpu_encoder_virt_enable_helper(drm_enc);

out:
	mutex_unlock(&dpu_enc->enc_lock);
}

static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int ret = 0;
	struct drm_display_mode *cur_mode = NULL;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	mutex_lock(&dpu_enc->enc_lock);
	cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;

	trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
			     cur_mode->vdisplay);

	/* always enable slave encoder before master */
	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
		dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);

	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
		dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);

	ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
	if (ret) {
		DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
				ret);
		goto out;
	}

	_dpu_encoder_virt_enable_helper(drm_enc);

	dpu_enc->enabled = true;

out:
	mutex_unlock(&dpu_enc->enc_lock);
}

static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	mutex_lock(&dpu_enc->enc_lock);
	dpu_enc->enabled = false;

	trace_dpu_enc_disable(DRMID(drm_enc));

	/* wait for idle */
	dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.disable)
			phys->ops.disable(phys);
	}

	/* after phys waits for frame-done, should be no more frames pending */
	if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
		DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
		del_timer_sync(&dpu_enc->frame_done_timer);
	}

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);

	dpu_enc->connector = NULL;

	DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");

	mutex_unlock(&dpu_enc->enc_lock);
}

static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
		enum dpu_intf_type type, u32 controller_id)
{
	int i = 0;

	if (type != INTF_WB) {
		for (i = 0; i < catalog->intf_count; i++) {
			if (catalog->intf[i].type == type
				&& catalog->intf[i].controller_id == controller_id) {
				return catalog->intf[i].id;
			}
		}
	}

	return INTF_MAX;
}

static enum dpu_wb dpu_encoder_get_wb(struct dpu_mdss_cfg *catalog,
		enum dpu_intf_type type, u32 controller_id)
{
	int i = 0;

	if (type != INTF_WB)
		goto end;

	for (i = 0; i < catalog->wb_count; i++) {
		if (catalog->wb[i].id == controller_id)
			return catalog->wb[i].id;
	}

end:
	return WB_MAX;
}

static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phy_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	unsigned long lock_flags;

	if (!drm_enc || !phy_enc)
		return;

	DPU_ATRACE_BEGIN("encoder_vblank_callback");
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	if (dpu_enc->crtc)
		dpu_crtc_vblank_callback(dpu_enc->crtc);
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);

	atomic_inc(&phy_enc->vsync_cnt);
	DPU_ATRACE_END("encoder_vblank_callback");
}

static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phy_enc)
{
	if (!phy_enc)
		return;

	DPU_ATRACE_BEGIN("encoder_underrun_callback");
	atomic_inc(&phy_enc->underrun_cnt);

	/* trigger dump only on the first underrun */
	if (atomic_read(&phy_enc->underrun_cnt) == 1)
		msm_disp_snapshot_state(drm_enc->dev);

	trace_dpu_enc_underrun_cb(DRMID(drm_enc),
				  atomic_read(&phy_enc->underrun_cnt));
	DPU_ATRACE_END("encoder_underrun_callback");
}

void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	/* crtc should always be cleared before re-assigning */
	WARN_ON(crtc && dpu_enc->crtc);
	dpu_enc->crtc = crtc;
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
					struct drm_crtc *crtc, bool enable)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;
	int i;

	trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	if (dpu_enc->crtc != crtc) {
		spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
		return;
	}
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.control_vblank_irq)
			phys->ops.control_vblank_irq(phys, enable);
	}
}

void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
		void (*frame_event_cb)(void *, u32 event),
		void *frame_event_cb_data)
{
	struct dpu_encoder_virt *dpu_enc;
	unsigned long lock_flags;
	bool enable;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	enable = frame_event_cb ? true : false;

	trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	dpu_enc->crtc_frame_event_cb = frame_event_cb;
	dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

static void dpu_encoder_frame_done_callback(
		struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *ready_phys, u32 event)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned int i;

	if (event & (DPU_ENCODER_FRAME_EVENT_DONE
			| DPU_ENCODER_FRAME_EVENT_ERROR
			| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (!dpu_enc->frame_busy_mask[0]) {
			/*
			 * suppress frame_done without waiter,
			 * likely autorefresh
			 */
			trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
					dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
					ready_phys->intf_idx, ready_phys->wb_idx);
			return;
		}

		/* One of the physical encoders has become idle */
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			if (dpu_enc->phys_encs[i] == ready_phys) {
				trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
						dpu_enc->frame_busy_mask[0]);
				clear_bit(i, dpu_enc->frame_busy_mask);
			}
		}

		if (!dpu_enc->frame_busy_mask[0]) {
			atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
			del_timer(&dpu_enc->frame_done_timer);

			dpu_encoder_resource_control(drm_enc,
					DPU_ENC_RC_EVENT_FRAME_DONE);

			if (dpu_enc->crtc_frame_event_cb)
				dpu_enc->crtc_frame_event_cb(
					dpu_enc->crtc_frame_event_cb_data,
					event);
		}
	} else {
		if (dpu_enc->crtc_frame_event_cb)
			dpu_enc->crtc_frame_event_cb(
				dpu_enc->crtc_frame_event_cb_data, event);
	}
}

static void dpu_encoder_off_work(struct work_struct *work)
{
	struct dpu_encoder_virt *dpu_enc = container_of(work,
			struct dpu_encoder_virt, delayed_off_work.work);

	dpu_encoder_resource_control(&dpu_enc->base,
						DPU_ENC_RC_EVENT_ENTER_IDLE);

	dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
				DPU_ENCODER_FRAME_EVENT_IDLE);
}

/**
 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
 * @drm_enc: Pointer to drm encoder structure
 * @phys: Pointer to physical encoder structure
 * @extra_flush_bits: Additional bit mask to include in flush trigger
 */
static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
{
	struct dpu_hw_ctl *ctl;
	int pending_kickoff_cnt;
	u32 ret = UINT_MAX;

	if (!phys->hw_pp) {
		DPU_ERROR("invalid pingpong hw\n");
		return;
	}

	ctl = phys->hw_ctl;
	if (!ctl->ops.trigger_flush) {
		DPU_ERROR("missing trigger cb\n");
		return;
	}

	pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);

	if (extra_flush_bits && ctl->ops.update_pending_flush)
		ctl->ops.update_pending_flush(ctl, extra_flush_bits);

	ctl->ops.trigger_flush(ctl);

	if (ctl->ops.get_pending_flush)
		ret = ctl->ops.get_pending_flush(ctl);

	trace_dpu_enc_trigger_flush(DRMID(drm_enc),
			dpu_encoder_helper_get_intf_type(phys->intf_mode),
			phys->intf_idx, phys->wb_idx,
			pending_kickoff_cnt, ctl->idx,
			extra_flush_bits, ret);
}

/**
 * _dpu_encoder_trigger_start - trigger start for a physical encoder
 * @phys: Pointer to physical encoder structure
 */
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
	if (!phys) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	if (!phys->hw_pp) {
		DPU_ERROR("invalid pingpong hw\n");
		return;
	}

	if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
		phys->ops.trigger_start(phys);
}

void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;

	ctl = phys_enc->hw_ctl;
	if (ctl->ops.trigger_start) {
		ctl->ops.trigger_start(ctl);
		trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
	}
}

static int dpu_encoder_helper_wait_event_timeout(
		int32_t drm_id,
		u32 irq_idx,
		struct dpu_encoder_wait_info *info)
{
	int rc = 0;
	s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
	s64 wait_jiffies = msecs_to_jiffies(info->timeout_ms);
	s64 time;

	do {
		rc = wait_event_timeout(*(info->wq),
				atomic_read(info->atomic_cnt) == 0, wait_jiffies);
		time = ktime_to_ms(ktime_get());

		trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time,
						 expected_time,
						 atomic_read(info->atomic_cnt));
	/* If we timed out, counter is valid and time is less, wait again */
	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
			(time < expected_time));

	return rc;
}
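
/*
 * Example (illustrative): with timeout_ms = 100, if wait_event_timeout()
 * returns 0 after less wall-clock time than expected (say 40 ms) while
 * atomic_cnt is still non-zero, the loop above re-arms the wait; only once
 * the wall clock passes expected_time is the timeout treated as genuine.
 */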

static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_hw_ctl *ctl;
	int rc;
	struct drm_encoder *drm_enc;

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
	ctl = phys_enc->hw_ctl;
	drm_enc = phys_enc->parent;

	if (!ctl->ops.reset)
		return;

	DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
		      ctl->idx);

	rc = ctl->ops.reset(ctl);
	if (rc) {
		DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n",  ctl->idx);
		msm_disp_snapshot_state(drm_enc->dev);
	}

	phys_enc->enable_state = DPU_ENC_ENABLED;
}

/**
 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
 *	Iterate through the physical encoders and perform consolidated flush
 *	and/or control start triggering as needed. This is done in the virtual
 *	encoder rather than the individual physical ones in order to handle
 *	use cases that require visibility into multiple physical encoders at
 *	a time.
 * @dpu_enc: Pointer to virtual encoder structure
 */
static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
{
	struct dpu_hw_ctl *ctl;
	uint32_t i, pending_flush;
	unsigned long lock_flags;

	pending_flush = 0x0;

	/* update pending counts and trigger kickoff ctl flush atomically */
	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);

	/* don't perform flush/start operations for slave encoders */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->enable_state == DPU_ENC_DISABLED)
			continue;

		ctl = phys->hw_ctl;

		/*
		 * This is cleared in frame_done worker, which isn't invoked
		 * for async commits. So don't set this for async, since it'll
		 * roll over to the next commit.
		 */
		if (phys->split_role != ENC_ROLE_SLAVE)
			set_bit(i, dpu_enc->frame_busy_mask);

		if (!phys->ops.needs_single_flush ||
				!phys->ops.needs_single_flush(phys))
			_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
		else if (ctl->ops.get_pending_flush)
			pending_flush |= ctl->ops.get_pending_flush(ctl);
	}

	/* for split flush, combine pending flush masks and send to master */
	if (pending_flush && dpu_enc->cur_master) {
		_dpu_encoder_trigger_flush(
				&dpu_enc->base,
				dpu_enc->cur_master,
				pending_flush);
	}

	_dpu_encoder_trigger_start(dpu_enc->cur_master);

	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}
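
/*
 * Split-flush example (illustrative): in a master/slave split where
 * needs_single_flush() returns true, the slave's pending CTL flush bits
 * are accumulated into pending_flush rather than triggered directly; the
 * combined mask is then flushed once through the master CTL so that both
 * halves of the display take effect on the same frame boundary.
 */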

void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	unsigned int i;
	struct dpu_hw_ctl *ctl;
	struct msm_display_info *disp_info;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	disp_info = &dpu_enc->disp_info;

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];

		ctl = phys->hw_ctl;
		if (ctl->ops.clear_pending_flush)
			ctl->ops.clear_pending_flush(ctl);

		/* update only for command mode primary ctl */
		if ((phys == dpu_enc->cur_master) &&
		   (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
		    && ctl->ops.trigger_pending)
			ctl->ops.trigger_pending(ctl);
	}
}

static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
		struct drm_display_mode *mode)
{
	u64 pclk_rate;
	u32 pclk_period;
	u32 line_time;

	/*
	 * For linetime calculation, only operate on master encoder.
	 */
	if (!dpu_enc->cur_master)
		return 0;

	if (!dpu_enc->cur_master->ops.get_line_count) {
		DPU_ERROR("get_line_count function not defined\n");
		return 0;
	}

	pclk_rate = mode->clock; /* pixel clock in kHz */
	if (pclk_rate == 0) {
		DPU_ERROR("pclk is 0, cannot calculate line time\n");
		return 0;
	}

	pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
	if (pclk_period == 0) {
		DPU_ERROR("pclk period is 0\n");
		return 0;
	}

	/*
	 * Line time calculation based on Pixel clock and HTOTAL.
	 * Final unit is in ns.
	 */
	line_time = (pclk_period * mode->htotal) / 1000;
	if (line_time == 0) {
		DPU_ERROR("line time calculation is 0\n");
		return 0;
	}

	DPU_DEBUG_ENC(dpu_enc,
			"clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
			pclk_rate, pclk_period, line_time);

	return line_time;
}
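
/*
 * Worked example: for a 1080p60 mode with mode->clock = 148500 (kHz) and
 * htotal = 2200, pclk_period = DIV_ROUND_UP(10^9, 148500) = 6735 (in ps)
 * and line_time = 6735 * 2200 / 1000 = 14817 ns, i.e. ~14.8 us per line.
 */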
1657 
1658 int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
1659 {
1660 	struct drm_display_mode *mode;
1661 	struct dpu_encoder_virt *dpu_enc;
1662 	u32 cur_line;
1663 	u32 line_time;
1664 	u32 vtotal, time_to_vsync;
1665 	ktime_t cur_time;
1666 
1667 	dpu_enc = to_dpu_encoder_virt(drm_enc);
1668 
1669 	if (!drm_enc->crtc || !drm_enc->crtc->state) {
1670 		DPU_ERROR("crtc/crtc state object is NULL\n");
1671 		return -EINVAL;
1672 	}
1673 	mode = &drm_enc->crtc->state->adjusted_mode;
1674 
1675 	line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
1676 	if (!line_time)
1677 		return -EINVAL;
1678 
1679 	cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
1680 
1681 	vtotal = mode->vtotal;
1682 	if (cur_line >= vtotal)
1683 		time_to_vsync = line_time * vtotal;
1684 	else
1685 		time_to_vsync = line_time * (vtotal - cur_line);
1686 
1687 	if (time_to_vsync == 0) {
1688 		DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
1689 				vtotal);
1690 		return -EINVAL;
1691 	}
1692 
1693 	cur_time = ktime_get();
1694 	*wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
1695 
1696 	DPU_DEBUG_ENC(dpu_enc,
1697 			"cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
1698 			cur_line, vtotal, time_to_vsync,
1699 			ktime_to_ms(cur_time),
1700 			ktime_to_ms(*wakeup_time));
1701 	return 0;
1702 }
1703 
1704 static void dpu_encoder_vsync_event_handler(struct timer_list *t)
1705 {
1706 	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
1707 			vsync_event_timer);
1708 	struct drm_encoder *drm_enc = &dpu_enc->base;
1709 	struct msm_drm_private *priv;
1710 	struct msm_drm_thread *event_thread;
1711 
1712 	if (!drm_enc->dev || !drm_enc->crtc) {
1713 		DPU_ERROR("invalid parameters\n");
1714 		return;
1715 	}
1716 
1717 	priv = drm_enc->dev->dev_private;
1718 
1719 	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
1720 		DPU_ERROR("invalid crtc index\n");
1721 		return;
1722 	}
1723 	event_thread = &priv->event_thread[drm_enc->crtc->index];
1724 	if (!event_thread) {
1725 		DPU_ERROR("event_thread not found for crtc:%d\n",
1726 				drm_enc->crtc->index);
1727 		return;
1728 	}
1729 
1730 	del_timer(&dpu_enc->vsync_event_timer);
1731 }
1732 
1733 static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
1734 {
1735 	struct dpu_encoder_virt *dpu_enc = container_of(work,
1736 			struct dpu_encoder_virt, vsync_event_work);
1737 	ktime_t wakeup_time;
1738 
1739 	if (dpu_encoder_vsync_time(&dpu_enc->base, &wakeup_time))
1740 		return;
1741 
1742 	trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
1743 	mod_timer(&dpu_enc->vsync_event_timer,
1744 			nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
1745 }
1746 
1747 static u32
1748 dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc,
1749 				  u32 enc_ip_width)
1750 {
1751 	int ssm_delay, total_pixels, soft_slice_per_enc;
1752 
1753 	soft_slice_per_enc = enc_ip_width / dsc->drm->slice_width;
1754 
	/*
	 * minimum number of initial line pixels is a sum of:
	 * 1. sub-stream multiplexer delay (83 groups for 8bpc, 91 for
	 *    10 bpc; the values below include one extra group, i.e.
	 *    84/92) * 3
	 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
	 * 3. the initial xmit delay
	 * 4. total pipeline delay through the "lock step" of encoder (47)
	 * 5. 6 additional pixels as the output of the rate buffer is
	 *    48 bits wide
	 */
	ssm_delay = ((dsc->drm->bits_per_component < 10) ? 84 : 92);
	total_pixels = ssm_delay * 3 + dsc->drm->initial_xmit_delay + 47;
	if (soft_slice_per_enc > 1)
		total_pixels += (ssm_delay * 3);
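	/*
	 * Worked example with hypothetical DSC parameters: for 8 bpc, one
	 * soft slice, initial_xmit_delay = 512 and slice_width = 540,
	 * total_pixels = 84 * 3 + 512 + 47 = 811, so the function returns
	 * DIV_ROUND_UP(811, 540) = 2 initial lines.
	 */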
	return DIV_ROUND_UP(total_pixels, dsc->drm->slice_width);
}

static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
				     struct dpu_hw_pingpong *hw_pp,
				     struct msm_display_dsc_config *dsc,
				     u32 common_mode,
				     u32 initial_lines)
{
	if (hw_dsc->ops.dsc_config)
		hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);

	if (hw_dsc->ops.dsc_config_thresh)
		hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);

	if (hw_pp->ops.setup_dsc)
		hw_pp->ops.setup_dsc(hw_pp);

	if (hw_pp->ops.enable_dsc)
		hw_pp->ops.enable_dsc(hw_pp);
}

static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
				 struct msm_display_dsc_config *dsc)
{
	/* this only supports the 2 LM, 2 encoder, 1 DSC topology */
	struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	int this_frame_slices;
	int intf_ip_w, enc_ip_w;
	int dsc_common_mode;
	int pic_width;
	u32 initial_lines;
	int i;

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
		hw_pp[i] = dpu_enc->hw_pp[i];
		hw_dsc[i] = dpu_enc->hw_dsc[i];

		if (!hw_pp[i] || !hw_dsc[i]) {
			DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
			return;
		}
	}

	pic_width = dsc->drm->pic_width;

	dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
	if (enc_master->intf_mode == INTF_MODE_VIDEO)
		dsc_common_mode |= DSC_MODE_VIDEO;

	this_frame_slices = pic_width / dsc->drm->slice_width;
	intf_ip_w = this_frame_slices * dsc->drm->slice_width;

	/*
	 * dsc merge case: when using 2 encoders for the same stream,
	 * the number of slices must be the same on both encoders.
	 */
	enc_ip_w = intf_ip_w / 2;
	initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
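	/*
	 * For instance (hypothetical panel): pic_width = 2160 with
	 * slice_width = 540 gives 4 slices per frame, intf_ip_w = 2160 and
	 * enc_ip_w = 1080, i.e. 2 soft slices per encoder.
	 */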

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
		dpu_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], dsc, dsc_common_mode, initial_lines);
}

void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	bool needs_hw_reset = false;
	unsigned int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));

	/* prepare for next kickoff, may include waiting on previous kickoff */
	DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys->ops.prepare_for_kickoff)
			phys->ops.prepare_for_kickoff(phys);
		if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
			needs_hw_reset = true;
	}
	DPU_ATRACE_END("enc_prepare_for_kickoff");

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);

	/* if any phys needs reset, reset all phys, in-order */
	if (needs_hw_reset) {
		trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
		for (i = 0; i < dpu_enc->num_phys_encs; i++)
			dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
	}

	if (dpu_enc->dsc)
		dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
}

bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	unsigned int i;
	struct dpu_encoder_phys *phys;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			phys = dpu_enc->phys_encs[i];
			if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) {
				DPU_DEBUG("invalid FB, not kicking off\n");
				return false;
			}
		}
	}

	return true;
}

void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	ktime_t wakeup_time;
	unsigned long timeout_ms;
	unsigned int i;

	DPU_ATRACE_BEGIN("encoder_kickoff");
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	trace_dpu_enc_kickoff(DRMID(drm_enc));

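	/*
	 * Wait up to DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES frame periods
	 * before declaring a frame done timeout, e.g. at 60 vrefresh this
	 * is 5 * 1000 / 60 = 83 ms.
	 */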
	timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
			drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);

	atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
	mod_timer(&dpu_enc->frame_done_timer,
			jiffies + msecs_to_jiffies(timeout_ms));

	/* All phys encs are ready to go, trigger the kickoff */
	_dpu_encoder_kickoff_phys(dpu_enc);

	/* allow phys encs to handle any post-kickoff business */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys->ops.handle_post_kickoff)
			phys->ops.handle_post_kickoff(phys);
	}

	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
			!dpu_encoder_vsync_time(drm_enc, &wakeup_time)) {
		trace_dpu_enc_early_kickoff(DRMID(drm_enc),
					    ktime_to_ms(wakeup_time));
		mod_timer(&dpu_enc->vsync_event_timer,
				nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
	}

	DPU_ATRACE_END("encoder_kickoff");
}

static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_mixer_cfg mixer;
	int i, num_lm;
	u32 flush_mask = 0;
	struct dpu_global_state *global_state;
	struct dpu_hw_blk *hw_lm[2];
	struct dpu_hw_mixer *hw_mixer[2];
	struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;

	memset(&mixer, 0, sizeof(mixer));

	/* reset all mixers for this encoder */
	if (ctl->ops.clear_all_blendstages)
		ctl->ops.clear_all_blendstages(ctl);

	global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);

	num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
		phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));

	for (i = 0; i < num_lm; i++) {
		hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
		flush_mask = ctl->ops.get_bitmask_mixer(ctl, hw_mixer[i]->idx);
		if (ctl->ops.update_pending_flush)
			ctl->ops.update_pending_flush(ctl, flush_mask);

		/* clear all blendstages */
		if (ctl->ops.setup_blendstage)
			ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
	}
}

void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
	struct dpu_hw_intf_cfg intf_cfg = { 0 };
	int i;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);

	ctl->ops.reset(ctl);

	dpu_encoder_helper_reset_mixers(phys_enc);

	/*
	 * TODO: move the once-only operation like CTL flush/trigger
	 * into dpu_encoder_virt_disable() and all operations which need
	 * to be done per phys encoder into the phys_disable() op.
	 */
	if (phys_enc->hw_wb) {
		/* disable the PP block */
		if (phys_enc->hw_wb->ops.bind_pingpong_blk)
			phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, false,
					phys_enc->hw_pp->idx);

		/* mark WB flush as pending */
		if (ctl->ops.update_pending_flush_wb)
			ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
	} else {
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
				phys_enc->hw_intf->ops.bind_pingpong_blk(
						dpu_enc->phys_encs[i]->hw_intf, false,
						dpu_enc->phys_encs[i]->hw_pp->idx);

			/* mark INTF flush as pending */
			if (ctl->ops.update_pending_flush_intf)
				ctl->ops.update_pending_flush_intf(ctl,
						dpu_enc->phys_encs[i]->hw_intf->idx);
		}
	}

	/* reset the merge 3D HW block */
	if (phys_enc->hw_pp->merge_3d) {
		phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
				BLEND_3D_NONE);
		if (ctl->ops.update_pending_flush_merge_3d)
			ctl->ops.update_pending_flush_merge_3d(ctl,
					phys_enc->hw_pp->merge_3d->idx);
	}

	intf_cfg.stream_sel = 0; /* Don't care value for video mode */
	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
	if (phys_enc->hw_pp->merge_3d)
		intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;

	if (ctl->ops.reset_intf_cfg)
		ctl->ops.reset_intf_cfg(ctl, &intf_cfg);

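	/*
	 * Program the accumulated pending-flush bits, issue a start, and
	 * then drop the software pending-flush state so the next commit
	 * begins from a clean slate.
	 */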
	ctl->ops.trigger_flush(ctl);
	ctl->ops.trigger_start(ctl);
	ctl->ops.clear_pending_flush(ctl);
}

void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys->ops.prepare_commit)
			phys->ops.prepare_commit(phys);
	}
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_encoder_status_show(struct seq_file *s, void *data)
{
	struct dpu_encoder_virt *dpu_enc = s->private;
	int i;

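	/*
	 * Prints one line per physical encoder with its interface/writeback
	 * index, vsync and underrun counters, and the interface mode, e.g.
	 * (illustrative values):
	 *   intf:1  wb:-1  vsync:     342     underrun:       0    mode: video
	 */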
	mutex_lock(&dpu_enc->enc_lock);
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		seq_printf(s, "intf:%d  wb:%d  vsync:%8d     underrun:%8d    ",
				phys->intf_idx - INTF_0, phys->wb_idx - WB_0,
				atomic_read(&phys->vsync_cnt),
				atomic_read(&phys->underrun_cnt));

		seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
	}
	mutex_unlock(&dpu_enc->enc_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);

static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int i;
	char name[DPU_NAME_SIZE];

	if (!drm_enc->dev) {
		DPU_ERROR("invalid encoder or kms\n");
		return -EINVAL;
	}

	snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);

	/* create overall sub-directory for the encoder */
	dpu_enc->debugfs_root = debugfs_create_dir(name,
			drm_enc->dev->primary->debugfs_root);

	/* don't error check these */
	debugfs_create_file("status", 0600,
		dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);

	for (i = 0; i < dpu_enc->num_phys_encs; i++)
		if (dpu_enc->phys_encs[i]->ops.late_register)
			dpu_enc->phys_encs[i]->ops.late_register(
					dpu_enc->phys_encs[i],
					dpu_enc->debugfs_root);

	return 0;
}
#else
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
	return 0;
}
#endif

static int dpu_encoder_late_register(struct drm_encoder *encoder)
{
	return _dpu_encoder_init_debugfs(encoder);
}

static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);

	debugfs_remove_recursive(dpu_enc->debugfs_root);
}

static int dpu_encoder_virt_add_phys_encs(
		struct msm_display_info *disp_info,
		struct dpu_encoder_virt *dpu_enc,
		struct dpu_enc_phys_init_params *params)
{
	struct dpu_encoder_phys *enc = NULL;

	DPU_DEBUG_ENC(dpu_enc, "\n");

	/*
	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoders
	 * in this function, so check up-front that there is room.
	 */
	if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
			ARRAY_SIZE(dpu_enc->phys_encs)) {
		DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
			  dpu_enc->num_phys_encs);
		return -EINVAL;
	}

	if (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE) {
		enc = dpu_encoder_phys_vid_init(params);

		if (IS_ERR_OR_NULL(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
				PTR_ERR(enc));
			return enc == NULL ? -EINVAL : PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	}

	if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
		enc = dpu_encoder_phys_cmd_init(params);

		if (IS_ERR_OR_NULL(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
				PTR_ERR(enc));
			return enc == NULL ? -EINVAL : PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	}

	if (disp_info->intf_type == DRM_MODE_ENCODER_VIRTUAL) {
		enc = dpu_encoder_phys_wb_init(params);

		if (IS_ERR_OR_NULL(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
					PTR_ERR(enc));
			return enc == NULL ? -EINVAL : PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	}

	if (params->split_role == ENC_ROLE_SLAVE)
		dpu_enc->cur_slave = enc;
	else
		dpu_enc->cur_master = enc;

	return 0;
}

static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
	.handle_vblank_virt = dpu_encoder_vblank_callback,
	.handle_underrun_virt = dpu_encoder_underrun_callback,
	.handle_frame_done = dpu_encoder_frame_done_callback,
};

static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
				 struct dpu_kms *dpu_kms,
				 struct msm_display_info *disp_info)
{
	int ret = 0;
	int i = 0;
	enum dpu_intf_type intf_type = INTF_NONE;
	struct dpu_enc_phys_init_params phys_params;

	if (!dpu_enc) {
		DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
		return -EINVAL;
	}

	dpu_enc->cur_master = NULL;

	memset(&phys_params, 0, sizeof(phys_params));
	phys_params.dpu_kms = dpu_kms;
	phys_params.parent = &dpu_enc->base;
	phys_params.parent_ops = &dpu_encoder_parent_ops;
	phys_params.enc_spinlock = &dpu_enc->enc_spinlock;

	switch (disp_info->intf_type) {
	case DRM_MODE_ENCODER_DSI:
		intf_type = INTF_DSI;
		break;
	case DRM_MODE_ENCODER_TMDS:
		intf_type = INTF_DP;
		break;
	case DRM_MODE_ENCODER_VIRTUAL:
		intf_type = INTF_WB;
		break;
	}

	WARN_ON(disp_info->num_of_h_tiles < 1);

	DPU_DEBUG("disp_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);

	if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
	    (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
		dpu_enc->idle_pc_supported =
				dpu_kms->catalog->caps->has_idle_pc;

	dpu_enc->dsc = disp_info->dsc;

	mutex_lock(&dpu_enc->enc_lock);
	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
		/*
		 * Left-most tile is at index 0, content is controller id
		 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
		 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
		 */
		u32 controller_id = disp_info->h_tile_instance[i];

		if (disp_info->num_of_h_tiles > 1) {
			if (i == 0)
				phys_params.split_role = ENC_ROLE_MASTER;
			else
				phys_params.split_role = ENC_ROLE_SLAVE;
		} else {
			phys_params.split_role = ENC_ROLE_SOLO;
		}

		DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
				i, controller_id, phys_params.split_role);

		phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
				intf_type, controller_id);

		phys_params.wb_idx = dpu_encoder_get_wb(dpu_kms->catalog,
				intf_type, controller_id);
		/*
		 * The phys_params might represent either an INTF or a WB unit, but not
		 * both of them at the same time.
		 */
		if ((phys_params.intf_idx == INTF_MAX) &&
				(phys_params.wb_idx == WB_MAX)) {
			DPU_ERROR_ENC(dpu_enc, "could not get intf or wb: type %d, id %d\n",
						  intf_type, controller_id);
			ret = -EINVAL;
		}

		if ((phys_params.intf_idx != INTF_MAX) &&
				(phys_params.wb_idx != WB_MAX)) {
			DPU_ERROR_ENC(dpu_enc, "both intf and wb present: type %d, id %d\n",
						  intf_type, controller_id);
			ret = -EINVAL;
		}

		if (!ret) {
			ret = dpu_encoder_virt_add_phys_encs(disp_info,
					dpu_enc, &phys_params);
			if (ret)
				DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
		}
	}

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		atomic_set(&phys->vsync_cnt, 0);
		atomic_set(&phys->underrun_cnt, 0);
	}
	mutex_unlock(&dpu_enc->enc_lock);

	return ret;
}

static void dpu_encoder_frame_done_timeout(struct timer_list *t)
{
	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
			frame_done_timer);
	struct drm_encoder *drm_enc = &dpu_enc->base;
	u32 event;

	if (!drm_enc->dev) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
		DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
			      DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
		return;
	} else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
		DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
		return;
	}

	DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");

	event = DPU_ENCODER_FRAME_EVENT_ERROR;
	trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
	dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
}

static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
	.atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
	.disable = dpu_encoder_virt_disable,
	.enable = dpu_encoder_virt_enable,
	.atomic_check = dpu_encoder_virt_atomic_check,
};

static const struct drm_encoder_funcs dpu_encoder_funcs = {
	.destroy = dpu_encoder_destroy,
	.late_register = dpu_encoder_late_register,
	.early_unregister = dpu_encoder_early_unregister,
};

int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
		struct msm_display_info *disp_info)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct dpu_encoder_virt *dpu_enc = NULL;
	int ret = 0;

	dpu_enc = to_dpu_encoder_virt(enc);

	ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
	if (ret)
		goto fail;

	atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
	timer_setup(&dpu_enc->frame_done_timer,
			dpu_encoder_frame_done_timeout, 0);

	if (disp_info->intf_type == DRM_MODE_ENCODER_DSI)
		timer_setup(&dpu_enc->vsync_event_timer,
				dpu_encoder_vsync_event_handler,
				0);
	else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS)
		dpu_enc->wide_bus_en = msm_dp_wide_bus_available(
				priv->dp[disp_info->h_tile_instance[0]]);

	INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
			dpu_encoder_off_work);
	dpu_enc->idle_timeout = IDLE_TIMEOUT;

	kthread_init_work(&dpu_enc->vsync_event_work,
			dpu_encoder_vsync_event_work_handler);

	memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));

	DPU_DEBUG_ENC(dpu_enc, "created\n");

	return ret;

fail:
	DPU_ERROR("failed to create encoder\n");

	return ret;
}

struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
		int drm_enc_mode)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int rc = 0;

	dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
	if (!dpu_enc)
		return ERR_PTR(-ENOMEM);

	rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
			drm_enc_mode, NULL);
	if (rc) {
		devm_kfree(dev->dev, dpu_enc);
		return ERR_PTR(rc);
	}

	drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);

	spin_lock_init(&dpu_enc->enc_spinlock);
	dpu_enc->enabled = false;
	mutex_init(&dpu_enc->enc_lock);
	mutex_init(&dpu_enc->rc_lock);

	return &dpu_enc->base;
}

int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
	enum msm_event_wait event)
{
	int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i, ret = 0;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		switch (event) {
		case MSM_ENC_COMMIT_DONE:
			fn_wait = phys->ops.wait_for_commit_done;
			break;
		case MSM_ENC_TX_COMPLETE:
			fn_wait = phys->ops.wait_for_tx_complete;
			break;
		case MSM_ENC_VBLANK:
			fn_wait = phys->ops.wait_for_vblank;
			break;
		default:
			DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
					event);
			return -EINVAL;
		}

		if (fn_wait) {
			DPU_ATRACE_BEGIN("wait_for_completion_event");
			ret = fn_wait(phys);
			DPU_ATRACE_END("wait_for_completion_event");
			if (ret)
				return ret;
		}
	}

	return ret;
}

enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
{
	struct dpu_encoder_virt *dpu_enc = NULL;

	if (!encoder) {
		DPU_ERROR("invalid encoder\n");
		return INTF_MODE_NONE;
	}
	dpu_enc = to_dpu_encoder_virt(encoder);

	if (dpu_enc->cur_master)
		return dpu_enc->cur_master->intf_mode;

	if (dpu_enc->num_phys_encs)
		return dpu_enc->phys_encs[0]->intf_mode;

	return INTF_MODE_NONE;
}

unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
{
	struct drm_encoder *encoder = phys_enc->parent;
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);

	return dpu_enc->dsc_mask;
}