// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/delay.h>
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_pingpong.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
#include "disp/msm_disp_snapshot.h"

#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
		(e) && (e)->base.parent ? \
		(e)->base.parent->base.id : -1, \
		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)

#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
		(e) && (e)->base.parent ? \
		(e)->base.parent->base.id : -1, \
		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)

#define to_dpu_encoder_phys_cmd(x) \
	container_of(x, struct dpu_encoder_phys_cmd, base)

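/*
 * Number of consecutive pingpong-done timeouts tolerated before the panel
 * is reported dead via DPU_ENCODER_FRAME_EVENT_PANEL_DEAD.
 */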
#define PP_TIMEOUT_MAX_TRIALS	10

/*
 * Tearcheck sync start and continue thresholds are empirically found
 * based on common panels. In the future, we may want to allow panels to
 * override these default values.
 */
#define DEFAULT_TEARCHECK_SYNC_THRESH_START	4
#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE	4

#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000

#define DPU_ENC_MAX_POLL_TIMEOUT_US	2000

static void dpu_encoder_phys_cmd_enable_te(struct dpu_encoder_phys *phys_enc);

static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
	return (phys_enc->split_role != ENC_ROLE_SLAVE);
}

static void _dpu_encoder_phys_cmd_update_intf_cfg(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_intf_cfg intf_cfg = { 0 };

	ctl = phys_enc->hw_ctl;
	if (!ctl->ops.setup_intf_cfg)
		return;

	intf_cfg.intf = phys_enc->intf_idx;
	intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
	intf_cfg.stream_sel = cmd_enc->stream_sel;
	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
	intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
	ctl->ops.setup_intf_cfg(ctl, &intf_cfg);

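	/*
	 * Note: on hardware with active CTLs (DPU_CTL_ACTIVE_CFG), the
	 * INTF-to-PINGPONG binding below is programmed at the interface
	 * block itself rather than being implied by the CTL configuration.
	 */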
	/* setup which pp blk will connect to this intf */
	if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) && phys_enc->hw_intf->ops.bind_pingpong_blk)
		phys_enc->hw_intf->ops.bind_pingpong_blk(
				phys_enc->hw_intf,
				true,
				phys_enc->hw_pp->idx);
}

static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
{
	struct dpu_encoder_phys *phys_enc = arg;
	unsigned long lock_flags;
	int new_cnt;
	u32 event = DPU_ENCODER_FRAME_EVENT_DONE;

	if (!phys_enc->hw_pp)
		return;

	DPU_ATRACE_BEGIN("pp_done_irq");
	/* notify all synchronous clients first, then asynchronous clients */
	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, event);

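	/*
	 * Retire one outstanding kickoff; atomic_add_unless() saturates at
	 * zero so a spurious pp_done interrupt cannot drive the count
	 * negative.
	 */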
	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);

	trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
					  phys_enc->hw_pp->idx - PINGPONG_0,
					  new_cnt, event);

	/* Signal any waiting atomic commit thread */
	wake_up_all(&phys_enc->pending_kickoff_wq);
	DPU_ATRACE_END("pp_done_irq");
}

static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
{
	struct dpu_encoder_phys *phys_enc = arg;
	struct dpu_encoder_phys_cmd *cmd_enc;

	if (!phys_enc->hw_pp)
		return;

	DPU_ATRACE_BEGIN("rd_ptr_irq");
	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);

	dpu_encoder_vblank_callback(phys_enc->parent, phys_enc);

	atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
	wake_up_all(&cmd_enc->pending_vblank_wq);
	DPU_ATRACE_END("rd_ptr_irq");
}

static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
{
	struct dpu_encoder_phys *phys_enc = arg;

	DPU_ATRACE_BEGIN("ctl_start_irq");

	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);

	/* Signal any thread waiting on the ctl start interrupt */
	wake_up_all(&phys_enc->pending_kickoff_wq);
	DPU_ATRACE_END("ctl_start_irq");
}

static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
{
	struct dpu_encoder_phys *phys_enc = arg;

	dpu_encoder_underrun_callback(phys_enc->parent, phys_enc);
}

static void dpu_encoder_phys_cmd_atomic_mode_set(
		struct dpu_encoder_phys *phys_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;

	phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;

	phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;

	phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
}

static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
	bool do_log = false;
	struct drm_encoder *drm_enc;

	if (!phys_enc->hw_pp)
		return -EINVAL;

	drm_enc = phys_enc->parent;

	cmd_enc->pp_timeout_report_cnt++;
	if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
		frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
		do_log = true;
	} else if (cmd_enc->pp_timeout_report_cnt == 1) {
		do_log = true;
	}

	trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(drm_enc),
		     phys_enc->hw_pp->idx - PINGPONG_0,
		     cmd_enc->pp_timeout_report_cnt,
		     atomic_read(&phys_enc->pending_kickoff_cnt),
		     frame_event);

	/* to avoid flooding, only log the first timeout and the "dead" one */
	if (do_log) {
		DRM_ERROR("id:%d pp:%d ctl:%d kickoff timeout cnt:%d koff_cnt:%d\n",
			  DRMID(drm_enc),
			  phys_enc->hw_pp->idx - PINGPONG_0,
			  phys_enc->hw_ctl->idx - CTL_0,
			  cmd_enc->pp_timeout_report_cnt,
			  atomic_read(&phys_enc->pending_kickoff_cnt));
		msm_disp_snapshot_state(drm_enc->dev);
		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_RDPTR]);
	}

	atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);

	/* request a ctl reset before the next kickoff */
	phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;

	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, frame_event);

	return -ETIMEDOUT;
}

static int _dpu_encoder_phys_cmd_wait_for_idle(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_encoder_wait_info wait_info;
	int ret;

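	/*
	 * "Idle" here means every outstanding kickoff has been retired by a
	 * pingpong-done interrupt, i.e. pending_kickoff_cnt has drained to
	 * zero.
	 */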
	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
			phys_enc->irq[INTR_IDX_PINGPONG],
			dpu_encoder_phys_cmd_pp_tx_done_irq,
			&wait_info);
	if (ret == -ETIMEDOUT)
		_dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
	else if (!ret)
		cmd_enc->pp_timeout_report_cnt = 0;

	return ret;
}

static int dpu_encoder_phys_cmd_control_vblank_irq(
		struct dpu_encoder_phys *phys_enc,
		bool enable)
{
	int ret = 0;
	int refcount;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}

	refcount = atomic_read(&phys_enc->vblank_refcount);

	/* Slave encoders don't report vblank */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		goto end;

	/* protect against negative */
	if (!enable && refcount == 0) {
		ret = -EINVAL;
		goto end;
	}

	DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      enable ? "true" : "false", refcount);

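	/*
	 * In command mode the pingpong read-pointer interrupt stands in for
	 * vblank: register it on the first reference and release it when
	 * the last reference is dropped.
	 */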
	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
		ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_RDPTR],
				dpu_encoder_phys_cmd_pp_rd_ptr_irq,
				phys_enc);
	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
		ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_RDPTR]);

end:
	if (ret) {
		DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
			  DRMID(phys_enc->parent),
			  phys_enc->hw_pp->idx - PINGPONG_0, ret,
			  enable ? "true" : "false", refcount);
	}

	return ret;
}

static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
		bool enable)
{
	trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
			phys_enc->hw_pp->idx - PINGPONG_0,
			enable, atomic_read(&phys_enc->vblank_refcount));

	if (enable) {
		dpu_core_irq_register_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_PINGPONG],
				dpu_encoder_phys_cmd_pp_tx_done_irq,
				phys_enc);
		dpu_core_irq_register_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_UNDERRUN],
				dpu_encoder_phys_cmd_underrun_irq,
				phys_enc);
		dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);

		if (dpu_encoder_phys_cmd_is_master(phys_enc))
			dpu_core_irq_register_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_CTL_START],
					dpu_encoder_phys_cmd_ctl_start_irq,
					phys_enc);
	} else {
		if (dpu_encoder_phys_cmd_is_master(phys_enc))
			dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_CTL_START]);

		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_UNDERRUN]);
		dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_PINGPONG]);
	}
}

static void dpu_encoder_phys_cmd_tearcheck_config(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_tear_check tc_cfg = { 0 };
	struct drm_display_mode *mode;
	bool tc_enable = true;
	u32 vsync_hz;
	struct dpu_kms *dpu_kms;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	mode = &phys_enc->cached_mode;

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);

	if (!phys_enc->hw_pp->ops.setup_tearcheck ||
		!phys_enc->hw_pp->ops.enable_tearcheck) {
		DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
		return;
	}

	dpu_kms = phys_enc->dpu_kms;

	/*
	 * TE default: dsi byte clock calculated based on 70 fps;
	 * around 14 ms to complete a kickoff cycle if TE is disabled;
	 * vclk_line based on 60 fps; write is faster than read;
	 * init == start == rdptr.
	 *
	 * vsync_count is the ratio of the MDP VSYNC clock frequency to the
	 * LCD panel refresh rate, divided by the number of rows (lines) in
	 * the LCD panel.
	 */
	vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
	if (!vsync_hz || !mode->vtotal || !drm_mode_vrefresh(mode)) {
		DPU_DEBUG_CMDENC(cmd_enc,
			"invalid params - vsync_hz %u vtotal %u vrefresh %u\n",
			vsync_hz, mode->vtotal, drm_mode_vrefresh(mode));
		return;
	}

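	/*
	 * Worked example (illustrative numbers, not from any specific
	 * target): with a 19.2 MHz vsync clock and a panel running at
	 * 60 fps with vtotal 2500, vsync_count = 19200000 / (2500 * 60)
	 * = 128 vsync-clock ticks per panel line.
	 */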
	tc_cfg.vsync_count = vsync_hz /
				(mode->vtotal * drm_mode_vrefresh(mode));

	/*
	 * Set the sync_cfg_height to twice vtotal so that if we lose a
	 * TE event coming from the display TE pin we won't stall immediately
	 */
	tc_cfg.hw_vsync_mode = 1;
	tc_cfg.sync_cfg_height = mode->vtotal * 2;
	tc_cfg.vsync_init_val = mode->vdisplay;
	tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
	tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
	tc_cfg.start_pos = mode->vdisplay;
	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;

	DPU_DEBUG_CMDENC(cmd_enc,
		"tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
		phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
		mode->vtotal, drm_mode_vrefresh(mode));
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc %d enable %u start_pos %u rd_ptr_irq %u\n",
		phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
		tc_cfg.rd_ptr_irq);
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
		phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
		tc_cfg.vsync_count, tc_cfg.vsync_init_val);
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
		phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
		tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);

	phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
	phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
}

static void _dpu_encoder_phys_cmd_pingpong_config(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
		DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL);
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
			phys_enc->hw_pp->idx - PINGPONG_0);
	drm_mode_debug_printmodeline(&phys_enc->cached_mode);

	_dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
	dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
}

static bool dpu_encoder_phys_cmd_needs_single_flush(
		struct dpu_encoder_phys *phys_enc)
{
	/*
	 * we do a separate flush for each CTL and let
	 * CTL_START synchronize them
	 */
	return false;
}

static void dpu_encoder_phys_cmd_enable_helper(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
		return;
	}

	dpu_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);

	_dpu_encoder_phys_cmd_pingpong_config(phys_enc);

	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return;

	ctl = phys_enc->hw_ctl;
	ctl->ops.update_pending_flush_intf(ctl, phys_enc->intf_idx);
}

static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid phys encoder\n");
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);

	if (phys_enc->enable_state == DPU_ENC_ENABLED) {
		DPU_ERROR("already enabled\n");
		return;
	}

	dpu_encoder_phys_cmd_enable_helper(phys_enc);
	phys_enc->enable_state = DPU_ENC_ENABLED;
}

static void _dpu_encoder_phys_cmd_connect_te(
		struct dpu_encoder_phys *phys_enc, bool enable)
{
	if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te)
		return;

	trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
	phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
}

static void dpu_encoder_phys_cmd_prepare_idle_pc(
		struct dpu_encoder_phys *phys_enc)
{
	_dpu_encoder_phys_cmd_connect_te(phys_enc, false);
}

static int dpu_encoder_phys_cmd_get_line_count(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_pingpong *hw_pp;

	if (!phys_enc->hw_pp)
		return -EINVAL;

	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return -EINVAL;

	hw_pp = phys_enc->hw_pp;
	if (!hw_pp->ops.get_line_count)
		return -EINVAL;

	return hw_pp->ops.get_line_count(hw_pp);
}

static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_ctl *ctl;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      phys_enc->enable_state);

	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
		DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
		return;
	}

	if (phys_enc->hw_pp->ops.enable_tearcheck)
		phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);

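	/*
	 * Where supported, unbind the pingpong block from the interface and
	 * queue an interface flush so the unbind takes effect in hardware.
	 */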
	if (phys_enc->hw_intf->ops.bind_pingpong_blk) {
		phys_enc->hw_intf->ops.bind_pingpong_blk(
				phys_enc->hw_intf,
				false,
				phys_enc->hw_pp->idx);

		ctl = phys_enc->hw_ctl;
		ctl->ops.update_pending_flush_intf(ctl, phys_enc->intf_idx);
	}

	phys_enc->enable_state = DPU_ENC_DISABLED;
}

static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);

	kfree(cmd_enc);
}

static void dpu_encoder_phys_cmd_prepare_for_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	int ret;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      atomic_read(&phys_enc->pending_kickoff_cnt));

	/*
	 * Mark the kickoff request as outstanding. If more than one request
	 * is outstanding, we must wait for the previous one to complete.
	 */
	ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
	if (ret) {
		/* force pending_kickoff_cnt 0 to discard failed kickoff */
		atomic_set(&phys_enc->pending_kickoff_cnt, 0);
		DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
			  DRMID(phys_enc->parent), ret,
			  phys_enc->hw_pp->idx - PINGPONG_0);
	}

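	/* disable any active autorefresh before handing the frame to hardware */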
	dpu_encoder_phys_cmd_enable_te(phys_enc);

	DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
			phys_enc->hw_pp->idx - PINGPONG_0,
			atomic_read(&phys_enc->pending_kickoff_cnt));
}

static bool dpu_encoder_phys_cmd_is_ongoing_pptx(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_pp_vsync_info info;

	if (!phys_enc)
		return false;

	phys_enc->hw_pp->ops.get_vsync_info(phys_enc->hw_pp, &info);
	if (info.wr_ptr_line_count > 0 &&
	    info.wr_ptr_line_count < phys_enc->cached_mode.vdisplay)
		return true;

	return false;
}

static void dpu_encoder_phys_cmd_enable_te(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);
	int trial = 0;

	if (!phys_enc)
		return;
	if (!phys_enc->hw_pp)
		return;
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return;

	/* If autorefresh is already disabled, we have nothing to do */
	if (!phys_enc->hw_pp->ops.get_autorefresh(phys_enc->hw_pp, NULL))
		return;

	/*
	 * If autorefresh is enabled, disable it and make sure it is safe to
	 * proceed with the current frame commit/push. The sequence followed
	 * is:
	 * 1. Disable TE
	 * 2. Disable the autorefresh config
	 * 3. Poll until any ongoing frame transfer has finished
	 * 4. Re-enable TE
	 */
	_dpu_encoder_phys_cmd_connect_te(phys_enc, false);
	phys_enc->hw_pp->ops.setup_autorefresh(phys_enc->hw_pp, 0, false);

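	/*
	 * Poll in DPU_ENC_MAX_POLL_TIMEOUT_US steps while a frame transfer
	 * is still in progress, giving up once the accumulated delay
	 * exceeds KICKOFF_TIMEOUT_MS.
	 */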
	do {
		udelay(DPU_ENC_MAX_POLL_TIMEOUT_US);
		if ((trial * DPU_ENC_MAX_POLL_TIMEOUT_US)
				> (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) {
			DPU_ERROR_CMDENC(cmd_enc,
					"disable autorefresh failed\n");
			break;
		}

		trial++;
	} while (dpu_encoder_phys_cmd_is_ongoing_pptx(phys_enc));

	_dpu_encoder_phys_cmd_connect_te(phys_enc, true);

	DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc),
			 "disabled autorefresh\n");
}

static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_encoder_wait_info wait_info;
	int ret;

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
			phys_enc->irq[INTR_IDX_CTL_START],
			dpu_encoder_phys_cmd_ctl_start_irq,
			&wait_info);
	if (ret == -ETIMEDOUT) {
		DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
		ret = -EINVAL;
	}

	return ret;
}

static int dpu_encoder_phys_cmd_wait_for_tx_complete(
		struct dpu_encoder_phys *phys_enc)
{
	int rc;

	rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
	if (rc) {
		DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
			  DRMID(phys_enc->parent), rc,
			  phys_enc->intf_idx - INTF_0);
	}

	return rc;
}

static int dpu_encoder_phys_cmd_wait_for_commit_done(
		struct dpu_encoder_phys *phys_enc)
{
	/* only required for master controller */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return 0;

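	/*
	 * If the CTL is already started, the frame is in flight: wait for
	 * the transfer to complete. Otherwise waiting for the CTL_START
	 * interrupt is sufficient.
	 */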
	if (phys_enc->hw_ctl->ops.is_started(phys_enc->hw_ctl))
		return dpu_encoder_phys_cmd_wait_for_tx_complete(phys_enc);

	return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
}

static int dpu_encoder_phys_cmd_wait_for_vblank(
		struct dpu_encoder_phys *phys_enc)
{
	int rc = 0;
	struct dpu_encoder_phys_cmd *cmd_enc;
	struct dpu_encoder_wait_info wait_info;

	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);

	/* only required for master controller */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return rc;

	wait_info.wq = &cmd_enc->pending_vblank_wq;
	wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

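	/* each rd_ptr interrupt retires one pending vblank request */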
	atomic_inc(&cmd_enc->pending_vblank_cnt);

	rc = dpu_encoder_helper_wait_for_irq(phys_enc,
			phys_enc->irq[INTR_IDX_RDPTR],
			dpu_encoder_phys_cmd_pp_rd_ptr_irq,
			&wait_info);

	return rc;
}

static void dpu_encoder_phys_cmd_handle_post_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	/*
	 * re-enable external TE, either for the first time after enabling
	 * or if it was disabled for autorefresh
	 */
	_dpu_encoder_phys_cmd_connect_te(phys_enc, true);
}

static void dpu_encoder_phys_cmd_trigger_start(
		struct dpu_encoder_phys *phys_enc)
{
	dpu_encoder_helper_trigger_start(phys_enc);
}

static void dpu_encoder_phys_cmd_init_ops(
		struct dpu_encoder_phys_ops *ops)
{
	ops->is_master = dpu_encoder_phys_cmd_is_master;
	ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
	ops->enable = dpu_encoder_phys_cmd_enable;
	ops->disable = dpu_encoder_phys_cmd_disable;
	ops->destroy = dpu_encoder_phys_cmd_destroy;
	ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
	ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
	ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
	ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
	ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
	ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
	ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
	ops->irq_control = dpu_encoder_phys_cmd_irq_control;
	ops->restore = dpu_encoder_phys_cmd_enable_helper;
	ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
	ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
	ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
}

struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
		struct dpu_enc_phys_init_params *p)
{
	struct dpu_encoder_phys *phys_enc = NULL;
	struct dpu_encoder_phys_cmd *cmd_enc = NULL;
	int i;

	DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);

	cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
	if (!cmd_enc) {
		DPU_ERROR("failed to allocate\n");
		return ERR_PTR(-ENOMEM);
	}
	phys_enc = &cmd_enc->base;
	phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
	phys_enc->intf_idx = p->intf_idx;

	dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
	phys_enc->parent = p->parent;
	phys_enc->dpu_kms = p->dpu_kms;
	phys_enc->split_role = p->split_role;
	phys_enc->intf_mode = INTF_MODE_CMD;
	phys_enc->enc_spinlock = p->enc_spinlock;
	cmd_enc->stream_sel = 0;
	phys_enc->enable_state = DPU_ENC_DISABLED;
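	/* interrupt indices are resolved later, in atomic_mode_set() */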
	for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
		phys_enc->irq[i] = -EINVAL;

	atomic_set(&phys_enc->vblank_refcount, 0);
	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
	atomic_set(&cmd_enc->pending_vblank_cnt, 0);
	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
	init_waitqueue_head(&cmd_enc->pending_vblank_wq);

	DPU_DEBUG_CMDENC(cmd_enc, "created\n");

	return phys_enc;
}