// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

#define   CTL_LAYER(lm)                 \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT(lm)             \
	(0x40 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT2(lm)            \
	(0x70 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT3(lm)            \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT4(lm)            \
	(0xB8 + (((lm) - LM_0) * 0x004))
#define   CTL_TOP                       0x014
#define   CTL_FLUSH                     0x018
#define   CTL_START                     0x01C
#define   CTL_PREPARE                   0x0d0
#define   CTL_SW_RESET                  0x030
#define   CTL_LAYER_EXTN_OFFSET         0x40
#define   CTL_MERGE_3D_ACTIVE           0x0E4
#define   CTL_DSC_ACTIVE                0x0E8
#define   CTL_WB_ACTIVE                 0x0EC
#define   CTL_INTF_ACTIVE               0x0F4
#define   CTL_FETCH_PIPE_ACTIVE         0x0FC
#define   CTL_MERGE_3D_FLUSH            0x100
#define   CTL_DSC_FLUSH                 0x104
#define   CTL_WB_FLUSH                  0x108
#define   CTL_INTF_FLUSH                0x110
#define   CTL_INTF_MASTER               0x134
#define   CTL_DSPP_n_FLUSH(n)           ((0x13C) + ((n) * 4))

#define CTL_MIXER_BORDER_OUT            BIT(24)
#define CTL_FLUSH_MASK_CTL              BIT(17)

#define DPU_REG_RESET_TIMEOUT_US        2000
#define MERGE_3D_IDX                    23
#define DSC_IDX                         22
#define INTF_IDX                        31
#define WB_IDX                          16
#define DSPP_IDX                        29  /* From DPU hw rev 7.x.x */
#define CTL_INVALID_BIT                 0xffff
#define CTL_DEFAULT_GROUP_ID		0xf

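/*
 * Per-SSPP bit position in CTL_FETCH_PIPE_ACTIVE: VIG pipes map to bits
 * 16-19 and DMA pipes to bits 0-3; pipes without a fetch-active bit are
 * marked CTL_INVALID_BIT.
 */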
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
	1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT};

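/*
 * Look up the catalog entry for @ctl and fill in its register block mapping.
 * Returns the matching dpu_ctl_cfg, or an ERR_PTR if @ctl is not described
 * by the catalog.
 */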
static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
		const struct dpu_mdss_cfg *m,
		void __iomem *addr,
		struct dpu_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->ctl_count; i++) {
		if (ctl == m->ctl[i].id) {
			b->blk_addr = addr + m->ctl[i].base;
			b->log_mask = DPU_DBG_MASK_CTL;
			return &m->ctl[i];
		}
	}
	return ERR_PTR(-ENOMEM);
}

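/*
 * Return the maximum number of blend stages supported by mixer @lm, or
 * -EINVAL if the mixer is not found in @mixer.
 */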
static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}

static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
{
	return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
}

static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;

	memset(ctx->pending_dspp_flush_mask, 0,
		sizeof(ctx->pending_dspp_flush_mask));
}

static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}

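/*
 * Active-CTL flush: write the pending per-block flush registers (merge_3d,
 * interface, writeback and DSPP) before kicking the main CTL_FLUSH register.
 */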
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
	int dspp;

	if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
				ctx->pending_merge_3d_flush_mask);
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
				ctx->pending_intf_flush_mask);
	if (ctx->pending_flush_mask & BIT(WB_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
				ctx->pending_wb_flush_mask);

	if (ctx->pending_flush_mask & BIT(DSPP_IDX))
		for (dspp = DSPP_0; dspp < DSPP_MAX; dspp++) {
			if (ctx->pending_dspp_flush_mask[dspp - DSPP_0])
				DPU_REG_WRITE(&ctx->hw,
				CTL_DSPP_n_FLUSH(dspp - DSPP_0),
				ctx->pending_dspp_flush_mask[dspp - DSPP_0]);
		}
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

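/*
 * Legacy (pre-active-CTL) flush: each source pipe has a dedicated bit in
 * CTL_FLUSH.
 */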
static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
	enum dpu_sspp sspp)
{
	switch (sspp) {
	case SSPP_VIG0:
		ctx->pending_flush_mask |= BIT(0);
		break;
	case SSPP_VIG1:
		ctx->pending_flush_mask |= BIT(1);
		break;
	case SSPP_VIG2:
		ctx->pending_flush_mask |= BIT(2);
		break;
	case SSPP_VIG3:
		ctx->pending_flush_mask |= BIT(18);
		break;
	case SSPP_RGB0:
		ctx->pending_flush_mask |= BIT(3);
		break;
	case SSPP_RGB1:
		ctx->pending_flush_mask |= BIT(4);
		break;
	case SSPP_RGB2:
		ctx->pending_flush_mask |= BIT(5);
		break;
	case SSPP_RGB3:
		ctx->pending_flush_mask |= BIT(19);
		break;
	case SSPP_DMA0:
		ctx->pending_flush_mask |= BIT(11);
		break;
	case SSPP_DMA1:
		ctx->pending_flush_mask |= BIT(12);
		break;
	case SSPP_DMA2:
		ctx->pending_flush_mask |= BIT(24);
		break;
	case SSPP_DMA3:
		ctx->pending_flush_mask |= BIT(25);
		break;
	case SSPP_CURSOR0:
		ctx->pending_flush_mask |= BIT(22);
		break;
	case SSPP_CURSOR1:
		ctx->pending_flush_mask |= BIT(23);
		break;
	default:
		break;
	}
}

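/*
 * Legacy CTL_FLUSH bit for each layer mixer; flushing a mixer also requires
 * flushing the CTL block itself (CTL_FLUSH_MASK_CTL).
 */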
static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	switch (lm) {
	case LM_0:
		ctx->pending_flush_mask |= BIT(6);
		break;
	case LM_1:
		ctx->pending_flush_mask |= BIT(7);
		break;
	case LM_2:
		ctx->pending_flush_mask |= BIT(8);
		break;
	case LM_3:
		ctx->pending_flush_mask |= BIT(9);
		break;
	case LM_4:
		ctx->pending_flush_mask |= BIT(10);
		break;
	case LM_5:
		ctx->pending_flush_mask |= BIT(20);
		break;
	default:
		break;
	}

	ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
}

static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		ctx->pending_flush_mask |= BIT(31);
		break;
	case INTF_1:
		ctx->pending_flush_mask |= BIT(30);
		break;
	case INTF_2:
		ctx->pending_flush_mask |= BIT(29);
		break;
	case INTF_3:
		ctx->pending_flush_mask |= BIT(28);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	switch (wb) {
	case WB_0:
	case WB_1:
	case WB_2:
		ctx->pending_flush_mask |= BIT(WB_IDX);
		break;
	default:
		break;
	}
}

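/*
 * The *_v1 (active CTL) helpers record the block to flush in a dedicated
 * mask and set the aggregate WB/INTF/MERGE_3D bit in the main flush mask;
 * dpu_hw_ctl_trigger_flush_v1() writes both out.
 */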
static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
	ctx->pending_flush_mask |= BIT(WB_IDX);
}

static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(INTF_IDX);
}

static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
		enum dpu_merge_3d merge_3d)
{
	ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}

static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
	enum dpu_dspp dspp, u32 dspp_sub_blk)
{
	switch (dspp) {
	case DSPP_0:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case DSPP_1:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case DSPP_2:
		ctx->pending_flush_mask |= BIT(15);
		break;
	case DSPP_3:
		ctx->pending_flush_mask |= BIT(21);
		break;
	default:
		break;
	}
}

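/*
 * On hardware with per-sub-block DSPP flush (DPU_CTL_DSPP_SUB_BLOCK_FLUSH),
 * record the sub-block bit in the per-DSPP mask and mark the aggregate
 * DSPP_IDX bit in the main flush mask.
 */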
static void dpu_hw_ctl_update_pending_flush_dspp_sub_blocks(
	struct dpu_hw_ctl *ctx, enum dpu_dspp dspp, u32 dspp_sub_blk)
{
	if (dspp >= DSPP_MAX)
		return;

	switch (dspp_sub_blk) {
	case DPU_DSPP_IGC:
		ctx->pending_dspp_flush_mask[dspp - DSPP_0] |= BIT(2);
		break;
	case DPU_DSPP_PCC:
		ctx->pending_dspp_flush_mask[dspp - DSPP_0] |= BIT(4);
		break;
	case DPU_DSPP_GC:
		ctx->pending_dspp_flush_mask[dspp - DSPP_0] |= BIT(5);
		break;
	default:
		return;
	}

	ctx->pending_flush_mask |= BIT(DSPP_IDX);
}

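/*
 * Poll CTL_SW_RESET until the hardware clears the reset bit or @timeout_us
 * expires. Returns the last status read, i.e. non-zero if the reset is still
 * pending.
 */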
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * It takes around 30us for the MDP to finish resetting its CTL path;
	 * poll every 50us so the reset should be complete by the first poll.
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;

		DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
	}

	DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
}

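/*
 * Location of a pipe's blend stage field in the CTL_LAYER register set:
 * @idx selects the mixercfg word (0 = CTL_LAYER, 1 = EXT, 2 = EXT2, ...),
 * @shift is the bit position of the stage field, and @ext_shift is the
 * position of the extra stage bit in CTL_LAYER_EXT for pipes described by
 * word 0. The second entry of each pair is used for DPU_SSPP_RECT_1 of
 * multirect pipes.
 */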
struct ctl_blend_config {
	int idx, shift, ext_shift;
};

static const struct ctl_blend_config ctl_blend_config[][2] = {
	[SSPP_NONE] = { { -1 }, { -1 } },
	[SSPP_MAX] =  { { -1 }, { -1 } },
	[SSPP_VIG0] = { { 0, 0,  0  }, { 3, 0 } },
	[SSPP_VIG1] = { { 0, 3,  2  }, { 3, 4 } },
	[SSPP_VIG2] = { { 0, 6,  4  }, { 3, 8 } },
	[SSPP_VIG3] = { { 0, 26, 6  }, { 3, 12 } },
	[SSPP_RGB0] = { { 0, 9,  8  }, { -1 } },
	[SSPP_RGB1] = { { 0, 12, 10 }, { -1 } },
	[SSPP_RGB2] = { { 0, 15, 12 }, { -1 } },
	[SSPP_RGB3] = { { 0, 29, 14 }, { -1 } },
	[SSPP_DMA0] = { { 0, 18, 16 }, { 2, 8 } },
	[SSPP_DMA1] = { { 0, 21, 18 }, { 2, 12 } },
	[SSPP_DMA2] = { { 2, 0      }, { 2, 16 } },
	[SSPP_DMA3] = { { 2, 4      }, { 2, 20 } },
	[SSPP_DMA4] = { { 4, 0      }, { 4, 8 } },
	[SSPP_DMA5] = { { 4, 4      }, { 4, 12 } },
	[SSPP_CURSOR0] =  { { 1, 20 }, { -1 } },
	[SSPP_CURSOR1] =  { { 1, 26 }, { -1 } },
};

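/*
 * Program the CTL_LAYER/CTL_LAYER_EXT* registers of mixer @lm from
 * @stage_cfg. Border-out is always enabled; a NULL @stage_cfg simply clears
 * all blend stages.
 */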
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mix, ext, mix_ext;
	u32 mixercfg[5] = { 0 };
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg[0] = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;
		mix_ext = (i + 1) & 0xf;

		for (j = 0; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];
			enum dpu_sspp pipe = stage_cfg->stage[i][j];
			const struct ctl_blend_config *cfg =
				&ctl_blend_config[pipe][rect_index == DPU_SSPP_RECT_1];

			/*
			 * CTL_LAYER has a 3-bit field (plus an extra bit in the
			 * EXT register); all EXT registers have 4-bit fields.
			 */
			if (cfg->idx == -1) {
				continue;
			} else if (cfg->idx == 0) {
				mixercfg[0] |= mix << cfg->shift;
				mixercfg[1] |= ext << cfg->ext_shift;
			} else {
				mixercfg[cfg->idx] |= mix_ext << cfg->shift;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg[0]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg[1]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg[2]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg[3]);
	if (test_bit(DPU_CTL_HAS_LAYER_EXT4, &ctx->caps->features))
		DPU_REG_WRITE(c, CTL_LAYER_EXT4(lm), mixercfg[4]);
}

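/*
 * Active-CTL interface configuration: program the CTL_TOP mode select and
 * add the interface, writeback, merge_3d and DSC blocks used by this path
 * to their respective *_ACTIVE registers.
 */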
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 mode_sel = 0;

	/* CTL_TOP[31:28] carries group_id to collate CTL paths
	 * per VM. Explicitly disable it until VM support is
	 * added in SW. The power-on reset value does not disable it.
	 */
	if (test_bit(DPU_CTL_VM_CFG, &ctx->caps->features))
		mode_sel = CTL_DEFAULT_GROUP_ID << 28;

	if (cfg->dsc)
		DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH, cfg->dsc);

	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);

	if (cfg->intf)
		intf_active |= BIT(cfg->intf - INTF_0);

	if (cfg->wb)
		wb_active |= BIT(cfg->wb - WB_0);

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);

	if (cfg->merge_3d)
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
			      BIT(cfg->merge_3d - MERGE_3D_0));
	if (cfg->dsc) {
		DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, DSC_IDX);
		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
	}
}

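/*
 * Legacy (pre-active-CTL) interface configuration: the interface, 3D mux
 * mode, writeback and command/video mode selection are all packed into the
 * single CTL_TOP register.
 */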
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	if (cfg->wb)
		intf_cfg |= (cfg->wb & 0x3) + 2;

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 merge3d_active = 0;

	/*
	 * This API resets each portion of the CTL path: it clears the SSPPs
	 * staged on the LM, the merge_3d block, interfaces, writeback etc. to
	 * ensure a clean teardown of the pipeline. It is used for writeback
	 * to begin with, to properly tear down the writeback session, but
	 * upon further validation it can be extended to all interfaces.
	 */
	if (cfg->merge_3d) {
		merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
		merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
				merge3d_active);
	}

	dpu_hw_ctl_clear_all_blendstages(ctx);

	if (cfg->intf) {
		intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
		intf_active &= ~BIT(cfg->intf - INTF_0);
		DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	}

	if (cfg->wb) {
		wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
		wb_active &= ~BIT(cfg->wb - WB_0);
		DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
	}
}

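/*
 * Translate the bitmap of active SSPPs in @fetch_active into
 * CTL_FETCH_PIPE_ACTIVE bits using fetch_tbl.
 */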
static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
	unsigned long *fetch_active)
{
	int i;
	u32 val = 0;

	if (fetch_active) {
		for (i = 0; i < SSPP_MAX; i++) {
			if (test_bit(i, fetch_active) &&
				fetch_tbl[i] != CTL_INVALID_BIT)
				val |= BIT(fetch_tbl[i]);
		}
	}

	DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}

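/*
 * Populate the ops table based on the CTL capability bits: DPU_CTL_ACTIVE_CFG
 * selects the *_v1 (active CTL) variants, DPU_CTL_DSPP_SUB_BLOCK_FLUSH the
 * per-sub-block DSPP flush, and DPU_CTL_FETCH_ACTIVE enables fetch-pipe
 * programming.
 */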
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
		unsigned long cap)
{
	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
		ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf_v1;
		ops->update_pending_flush_merge_3d =
			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
	} else {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
	}
	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
	ops->trigger_start = dpu_hw_ctl_trigger_start;
	ops->is_started = dpu_hw_ctl_is_started;
	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
	ops->reset = dpu_hw_ctl_reset_control;
	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
	ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
	ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
	if (cap & BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
	else
		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;

	if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
		ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
}

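/*
 * dpu_hw_ctl_init() - Initialize the CTL path hw driver object for @idx.
 * @idx:  CTL path index for which the driver object is required
 * @addr: mapped register I/O address
 * @m:    pointer to the MDSS catalog data
 *
 * Returns a dpu_hw_ctl context on success or an ERR_PTR on failure; the
 * caller releases it with dpu_hw_ctl_destroy().
 */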
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
		void __iomem *addr,
		const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_ctl *c;
	const struct dpu_ctl_cfg *cfg;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _ctl_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		pr_err("failed to create dpu_hw_ctl %d\n", idx);
		return ERR_PTR(-EINVAL);
	}

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = idx;
	c->mixer_count = m->mixer_count;
	c->mixer_hw_caps = m->mixer;

	return c;
}

void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
	kfree(ctx);
}