// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

#define   CTL_LAYER(lm)                 \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT(lm)             \
	(0x40 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT2(lm)            \
	(0x70 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT3(lm)            \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT4(lm)            \
	(0xB8 + (((lm) - LM_0) * 0x004))
#define   CTL_TOP                       0x014
#define   CTL_FLUSH                     0x018
#define   CTL_START                     0x01C
#define   CTL_PREPARE                   0x0d0
#define   CTL_SW_RESET                  0x030
#define   CTL_LAYER_EXTN_OFFSET         0x40
#define   CTL_MERGE_3D_ACTIVE           0x0E4
#define   CTL_DSC_ACTIVE                0x0E8
#define   CTL_WB_ACTIVE                 0x0EC
#define   CTL_INTF_ACTIVE               0x0F4
#define   CTL_FETCH_PIPE_ACTIVE         0x0FC
#define   CTL_MERGE_3D_FLUSH            0x100
#define   CTL_DSC_FLUSH                 0x104
#define   CTL_WB_FLUSH                  0x108
#define   CTL_INTF_FLUSH                0x110
#define   CTL_INTF_MASTER               0x134
#define   CTL_DSPP_n_FLUSH(n)           ((0x13C) + ((n) * 4))

#define CTL_MIXER_BORDER_OUT            BIT(24)
#define CTL_FLUSH_MASK_CTL              BIT(17)

#define DPU_REG_RESET_TIMEOUT_US        2000
#define MERGE_3D_IDX                    23
#define DSC_IDX                         22
#define INTF_IDX                        31
#define WB_IDX                          16
#define DSPP_IDX                        29  /* From DPU hw rev 7.x.x */
#define CTL_INVALID_BIT                 0xffff
#define CTL_DEFAULT_GROUP_ID            0xf
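/*
 * Bit position in CTL_FETCH_PIPE_ACTIVE for each SSPP, or
 * CTL_INVALID_BIT if the pipe has no fetch-active bit: the DMA pipes
 * map to bits 0-5 and the VIG pipes to bits 16-19.
 */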
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
	1, 2, 3, 4, 5};

static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}

static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
{
	return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
}

static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
	ctx->pending_intf_flush_mask = 0;
	ctx->pending_wb_flush_mask = 0;
	ctx->pending_merge_3d_flush_mask = 0;
	ctx->pending_dsc_flush_mask = 0;

	memset(ctx->pending_dspp_flush_mask, 0,
		sizeof(ctx->pending_dspp_flush_mask));
}

static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}

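/*
 * On hardware with DPU_CTL_ACTIVE_CFG the MERGE_3D, INTF, WB, DSPP and
 * DSC blocks have dedicated flush registers; each one is armed only
 * when its summary bit (MERGE_3D_IDX, INTF_IDX, WB_IDX, DSPP_IDX,
 * DSC_IDX) is also set in the main CTL_FLUSH word, which is written
 * last.
 */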
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
	int dspp;

	if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
				ctx->pending_merge_3d_flush_mask);
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
				ctx->pending_intf_flush_mask);
	if (ctx->pending_flush_mask & BIT(WB_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
				ctx->pending_wb_flush_mask);

	if (ctx->pending_flush_mask & BIT(DSPP_IDX))
		for (dspp = DSPP_0; dspp < DSPP_MAX; dspp++) {
			if (ctx->pending_dspp_flush_mask[dspp - DSPP_0])
				DPU_REG_WRITE(&ctx->hw,
				CTL_DSPP_n_FLUSH(dspp - DSPP_0),
				ctx->pending_dspp_flush_mask[dspp - DSPP_0]);
		}

	if (ctx->pending_flush_mask & BIT(DSC_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
			      ctx->pending_dsc_flush_mask);

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

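/*
 * CTL_FLUSH bit assignments for the source pipes are not contiguous:
 * VIG3 and RGB3 use bits 18 and 19, DMA4 and DMA5 use bits 13 and 14,
 * and the DMA2/DMA3 and cursor pipes sit in the high bits above 20.
 */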
static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
	enum dpu_sspp sspp)
{
	switch (sspp) {
	case SSPP_VIG0:
		ctx->pending_flush_mask |= BIT(0);
		break;
	case SSPP_VIG1:
		ctx->pending_flush_mask |= BIT(1);
		break;
	case SSPP_VIG2:
		ctx->pending_flush_mask |= BIT(2);
		break;
	case SSPP_VIG3:
		ctx->pending_flush_mask |= BIT(18);
		break;
	case SSPP_RGB0:
		ctx->pending_flush_mask |= BIT(3);
		break;
	case SSPP_RGB1:
		ctx->pending_flush_mask |= BIT(4);
		break;
	case SSPP_RGB2:
		ctx->pending_flush_mask |= BIT(5);
		break;
	case SSPP_RGB3:
		ctx->pending_flush_mask |= BIT(19);
		break;
	case SSPP_DMA0:
		ctx->pending_flush_mask |= BIT(11);
		break;
	case SSPP_DMA1:
		ctx->pending_flush_mask |= BIT(12);
		break;
	case SSPP_DMA2:
		ctx->pending_flush_mask |= BIT(24);
		break;
	case SSPP_DMA3:
		ctx->pending_flush_mask |= BIT(25);
		break;
	case SSPP_DMA4:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case SSPP_DMA5:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case SSPP_CURSOR0:
		ctx->pending_flush_mask |= BIT(22);
		break;
	case SSPP_CURSOR1:
		ctx->pending_flush_mask |= BIT(23);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	switch (lm) {
	case LM_0:
		ctx->pending_flush_mask |= BIT(6);
		break;
	case LM_1:
		ctx->pending_flush_mask |= BIT(7);
		break;
	case LM_2:
		ctx->pending_flush_mask |= BIT(8);
		break;
	case LM_3:
		ctx->pending_flush_mask |= BIT(9);
		break;
	case LM_4:
		ctx->pending_flush_mask |= BIT(10);
		break;
	case LM_5:
		ctx->pending_flush_mask |= BIT(20);
		break;
	default:
		break;
	}

	ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
}

static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		ctx->pending_flush_mask |= BIT(31);
		break;
	case INTF_1:
		ctx->pending_flush_mask |= BIT(30);
		break;
	case INTF_2:
		ctx->pending_flush_mask |= BIT(29);
		break;
	case INTF_3:
		ctx->pending_flush_mask |= BIT(28);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	switch (wb) {
	case WB_0:
	case WB_1:
	case WB_2:
		ctx->pending_flush_mask |= BIT(WB_IDX);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
	ctx->pending_flush_mask |= BIT(WB_IDX);
}

static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(INTF_IDX);
}

static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
		enum dpu_merge_3d merge_3d)
{
	ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}

static void dpu_hw_ctl_update_pending_flush_dsc_v1(struct dpu_hw_ctl *ctx,
						   enum dpu_dsc dsc_num)
{
	ctx->pending_dsc_flush_mask |= BIT(dsc_num - DSC_0);
	ctx->pending_flush_mask |= BIT(DSC_IDX);
}

static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
	enum dpu_dspp dspp, u32 dspp_sub_blk)
{
	switch (dspp) {
	case DSPP_0:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case DSPP_1:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case DSPP_2:
		ctx->pending_flush_mask |= BIT(15);
		break;
	case DSPP_3:
		ctx->pending_flush_mask |= BIT(21);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_dspp_sub_blocks(
	struct dpu_hw_ctl *ctx, enum dpu_dspp dspp, u32 dspp_sub_blk)
{
	if (dspp >= DSPP_MAX)
		return;

	switch (dspp_sub_blk) {
	case DPU_DSPP_PCC:
		ctx->pending_dspp_flush_mask[dspp - DSPP_0] |= BIT(4);
		break;
	default:
		return;
	}

	ctx->pending_flush_mask |= BIT(DSPP_IDX);
}

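/*
 * Poll CTL_SW_RESET until the hardware clears the reset bit or
 * timeout_us expires. Returns 0 once the reset has completed, non-zero
 * if the block is still in reset at timeout.
 */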
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * it takes around 30us for the MDP to finish resetting its CTL
	 * path; poll every 20-50us so that the reset should already
	 * have completed by the first poll
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;

		DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
	}

	DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
}

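/*
 * Describes where a pipe's stage value is programmed for a given
 * (pipe, rect) pair: @idx selects the mixercfg word (and thus the
 * CTL_LAYER/CTL_LAYER_EXTn register) and @shift the bit position of
 * the field within it. Entries with idx == 0 use a 3-bit field in
 * CTL_LAYER whose overflow bit sits at @ext_shift in CTL_LAYER_EXT;
 * all other entries use 4-bit fields. idx == -1 marks a combination
 * with no field.
 */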
struct ctl_blend_config {
	int idx, shift, ext_shift;
};

static const struct ctl_blend_config ctl_blend_config[][2] = {
	[SSPP_NONE] = { { -1 }, { -1 } },
	[SSPP_MAX] =  { { -1 }, { -1 } },
	[SSPP_VIG0] = { { 0, 0,  0  }, { 3, 0 } },
	[SSPP_VIG1] = { { 0, 3,  2  }, { 3, 4 } },
	[SSPP_VIG2] = { { 0, 6,  4  }, { 3, 8 } },
	[SSPP_VIG3] = { { 0, 26, 6  }, { 3, 12 } },
	[SSPP_RGB0] = { { 0, 9,  8  }, { -1 } },
	[SSPP_RGB1] = { { 0, 12, 10 }, { -1 } },
	[SSPP_RGB2] = { { 0, 15, 12 }, { -1 } },
	[SSPP_RGB3] = { { 0, 29, 14 }, { -1 } },
	[SSPP_DMA0] = { { 0, 18, 16 }, { 2, 8 } },
	[SSPP_DMA1] = { { 0, 21, 18 }, { 2, 12 } },
	[SSPP_DMA2] = { { 2, 0      }, { 2, 16 } },
	[SSPP_DMA3] = { { 2, 4      }, { 2, 20 } },
	[SSPP_DMA4] = { { 4, 0      }, { 4, 8 } },
	[SSPP_DMA5] = { { 4, 4      }, { 4, 12 } },
	[SSPP_CURSOR0] =  { { 1, 20 }, { -1 } },
	[SSPP_CURSOR1] =  { { 1, 26 }, { -1 } },
};

static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mix, ext, mix_ext;
	u32 mixercfg[5] = { 0 };
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg[0] = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;
		mix_ext = (i + 1) & 0xf;

		for (j = 0; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];
			enum dpu_sspp pipe = stage_cfg->stage[i][j];
			const struct ctl_blend_config *cfg =
				&ctl_blend_config[pipe][rect_index == DPU_SSPP_RECT_1];

			/*
			 * CTL_LAYER has 3-bit fields (with extra bits in the
			 * EXT register), while all EXT registers have 4-bit
			 * fields.
			 */
			if (cfg->idx == -1) {
				continue;
			} else if (cfg->idx == 0) {
				mixercfg[0] |= mix << cfg->shift;
				mixercfg[1] |= ext << cfg->ext_shift;
			} else {
				mixercfg[cfg->idx] |= mix_ext << cfg->shift;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg[0]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg[1]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg[2]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg[3]);
	if ((test_bit(DPU_CTL_HAS_LAYER_EXT4, &ctx->caps->features)))
		DPU_REG_WRITE(c, CTL_LAYER_EXT4(lm), mixercfg[4]);
}

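/*
 * With DPU_CTL_ACTIVE_CFG the CTL path is programmed through 'active'
 * registers: the INTF/WB active bitmaps are read-modify-written so one
 * CTL can drive several blocks at once, instead of encoding a single
 * interface in CTL_TOP as the legacy dpu_hw_ctl_intf_cfg() does.
 */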
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 mode_sel = 0;

	/* CTL_TOP[31:28] carries a group_id used to collate CTL paths
	 * per VM. Explicitly disable it until VM support is added in
	 * SW; the power-on reset value of this field is not "disabled".
	 */
	if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
		mode_sel = CTL_DEFAULT_GROUP_ID << 28;

	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);

	if (cfg->intf)
		intf_active |= BIT(cfg->intf - INTF_0);

	if (cfg->wb)
		wb_active |= BIT(cfg->wb - WB_0);

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);

	if (cfg->merge_3d)
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
			      BIT(cfg->merge_3d - MERGE_3D_0));

	if (cfg->dsc)
		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
}

static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	if (cfg->wb)
		intf_cfg |= (cfg->wb & 0x3) + 2;

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 merge3d_active = 0;
	u32 dsc_active;

	/*
	 * This API resets each portion of the CTL path: it clears the
	 * sspps staged on the lm, the merge_3d block, interfaces,
	 * writeback etc. to ensure a clean teardown of the pipeline.
	 * It is used for writeback to begin with, to have a proper
	 * teardown of the writeback session, but upon further
	 * validation it can be extended to all interfaces.
	 */
	if (cfg->merge_3d) {
		merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
		merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
				merge3d_active);
	}

	dpu_hw_ctl_clear_all_blendstages(ctx);

	if (cfg->intf) {
		intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
		intf_active &= ~BIT(cfg->intf - INTF_0);
		DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	}

	if (cfg->wb) {
		wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
		wb_active &= ~BIT(cfg->wb - WB_0);
		DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
	}

	if (cfg->dsc) {
		dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
		dsc_active &= ~cfg->dsc;
		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
	}
}

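/*
 * Translate the bitmap of staged SSPPs into CTL_FETCH_PIPE_ACTIVE so
 * the hardware only fetches for pipes that are actually in use; a
 * NULL bitmap clears every fetch bit.
 */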
static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
	unsigned long *fetch_active)
{
	int i;
	u32 val = 0;

	if (fetch_active) {
		for (i = 0; i < SSPP_MAX; i++) {
			if (test_bit(i, fetch_active) &&
				fetch_tbl[i] != CTL_INVALID_BIT)
				val |= BIT(fetch_tbl[i]);
		}
	}

	DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}

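/*
 * Wire up the ops according to the CTL block's feature bits. A caller
 * typically accumulates flush bits and then kicks the frame, roughly
 * as follows (an illustrative sketch only; the real sequencing lives
 * in the encoder code):
 *
 *	ctl->ops.update_pending_flush_mixer(ctl, LM_0);
 *	ctl->ops.update_pending_flush_intf(ctl, INTF_1);
 *	ctl->ops.trigger_flush(ctl);
 *	ctl->ops.trigger_start(ctl);
 */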
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
		unsigned long cap)
{
	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
		ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf_v1;
		ops->update_pending_flush_merge_3d =
			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
		ops->update_pending_flush_dsc =
			dpu_hw_ctl_update_pending_flush_dsc_v1;
	} else {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
	}
	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
	ops->trigger_start = dpu_hw_ctl_trigger_start;
	ops->is_started = dpu_hw_ctl_is_started;
	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
	ops->reset = dpu_hw_ctl_reset_control;
	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
	ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
	ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
	if (cap & BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
	else
		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;

	if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
		ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
}

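/**
 * dpu_hw_ctl_init() - Initialize a CTL path hardware driver object
 * @cfg: CTL catalog entry this block is described by
 * @addr: mapped register I/O base address
 * @mixer_count: number of entries in @mixer
 * @mixer: layer mixer catalog, used to look up blend stage counts
 *
 * Return: the allocated dpu_hw_ctl context, or an ERR_PTR on failure.
 * The caller must release it with dpu_hw_ctl_destroy().
 */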
struct dpu_hw_ctl *dpu_hw_ctl_init(const struct dpu_ctl_cfg *cfg,
		void __iomem *addr,
		u32 mixer_count,
		const struct dpu_lm_cfg *mixer)
{
	struct dpu_hw_ctl *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	c->hw.blk_addr = addr + cfg->base;
	c->hw.log_mask = DPU_DBG_MASK_CTL;

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = cfg->id;
	c->mixer_count = mixer_count;
	c->mixer_hw_caps = mixer;

	return c;
}

void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
	kfree(ctx);
}