// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

#define   CTL_LAYER(lm)                 \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT(lm)             \
	(0x40 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT2(lm)            \
	(0x70 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT3(lm)            \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT4(lm)            \
	(0xB8 + (((lm) - LM_0) * 0x004))
#define   CTL_TOP                       0x014
#define   CTL_FLUSH                     0x018
#define   CTL_START                     0x01C
#define   CTL_PREPARE                   0x0d0
#define   CTL_SW_RESET                  0x030
#define   CTL_LAYER_EXTN_OFFSET         0x40
#define   CTL_MERGE_3D_ACTIVE           0x0E4
#define   CTL_DSC_ACTIVE                0x0E8
#define   CTL_WB_ACTIVE                 0x0EC
#define   CTL_INTF_ACTIVE               0x0F4
#define   CTL_FETCH_PIPE_ACTIVE         0x0FC
#define   CTL_MERGE_3D_FLUSH            0x100
#define   CTL_DSC_FLUSH                 0x104
#define   CTL_WB_FLUSH                  0x108
#define   CTL_INTF_FLUSH                0x110
#define   CTL_INTF_MASTER               0x134
#define   CTL_DSPP_n_FLUSH(n)           ((0x13C) + ((n) * 4))

#define CTL_MIXER_BORDER_OUT            BIT(24)
#define CTL_FLUSH_MASK_CTL              BIT(17)

#define DPU_REG_RESET_TIMEOUT_US        2000
#define  MERGE_3D_IDX   23
#define  DSC_IDX        22
#define  INTF_IDX       31
#define  WB_IDX         16
#define  DSPP_IDX       29  /* From DPU hw rev 7.x.x */
#define CTL_INVALID_BIT                 0xffff
#define CTL_DEFAULT_GROUP_ID		0xf

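/*
 * Map each SSPP index to its bit position in CTL_FETCH_PIPE_ACTIVE; pipes
 * marked CTL_INVALID_BIT have no fetch-active bit in that register.
 */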
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
	1, 2, 3, 4, 5};

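/*
 * Return the maximum number of blend stages supported by mixer @lm,
 * or -EINVAL if @lm is not part of the @mixer list.
 */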
static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}

static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
{
	return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
}

static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
	ctx->pending_intf_flush_mask = 0;
	ctx->pending_wb_flush_mask = 0;
	ctx->pending_merge_3d_flush_mask = 0;
	ctx->pending_dsc_flush_mask = 0;

	memset(ctx->pending_dspp_flush_mask, 0,
		sizeof(ctx->pending_dspp_flush_mask));
}

static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}

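/*
 * Active-CTL flush: write the pending per-block masks (merge-3D, interface,
 * writeback, DSPP and DSC) into their dedicated flush registers before
 * kicking the main CTL_FLUSH register.
 */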
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
	int dspp;

	if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
				ctx->pending_merge_3d_flush_mask);
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
				ctx->pending_intf_flush_mask);
	if (ctx->pending_flush_mask & BIT(WB_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
				ctx->pending_wb_flush_mask);

	if (ctx->pending_flush_mask & BIT(DSPP_IDX))
		for (dspp = DSPP_0; dspp < DSPP_MAX; dspp++) {
			if (ctx->pending_dspp_flush_mask[dspp - DSPP_0])
				DPU_REG_WRITE(&ctx->hw,
				CTL_DSPP_n_FLUSH(dspp - DSPP_0),
				ctx->pending_dspp_flush_mask[dspp - DSPP_0]);
		}

	if (ctx->pending_flush_mask & BIT(DSC_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
			      ctx->pending_dsc_flush_mask);

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

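/* Legacy CTL_FLUSH bit assignment for each source pipe (SSPP). */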
static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
	enum dpu_sspp sspp)
{
	switch (sspp) {
	case SSPP_VIG0:
		ctx->pending_flush_mask |= BIT(0);
		break;
	case SSPP_VIG1:
		ctx->pending_flush_mask |= BIT(1);
		break;
	case SSPP_VIG2:
		ctx->pending_flush_mask |= BIT(2);
		break;
	case SSPP_VIG3:
		ctx->pending_flush_mask |= BIT(18);
		break;
	case SSPP_RGB0:
		ctx->pending_flush_mask |= BIT(3);
		break;
	case SSPP_RGB1:
		ctx->pending_flush_mask |= BIT(4);
		break;
	case SSPP_RGB2:
		ctx->pending_flush_mask |= BIT(5);
		break;
	case SSPP_RGB3:
		ctx->pending_flush_mask |= BIT(19);
		break;
	case SSPP_DMA0:
		ctx->pending_flush_mask |= BIT(11);
		break;
	case SSPP_DMA1:
		ctx->pending_flush_mask |= BIT(12);
		break;
	case SSPP_DMA2:
		ctx->pending_flush_mask |= BIT(24);
		break;
	case SSPP_DMA3:
		ctx->pending_flush_mask |= BIT(25);
		break;
	case SSPP_DMA4:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case SSPP_DMA5:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case SSPP_CURSOR0:
		ctx->pending_flush_mask |= BIT(22);
		break;
	case SSPP_CURSOR1:
		ctx->pending_flush_mask |= BIT(23);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	switch (lm) {
	case LM_0:
		ctx->pending_flush_mask |= BIT(6);
		break;
	case LM_1:
		ctx->pending_flush_mask |= BIT(7);
		break;
	case LM_2:
		ctx->pending_flush_mask |= BIT(8);
		break;
	case LM_3:
		ctx->pending_flush_mask |= BIT(9);
		break;
	case LM_4:
		ctx->pending_flush_mask |= BIT(10);
		break;
	case LM_5:
		ctx->pending_flush_mask |= BIT(20);
		break;
	default:
		break;
	}

	ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
}

static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		ctx->pending_flush_mask |= BIT(31);
		break;
	case INTF_1:
		ctx->pending_flush_mask |= BIT(30);
		break;
	case INTF_2:
		ctx->pending_flush_mask |= BIT(29);
		break;
	case INTF_3:
		ctx->pending_flush_mask |= BIT(28);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	switch (wb) {
	case WB_0:
	case WB_1:
	case WB_2:
		ctx->pending_flush_mask |= BIT(WB_IDX);
		break;
	default:
		break;
	}
}

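/*
 * The *_v1 helpers below are used on active-CTL hardware: each one records
 * the block-specific bit in its dedicated pending mask and sets the matching
 * summary bit (WB_IDX, INTF_IDX, MERGE_3D_IDX, DSC_IDX) in pending_flush_mask,
 * so that dpu_hw_ctl_trigger_flush_v1() writes both registers.
 */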
static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
	ctx->pending_flush_mask |= BIT(WB_IDX);
}

static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(INTF_IDX);
}

static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
		enum dpu_merge_3d merge_3d)
{
	ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}

static void dpu_hw_ctl_update_pending_flush_dsc_v1(struct dpu_hw_ctl *ctx,
						   enum dpu_dsc dsc_num)
{
	ctx->pending_dsc_flush_mask |= BIT(dsc_num - DSC_0);
	ctx->pending_flush_mask |= BIT(DSC_IDX);
}

static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
	enum dpu_dspp dspp, u32 dspp_sub_blk)
{
	switch (dspp) {
	case DSPP_0:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case DSPP_1:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case DSPP_2:
		ctx->pending_flush_mask |= BIT(15);
		break;
	case DSPP_3:
		ctx->pending_flush_mask |= BIT(21);
		break;
	default:
		break;
	}
}

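/*
 * Per-sub-block DSPP flush, used when the CTL advertises
 * DPU_CTL_DSPP_SUB_BLOCK_FLUSH (DPU hw rev 7.x.x and later): the sub-block
 * bit is accumulated in the per-DSPP mask written to CTL_DSPP_n_FLUSH.
 * Only the PCC sub-block is handled so far.
 */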
static void dpu_hw_ctl_update_pending_flush_dspp_sub_blocks(
	struct dpu_hw_ctl *ctx, enum dpu_dspp dspp, u32 dspp_sub_blk)
{
	if (dspp >= DSPP_MAX)
		return;

	switch (dspp_sub_blk) {
	case DPU_DSPP_PCC:
		ctx->pending_dspp_flush_mask[dspp - DSPP_0] |= BIT(4);
		break;
	default:
		return;
	}

	ctx->pending_flush_mask |= BIT(DSPP_IDX);
}

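/*
 * Poll CTL_SW_RESET until the hardware clears the reset bit or @timeout_us
 * elapses. Returns 0 once the reset has completed, non-zero if the CTL is
 * still in reset.
 */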
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * it takes around 30 us for the MDP to finish resetting its CTL path;
	 * poll every 50 us so the reset should be complete by the first poll
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;

		DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
	}

	DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
}

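/*
 * Per-SSPP location of the blend stage fields: @idx selects the mixercfg
 * word (0 = CTL_LAYER, 1 = CTL_LAYER_EXT, 2 = EXT2, 3 = EXT3, 4 = EXT4),
 * @shift is the bit offset of the stage value and @ext_shift is the offset
 * of the extra bit in CTL_LAYER_EXT for the 3-bit CTL_LAYER fields. The
 * second entry of each pair is used for DPU_SSPP_RECT_1 of multirect pipes;
 * an @idx of -1 means the pipe cannot be staged through that entry.
 */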
struct ctl_blend_config {
	int idx, shift, ext_shift;
};

static const struct ctl_blend_config ctl_blend_config[][2] = {
	[SSPP_NONE] = { { -1 }, { -1 } },
	[SSPP_MAX] =  { { -1 }, { -1 } },
	[SSPP_VIG0] = { { 0, 0,  0  }, { 3, 0 } },
	[SSPP_VIG1] = { { 0, 3,  2  }, { 3, 4 } },
	[SSPP_VIG2] = { { 0, 6,  4  }, { 3, 8 } },
	[SSPP_VIG3] = { { 0, 26, 6  }, { 3, 12 } },
	[SSPP_RGB0] = { { 0, 9,  8  }, { -1 } },
	[SSPP_RGB1] = { { 0, 12, 10 }, { -1 } },
	[SSPP_RGB2] = { { 0, 15, 12 }, { -1 } },
	[SSPP_RGB3] = { { 0, 29, 14 }, { -1 } },
	[SSPP_DMA0] = { { 0, 18, 16 }, { 2, 8 } },
	[SSPP_DMA1] = { { 0, 21, 18 }, { 2, 12 } },
	[SSPP_DMA2] = { { 2, 0      }, { 2, 16 } },
	[SSPP_DMA3] = { { 2, 4      }, { 2, 20 } },
	[SSPP_DMA4] = { { 4, 0      }, { 4, 8 } },
	[SSPP_DMA5] = { { 4, 4      }, { 4, 12 } },
	[SSPP_CURSOR0] =  { { 1, 20 }, { -1 } },
	[SSPP_CURSOR1] =  { { 1, 26 }, { -1 } },
};

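/*
 * Program the CTL_LAYER/CTL_LAYER_EXT* registers of mixer @lm from
 * @stage_cfg. A NULL @stage_cfg leaves only border-fill output enabled.
 */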
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mix, ext, mix_ext;
	u32 mixercfg[5] = { 0 };
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg[0] = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;
		mix_ext = (i + 1) & 0xf;

		for (j = 0; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];
			enum dpu_sspp pipe = stage_cfg->stage[i][j];
			const struct ctl_blend_config *cfg =
				&ctl_blend_config[pipe][rect_index == DPU_SSPP_RECT_1];

			/*
			 * CTL_LAYER has a 3-bit field (plus an extra bit in the
			 * EXT register); all EXT registers have 4-bit fields.
			 */
			if (cfg->idx == -1) {
				continue;
			} else if (cfg->idx == 0) {
				mixercfg[0] |= mix << cfg->shift;
				mixercfg[1] |= ext << cfg->ext_shift;
			} else {
				mixercfg[cfg->idx] |= mix_ext << cfg->shift;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg[0]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg[1]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg[2]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg[3]);
	if ((test_bit(DPU_CTL_HAS_LAYER_EXT4, &ctx->caps->features)))
		DPU_REG_WRITE(c, CTL_LAYER_EXT4(lm), mixercfg[4]);
}

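/*
 * Active-CTL (v1) interface configuration: the interface and writeback
 * selections are accumulated into CTL_INTF_ACTIVE/CTL_WB_ACTIVE with a
 * read-modify-write, and the merge-3D and DSC active blocks are programmed
 * in their own registers.
 */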
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 mode_sel = 0;

	/* CTL_TOP[31:28] carries the group_id used to collate CTL paths
	 * per VM. Explicitly disable it until VM support is added in SW,
	 * as the power-on reset value does not disable it.
	 */
	if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
		mode_sel = CTL_DEFAULT_GROUP_ID << 28;

	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);

	if (cfg->intf)
		intf_active |= BIT(cfg->intf - INTF_0);

	if (cfg->wb)
		wb_active |= BIT(cfg->wb - WB_0);

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);

	if (cfg->merge_3d)
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
			      BIT(cfg->merge_3d - MERGE_3D_0));

	if (cfg->dsc)
		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
}

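/*
 * Legacy (pre-active-CTL) configuration: interface select, 3D mux,
 * writeback and video/command mode are all packed into the single
 * CTL_TOP register.
 */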
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	if (cfg->wb)
		intf_cfg |= (cfg->wb & 0x3) + 2;

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 merge3d_active = 0;
	u32 dsc_active;

	/*
	 * This API resets each portion of the CTL path: it clears the
	 * SSPPs staged on the LM, the merge_3d block, interfaces,
	 * writeback etc. to ensure a clean teardown of the pipeline.
	 * It is used for writeback to begin with, to properly tear down
	 * the writeback session, but upon further validation it can be
	 * extended to all interfaces.
	 */
	if (cfg->merge_3d) {
		merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
		merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
				merge3d_active);
	}

	dpu_hw_ctl_clear_all_blendstages(ctx);

	if (cfg->intf) {
		intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
		intf_active &= ~BIT(cfg->intf - INTF_0);
		DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	}

	if (cfg->wb) {
		wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
		wb_active &= ~BIT(cfg->wb - WB_0);
		DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
	}

	if (cfg->dsc) {
		dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
		dsc_active &= ~cfg->dsc;
		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
	}
}

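/*
 * Translate the SSPP bitmap in @fetch_active into CTL_FETCH_PIPE_ACTIVE
 * bits using fetch_tbl; a NULL bitmap clears the register.
 */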
static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
	unsigned long *fetch_active)
{
	int i;
	u32 val = 0;

	if (fetch_active) {
		for (i = 0; i < SSPP_MAX; i++) {
			if (test_bit(i, fetch_active) &&
				fetch_tbl[i] != CTL_INVALID_BIT)
				val |= BIT(fetch_tbl[i]);
		}
	}

	DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}

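/*
 * Hook up either the active-CTL (v1) or the legacy callbacks depending on
 * the DPU_CTL_ACTIVE_CFG capability, plus the optional DSPP sub-block flush
 * and fetch-active handlers.
 */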
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
		unsigned long cap)
{
	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
		ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf_v1;
		ops->update_pending_flush_merge_3d =
			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
		ops->update_pending_flush_dsc =
			dpu_hw_ctl_update_pending_flush_dsc_v1;
	} else {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
	}
	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
	ops->trigger_start = dpu_hw_ctl_trigger_start;
	ops->is_started = dpu_hw_ctl_is_started;
	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
	ops->reset = dpu_hw_ctl_reset_control;
	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
	ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
	ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
	if (cap & BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
	else
		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;

	if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
		ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
}

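/*
 * Map the CTL block at @addr + @cfg->base and set up the ops table for the
 * given capabilities. The returned context must be freed with
 * dpu_hw_ctl_destroy().
 */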
struct dpu_hw_ctl *dpu_hw_ctl_init(const struct dpu_ctl_cfg *cfg,
		void __iomem *addr,
		u32 mixer_count,
		const struct dpu_lm_cfg *mixer)
{
	struct dpu_hw_ctl *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	c->hw.blk_addr = addr + cfg->base;
	c->hw.log_mask = DPU_DBG_MASK_CTL;

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = cfg->id;
	c->mixer_count = mixer_count;
	c->mixer_hw_caps = mixer;

	return c;
}

void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
	kfree(ctx);
}