// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

#define   CTL_LAYER(lm)                 \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT(lm)             \
	(0x40 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT2(lm)            \
	(0x70 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT3(lm)            \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define   CTL_TOP                       0x014
#define   CTL_FLUSH                     0x018
#define   CTL_START                     0x01C
#define   CTL_PREPARE                   0x0D0
#define   CTL_SW_RESET                  0x030
#define   CTL_LAYER_EXTN_OFFSET         0x40
#define   CTL_MERGE_3D_ACTIVE           0x0E4
#define   CTL_INTF_ACTIVE               0x0F4
#define   CTL_MERGE_3D_FLUSH            0x100
#define   CTL_INTF_FLUSH                0x110
#define   CTL_INTF_MASTER               0x134
#define   CTL_FETCH_PIPE_ACTIVE         0x0FC

#define CTL_MIXER_BORDER_OUT            BIT(24)
#define CTL_FLUSH_MASK_CTL              BIT(17)

#define DPU_REG_RESET_TIMEOUT_US        2000
#define MERGE_3D_IDX                    23
#define INTF_IDX                        31
#define CTL_INVALID_BIT                 0xffff
#define CTL_DEFAULT_GROUP_ID            0xf

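/*
 * Map each enum dpu_sspp value to its bit position in CTL_FETCH_PIPE_ACTIVE:
 * DMA0-3 use bits 0-3 and VIG0-3 use bits 16-19, while pipes without a
 * fetch-active bit (RGB, cursor) are marked CTL_INVALID_BIT.
 */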
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
	1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT};

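/*
 * Look up the CTL block in the hardware catalog by id and fill in the
 * register map used by all subsequent register accesses. Returns the
 * catalog entry, or an ERR_PTR when the id is not part of this chip.
 */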
static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
		const struct dpu_mdss_cfg *m,
		void __iomem *addr,
		struct dpu_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->ctl_count; i++) {
		if (ctl == m->ctl[i].id) {
			b->base_off = addr;
			b->blk_off = m->ctl[i].base;
			b->length = m->ctl[i].len;
			b->hwversion = m->hwversion;
			b->log_mask = DPU_DBG_MASK_CTL;
			return &m->ctl[i];
		}
	}
	return ERR_PTR(-ENOMEM);
}

static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}

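/*
 * Writing 1 to CTL_START tells the hardware to latch and apply the
 * configuration staged by the most recent flush. The trace hooks in the
 * helpers below record the pending and current flush masks for debugging.
 */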
static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
{
	return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
}

static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
}

static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}

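/*
 * On hardware with DPU_CTL_ACTIVE_CFG, the MERGE_3D and INTF blocks have
 * dedicated flush registers; BIT(MERGE_3D_IDX) and BIT(INTF_IDX) in the
 * main flush mask indicate that those registers must be written first.
 */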
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
	if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
				ctx->pending_merge_3d_flush_mask);
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
				ctx->pending_intf_flush_mask);

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

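/*
 * The get_bitmask_* helpers below translate a block id into its bit in
 * CTL_FLUSH. An illustrative (not verbatim) caller sequence, roughly what
 * the encoder/CRTC code does with these ops:
 *
 *	ctl->ops.update_pending_flush(ctl,
 *			ctl->ops.get_bitmask_sspp(ctl, SSPP_VIG0));
 *	ctl->ops.trigger_flush(ctl);
 *	ctl->ops.trigger_start(ctl);
 */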
static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
	enum dpu_sspp sspp)
{
	uint32_t flushbits = 0;

	switch (sspp) {
	case SSPP_VIG0:
		flushbits = BIT(0);
		break;
	case SSPP_VIG1:
		flushbits = BIT(1);
		break;
	case SSPP_VIG2:
		flushbits = BIT(2);
		break;
	case SSPP_VIG3:
		flushbits = BIT(18);
		break;
	case SSPP_RGB0:
		flushbits = BIT(3);
		break;
	case SSPP_RGB1:
		flushbits = BIT(4);
		break;
	case SSPP_RGB2:
		flushbits = BIT(5);
		break;
	case SSPP_RGB3:
		flushbits = BIT(19);
		break;
	case SSPP_DMA0:
		flushbits = BIT(11);
		break;
	case SSPP_DMA1:
		flushbits = BIT(12);
		break;
	case SSPP_DMA2:
		flushbits = BIT(24);
		break;
	case SSPP_DMA3:
		flushbits = BIT(25);
		break;
	case SSPP_CURSOR0:
		flushbits = BIT(22);
		break;
	case SSPP_CURSOR1:
		flushbits = BIT(23);
		break;
	default:
		break;
	}

	return flushbits;
}

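/*
 * Layer-mixer flush bits are always combined with CTL_FLUSH_MASK_CTL,
 * since the mixer's blend configuration lives in this CTL block's
 * CTL_LAYER* registers and must be flushed along with it.
 */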
static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	uint32_t flushbits = 0;

	switch (lm) {
	case LM_0:
		flushbits = BIT(6);
		break;
	case LM_1:
		flushbits = BIT(7);
		break;
	case LM_2:
		flushbits = BIT(8);
		break;
	case LM_3:
		flushbits = BIT(9);
		break;
	case LM_4:
		flushbits = BIT(10);
		break;
	case LM_5:
		flushbits = BIT(20);
		break;
	default:
		/* unknown mixer: contribute no flush bits */
		return 0;
	}

	flushbits |= CTL_FLUSH_MASK_CTL;

	return flushbits;
}

static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		ctx->pending_flush_mask |= BIT(31);
		break;
	case INTF_1:
		ctx->pending_flush_mask |= BIT(30);
		break;
	case INTF_2:
		ctx->pending_flush_mask |= BIT(29);
		break;
	case INTF_3:
		ctx->pending_flush_mask |= BIT(28);
		break;
	default:
		break;
	}
}

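/*
 * DPU_CTL_ACTIVE_CFG variant: the per-interface bits live in the dedicated
 * CTL_INTF_FLUSH register, and BIT(INTF_IDX) in the main flush mask tells
 * dpu_hw_ctl_trigger_flush_v1() to write that register.
 */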
static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(INTF_IDX);
}

static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
		enum dpu_merge_3d merge_3d)
{
	ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}

static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx,
	enum dpu_dspp dspp)
{
	uint32_t flushbits = 0;

	switch (dspp) {
	case DSPP_0:
		flushbits = BIT(13);
		break;
	case DSPP_1:
		flushbits = BIT(14);
		break;
	case DSPP_2:
		flushbits = BIT(15);
		break;
	case DSPP_3:
		flushbits = BIT(21);
		break;
	default:
		return 0;
	}

	return flushbits;
}

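/*
 * Poll CTL_SW_RESET until the hardware clears the reset bit or @timeout_us
 * elapses. Returns 0 once the reset has completed, nonzero if the bit is
 * still asserted when the timeout expires.
 */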
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * The MDP takes around 30us to finish resetting its CTL path, so
	 * sleep 20-50us between reads; the reset should normally be seen
	 * as complete on the first poll.
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x1;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;

		DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
	}

	DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
}

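/*
 * Each layer mixer owns a CTL_LAYER register plus three extension
 * registers. Legacy pipes are placed with a 3-bit stage field ('mix',
 * holding stage + 1) in CTL_LAYER and a one-bit overflow ('ext') in
 * CTL_LAYER_EXT for stages of 7 and above; multirect REC_1 placements
 * (and DMA2/DMA3 REC_0) use 4-bit stage fields in CTL_LAYER_EXT2/EXT3.
 */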
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;

		for (j = 0; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];

			switch (stage_cfg->stage[i][j]) {
			case SSPP_VIG0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
				} else {
					mixercfg |= mix << 0;
					mixercfg_ext |= ext << 0;
				}
				break;
			case SSPP_VIG1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
				} else {
					mixercfg |= mix << 3;
					mixercfg_ext |= ext << 2;
				}
				break;
			case SSPP_VIG2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 6;
					mixercfg_ext |= ext << 4;
				}
				break;
			case SSPP_VIG3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 26;
					mixercfg_ext |= ext << 6;
				}
				break;
			case SSPP_RGB0:
				mixercfg |= mix << 9;
				mixercfg_ext |= ext << 8;
				break;
			case SSPP_RGB1:
				mixercfg |= mix << 12;
				mixercfg_ext |= ext << 10;
				break;
			case SSPP_RGB2:
				mixercfg |= mix << 15;
				mixercfg_ext |= ext << 12;
				break;
			case SSPP_RGB3:
				mixercfg |= mix << 29;
				mixercfg_ext |= ext << 14;
				break;
			case SSPP_DMA0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 18;
					mixercfg_ext |= ext << 16;
				}
				break;
			case SSPP_DMA1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 21;
					mixercfg_ext |= ext << 18;
				}
				break;
			case SSPP_DMA2:
				/*
				 * DMA2/DMA3 REC_0 use 4-bit stage fields in
				 * EXT2; don't modify 'mix', which is shared
				 * with other pipes in this stage.
				 */
				if (rect_index == DPU_SSPP_RECT_1)
					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
				else
					mixercfg_ext2 |= ((i + 1) & 0xF) << 0;
				break;
			case SSPP_DMA3:
				if (rect_index == DPU_SSPP_RECT_1)
					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
				else
					mixercfg_ext2 |= ((i + 1) & 0xF) << 4;
				break;
			case SSPP_CURSOR0:
				mixercfg_ext |= ((i + 1) & 0xF) << 20;
				break;
			case SSPP_CURSOR1:
				mixercfg_ext |= ((i + 1) & 0xF) << 26;
				break;
			default:
				break;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}
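
/*
 * Illustrative use of the blend-stage hook (hypothetical values, not a
 * verbatim caller; the real stage_cfg is built by the CRTC code):
 *
 *	struct dpu_hw_stage_cfg cfg = { 0 };
 *
 *	cfg.stage[0][0] = SSPP_VIG0;
 *	cfg.multirect_index[0][0] = DPU_SSPP_RECT_SOLO;
 *	ctx->ops.setup_blendstage(ctx, LM_0, &cfg);
 */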
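/*
 * DPU_CTL_ACTIVE_CFG variant of the interface configuration: selects the
 * operating mode in CTL_TOP and marks the interface (and any 3D merge
 * block) active. CTL_INTF_ACTIVE is read-modify-write so that several
 * interfaces can be driven from one CTL.
 */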
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 mode_sel = 0;

	/* CTL_TOP[31:28] carries the group_id used to collate CTL paths
	 * per VM. Explicitly disable it until VM support is added in
	 * SW; the power-on-reset value is not the disabled state.
	 */
	if (test_bit(DPU_CTL_VM_CFG, &ctx->caps->features))
		mode_sel = CTL_DEFAULT_GROUP_ID << 28;

	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	intf_active |= BIT(cfg->intf - INTF_0);

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	if (cfg->merge_3d)
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
			      BIT(cfg->merge_3d - MERGE_3D_0));
}

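/*
 * Legacy interface configuration: the interface id, 3D mux mode and
 * vid/cmd mode select are all packed into the single CTL_TOP register.
 */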
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

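/*
 * Translate a bitmap of active pipes (indexed by enum dpu_sspp) into
 * CTL_FETCH_PIPE_ACTIVE bits via fetch_tbl; a NULL bitmap disables
 * fetching for every pipe.
 */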
static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
	unsigned long *fetch_active)
{
	int i;
	u32 val = 0;

	if (fetch_active) {
		for (i = 0; i < SSPP_MAX; i++) {
			if (test_bit(i, fetch_active) &&
				fetch_tbl[i] != CTL_INVALID_BIT)
				val |= BIT(fetch_tbl[i]);
		}
	}

	DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}

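/*
 * Populate the ops table according to the CTL block's capabilities:
 * DPU_CTL_ACTIVE_CFG selects the v1 (active-CTL) flush and interface
 * hooks, and DPU_CTL_FETCH_ACTIVE enables the fetch-pipe programming.
 */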
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
		unsigned long cap)
{
	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf_v1;
		ops->update_pending_flush_merge_3d =
			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
	} else {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf;
	}
	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
	ops->trigger_start = dpu_hw_ctl_trigger_start;
	ops->is_started = dpu_hw_ctl_is_started;
	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
	ops->reset = dpu_hw_ctl_reset_control;
	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
	ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
	ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
	ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp;
	if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
		ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
}

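/**
 * dpu_hw_ctl_init() - Initialize the CTL path hardware driver object.
 * @idx:  CTL path index for which the driver object is required
 * @addr: mapped register I/O address of the MDP
 * @m:    pointer to the MDSS catalog data
 *
 * Must be called before accessing any CTL path register. Returns an
 * ERR_PTR on failure.
 */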
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
		void __iomem *addr,
		const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_ctl *c;
	const struct dpu_ctl_cfg *cfg;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _ctl_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		pr_err("failed to create dpu_hw_ctl %d\n", idx);
		return ERR_PTR(-EINVAL);
	}

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = idx;
	c->mixer_count = m->mixer_count;
	c->mixer_hw_caps = m->mixer;

	return c;
}

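/**
 * dpu_hw_ctl_destroy() - Free a CTL context allocated by dpu_hw_ctl_init().
 * @ctx: CTL context to free, may be NULL
 */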
void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
	kfree(ctx);
}