1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
3  */
4 
5 #include <linux/delay.h>
6 #include "dpu_hwio.h"
7 #include "dpu_hw_ctl.h"
8 #include "dpu_kms.h"
9 #include "dpu_trace.h"
10 
11 #define   CTL_LAYER(lm)                 \
12 	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
13 #define   CTL_LAYER_EXT(lm)             \
14 	(0x40 + (((lm) - LM_0) * 0x004))
15 #define   CTL_LAYER_EXT2(lm)             \
16 	(0x70 + (((lm) - LM_0) * 0x004))
17 #define   CTL_LAYER_EXT3(lm)             \
18 	(0xA0 + (((lm) - LM_0) * 0x004))
19 #define   CTL_TOP                       0x014
20 #define   CTL_FLUSH                     0x018
21 #define   CTL_START                     0x01C
22 #define   CTL_PREPARE                   0x0d0
23 #define   CTL_SW_RESET                  0x030
24 #define   CTL_LAYER_EXTN_OFFSET         0x40
25 #define   CTL_MERGE_3D_ACTIVE           0x0E4
26 #define   CTL_INTF_ACTIVE               0x0F4
27 #define   CTL_MERGE_3D_FLUSH            0x100
28 #define   CTL_INTF_FLUSH                0x110
29 #define   CTL_INTF_MASTER               0x134
30 #define   CTL_FETCH_PIPE_ACTIVE         0x0FC
31 
32 #define CTL_MIXER_BORDER_OUT            BIT(24)
33 #define CTL_FLUSH_MASK_CTL              BIT(17)
34 
35 #define DPU_REG_RESET_TIMEOUT_US        2000
36 #define  MERGE_3D_IDX   23
37 #define  INTF_IDX       31
38 #define CTL_INVALID_BIT                 0xffff
39 #define CTL_DEFAULT_GROUP_ID		0xf
40 
/*
 * Per-SSPP bit position inside CTL_FETCH_PIPE_ACTIVE, indexed by
 * enum dpu_sspp.  CTL_INVALID_BIT marks pipes that have no fetch-active
 * bit in this register.
 * NOTE(review): the initializer order assumes the enum dpu_sspp layout
 * (NONE, VIG0-3, RGB0-3, DMA0-3, CURSOR0-1) -- confirm against the
 * enum definition if it ever changes.
 */
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
	1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT};
44 
/*
 * Look up the catalog entry for CTL block @ctl in @m and fill in the
 * register map @b (base address, block offset, length, hw version,
 * debug log mask) from it.
 *
 * Returns the matching catalog entry, or an ERR_PTR on failure.
 * NOTE(review): -ENOMEM is an odd errno for "id not found in catalog";
 * callers only test IS_ERR_OR_NULL(), so the value is not load-bearing
 * here, but -EINVAL would read better -- confirm before changing.
 */
static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
		const struct dpu_mdss_cfg *m,
		void __iomem *addr,
		struct dpu_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->ctl_count; i++) {
		if (ctl == m->ctl[i].id) {
			b->base_off = addr;
			b->blk_off = m->ctl[i].base;
			b->length = m->ctl[i].len;
			b->hwversion = m->hwversion;
			b->log_mask = DPU_DBG_MASK_CTL;
			return &m->ctl[i];
		}
	}
	return ERR_PTR(-ENOMEM);
}
64 
65 static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
66 		enum dpu_lm lm)
67 {
68 	int i;
69 	int stages = -EINVAL;
70 
71 	for (i = 0; i < count; i++) {
72 		if (lm == mixer[i].id) {
73 			stages = mixer[i].sblk->maxblendstages;
74 			break;
75 		}
76 	}
77 
78 	return stages;
79 }
80 
/* Read back the hardware CTL_FLUSH register (bits still pending in hw). */
static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}
87 
/* Kick the CTL path: write CTL_START so hw latches the programmed state. */
static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}
94 
/* Write CTL_PREPARE to arm the next frame's configuration. */
static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}
101 
/*
 * Drop the software-side pending flush mask.  Only the cached mask is
 * cleared; no register write is issued here.
 */
static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
}
108 
/*
 * OR @flushbits into the cached pending flush mask.  The bits reach the
 * hardware later, in trigger_flush().
 */
static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}
116 
/* Accessor for the cached (not yet hardware-committed) flush mask. */
static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}
121 
/*
 * ACTIVE_CFG (v1) flush: newer hardware splits merge-3d and interface
 * flush bits into their own registers.  Program those sub-registers
 * first (gated by their summary bits in pending_flush_mask), then write
 * the main CTL_FLUSH register to commit everything.
 */
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{

	if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
				ctx->pending_merge_3d_flush_mask);
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
				ctx->pending_intf_flush_mask);

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
134 
/* Legacy flush: commit the whole cached mask via the single CTL_FLUSH reg. */
static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
141 
142 static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
143 	enum dpu_sspp sspp)
144 {
145 	uint32_t flushbits = 0;
146 
147 	switch (sspp) {
148 	case SSPP_VIG0:
149 		flushbits =  BIT(0);
150 		break;
151 	case SSPP_VIG1:
152 		flushbits = BIT(1);
153 		break;
154 	case SSPP_VIG2:
155 		flushbits = BIT(2);
156 		break;
157 	case SSPP_VIG3:
158 		flushbits = BIT(18);
159 		break;
160 	case SSPP_RGB0:
161 		flushbits = BIT(3);
162 		break;
163 	case SSPP_RGB1:
164 		flushbits = BIT(4);
165 		break;
166 	case SSPP_RGB2:
167 		flushbits = BIT(5);
168 		break;
169 	case SSPP_RGB3:
170 		flushbits = BIT(19);
171 		break;
172 	case SSPP_DMA0:
173 		flushbits = BIT(11);
174 		break;
175 	case SSPP_DMA1:
176 		flushbits = BIT(12);
177 		break;
178 	case SSPP_DMA2:
179 		flushbits = BIT(24);
180 		break;
181 	case SSPP_DMA3:
182 		flushbits = BIT(25);
183 		break;
184 	case SSPP_CURSOR0:
185 		flushbits = BIT(22);
186 		break;
187 	case SSPP_CURSOR1:
188 		flushbits = BIT(23);
189 		break;
190 	default:
191 		break;
192 	}
193 
194 	return flushbits;
195 }
196 
/*
 * Translate a layer mixer id to its CTL_FLUSH bit, always OR-ing in the
 * CTL path's own flush bit (CTL_FLUSH_MASK_CTL).
 *
 * NOTE(review): on an unknown mixer this returns -EINVAL through a
 * uint32_t, i.e. 0xffffffea -- a caller OR-ing that into a flush mask
 * would set almost every bit.  Verify how callers check this before
 * changing it to return 0.
 */
static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	uint32_t flushbits = 0;

	switch (lm) {
	case LM_0:
		flushbits = BIT(6);
		break;
	case LM_1:
		flushbits = BIT(7);
		break;
	case LM_2:
		flushbits = BIT(8);
		break;
	case LM_3:
		flushbits = BIT(9);
		break;
	case LM_4:
		flushbits = BIT(10);
		break;
	case LM_5:
		flushbits = BIT(20);
		break;
	default:
		return -EINVAL;
	}

	flushbits |= CTL_FLUSH_MASK_CTL;

	return flushbits;
}
229 
230 static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
231 		enum dpu_intf intf)
232 {
233 	switch (intf) {
234 	case INTF_0:
235 		ctx->pending_flush_mask |= BIT(31);
236 		break;
237 	case INTF_1:
238 		ctx->pending_flush_mask |= BIT(30);
239 		break;
240 	case INTF_2:
241 		ctx->pending_flush_mask |= BIT(29);
242 		break;
243 	case INTF_3:
244 		ctx->pending_flush_mask |= BIT(28);
245 		break;
246 	default:
247 		break;
248 	}
249 }
250 
/*
 * ACTIVE_CFG (v1) interface flush: record the interface in the separate
 * CTL_INTF_FLUSH mask and set the INTF summary bit in the main mask.
 */
static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(INTF_IDX);
}
257 
/*
 * ACTIVE_CFG (v1) merge-3d flush: record the block in the separate
 * CTL_MERGE_3D_FLUSH mask and set the MERGE_3D summary bit.
 */
static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
		enum dpu_merge_3d merge_3d)
{
	ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}
264 
265 static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx,
266 	enum dpu_dspp dspp)
267 {
268 	uint32_t flushbits = 0;
269 
270 	switch (dspp) {
271 	case DSPP_0:
272 		flushbits = BIT(13);
273 		break;
274 	case DSPP_1:
275 		flushbits = BIT(14);
276 		break;
277 	case DSPP_2:
278 		flushbits = BIT(15);
279 		break;
280 	case DSPP_3:
281 		flushbits = BIT(21);
282 		break;
283 	default:
284 		return 0;
285 	}
286 
287 	return flushbits;
288 }
289 
/*
 * Poll CTL_SW_RESET bit 0 until the hardware clears it or @timeout_us
 * elapses.  Returns the final bit value: 0 means reset completed,
 * non-zero means it is still asserted (timeout).
 */
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * it takes around 30us to have mdp finish resetting its ctl path
	 * poll every 50us so that reset should be completed at 1st poll
	 * (usleep_range(20, 50) gives the scheduler that 20-50us window)
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}
311 
/*
 * Assert a software reset of this CTL path and wait for the hardware to
 * finish.  Returns 0 on success, -EINVAL if the reset did not complete
 * within DPU_REG_RESET_TIMEOUT_US.
 */
static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}
323 
/*
 * If a software reset is currently in flight on this CTL, wait for it
 * to complete.  Fast-paths to 0 when no reset is pending; returns
 * -EINVAL if a pending reset does not finish within the timeout.
 */
static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}
342 
343 static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
344 {
345 	struct dpu_hw_blk_reg_map *c = &ctx->hw;
346 	int i;
347 
348 	for (i = 0; i < ctx->mixer_count; i++) {
349 		enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;
350 
351 		DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
352 		DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
353 		DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
354 		DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
355 	}
356 
357 	DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
358 }
359 
/*
 * Program the blend-stage layout of mixer @lm from @stage_cfg.
 *
 * Each source pipe placed at stage i is encoded as the value (i + 1)
 * spread across the CTL_LAYER / CTL_LAYER_EXT* registers: the low 3
 * bits go in the pipe's field of CTL_LAYER ('mix'), the overflow bit
 * for stages >= 7 goes in CTL_LAYER_EXT ('ext'), and RECT_1 multirect
 * pipes plus DMA2/DMA3 use 4-bit fields in EXT2/EXT3 instead.
 * A NULL @stage_cfg programs only the always-set BORDER_OUT bit,
 * i.e. border color output with no pipes staged.
 */
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
	int i, j;
	int stages;
	int pipes_per_stage;

	/* Unknown mixer id: nothing to program. */
	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	/* Source-split hw allows multiple pipes per blend stage. */
	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;

		for (j = 0 ; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];

			/* Per-pipe field offsets below are fixed by the
			 * CTL_LAYER register layout for this hw family.
			 */
			switch (stage_cfg->stage[i][j]) {
			case SSPP_VIG0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
				} else {
					mixercfg |= mix << 0;
					mixercfg_ext |= ext << 0;
				}
				break;
			case SSPP_VIG1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
				} else {
					mixercfg |= mix << 3;
					mixercfg_ext |= ext << 2;
				}
				break;
			case SSPP_VIG2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 6;
					mixercfg_ext |= ext << 4;
				}
				break;
			case SSPP_VIG3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 26;
					mixercfg_ext |= ext << 6;
				}
				break;
			case SSPP_RGB0:
				mixercfg |= mix << 9;
				mixercfg_ext |= ext << 8;
				break;
			case SSPP_RGB1:
				mixercfg |= mix << 12;
				mixercfg_ext |= ext << 10;
				break;
			case SSPP_RGB2:
				mixercfg |= mix << 15;
				mixercfg_ext |= ext << 12;
				break;
			case SSPP_RGB3:
				mixercfg |= mix << 29;
				mixercfg_ext |= ext << 14;
				break;
			case SSPP_DMA0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 18;
					mixercfg_ext |= ext << 16;
				}
				break;
			case SSPP_DMA1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 21;
					mixercfg_ext |= ext << 18;
				}
				break;
			case SSPP_DMA2:
				/* DMA2/DMA3 live entirely in EXT2 (4-bit
				 * fields), not in the base LAYER register.
				 * NOTE(review): 'mix |= (i + 1) & 0xF'
				 * mutates 'mix' and leaks into later j
				 * iterations of this stage -- confirm this
				 * is intended before restructuring.
				 */
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
				} else {
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 0;
				}
				break;
			case SSPP_DMA3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
				} else {
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 4;
				}
				break;
			case SSPP_CURSOR0:
				mixercfg_ext |= ((i + 1) & 0xF) << 20;
				break;
			case SSPP_CURSOR1:
				mixercfg_ext |= ((i + 1) & 0xF) << 26;
				break;
			default:
				break;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}
493 
494 
/*
 * ACTIVE_CFG (v1) interface configuration: program CTL_TOP mode select,
 * add this interface to the CTL_INTF_ACTIVE read-modify-write mask, and
 * activate the merge-3d block when one is in use.
 */
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 mode_sel = 0;

	/* CTL_TOP[31:28] carries group_id to collate CTL paths
	 * per VM. Explicitly disable it until VM support is
	 * added in SW. Power on reset value is not disable.
	 */
	if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
		mode_sel = CTL_DEFAULT_GROUP_ID  << 28;

	/* BIT(17) selects command mode; video mode leaves it clear. */
	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	intf_active |= BIT(cfg->intf - INTF_0);

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	if (cfg->merge_3d)
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
			      BIT(cfg->merge_3d - MERGE_3D_0));
}
521 
/*
 * Legacy interface configuration: pack interface id, 3D mux mode and
 * vid/cmd mode select into the single CTL_TOP register.
 * Bails out without writing on an unknown intf_mode_sel.
 */
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	/* CTL_TOP[7:4]: interface select */
	intf_cfg |= (cfg->intf & 0xF) << 4;

	/* CTL_TOP[19]: 3D mux enable, [21:20]: 3D mux mode (value - 1) */
	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		/* [17]: command mode, [16:15]: stream select */
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}
551 
552 static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
553 	unsigned long *fetch_active)
554 {
555 	int i;
556 	u32 val = 0;
557 
558 	if (fetch_active) {
559 		for (i = 0; i < SSPP_MAX; i++) {
560 			if (test_bit(i, fetch_active) &&
561 				fetch_tbl[i] != CTL_INVALID_BIT)
562 				val |= BIT(fetch_tbl[i]);
563 		}
564 	}
565 
566 	DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
567 }
568 
569 static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
570 		unsigned long cap)
571 {
572 	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
573 		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
574 		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
575 		ops->update_pending_flush_intf =
576 			dpu_hw_ctl_update_pending_flush_intf_v1;
577 		ops->update_pending_flush_merge_3d =
578 			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
579 	} else {
580 		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
581 		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
582 		ops->update_pending_flush_intf =
583 			dpu_hw_ctl_update_pending_flush_intf;
584 	}
585 	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
586 	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
587 	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
588 	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
589 	ops->trigger_start = dpu_hw_ctl_trigger_start;
590 	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
591 	ops->reset = dpu_hw_ctl_reset_control;
592 	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
593 	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
594 	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
595 	ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
596 	ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
597 	ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp;
598 	if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
599 		ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
600 };
601 
/*
 * Allocate and initialize a CTL block context for catalog id @idx.
 *
 * Looks up the block in catalog @m, maps its registers relative to
 * @addr and installs the capability-dependent ops table.
 *
 * Returns the new context on success (caller owns it and must release
 * it with dpu_hw_ctl_destroy()), or an ERR_PTR on failure.
 */
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
		void __iomem *addr,
		const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_ctl *c;
	const struct dpu_ctl_cfg *cfg;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _ctl_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		pr_err("failed to create dpu_hw_ctl %d\n", idx);
		return ERR_PTR(-EINVAL);
	}

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = idx;
	c->mixer_count = m->mixer_count;
	c->mixer_hw_caps = m->mixer;

	return c;
}
628 
/* Free a context allocated by dpu_hw_ctl_init(); NULL is a no-op. */
void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
	kfree(ctx);
}
633