// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

#define   CTL_LAYER(lm)                 \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT(lm)             \
	(0x40 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT2(lm)            \
	(0x70 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT3(lm)            \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define   CTL_TOP                       0x014
#define   CTL_FLUSH                     0x018
#define   CTL_START                     0x01C
#define   CTL_PREPARE                   0x0d0
#define   CTL_SW_RESET                  0x030
#define   CTL_LAYER_EXTN_OFFSET         0x40
#define   CTL_MERGE_3D_ACTIVE           0x0E4
#define   CTL_INTF_ACTIVE               0x0F4
#define   CTL_MERGE_3D_FLUSH            0x100
#define   CTL_INTF_FLUSH                0x110
#define   CTL_INTF_MASTER               0x134

#define CTL_MIXER_BORDER_OUT            BIT(24)
#define CTL_FLUSH_MASK_CTL              BIT(17)

#define DPU_REG_RESET_TIMEOUT_US        2000
#define  MERGE_3D_IDX   23
#define  INTF_IDX       31

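/*
 * _ctl_offset() - look up a CTL block in the hardware catalog
 *
 * Fills in @b with the iomem base, block offset and length for the CTL
 * matching @ctl and returns its catalog entry, or an ERR_PTR when the
 * id is not present in the catalog.
 */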
static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
		const struct dpu_mdss_cfg *m,
		void __iomem *addr,
		struct dpu_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->ctl_count; i++) {
		if (ctl == m->ctl[i].id) {
			b->base_off = addr;
			b->blk_off = m->ctl[i].base;
			b->length = m->ctl[i].len;
			b->hwversion = m->hwversion;
			b->log_mask = DPU_DBG_MASK_CTL;
			return &m->ctl[i];
		}
	}
	/* a lookup miss is an invalid id, not an allocation failure */
	return ERR_PTR(-EINVAL);
}

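/*
 * _mixer_stages() - number of blend stages supported by layer mixer @lm,
 * or a negative error when the mixer is not found in the catalog.
 */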
static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}

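/*
 * CTL_START and CTL_PREPARE are single-shot trigger registers. Writing 1
 * to CTL_START tells the hardware to take over the staged configuration;
 * writing 1 to CTL_PREPARE signals that programming of the next frame is
 * beginning. Both triggers are traced together with the live flush
 * register so the commit sequence can be reconstructed from trace logs.
 */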
static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

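/*
 * Flush bits are accumulated in software: update_pending_flush() ORs new
 * bits into ctx->pending_flush_mask, get_pending_flush() reports the
 * accumulated mask, and only trigger_flush() actually writes it to the
 * CTL_FLUSH register. clear_pending_flush() resets the accumulator once
 * a commit has been handed to the hardware.
 */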
static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
}

static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}

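/*
 * Flush variant for active-CTL (v1) hardware: the MERGE_3D and INTF
 * blocks have dedicated flush registers, and the main CTL_FLUSH register
 * carries only a summary bit for each (MERGE_3D_IDX, INTF_IDX). The
 * sub-block masks are written first, then the main flush register.
 */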
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
	if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
				ctx->pending_merge_3d_flush_mask);
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
				ctx->pending_intf_flush_mask);

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

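/*
 * Map a source pipe to its flush bit in CTL_FLUSH. The layout is not
 * contiguous: VIG3 and RGB3 sit at bits 18/19 rather than next to their
 * siblings, and DMA2/DMA3 and the cursor pipes occupy the upper half of
 * the register.
 */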
static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
	enum dpu_sspp sspp)
{
	uint32_t flushbits = 0;

	switch (sspp) {
	case SSPP_VIG0:
		flushbits = BIT(0);
		break;
	case SSPP_VIG1:
		flushbits = BIT(1);
		break;
	case SSPP_VIG2:
		flushbits = BIT(2);
		break;
	case SSPP_VIG3:
		flushbits = BIT(18);
		break;
	case SSPP_RGB0:
		flushbits = BIT(3);
		break;
	case SSPP_RGB1:
		flushbits = BIT(4);
		break;
	case SSPP_RGB2:
		flushbits = BIT(5);
		break;
	case SSPP_RGB3:
		flushbits = BIT(19);
		break;
	case SSPP_DMA0:
		flushbits = BIT(11);
		break;
	case SSPP_DMA1:
		flushbits = BIT(12);
		break;
	case SSPP_DMA2:
		flushbits = BIT(24);
		break;
	case SSPP_DMA3:
		flushbits = BIT(25);
		break;
	case SSPP_CURSOR0:
		flushbits = BIT(22);
		break;
	case SSPP_CURSOR1:
		flushbits = BIT(23);
		break;
	default:
		break;
	}

	return flushbits;
}

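/*
 * A layer mixer flush is always paired with a flush of the CTL path
 * itself (CTL_FLUSH_MASK_CTL, bit 17), since the mixer's blend stages
 * are programmed through the CTL block's LAYER registers.
 */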
static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	uint32_t flushbits = 0;

	switch (lm) {
	case LM_0:
		flushbits = BIT(6);
		break;
	case LM_1:
		flushbits = BIT(7);
		break;
	case LM_2:
		flushbits = BIT(8);
		break;
	case LM_3:
		flushbits = BIT(9);
		break;
	case LM_4:
		flushbits = BIT(10);
		break;
	case LM_5:
		flushbits = BIT(20);
		break;
	default:
		/* unknown mixer: flush nothing rather than a bogus mask */
		return 0;
	}

	flushbits |= CTL_FLUSH_MASK_CTL;

	return flushbits;
}

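/*
 * Legacy interface flush: INTF_0..INTF_3 map to bits 31..28 of
 * CTL_FLUSH, in descending order.
 */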
static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		ctx->pending_flush_mask |= BIT(31);
		break;
	case INTF_1:
		ctx->pending_flush_mask |= BIT(30);
		break;
	case INTF_2:
		ctx->pending_flush_mask |= BIT(29);
		break;
	case INTF_3:
		ctx->pending_flush_mask |= BIT(28);
		break;
	default:
		break;
	}
}

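/*
 * v1 variants for active-CTL hardware: the per-block bit goes into a
 * dedicated sub-block mask, and only the summary bit is recorded in the
 * main pending flush mask; trigger_flush_v1() writes both.
 */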
static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(INTF_IDX);
}

static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
		enum dpu_merge_3d merge_3d)
{
	ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}

static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx,
	enum dpu_dspp dspp)
{
	uint32_t flushbits = 0;

	switch (dspp) {
	case DSPP_0:
		flushbits = BIT(13);
		break;
	case DSPP_1:
		flushbits = BIT(14);
		break;
	case DSPP_2:
		flushbits = BIT(15);
		break;
	case DSPP_3:
		flushbits = BIT(21);
		break;
	default:
		return 0;
	}

	return flushbits;
}

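/*
 * Poll CTL_SW_RESET until the hardware clears the self-clearing reset
 * bit or @timeout_us elapses. Returns the last status read: zero when
 * the reset completed, non-zero when it is still pending.
 */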
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * it takes around 30us for the mdp to finish resetting its ctl path;
	 * sleep 20-50us between reads so the reset has normally completed
	 * by the first re-read
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

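/*
 * Clear the blend configuration of every mixer driven by this CTL by
 * zeroing all four LAYER config registers per mixer.
 */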
static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
	}
}

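/*
 * Program the blend stage assignment of mixer @lm. Each stage's source
 * pipe is encoded as a small field spread across the four LAYER
 * registers: the original pipes use 3-bit fields in CTL_LAYER with an
 * overflow bit in CTL_LAYER_EXT for stages beyond 7, the cursor pipes
 * use 4-bit fields in CTL_LAYER_EXT, and DMA2/DMA3 plus the RECT_1 half
 * of multirect pipes use 4-bit fields in CTL_LAYER_EXT2/CTL_LAYER_EXT3.
 * BORDER_OUT is always set so that an empty stage configuration still
 * outputs the border color.
 */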
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;

		for (j = 0; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];

			switch (stage_cfg->stage[i][j]) {
			case SSPP_VIG0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
				} else {
					mixercfg |= mix << 0;
					mixercfg_ext |= ext << 0;
				}
				break;
			case SSPP_VIG1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
				} else {
					mixercfg |= mix << 3;
					mixercfg_ext |= ext << 2;
				}
				break;
			case SSPP_VIG2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 6;
					mixercfg_ext |= ext << 4;
				}
				break;
			case SSPP_VIG3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 26;
					mixercfg_ext |= ext << 6;
				}
				break;
			case SSPP_RGB0:
				mixercfg |= mix << 9;
				mixercfg_ext |= ext << 8;
				break;
			case SSPP_RGB1:
				mixercfg |= mix << 12;
				mixercfg_ext |= ext << 10;
				break;
			case SSPP_RGB2:
				mixercfg |= mix << 15;
				mixercfg_ext |= ext << 12;
				break;
			case SSPP_RGB3:
				mixercfg |= mix << 29;
				mixercfg_ext |= ext << 14;
				break;
			case SSPP_DMA0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 18;
					mixercfg_ext |= ext << 16;
				}
				break;
			case SSPP_DMA1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 21;
					mixercfg_ext |= ext << 18;
				}
				break;
			case SSPP_DMA2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
				} else {
					/* DMA2 stage lives in a 4-bit field of EXT2 */
					mixercfg_ext2 |= ((i + 1) & 0xF) << 0;
				}
				break;
			case SSPP_DMA3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
				} else {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 4;
				}
				break;
			case SSPP_CURSOR0:
				mixercfg_ext |= ((i + 1) & 0xF) << 20;
				break;
			case SSPP_CURSOR1:
				mixercfg_ext |= ((i + 1) & 0xF) << 26;
				break;
			default:
				break;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}

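/*
 * Interface configuration for active-CTL (v1) hardware: the active
 * interface and 3D merge block are selected through dedicated ACTIVE
 * registers, and CTL_TOP only carries the video/command mode select bit.
 */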
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 mode_sel = 0;

	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	intf_active |= BIT(cfg->intf - INTF_0);

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, BIT(cfg->merge_3d - MERGE_3D_0));
}

static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

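/*
 * Populate the ops table. CTLs with the DPU_CTL_ACTIVE_CFG feature use
 * the v1 (active-CTL) variants, which route interface and merge-3D state
 * through the dedicated ACTIVE/FLUSH registers; older hardware uses the
 * legacy single-register paths.
 */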
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
		unsigned long cap)
{
	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf_v1;
		ops->update_pending_flush_merge_3d =
			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
	} else {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf;
	}
	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
	ops->trigger_start = dpu_hw_ctl_trigger_start;
	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
	ops->reset = dpu_hw_ctl_reset_control;
	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
	ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
	ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
	ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp;
}

static struct dpu_hw_blk_ops dpu_hw_ops;

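/*
 * dpu_hw_ctl_init() - allocate and initialize the driver object for the
 * CTL block @idx, mapping its registers relative to @addr using catalog
 * @m. Returns an ERR_PTR on catalog lookup or allocation failure.
 */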
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
		void __iomem *addr,
		const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_ctl *c;
	const struct dpu_ctl_cfg *cfg;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _ctl_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		pr_err("failed to create dpu_hw_ctl %d\n", idx);
		return ERR_PTR(-EINVAL);
	}

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = idx;
	c->mixer_count = m->mixer_count;
	c->mixer_hw_caps = m->mixer;

	dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);

	return c;
}

void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
	if (ctx)
		dpu_hw_blk_destroy(&ctx->base);
	kfree(ctx);
}