// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

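/*
 * CTL register offsets. CTL_LAYER slots for LM_0-LM_4 are packed 4 bytes
 * apart from 0x000; LM_5's linear slot (0x014) is occupied by CTL_TOP, so
 * LM_5 is mapped to 0x024 instead. The EXT/EXT2/EXT3 banks at
 * 0x40/0x70/0xA0 extend the per-mixer layer configuration.
 */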
#define   CTL_LAYER(lm)                 \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT(lm)             \
	(0x40 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT2(lm)            \
	(0x70 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT3(lm)            \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define   CTL_TOP                       0x014
#define   CTL_FLUSH                     0x018
#define   CTL_START                     0x01C
#define   CTL_PREPARE                   0x0D0
#define   CTL_SW_RESET                  0x030
#define   CTL_LAYER_EXTN_OFFSET         0x40
#define   CTL_INTF_ACTIVE               0x0F4
#define   CTL_INTF_FLUSH                0x110
#define   CTL_INTF_MASTER               0x134

#define CTL_MIXER_BORDER_OUT            BIT(24)
#define CTL_FLUSH_MASK_CTL              BIT(17)

#define DPU_REG_RESET_TIMEOUT_US        2000
#define INTF_IDX                        31

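/*
 * _ctl_offset - look up the CTL block @ctl in catalog @m and fill in the
 * register map @b based at @addr. Returns the matching dpu_ctl_cfg on
 * success or an ERR_PTR if the block is not in the catalog.
 */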
static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
		const struct dpu_mdss_cfg *m,
		void __iomem *addr,
		struct dpu_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->ctl_count; i++) {
		if (ctl == m->ctl[i].id) {
			b->base_off = addr;
			b->blk_off = m->ctl[i].base;
			b->length = m->ctl[i].len;
			b->hwversion = m->hwversion;
			b->log_mask = DPU_DBG_MASK_CTL;
			return &m->ctl[i];
		}
	}
	/* @ctl not found in the catalog: not an allocation failure */
	return ERR_PTR(-EINVAL);
}

static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}

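/*
 * The trigger helpers below each write a single trigger bit (CTL_START,
 * CTL_PREPARE) to kick the hardware; the trace calls record the pending
 * flush mask next to the live CTL_FLUSH contents so a stalled flush can
 * be identified from the trace log.
 */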
static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
					     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
}

static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}

static inline void dpu_hw_ctl_update_pending_intf_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	ctx->pending_intf_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}

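/*
 * v1 (active-CTL) flush: per-interface flush bits live in the dedicated
 * CTL_INTF_FLUSH register, gated by the single INTF_IDX bit (31) of the
 * main mask; the interface mask is written first, followed by the main
 * CTL_FLUSH mask.
 */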
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
			      ctx->pending_intf_flush_mask);

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
					       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

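/*
 * CTL_FLUSH bit positions are sparse rather than sequential per pipe
 * class: VIG0-VIG2 and RGB0-RGB2 sit in bits 0-5, while VIG3/RGB3 sit
 * apart at bits 18/19 and DMA2/DMA3 at bits 24/25. Unhandled pipes
 * return 0 (nothing to flush).
 */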
static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
	enum dpu_sspp sspp)
{
	uint32_t flushbits = 0;

	switch (sspp) {
	case SSPP_VIG0:
		flushbits = BIT(0);
		break;
	case SSPP_VIG1:
		flushbits = BIT(1);
		break;
	case SSPP_VIG2:
		flushbits = BIT(2);
		break;
	case SSPP_VIG3:
		flushbits = BIT(18);
		break;
	case SSPP_RGB0:
		flushbits = BIT(3);
		break;
	case SSPP_RGB1:
		flushbits = BIT(4);
		break;
	case SSPP_RGB2:
		flushbits = BIT(5);
		break;
	case SSPP_RGB3:
		flushbits = BIT(19);
		break;
	case SSPP_DMA0:
		flushbits = BIT(11);
		break;
	case SSPP_DMA1:
		flushbits = BIT(12);
		break;
	case SSPP_DMA2:
		flushbits = BIT(24);
		break;
	case SSPP_DMA3:
		flushbits = BIT(25);
		break;
	case SSPP_CURSOR0:
		flushbits = BIT(22);
		break;
	case SSPP_CURSOR1:
		flushbits = BIT(23);
		break;
	default:
		break;
	}

	return flushbits;
}

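/*
 * Mixer flush bits follow the same sparse layout (LM_0-LM_4 in bits 6-10,
 * LM_5 at bit 20). CTL_FLUSH_MASK_CTL (bit 17) is OR'd into every mixer
 * mask so the CTL path itself is flushed along with the mixer.
 */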
static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	uint32_t flushbits = 0;

	switch (lm) {
	case LM_0:
		flushbits = BIT(6);
		break;
	case LM_1:
		flushbits = BIT(7);
		break;
	case LM_2:
		flushbits = BIT(8);
		break;
	case LM_3:
		flushbits = BIT(9);
		break;
	case LM_4:
		flushbits = BIT(10);
		break;
	case LM_5:
		flushbits = BIT(20);
		break;
	default:
		return -EINVAL;
	}

	flushbits |= CTL_FLUSH_MASK_CTL;

	return flushbits;
}

static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
		u32 *flushbits, enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		*flushbits |= BIT(31);
		break;
	case INTF_1:
		*flushbits |= BIT(30);
		break;
	case INTF_2:
		*flushbits |= BIT(29);
		break;
	case INTF_3:
		*flushbits |= BIT(28);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

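/*
 * On active-CTL hardware the main flush mask carries only the shared
 * INTF_IDX bit for any interface; the per-interface bits (built by
 * dpu_hw_ctl_active_get_bitmask_intf below) go into CTL_INTF_FLUSH via
 * update_pending_intf_flush instead.
 */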
static int dpu_hw_ctl_get_bitmask_intf_v1(struct dpu_hw_ctl *ctx,
		u32 *flushbits, enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
	case INTF_1:
		*flushbits |= BIT(31);
		break;
	default:
		return 0;
	}
	return 0;
}

static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx,
		u32 *flushbits, enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		*flushbits |= BIT(0);
		break;
	case INTF_1:
		*flushbits |= BIT(1);
		break;
	default:
		return 0;
	}
	return 0;
}

static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx,
	enum dpu_dspp dspp)
{
	uint32_t flushbits = 0;

	switch (dspp) {
	case DSPP_0:
		flushbits = BIT(13);
		break;
	case DSPP_1:
		flushbits = BIT(14);
		break;
	case DSPP_2:
		flushbits = BIT(15);
		break;
	case DSPP_3:
		flushbits = BIT(21);
		break;
	default:
		return 0;
	}

	return flushbits;
}

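/*
 * dpu_hw_ctl_poll_reset_status - poll CTL_SW_RESET until the hardware
 * clears the reset bit or @timeout_us elapses. Returns 0 once the reset
 * has completed, or the still-set status bit on timeout.
 */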
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * The MDP takes around 30us to finish resetting its CTL path, so
	 * sleep up to 50us between polls; the reset should normally be
	 * observed complete on the first recheck.
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
	}
}

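/*
 * Blend-stage encoding: each REC_0 pipe gets a 3-bit (stage + 1) field in
 * CTL_LAYER plus a per-pipe overflow bit in CTL_LAYER_EXT for stages above
 * 7 ('mix'/'ext' below). REC_1 rectangles, the DMA2/DMA3 pipes and the
 * cursors instead use full 4-bit fields in the EXT2/EXT3/EXT registers.
 * A zero field means the pipe does not feed this mixer.
 */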
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		     &ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;

		for (j = 0; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];

			switch (stage_cfg->stage[i][j]) {
			case SSPP_VIG0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
				} else {
					mixercfg |= mix << 0;
					mixercfg_ext |= ext << 0;
				}
				break;
			case SSPP_VIG1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
				} else {
					mixercfg |= mix << 3;
					mixercfg_ext |= ext << 2;
				}
				break;
			case SSPP_VIG2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 6;
					mixercfg_ext |= ext << 4;
				}
				break;
			case SSPP_VIG3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 26;
					mixercfg_ext |= ext << 6;
				}
				break;
			case SSPP_RGB0:
				mixercfg |= mix << 9;
				mixercfg_ext |= ext << 8;
				break;
			case SSPP_RGB1:
				mixercfg |= mix << 12;
				mixercfg_ext |= ext << 10;
				break;
			case SSPP_RGB2:
				mixercfg |= mix << 15;
				mixercfg_ext |= ext << 12;
				break;
			case SSPP_RGB3:
				mixercfg |= mix << 29;
				mixercfg_ext |= ext << 14;
				break;
			case SSPP_DMA0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 18;
					mixercfg_ext |= ext << 16;
				}
				break;
			case SSPP_DMA1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 21;
					mixercfg_ext |= ext << 18;
				}
				break;
			case SSPP_DMA2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
				} else {
					/* REC_0 uses a 4-bit EXT2 field; don't clobber 'mix' */
					mixercfg_ext2 |= ((i + 1) & 0xF) << 0;
				}
				break;
			case SSPP_DMA3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
				} else {
					/* REC_0 uses a 4-bit EXT2 field; don't clobber 'mix' */
					mixercfg_ext2 |= ((i + 1) & 0xF) << 4;
				}
				break;
			case SSPP_CURSOR0:
				mixercfg_ext |= ((i + 1) & 0xF) << 20;
				break;
			case SSPP_CURSOR1:
				mixercfg_ext |= ((i + 1) & 0xF) << 26;
				break;
			default:
				break;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}

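/*
 * v1 interface configuration: read-modify-write the per-interface bit in
 * CTL_INTF_ACTIVE, and select command mode via bit 17 of CTL_TOP (video
 * mode leaves CTL_TOP at 0).
 */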
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 mode_sel = 0;

	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	intf_active |= BIT(cfg->intf - INTF_0);

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
}

static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

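/*
 * Hardware with the DPU_CTL_ACTIVE_CFG feature gets the v1 flush and
 * interface handlers (dedicated CTL_INTF_FLUSH/CTL_INTF_ACTIVE registers);
 * everything else gets the legacy single-register variants.
 */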
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
		unsigned long cap)
{
	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
		ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf_v1;
		ops->get_bitmask_active_intf =
			dpu_hw_ctl_active_get_bitmask_intf;
		ops->update_pending_intf_flush =
			dpu_hw_ctl_update_pending_intf_flush;
	} else {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
		ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
	}
	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
	ops->trigger_start = dpu_hw_ctl_trigger_start;
	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
	ops->reset = dpu_hw_ctl_reset_control;
	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
	ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
	ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
	ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp;
}

static struct dpu_hw_blk_ops dpu_hw_ops;

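/*
 * dpu_hw_ctl_init - allocate and set up a CTL block context for @idx,
 * mapping its registers from @addr via catalog @m. Returns an ERR_PTR on
 * failure; release the result with dpu_hw_ctl_destroy().
 *
 * Typical usage (sketch, with a caller-provided mmio base and catalog):
 *
 *	ctl = dpu_hw_ctl_init(CTL_0, mmio, catalog);
 *	if (IS_ERR(ctl))
 *		return PTR_ERR(ctl);
 */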
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
		void __iomem *addr,
		const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_ctl *c;
	const struct dpu_ctl_cfg *cfg;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _ctl_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		pr_err("failed to create dpu_hw_ctl %d\n", idx);
		return ERR_PTR(-EINVAL);
	}

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = idx;
	c->mixer_count = m->mixer_count;
	c->mixer_hw_caps = m->mixer;

	dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);

	return c;
}

void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
	if (ctx)
		dpu_hw_blk_destroy(&ctx->base);
	kfree(ctx);
}