1 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License version 2 and
5  * only version 2 as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  */
12 
13 #include <linux/delay.h>
14 #include "dpu_hwio.h"
15 #include "dpu_hw_ctl.h"
16 #include "dpu_dbg.h"
17 #include "dpu_kms.h"
18 
/*
 * Per-mixer LAYER register offsets. The base CTL_LAYER bank is
 * non-contiguous: LM_5 lives at 0x024 while LM_0..LM_4 are packed at
 * 0x000 + 4 * index. The EXT/EXT2/EXT3 banks are contiguous.
 */
#define   CTL_LAYER(lm)                 \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT(lm)             \
	(0x40 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT2(lm)             \
	(0x70 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT3(lm)             \
	(0xA0 + (((lm) - LM_0) * 0x004))
/* CTL block register offsets */
#define   CTL_TOP                       0x014
#define   CTL_FLUSH                     0x018
#define   CTL_START                     0x01C
#define   CTL_PREPARE                   0x0d0
#define   CTL_SW_RESET                  0x030
#define   CTL_LAYER_EXTN_OFFSET         0x40

#define CTL_MIXER_BORDER_OUT            BIT(24)
#define CTL_FLUSH_MASK_CTL              BIT(17)

/* upper bound for polling CTL_SW_RESET to self-clear */
#define DPU_REG_RESET_TIMEOUT_US        2000
38 
39 static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
40 		struct dpu_mdss_cfg *m,
41 		void __iomem *addr,
42 		struct dpu_hw_blk_reg_map *b)
43 {
44 	int i;
45 
46 	for (i = 0; i < m->ctl_count; i++) {
47 		if (ctl == m->ctl[i].id) {
48 			b->base_off = addr;
49 			b->blk_off = m->ctl[i].base;
50 			b->length = m->ctl[i].len;
51 			b->hwversion = m->hwversion;
52 			b->log_mask = DPU_DBG_MASK_CTL;
53 			return &m->ctl[i];
54 		}
55 	}
56 	return ERR_PTR(-ENOMEM);
57 }
58 
59 static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
60 		enum dpu_lm lm)
61 {
62 	int i;
63 	int stages = -EINVAL;
64 
65 	for (i = 0; i < count; i++) {
66 		if (lm == mixer[i].id) {
67 			stages = mixer[i].sblk->maxblendstages;
68 			break;
69 		}
70 	}
71 
72 	return stages;
73 }
74 
/* Kick off the programmed CTL path by writing CTL_START */
static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}
79 
/* Arm the CTL path via CTL_PREPARE ahead of the next start/flush */
static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}
84 
/* Drop all software-accumulated flush bits (no register access) */
static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	ctx->pending_flush_mask = 0x0;
}
89 
/* OR @flushbits into the software flush mask; committed by trigger_flush */
static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	ctx->pending_flush_mask |= flushbits;
}
95 
96 static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
97 {
98 	if (!ctx)
99 		return 0x0;
100 
101 	return ctx->pending_flush_mask;
102 }
103 
/* Commit the accumulated software flush mask to the CTL_FLUSH register */
static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
109 
/* Read back CTL_FLUSH; hardware clears bits as flushes complete */
static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}
116 
117 static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
118 	enum dpu_sspp sspp)
119 {
120 	uint32_t flushbits = 0;
121 
122 	switch (sspp) {
123 	case SSPP_VIG0:
124 		flushbits =  BIT(0);
125 		break;
126 	case SSPP_VIG1:
127 		flushbits = BIT(1);
128 		break;
129 	case SSPP_VIG2:
130 		flushbits = BIT(2);
131 		break;
132 	case SSPP_VIG3:
133 		flushbits = BIT(18);
134 		break;
135 	case SSPP_RGB0:
136 		flushbits = BIT(3);
137 		break;
138 	case SSPP_RGB1:
139 		flushbits = BIT(4);
140 		break;
141 	case SSPP_RGB2:
142 		flushbits = BIT(5);
143 		break;
144 	case SSPP_RGB3:
145 		flushbits = BIT(19);
146 		break;
147 	case SSPP_DMA0:
148 		flushbits = BIT(11);
149 		break;
150 	case SSPP_DMA1:
151 		flushbits = BIT(12);
152 		break;
153 	case SSPP_DMA2:
154 		flushbits = BIT(24);
155 		break;
156 	case SSPP_DMA3:
157 		flushbits = BIT(25);
158 		break;
159 	case SSPP_CURSOR0:
160 		flushbits = BIT(22);
161 		break;
162 	case SSPP_CURSOR1:
163 		flushbits = BIT(23);
164 		break;
165 	default:
166 		break;
167 	}
168 
169 	return flushbits;
170 }
171 
172 static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
173 	enum dpu_lm lm)
174 {
175 	uint32_t flushbits = 0;
176 
177 	switch (lm) {
178 	case LM_0:
179 		flushbits = BIT(6);
180 		break;
181 	case LM_1:
182 		flushbits = BIT(7);
183 		break;
184 	case LM_2:
185 		flushbits = BIT(8);
186 		break;
187 	case LM_3:
188 		flushbits = BIT(9);
189 		break;
190 	case LM_4:
191 		flushbits = BIT(10);
192 		break;
193 	case LM_5:
194 		flushbits = BIT(20);
195 		break;
196 	default:
197 		return -EINVAL;
198 	}
199 
200 	flushbits |= CTL_FLUSH_MASK_CTL;
201 
202 	return flushbits;
203 }
204 
205 static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
206 		u32 *flushbits, enum dpu_intf intf)
207 {
208 	switch (intf) {
209 	case INTF_0:
210 		*flushbits |= BIT(31);
211 		break;
212 	case INTF_1:
213 		*flushbits |= BIT(30);
214 		break;
215 	case INTF_2:
216 		*flushbits |= BIT(29);
217 		break;
218 	case INTF_3:
219 		*flushbits |= BIT(28);
220 		break;
221 	default:
222 		return -EINVAL;
223 	}
224 	return 0;
225 }
226 
/*
 * Poll CTL_SW_RESET until its busy bit (bit 0) clears or @timeout_us
 * elapses. Returns 0 when the reset completed, non-zero (the still-set
 * status bit) on timeout.
 */
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * it takes around 30us to have mdp finish resetting its ctl path
	 * poll every 50us so that reset should be completed at 1st poll
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}
248 
249 static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
250 {
251 	struct dpu_hw_blk_reg_map *c = &ctx->hw;
252 
253 	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
254 	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
255 	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
256 		return -EINVAL;
257 
258 	return 0;
259 }
260 
261 static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
262 {
263 	struct dpu_hw_blk_reg_map *c = &ctx->hw;
264 	u32 status;
265 
266 	status = DPU_REG_READ(c, CTL_SW_RESET);
267 	status &= 0x01;
268 	if (!status)
269 		return 0;
270 
271 	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
272 	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
273 		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
274 		return -EINVAL;
275 	}
276 
277 	return 0;
278 }
279 
/*
 * Zero all four LAYER config banks for every mixer this CTL drives,
 * detaching all pipes from all blend stages.
 */
static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
	}
}
292 
/*
 * Program mixer @lm's LAYER/LAYER_EXT/EXT2/EXT3 registers from
 * @stage_cfg. A NULL @stage_cfg clears the mixer down to just
 * BORDER_OUT. Each pipe's stage number is encoded into a per-pipe
 * bit-field; stages above 7 overflow into the EXT register, and
 * RECT_1 (multirect) placements use the EXT2/EXT3 banks instead.
 */
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	/* unknown mixer: leave the registers untouched */
	if (stages < 0)
		return;

	/* source-split capable mixers accept two pipes per blend stage */
	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;

		for (j = 0 ; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];

			/* encode stage i+1 into this pipe's field */
			switch (stage_cfg->stage[i][j]) {
			case SSPP_VIG0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
				} else {
					mixercfg |= mix << 0;
					mixercfg_ext |= ext << 0;
				}
				break;
			case SSPP_VIG1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
				} else {
					mixercfg |= mix << 3;
					mixercfg_ext |= ext << 2;
				}
				break;
			case SSPP_VIG2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 6;
					mixercfg_ext |= ext << 4;
				}
				break;
			case SSPP_VIG3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 26;
					mixercfg_ext |= ext << 6;
				}
				break;
			case SSPP_RGB0:
				mixercfg |= mix << 9;
				mixercfg_ext |= ext << 8;
				break;
			case SSPP_RGB1:
				mixercfg |= mix << 12;
				mixercfg_ext |= ext << 10;
				break;
			case SSPP_RGB2:
				mixercfg |= mix << 15;
				mixercfg_ext |= ext << 12;
				break;
			case SSPP_RGB3:
				mixercfg |= mix << 29;
				mixercfg_ext |= ext << 14;
				break;
			case SSPP_DMA0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 18;
					mixercfg_ext |= ext << 16;
				}
				break;
			case SSPP_DMA1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 21;
					mixercfg_ext |= ext << 18;
				}
				break;
			case SSPP_DMA2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
				} else {
					/*
					 * NOTE(review): this ORs into 'mix'
					 * (already holding (i+1)&0x7), so a
					 * later pipe in the same stage would
					 * see a widened 'mix' — looks like it
					 * should use a local; confirm against
					 * the register spec before changing.
					 */
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 0;
				}
				break;
			case SSPP_DMA3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
				} else {
					/* NOTE(review): same 'mix' reuse as SSPP_DMA2 above */
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 4;
				}
				break;
			case SSPP_CURSOR0:
				mixercfg_ext |= ((i + 1) & 0xF) << 20;
				break;
			case SSPP_CURSOR1:
				mixercfg_ext |= ((i + 1) & 0xF) << 26;
				break;
			default:
				break;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}
426 
427 static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
428 		struct dpu_hw_intf_cfg *cfg)
429 {
430 	struct dpu_hw_blk_reg_map *c = &ctx->hw;
431 	u32 intf_cfg = 0;
432 
433 	intf_cfg |= (cfg->intf & 0xF) << 4;
434 
435 	if (cfg->mode_3d) {
436 		intf_cfg |= BIT(19);
437 		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
438 	}
439 
440 	switch (cfg->intf_mode_sel) {
441 	case DPU_CTL_MODE_SEL_VID:
442 		intf_cfg &= ~BIT(17);
443 		intf_cfg &= ~(0x3 << 15);
444 		break;
445 	case DPU_CTL_MODE_SEL_CMD:
446 		intf_cfg |= BIT(17);
447 		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
448 		break;
449 	default:
450 		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
451 		return;
452 	}
453 
454 	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
455 }
456 
457 static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
458 		unsigned long cap)
459 {
460 	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
461 	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
462 	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
463 	ops->trigger_flush = dpu_hw_ctl_trigger_flush;
464 	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
465 	ops->trigger_start = dpu_hw_ctl_trigger_start;
466 	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
467 	ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
468 	ops->reset = dpu_hw_ctl_reset_control;
469 	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
470 	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
471 	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
472 	ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
473 	ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
474 	ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
475 };
476 
/* CTL blocks need no block-level start/stop callbacks */
static struct dpu_hw_blk_ops dpu_hw_ops = {
	.start = NULL,
	.stop = NULL,
};
481 
482 struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
483 		void __iomem *addr,
484 		struct dpu_mdss_cfg *m)
485 {
486 	struct dpu_hw_ctl *c;
487 	struct dpu_ctl_cfg *cfg;
488 	int rc;
489 
490 	c = kzalloc(sizeof(*c), GFP_KERNEL);
491 	if (!c)
492 		return ERR_PTR(-ENOMEM);
493 
494 	cfg = _ctl_offset(idx, m, addr, &c->hw);
495 	if (IS_ERR_OR_NULL(cfg)) {
496 		kfree(c);
497 		pr_err("failed to create dpu_hw_ctl %d\n", idx);
498 		return ERR_PTR(-EINVAL);
499 	}
500 
501 	c->caps = cfg;
502 	_setup_ctl_ops(&c->ops, c->caps->features);
503 	c->idx = idx;
504 	c->mixer_count = m->mixer_count;
505 	c->mixer_hw_caps = m->mixer;
506 
507 	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
508 	if (rc) {
509 		DPU_ERROR("failed to init hw blk %d\n", rc);
510 		goto blk_init_error;
511 	}
512 
513 	return c;
514 
515 blk_init_error:
516 	kzfree(c);
517 
518 	return ERR_PTR(rc);
519 }
520 
/* Tear down the hw block registration (if any) and free the context */
void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
	if (ctx)
		dpu_hw_blk_destroy(&ctx->base);
	kfree(ctx);
}
527