1 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License version 2 and
5  * only version 2 as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  */
12 
13 #include <linux/delay.h>
14 #include "dpu_hwio.h"
15 #include "dpu_hw_ctl.h"
16 #include "dpu_kms.h"
17 #include "dpu_trace.h"
18 
19 #define   CTL_LAYER(lm)                 \
20 	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
21 #define   CTL_LAYER_EXT(lm)             \
22 	(0x40 + (((lm) - LM_0) * 0x004))
23 #define   CTL_LAYER_EXT2(lm)             \
24 	(0x70 + (((lm) - LM_0) * 0x004))
25 #define   CTL_LAYER_EXT3(lm)             \
26 	(0xA0 + (((lm) - LM_0) * 0x004))
27 #define   CTL_TOP                       0x014
28 #define   CTL_FLUSH                     0x018
29 #define   CTL_START                     0x01C
30 #define   CTL_PREPARE                   0x0d0
31 #define   CTL_SW_RESET                  0x030
32 #define   CTL_LAYER_EXTN_OFFSET         0x40
33 
34 #define CTL_MIXER_BORDER_OUT            BIT(24)
35 #define CTL_FLUSH_MASK_CTL              BIT(17)
36 
37 #define DPU_REG_RESET_TIMEOUT_US        2000
38 
39 static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
40 		struct dpu_mdss_cfg *m,
41 		void __iomem *addr,
42 		struct dpu_hw_blk_reg_map *b)
43 {
44 	int i;
45 
46 	for (i = 0; i < m->ctl_count; i++) {
47 		if (ctl == m->ctl[i].id) {
48 			b->base_off = addr;
49 			b->blk_off = m->ctl[i].base;
50 			b->length = m->ctl[i].len;
51 			b->hwversion = m->hwversion;
52 			b->log_mask = DPU_DBG_MASK_CTL;
53 			return &m->ctl[i];
54 		}
55 	}
56 	return ERR_PTR(-ENOMEM);
57 }
58 
59 static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
60 		enum dpu_lm lm)
61 {
62 	int i;
63 	int stages = -EINVAL;
64 
65 	for (i = 0; i < count; i++) {
66 		if (lm == mixer[i].id) {
67 			stages = mixer[i].sblk->maxblendstages;
68 			break;
69 		}
70 	}
71 
72 	return stages;
73 }
74 
/* Read back the CTL_FLUSH register (flush bits still pending in hardware). */
static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}
81 
/*
 * Write 1 to CTL_START to trigger the hardware to take in the programmed
 * configuration.  The current software pending-flush mask and the hardware
 * flush register are traced first for debugging.
 */
static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}
88 
/*
 * Write 1 to CTL_PREPARE to arm the CTL path for the upcoming frame.
 * Traces the software pending-flush mask and hardware flush register first.
 */
static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}
95 
/*
 * Drop all software-accumulated flush bits.  Purely a software-state reset:
 * no register is written, so anything already flushed to hardware is
 * unaffected.
 */
static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
}
102 
/*
 * OR @flushbits into the software pending-flush mask.  The accumulated mask
 * is only written to hardware by dpu_hw_ctl_trigger_flush().
 */
static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}
110 
111 static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
112 {
113 	if (!ctx)
114 		return 0x0;
115 
116 	return ctx->pending_flush_mask;
117 }
118 
/*
 * Commit the software-accumulated flush mask to the CTL_FLUSH register so
 * the hardware picks up all pending block updates.  The mask itself is not
 * cleared here; see dpu_hw_ctl_clear_pending_flush().
 */
static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
125 
126 static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
127 	enum dpu_sspp sspp)
128 {
129 	uint32_t flushbits = 0;
130 
131 	switch (sspp) {
132 	case SSPP_VIG0:
133 		flushbits =  BIT(0);
134 		break;
135 	case SSPP_VIG1:
136 		flushbits = BIT(1);
137 		break;
138 	case SSPP_VIG2:
139 		flushbits = BIT(2);
140 		break;
141 	case SSPP_VIG3:
142 		flushbits = BIT(18);
143 		break;
144 	case SSPP_RGB0:
145 		flushbits = BIT(3);
146 		break;
147 	case SSPP_RGB1:
148 		flushbits = BIT(4);
149 		break;
150 	case SSPP_RGB2:
151 		flushbits = BIT(5);
152 		break;
153 	case SSPP_RGB3:
154 		flushbits = BIT(19);
155 		break;
156 	case SSPP_DMA0:
157 		flushbits = BIT(11);
158 		break;
159 	case SSPP_DMA1:
160 		flushbits = BIT(12);
161 		break;
162 	case SSPP_DMA2:
163 		flushbits = BIT(24);
164 		break;
165 	case SSPP_DMA3:
166 		flushbits = BIT(25);
167 		break;
168 	case SSPP_CURSOR0:
169 		flushbits = BIT(22);
170 		break;
171 	case SSPP_CURSOR1:
172 		flushbits = BIT(23);
173 		break;
174 	default:
175 		break;
176 	}
177 
178 	return flushbits;
179 }
180 
181 static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
182 	enum dpu_lm lm)
183 {
184 	uint32_t flushbits = 0;
185 
186 	switch (lm) {
187 	case LM_0:
188 		flushbits = BIT(6);
189 		break;
190 	case LM_1:
191 		flushbits = BIT(7);
192 		break;
193 	case LM_2:
194 		flushbits = BIT(8);
195 		break;
196 	case LM_3:
197 		flushbits = BIT(9);
198 		break;
199 	case LM_4:
200 		flushbits = BIT(10);
201 		break;
202 	case LM_5:
203 		flushbits = BIT(20);
204 		break;
205 	default:
206 		return -EINVAL;
207 	}
208 
209 	flushbits |= CTL_FLUSH_MASK_CTL;
210 
211 	return flushbits;
212 }
213 
214 static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
215 		u32 *flushbits, enum dpu_intf intf)
216 {
217 	switch (intf) {
218 	case INTF_0:
219 		*flushbits |= BIT(31);
220 		break;
221 	case INTF_1:
222 		*flushbits |= BIT(30);
223 		break;
224 	case INTF_2:
225 		*flushbits |= BIT(29);
226 		break;
227 	case INTF_3:
228 		*flushbits |= BIT(28);
229 		break;
230 	default:
231 		return -EINVAL;
232 	}
233 	return 0;
234 }
235 
/*
 * Poll CTL_SW_RESET bit 0 until the hardware clears it or @timeout_us
 * elapses.
 *
 * Return: 0 when the reset completed, non-zero (the still-set status bit)
 * if the reset was still pending when the timeout expired.
 */
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * it takes around 30us to have mdp finish resetting its ctl path
	 * poll every 20-50us so that reset should be completed at 1st poll
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}
257 
/*
 * Issue a software reset of the CTL path (write 1 to CTL_SW_RESET) and
 * wait up to DPU_REG_RESET_TIMEOUT_US for the hardware to clear the bit.
 *
 * Return: 0 on success, -EINVAL if the reset did not complete in time.
 */
static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}
269 
/*
 * If a CTL software reset is currently in flight, wait for it to finish.
 * Fast path: a single read of CTL_SW_RESET; only polls when the reset bit
 * is still set.
 *
 * Return: 0 when no reset is pending (or it completed), -EINVAL if the
 * reset did not complete within DPU_REG_RESET_TIMEOUT_US.
 */
static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}
288 
289 static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
290 {
291 	struct dpu_hw_blk_reg_map *c = &ctx->hw;
292 	int i;
293 
294 	for (i = 0; i < ctx->mixer_count; i++) {
295 		DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
296 		DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
297 		DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
298 		DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
299 	}
300 }
301 
/*
 * dpu_hw_ctl_setup_blendstage - program the pipe-to-blend-stage mapping for
 * one layer mixer
 * @ctx:       CTL context
 * @lm:        layer mixer to program
 * @stage_cfg: per-stage pipe assignment; NULL programs border-out only
 *
 * Builds the CTL_LAYER / CTL_LAYER_EXT / _EXT2 / _EXT3 register values in
 * software and writes all four at the end.  Stage i is encoded as (i + 1)
 * in the per-pipe fields (0 detaches a pipe; see
 * dpu_hw_ctl_clear_all_blendstages()).  Legacy pipes use a 3-bit field in
 * CTL_LAYER plus overflow bits in CTL_LAYER_EXT; DMA2/DMA3, cursors, and
 * the RECT_1 halves of multirect pipes use full 4-bit fields in the EXT2/
 * EXT3 registers.
 */
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
	int i, j;
	int stages;
	int pipes_per_stage;

	/* nothing to do if this mixer is not in the catalog */
	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	/* source-split hardware can place several pipes on one stage */
	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;

		for (j = 0 ; j < pipes_per_stage; j++) {
			/* RECT_1 of a multirect pipe is programmed in EXT2/EXT3 */
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];

			switch (stage_cfg->stage[i][j]) {
			case SSPP_VIG0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
				} else {
					mixercfg |= mix << 0;
					mixercfg_ext |= ext << 0;
				}
				break;
			case SSPP_VIG1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
				} else {
					mixercfg |= mix << 3;
					mixercfg_ext |= ext << 2;
				}
				break;
			case SSPP_VIG2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 6;
					mixercfg_ext |= ext << 4;
				}
				break;
			case SSPP_VIG3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 26;
					mixercfg_ext |= ext << 6;
				}
				break;
			case SSPP_RGB0:
				mixercfg |= mix << 9;
				mixercfg_ext |= ext << 8;
				break;
			case SSPP_RGB1:
				mixercfg |= mix << 12;
				mixercfg_ext |= ext << 10;
				break;
			case SSPP_RGB2:
				mixercfg |= mix << 15;
				mixercfg_ext |= ext << 12;
				break;
			case SSPP_RGB3:
				mixercfg |= mix << 29;
				mixercfg_ext |= ext << 14;
				break;
			case SSPP_DMA0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 18;
					mixercfg_ext |= ext << 16;
				}
				break;
			case SSPP_DMA1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 21;
					mixercfg_ext |= ext << 18;
				}
				break;
			case SSPP_DMA2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
				} else {
					/*
					 * NOTE(review): this widens 'mix' to the full 4-bit
					 * stage index for EXT2, but also mutates 'mix' itself,
					 * which then leaks into any later pipe of the same
					 * stage (j loop) — confirm this is intended.
					 */
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 0;
				}
				break;
			case SSPP_DMA3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
				} else {
					/* NOTE(review): same 'mix' mutation as SSPP_DMA2 above */
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 4;
				}
				break;
			case SSPP_CURSOR0:
				mixercfg_ext |= ((i + 1) & 0xF) << 20;
				break;
			case SSPP_CURSOR1:
				mixercfg_ext |= ((i + 1) & 0xF) << 26;
				break;
			default:
				break;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}
435 
/*
 * dpu_hw_ctl_intf_cfg - program the CTL_TOP register for an interface
 * @ctx: CTL context
 * @cfg: interface selection, 3D-mux mode, and video/command mode selection
 *
 * Writes nothing if intf_mode_sel is not a known mode.
 */
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	/* interface index in bits [7:4] */
	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		/* enable the 3D mux (bit 19) and select its mode in [21:20] */
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		/* video mode: clear command-mode bit 17 and stream select [16:15] */
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		/* command mode: set bit 17 and program stream select in [16:15] */
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}
465 
466 static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
467 		unsigned long cap)
468 {
469 	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
470 	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
471 	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
472 	ops->trigger_flush = dpu_hw_ctl_trigger_flush;
473 	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
474 	ops->trigger_start = dpu_hw_ctl_trigger_start;
475 	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
476 	ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
477 	ops->reset = dpu_hw_ctl_reset_control;
478 	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
479 	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
480 	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
481 	ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
482 	ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
483 	ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
484 };
485 
/* Zero-initialized block ops, passed to dpu_hw_blk_init() at CTL creation. */
static struct dpu_hw_blk_ops dpu_hw_ops;
487 
488 struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
489 		void __iomem *addr,
490 		struct dpu_mdss_cfg *m)
491 {
492 	struct dpu_hw_ctl *c;
493 	struct dpu_ctl_cfg *cfg;
494 
495 	c = kzalloc(sizeof(*c), GFP_KERNEL);
496 	if (!c)
497 		return ERR_PTR(-ENOMEM);
498 
499 	cfg = _ctl_offset(idx, m, addr, &c->hw);
500 	if (IS_ERR_OR_NULL(cfg)) {
501 		kfree(c);
502 		pr_err("failed to create dpu_hw_ctl %d\n", idx);
503 		return ERR_PTR(-EINVAL);
504 	}
505 
506 	c->caps = cfg;
507 	_setup_ctl_ops(&c->ops, c->caps->features);
508 	c->idx = idx;
509 	c->mixer_count = m->mixer_count;
510 	c->mixer_hw_caps = m->mixer;
511 
512 	dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
513 
514 	return c;
515 }
516 
517 void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
518 {
519 	if (ctx)
520 		dpu_hw_blk_destroy(&ctx->base);
521 	kfree(ctx);
522 }
523