/*
 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * ARM Mali DP plane manipulation routines.
 */

#include <linux/iommu.h>

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>

#include "malidp_hw.h"
#include "malidp_drv.h"

/* Layer specific register offsets */
#define MALIDP_LAYER_FORMAT		0x000
#define   LAYER_FORMAT_MASK		0x3f
#define MALIDP_LAYER_CONTROL		0x004
#define   LAYER_ENABLE			(1 << 0)
#define   LAYER_FLOWCFG_MASK		7
#define   LAYER_FLOWCFG(x)		(((x) & LAYER_FLOWCFG_MASK) << 1)
#define     LAYER_FLOWCFG_SCALE_SE	3
#define   LAYER_ROT_OFFSET		8
#define   LAYER_H_FLIP			(1 << 10)
#define   LAYER_V_FLIP			(1 << 11)
#define   LAYER_ROT_MASK		(0xf << 8)
#define   LAYER_COMP_MASK		(0x3 << 12)
#define   LAYER_COMP_PIXEL		(0x3 << 12)
#define   LAYER_COMP_PLANE		(0x2 << 12)
#define   LAYER_PMUL_ENABLE		(0x1 << 14)
#define   LAYER_ALPHA_OFFSET		(16)
#define   LAYER_ALPHA_MASK		(0xff)
#define   LAYER_ALPHA(x)		(((x) & LAYER_ALPHA_MASK) << LAYER_ALPHA_OFFSET)
#define MALIDP_LAYER_COMPOSE		0x008
#define MALIDP_LAYER_SIZE		0x00c
#define   LAYER_H_VAL(x)		(((x) & 0x1fff) << 0)
#define   LAYER_V_VAL(x)		(((x) & 0x1fff) << 16)
#define MALIDP_LAYER_COMP_SIZE		0x010
#define MALIDP_LAYER_OFFSET		0x014
#define MALIDP550_LS_ENABLE		0x01c
#define MALIDP550_LS_R1_IN_SIZE		0x020
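/*
 * The two MALIDP550_LS_* registers control the SMART layer's first
 * rectangle; see the DE_SMART handling in malidp_de_plane_update().
 */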

/*
 * This 4-entry look-up-table is used to determine the full 8-bit alpha value
 * for formats with 1- or 2-bit alpha channels.
 * We set it to give 100%/0% opacity for 1-bit formats and 100%/66%/33%/0%
 * opacity for 2-bit formats.
 */
#define MALIDP_ALPHA_LUT 0xffaa5500
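/*
 * Reading the LUT from its least significant byte upwards, the four
 * entries are 0x00, 0x55, 0xaa and 0xff, i.e. roughly 0%, 33%, 66% and
 * 100% opacity.
 */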

/* page sizes the MMU prefetcher can support */
#define MALIDP_MMU_PREFETCH_PARTIAL_PGSIZES	(SZ_4K | SZ_64K)
#define MALIDP_MMU_PREFETCH_FULL_PGSIZES	(SZ_1M | SZ_2M)

/* readahead for partial-frame prefetch */
#define MALIDP_MMU_PREFETCH_READAHEAD		8

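/* drm_plane_funcs.destroy: release the DRM core state, then the wrapper */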
static void malidp_de_plane_destroy(struct drm_plane *plane)
{
	struct malidp_plane *mp = to_malidp_plane(plane);

	drm_plane_cleanup(plane);
	kfree(mp);
}

/*
 * Replicate what the default ->reset hook does: free the state pointer and
 * allocate a new empty object. We just need enough space to store
 * a malidp_plane_state instead of a drm_plane_state.
 */
static void malidp_plane_reset(struct drm_plane *plane)
{
	struct malidp_plane_state *state = to_malidp_plane_state(plane->state);

	if (state)
		__drm_atomic_helper_plane_destroy_state(&state->base);
	kfree(state);
	plane->state = NULL;
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_plane_reset(plane, &state->base);
}

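/*
 * Duplicate the common plane state with the atomic helper, then copy
 * across the malidp-specific fields of malidp_plane_state.
 */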
static struct
drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
{
	struct malidp_plane_state *state, *m_state;

	if (!plane->state)
		return NULL;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	m_state = to_malidp_plane_state(plane->state);
	__drm_atomic_helper_plane_duplicate_state(plane, &state->base);
	state->rotmem_size = m_state->rotmem_size;
	state->format = m_state->format;
	state->n_planes = m_state->n_planes;

	state->mmu_prefetch_mode = m_state->mmu_prefetch_mode;
	state->mmu_prefetch_pgsize = m_state->mmu_prefetch_pgsize;

	return &state->base;
}

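/* Counterpart of malidp_duplicate_plane_state(): release and free a state */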
static void malidp_destroy_plane_state(struct drm_plane *plane,
				       struct drm_plane_state *state)
{
	struct malidp_plane_state *m_state = to_malidp_plane_state(state);

	__drm_atomic_helper_plane_destroy_state(state);
	kfree(m_state);
}

static const char * const prefetch_mode_names[] = {
	[MALIDP_PREFETCH_MODE_NONE] = "MMU_PREFETCH_NONE",
	[MALIDP_PREFETCH_MODE_PARTIAL] = "MMU_PREFETCH_PARTIAL",
	[MALIDP_PREFETCH_MODE_FULL] = "MMU_PREFETCH_FULL",
};

static void malidp_plane_atomic_print_state(struct drm_printer *p,
					    const struct drm_plane_state *state)
{
	struct malidp_plane_state *ms = to_malidp_plane_state(state);

	drm_printf(p, "\trotmem_size=%u\n", ms->rotmem_size);
	drm_printf(p, "\tformat_id=%u\n", ms->format);
	drm_printf(p, "\tn_planes=%u\n", ms->n_planes);
	drm_printf(p, "\tmmu_prefetch_mode=%s\n",
		   prefetch_mode_names[ms->mmu_prefetch_mode]);
	drm_printf(p, "\tmmu_prefetch_pgsize=%d\n", ms->mmu_prefetch_pgsize);
}

static const struct drm_plane_funcs malidp_de_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = malidp_de_plane_destroy,
	.reset = malidp_plane_reset,
	.atomic_duplicate_state = malidp_duplicate_plane_state,
	.atomic_destroy_state = malidp_destroy_plane_state,
	.atomic_print_state = malidp_plane_atomic_print_state,
};

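/*
 * Validate the plane position and size against the CRTC and record in the
 * CRTC state which layers need the scaling engine. The actual scaling
 * requirements are computed later, in the CRTC's atomic check.
 */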
static int malidp_se_check_scaling(struct malidp_plane *mp,
				   struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_existing_crtc_state(state->state, state->crtc);
	struct malidp_crtc_state *mc;
	u32 src_w, src_h;
	int ret;

	if (!crtc_state)
		return -EINVAL;

	mc = to_malidp_crtc_state(crtc_state);

	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  0, INT_MAX, true, true);
	if (ret)
		return ret;

	if (state->rotation & MALIDP_ROTATED_MASK) {
		src_w = state->src_h >> 16;
		src_h = state->src_w >> 16;
	} else {
		src_w = state->src_w >> 16;
		src_h = state->src_h >> 16;
	}

	if ((state->crtc_w == src_w) && (state->crtc_h == src_h)) {
		/* Scaling not necessary for this plane. */
		mc->scaled_planes_mask &= ~(mp->layer->id);
		return 0;
	}

	if (mp->layer->id & (DE_SMART | DE_GRAPHICS2))
		return -EINVAL;

	mc->scaled_planes_mask |= mp->layer->id;
	/* Defer scaling requirements calculation to the crtc check. */
	return 0;
}

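/*
 * Return the page sizes supported by the IOMMU domain the device is
 * attached to, or 0 if no IOMMU is present. The result is later
 * intersected with the page sizes the MMU prefetcher can handle.
 */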
static u32 malidp_get_pgsize_bitmap(struct malidp_plane *mp)
{
	u32 pgsize_bitmap = 0;

	if (iommu_present(&platform_bus_type)) {
		struct iommu_domain *mmu_dom =
			iommu_get_domain_for_dev(mp->base.dev->dev);

		if (mmu_dom)
			pgsize_bitmap = mmu_dom->pgsize_bitmap;
	}

	return pgsize_bitmap;
}

/*
 * Check if the framebuffer is entirely made up of pages at least pgsize in
 * size. Only a heuristic: assumes that each scatterlist entry has been aligned
 * to the largest page size smaller than its length and that the MMU maps to
 * the largest page size possible.
 */
static bool malidp_check_pages_threshold(struct malidp_plane_state *ms,
					 u32 pgsize)
{
	int i;

	for (i = 0; i < ms->n_planes; i++) {
		struct drm_gem_object *obj;
		struct drm_gem_cma_object *cma_obj;
		struct sg_table *sgt;
		struct scatterlist *sgl;

		obj = drm_gem_fb_get_obj(ms->base.fb, i);
		cma_obj = to_drm_gem_cma_obj(obj);

		if (cma_obj->sgt)
			sgt = cma_obj->sgt;
		else
			sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

		if (!sgt)
			return false;

		sgl = sgt->sgl;

		while (sgl) {
			if (sgl->length < pgsize) {
				if (!cma_obj->sgt)
					kfree(sgt);
				return false;
			}

			sgl = sg_next(sgl);
		}
		if (!cma_obj->sgt)
			kfree(sgt);
	}

	return true;
}

/*
 * Check if it is possible to enable partial-frame MMU prefetch given the
 * current format, AFBC state and rotation.
 */
static bool malidp_partial_prefetch_supported(u32 format, u64 modifier,
					      unsigned int rotation)
{
	bool afbc, sparse;

	/* rotation and horizontal flip not supported for partial prefetch */
	if (rotation & (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
			DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X))
		return false;

	afbc = modifier & DRM_FORMAT_MOD_ARM_AFBC(0);
	sparse = modifier & AFBC_FORMAT_MOD_SPARSE;

	switch (format) {
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_RGBA1010102:
	case DRM_FORMAT_BGRA1010102:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_RGBA8888:
	case DRM_FORMAT_BGRA8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_BGRX8888:
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_RGBA5551:
	case DRM_FORMAT_RGB565:
		/* always supported */
		return true;

	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ABGR1555:
	case DRM_FORMAT_BGR565:
		/* supported, but if AFBC then must be sparse mode */
		return (!afbc) || (afbc && sparse);

	case DRM_FORMAT_BGR888:
		/* supported, but not for AFBC */
		return !afbc;

	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_YUV420:
		/* not supported */
		return false;

	default:
		return false;
	}
}

/*
 * Select the preferred MMU prefetch mode. Full-frame prefetch is preferred as
 * long as the framebuffer is all large pages. Otherwise partial-frame prefetch
 * is selected as long as it is supported for the current format. The selected
 * page size for prefetch is returned in pgsize_bitmap.
 */
static enum mmu_prefetch_mode malidp_mmu_prefetch_select_mode
		(struct malidp_plane_state *ms, u32 *pgsize_bitmap)
{
	u32 pgsizes;

	/* get the full-frame prefetch page size(s) supported by the MMU */
	pgsizes = *pgsize_bitmap & MALIDP_MMU_PREFETCH_FULL_PGSIZES;

	while (pgsizes) {
		u32 largest_pgsize = 1 << __fls(pgsizes);

		if (malidp_check_pages_threshold(ms, largest_pgsize)) {
			*pgsize_bitmap = largest_pgsize;
			return MALIDP_PREFETCH_MODE_FULL;
		}

		pgsizes -= largest_pgsize;
	}

	/* get the partial-frame prefetch page size(s) supported by the MMU */
	pgsizes = *pgsize_bitmap & MALIDP_MMU_PREFETCH_PARTIAL_PGSIZES;

	if (malidp_partial_prefetch_supported(ms->base.fb->format->format,
					      ms->base.fb->modifier,
					      ms->base.rotation)) {
		/* partial prefetch using the smallest page size */
		*pgsize_bitmap = 1 << __ffs(pgsizes);
		return MALIDP_PREFETCH_MODE_PARTIAL;
	}
	*pgsize_bitmap = 0;
	return MALIDP_PREFETCH_MODE_NONE;
}

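/*
 * Build the value of the layer's MMU control register: enable prefetch,
 * select partial mode with the requested readahead when needed, and flag
 * the large-page (64K/2M) setting for each plane of the framebuffer.
 */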
static u32 malidp_calc_mmu_control_value(enum mmu_prefetch_mode mode,
					 u8 readahead, u8 n_planes, u32 pgsize)
{
	u32 mmu_ctrl = 0;

	if (mode != MALIDP_PREFETCH_MODE_NONE) {
		mmu_ctrl |= MALIDP_MMU_CTRL_EN;

		if (mode == MALIDP_PREFETCH_MODE_PARTIAL) {
			mmu_ctrl |= MALIDP_MMU_CTRL_MODE;
			mmu_ctrl |= MALIDP_MMU_CTRL_PP_NUM_REQ(readahead);
		}

		if (pgsize == SZ_64K || pgsize == SZ_2M) {
			int i;

			for (i = 0; i < n_planes; i++)
				mmu_ctrl |= MALIDP_MMU_CTRL_PX_PS(i);
		}
	}

	return mmu_ctrl;
}

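/*
 * Choose the MMU prefetch settings for the new plane state. Layers without
 * an MMU control register (mmu_ctrl_offset == 0) don't support prefetch.
 */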
static void malidp_de_prefetch_settings(struct malidp_plane *mp,
					struct malidp_plane_state *ms)
{
	if (!mp->layer->mmu_ctrl_offset)
		return;

	/* get the page sizes supported by the MMU */
	ms->mmu_prefetch_pgsize = malidp_get_pgsize_bitmap(mp);
	ms->mmu_prefetch_mode =
		malidp_mmu_prefetch_select_mode(ms, &ms->mmu_prefetch_pgsize);
}

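/*
 * Atomic check for a plane: validate the format, pitch and tile alignment,
 * line size limits, stride restrictions, scaling, rotation and blending
 * constraints, then compute the rotation memory and MMU prefetch settings
 * for the new state.
 */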
static int malidp_de_plane_check(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct malidp_plane *mp = to_malidp_plane(plane);
	struct malidp_plane_state *ms = to_malidp_plane_state(state);
	bool rotated = state->rotation & MALIDP_ROTATED_MASK;
	struct drm_framebuffer *fb;
	u16 pixel_alpha = state->pixel_blend_mode;
	int i, ret;
	unsigned int block_w, block_h;

	if (!state->crtc || !state->fb)
		return 0;

	fb = state->fb;

	ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
					     mp->layer->id,
					     fb->format->format);
	if (ms->format == MALIDP_INVALID_FORMAT_ID)
		return -EINVAL;

	ms->n_planes = fb->format->num_planes;
	for (i = 0; i < ms->n_planes; i++) {
		u8 alignment = malidp_hw_get_pitch_align(mp->hwdev, rotated);

		if ((fb->pitches[i] * drm_format_info_block_height(fb->format, i))
				& (alignment - 1)) {
			DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
				      fb->pitches[i], i);
			return -EINVAL;
		}
	}

	block_w = drm_format_info_block_width(fb->format, 0);
	block_h = drm_format_info_block_height(fb->format, 0);
	if (fb->width % block_w || fb->height % block_h) {
		DRM_DEBUG_KMS("Buffer width/height needs to be a multiple of tile sizes\n");
		return -EINVAL;
	}
	if ((state->src_x >> 16) % block_w || (state->src_y >> 16) % block_h) {
		DRM_DEBUG_KMS("Plane src_x/src_y needs to be a multiple of tile sizes\n");
		return -EINVAL;
	}

	if ((state->crtc_w > mp->hwdev->max_line_size) ||
	    (state->crtc_h > mp->hwdev->max_line_size) ||
	    (state->crtc_w < mp->hwdev->min_line_size) ||
	    (state->crtc_h < mp->hwdev->min_line_size))
		return -EINVAL;

	/*
	 * DP550/650 video layers can accept 3 plane formats only if
	 * fb->pitches[1] == fb->pitches[2] since they don't have a
	 * third plane stride register.
	 */
	if (ms->n_planes == 3 &&
	    !(mp->hwdev->hw->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
	    (state->fb->pitches[1] != state->fb->pitches[2]))
		return -EINVAL;

	ret = malidp_se_check_scaling(mp, state);
	if (ret)
		return ret;

	/* validate the rotation constraints for each layer */
	if (state->rotation != DRM_MODE_ROTATE_0) {
		if (mp->layer->rot == ROTATE_NONE)
			return -EINVAL;
		if ((mp->layer->rot == ROTATE_COMPRESSED) && !(fb->modifier))
			return -EINVAL;
		/*
		 * packed RGB888 / BGR888 can't be rotated or flipped
		 * unless they are stored in a compressed way
		 */
		if ((fb->format->format == DRM_FORMAT_RGB888 ||
		     fb->format->format == DRM_FORMAT_BGR888) && !(fb->modifier))
			return -EINVAL;
	}

	ms->rotmem_size = 0;
	if (state->rotation & MALIDP_ROTATED_MASK) {
		int val;

		val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w,
						     state->crtc_h,
						     fb->format->format);
		if (val < 0)
			return val;

		ms->rotmem_size = val;
	}

	/* HW can't support plane + pixel blending */
	if ((state->alpha != DRM_BLEND_ALPHA_OPAQUE) &&
	    (pixel_alpha != DRM_MODE_BLEND_PIXEL_NONE) &&
	    fb->format->has_alpha)
		return -EINVAL;

	malidp_de_prefetch_settings(mp, ms);

	return 0;
}

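/*
 * Program the per-plane stride registers. Video layers without the
 * MALIDP_DEVICE_LV_HAS_3_STRIDES feature only have two stride registers,
 * so the two chroma planes must share the same pitch (enforced in
 * malidp_de_plane_check()).
 */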
static void malidp_de_set_plane_pitches(struct malidp_plane *mp,
					int num_planes, unsigned int pitches[3])
{
	int i;
	int num_strides = num_planes;

	if (!mp->layer->stride_offset)
		return;

	if (num_planes == 3)
		num_strides = (mp->hwdev->hw->features &
			       MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2;

	/*
	 * The drm convention for pitch is that it needs to cover width * cpp,
	 * but our hardware wants the pitch/stride to cover all rows included
	 * in a tile.
	 */
	for (i = 0; i < num_strides; ++i) {
		unsigned int block_h = drm_format_info_block_height(mp->base.state->fb->format, i);

		malidp_hw_write(mp->hwdev, pitches[i] * block_h,
				mp->layer->base +
				mp->layer->stride_offset + i * 4);
	}
}

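/*
 * YUV to RGB conversion matrices, one per supported encoding and range.
 * Each entry holds the nine matrix coefficients followed by three offsets;
 * the coefficients appear to use a fixed-point format in which 1024
 * represents 1.0.
 */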
static const s16
malidp_yuv2rgb_coeffs[][DRM_COLOR_RANGE_MAX][MALIDP_COLORADJ_NUM_COEFFS] = {
	[DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
		1192,    0, 1634,
		1192, -401, -832,
		1192, 2066,    0,
		  64,  512,  512
	},
	[DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_FULL_RANGE] = {
		1024,    0, 1436,
		1024, -352, -731,
		1024, 1815,    0,
		   0,  512,  512
	},
	[DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
		1192,    0, 1836,
		1192, -218, -546,
		1192, 2163,    0,
		  64,  512,  512
	},
	[DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_FULL_RANGE] = {
		1024,    0, 1613,
		1024, -192, -479,
		1024, 1900,    0,
		   0,  512,  512
	},
	[DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
		1024,    0, 1476,
		1024, -165, -572,
		1024, 1884,    0,
		   0,  512,  512
	},
	[DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_FULL_RANGE] = {
		1024,    0, 1510,
		1024, -168, -585,
		1024, 1927,    0,
		   0,  512,  512
	}
};

static void malidp_de_set_color_encoding(struct malidp_plane *plane,
					 enum drm_color_encoding enc,
					 enum drm_color_range range)
{
	unsigned int i;

	for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
		/* coefficients are signed, two's complement values */
		malidp_hw_write(plane->hwdev, malidp_yuv2rgb_coeffs[enc][range][i],
				plane->layer->base + plane->layer->yuv2rgb_offset +
				i * 4);
	}
}

static void malidp_de_set_mmu_control(struct malidp_plane *mp,
				      struct malidp_plane_state *ms)
{
	u32 mmu_ctrl;

	/* check hardware supports MMU prefetch */
	if (!mp->layer->mmu_ctrl_offset)
		return;

	mmu_ctrl = malidp_calc_mmu_control_value(ms->mmu_prefetch_mode,
						 MALIDP_MMU_PREFETCH_READAHEAD,
						 ms->n_planes,
						 ms->mmu_prefetch_pgsize);

	malidp_hw_write(mp->hwdev, mmu_ctrl,
			mp->layer->base + mp->layer->mmu_ctrl_offset);
}

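/*
 * Atomic update: program the layer's format, per-plane buffer addresses,
 * MMU prefetch control, strides, input/composition sizes and offset, the
 * color encoding when it changed, rotation/flip, blending mode and alpha,
 * and finally the flow configuration and enable bit.
 */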
static void malidp_de_plane_update(struct drm_plane *plane,
				   struct drm_plane_state *old_state)
{
	struct malidp_plane *mp;
	struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
	struct drm_plane_state *state = plane->state;
	u16 pixel_alpha = state->pixel_blend_mode;
	u8 plane_alpha = state->alpha >> 8;
	u32 src_w, src_h, dest_w, dest_h, val;
	int i;

	mp = to_malidp_plane(plane);

	/* convert src values from Q16 fixed point to integer */
	src_w = state->src_w >> 16;
	src_h = state->src_h >> 16;
	dest_w = state->crtc_w;
	dest_h = state->crtc_h;

	val = malidp_hw_read(mp->hwdev, mp->layer->base);
	val = (val & ~LAYER_FORMAT_MASK) | ms->format;
	malidp_hw_write(mp->hwdev, val, mp->layer->base);

	for (i = 0; i < ms->n_planes; i++) {
		/* calculate the offset for the layer's plane registers */
		u16 ptr = mp->layer->ptr + (i << 4);
		dma_addr_t fb_addr = drm_fb_cma_get_gem_addr(state->fb,
							     state, i);

		malidp_hw_write(mp->hwdev, lower_32_bits(fb_addr), ptr);
		malidp_hw_write(mp->hwdev, upper_32_bits(fb_addr), ptr + 4);
	}

	malidp_de_set_mmu_control(mp, ms);

	malidp_de_set_plane_pitches(mp, ms->n_planes,
				    state->fb->pitches);

	if ((plane->state->color_encoding != old_state->color_encoding) ||
	    (plane->state->color_range != old_state->color_range))
		malidp_de_set_color_encoding(mp, plane->state->color_encoding,
					     plane->state->color_range);

	malidp_hw_write(mp->hwdev, LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
			mp->layer->base + MALIDP_LAYER_SIZE);

	malidp_hw_write(mp->hwdev, LAYER_H_VAL(dest_w) | LAYER_V_VAL(dest_h),
			mp->layer->base + MALIDP_LAYER_COMP_SIZE);

	malidp_hw_write(mp->hwdev, LAYER_H_VAL(state->crtc_x) |
			LAYER_V_VAL(state->crtc_y),
			mp->layer->base + MALIDP_LAYER_OFFSET);

	if (mp->layer->id == DE_SMART) {
		/*
		 * Enable the first rectangle in the SMART layer to be
		 * able to use it as a drm plane.
		 */
		malidp_hw_write(mp->hwdev, 1,
				mp->layer->base + MALIDP550_LS_ENABLE);
		malidp_hw_write(mp->hwdev,
				LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
				mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
	}

	/* first clear the rotation bits */
	val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
	val &= ~LAYER_ROT_MASK;

	/* setup the rotation and axis flip bits */
	if (state->rotation & DRM_MODE_ROTATE_MASK)
		val |= ilog2(plane->state->rotation & DRM_MODE_ROTATE_MASK) <<
		       LAYER_ROT_OFFSET;
	if (state->rotation & DRM_MODE_REFLECT_X)
		val |= LAYER_H_FLIP;
	if (state->rotation & DRM_MODE_REFLECT_Y)
		val |= LAYER_V_FLIP;

	val &= ~(LAYER_COMP_MASK | LAYER_PMUL_ENABLE | LAYER_ALPHA(0xff));

	if (state->alpha != DRM_BLEND_ALPHA_OPAQUE) {
		val |= LAYER_COMP_PLANE;
	} else if (state->fb->format->has_alpha) {
		/* We only care about blend mode if the format has alpha */
		switch (pixel_alpha) {
		case DRM_MODE_BLEND_PREMULTI:
			val |= LAYER_COMP_PIXEL | LAYER_PMUL_ENABLE;
			break;
		case DRM_MODE_BLEND_COVERAGE:
			val |= LAYER_COMP_PIXEL;
			break;
		}
	}
	val |= LAYER_ALPHA(plane_alpha);

	val &= ~LAYER_FLOWCFG(LAYER_FLOWCFG_MASK);
	if (state->crtc) {
		struct malidp_crtc_state *m =
			to_malidp_crtc_state(state->crtc->state);

		if (m->scaler_config.scale_enable &&
		    m->scaler_config.plane_src_id == mp->layer->id)
			val |= LAYER_FLOWCFG(LAYER_FLOWCFG_SCALE_SE);
	}

	/* set the 'enable layer' bit */
	val |= LAYER_ENABLE;

	malidp_hw_write(mp->hwdev, val,
			mp->layer->base + MALIDP_LAYER_CONTROL);
}

static void malidp_de_plane_disable(struct drm_plane *plane,
				    struct drm_plane_state *state)
{
	struct malidp_plane *mp = to_malidp_plane(plane);

	malidp_hw_clearbits(mp->hwdev,
			    LAYER_ENABLE | LAYER_FLOWCFG(LAYER_FLOWCFG_MASK),
			    mp->layer->base + MALIDP_LAYER_CONTROL);
}

static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
	.atomic_check = malidp_de_plane_check,
	.atomic_update = malidp_de_plane_update,
	.atomic_disable = malidp_de_plane_disable,
};

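/*
 * Create one DRM plane for each hardware layer described in the register
 * map, restrict each plane's format list to what that layer supports, and
 * attach the alpha, blend mode, rotation and (for video layers) color
 * encoding properties.
 */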
int malidp_de_planes_init(struct drm_device *drm)
{
	struct malidp_drm *malidp = drm->dev_private;
	const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
	struct malidp_plane *plane = NULL;
	enum drm_plane_type plane_type;
	unsigned long crtcs = 1 << drm->mode_config.num_crtc;
	unsigned long flags = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
			      DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
	unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
				  BIT(DRM_MODE_BLEND_PREMULTI)   |
				  BIT(DRM_MODE_BLEND_COVERAGE);
	u32 *formats;
	int ret, i, j, n;

	formats = kcalloc(map->n_pixel_formats, sizeof(*formats), GFP_KERNEL);
	if (!formats) {
		ret = -ENOMEM;
		goto cleanup;
	}

	for (i = 0; i < map->n_layers; i++) {
		u8 id = map->layers[i].id;

		plane = kzalloc(sizeof(*plane), GFP_KERNEL);
		if (!plane) {
			ret = -ENOMEM;
			goto cleanup;
		}

		/* build the list of DRM supported formats based on the map */
		for (n = 0, j = 0; j < map->n_pixel_formats; j++) {
			if ((map->pixel_formats[j].layer & id) == id)
				formats[n++] = map->pixel_formats[j].format;
		}

		plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
					DRM_PLANE_TYPE_OVERLAY;
		ret = drm_universal_plane_init(drm, &plane->base, crtcs,
					       &malidp_de_plane_funcs, formats,
					       n, NULL, plane_type, NULL);
		if (ret < 0) {
			/* the plane was never registered, so free it here */
			kfree(plane);
			goto cleanup;
		}

		drm_plane_helper_add(&plane->base,
				     &malidp_de_plane_helper_funcs);
		plane->hwdev = malidp->dev;
		plane->layer = &map->layers[i];

		drm_plane_create_alpha_property(&plane->base);
		drm_plane_create_blend_mode_property(&plane->base, blend_caps);

		if (id == DE_SMART) {
			/* Skip the features which the SMART layer doesn't have. */
			continue;
		}

		drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0, flags);
		malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
				plane->layer->base + MALIDP_LAYER_COMPOSE);

		/* Attach the YUV->RGB property only to video layers */
		if (id & (DE_VIDEO1 | DE_VIDEO2)) {
			/* default encoding for YUV->RGB is BT601 NARROW */
			enum drm_color_encoding enc = DRM_COLOR_YCBCR_BT601;
			enum drm_color_range range = DRM_COLOR_YCBCR_LIMITED_RANGE;

			ret = drm_plane_create_color_properties(&plane->base,
					BIT(DRM_COLOR_YCBCR_BT601) | \
					BIT(DRM_COLOR_YCBCR_BT709) | \
					BIT(DRM_COLOR_YCBCR_BT2020),
					BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | \
					BIT(DRM_COLOR_YCBCR_FULL_RANGE),
					enc, range);
			if (!ret)
				/* program the HW registers */
				malidp_de_set_color_encoding(plane, enc, range);
			else
				DRM_WARN("Failed to create video layer %d color properties\n", id);
		}
	}

	kfree(formats);

	return 0;

cleanup:
	kfree(formats);

	return ret;
}