xref: /openbmc/linux/drivers/gpu/drm/tegra/plane.c (revision acddaa55)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/dma-mapping.h> /* DMA_MAPPING_ERROR, dma_map_sg(), dma_unmap_sg() */
#include <linux/iommu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>

#include "dc.h"
#include "plane.h"

static void tegra_plane_destroy(struct drm_plane *plane)
{
	struct tegra_plane *p = to_tegra_plane(plane);

	drm_plane_cleanup(plane);
	kfree(p);
}

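/*
 * Free any existing plane state and allocate a fresh one with sane defaults:
 * the z-position starts out at the hardware window index and all IOVA slots
 * are marked as unmapped (DMA_MAPPING_ERROR).
 */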
static void tegra_plane_reset(struct drm_plane *plane)
{
	struct tegra_plane *p = to_tegra_plane(plane);
	struct tegra_plane_state *state;
	unsigned int i;

	if (plane->state)
		__drm_atomic_helper_plane_destroy_state(plane->state);

	kfree(plane->state);
	plane->state = NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state) {
		plane->state = &state->base;
		plane->state->plane = plane;
		plane->state->zpos = p->index;
		plane->state->normalized_zpos = p->index;

		for (i = 0; i < 3; i++)
			state->iova[i] = DMA_MAPPING_ERROR;
	}
}

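/*
 * Duplicate the common plane state and the Tegra-specific fields. Per-commit
 * pinning data (IOVAs and SG tables) is deliberately not copied: the copy
 * starts out unmapped and is filled in again by tegra_dc_pin().
 */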
static struct drm_plane_state *
tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
{
	struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
	struct tegra_plane_state *copy;
	unsigned int i;

	copy = kmalloc(sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
	copy->tiling = state->tiling;
	copy->format = state->format;
	copy->swap = state->swap;
	copy->reflect_x = state->reflect_x;
	copy->reflect_y = state->reflect_y;
	copy->opaque = state->opaque;

	for (i = 0; i < 2; i++)
		copy->blending[i] = state->blending[i];

	for (i = 0; i < 3; i++) {
		copy->iova[i] = DMA_MAPPING_ERROR;
		copy->sgt[i] = NULL;
	}

	return &copy->base;
}

static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
					     struct drm_plane_state *state)
{
	__drm_atomic_helper_plane_destroy_state(state);
	kfree(state);
}

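/*
 * Linear buffers are supported for all formats; any other modifier is only
 * accepted for single-plane formats.
 */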
static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
					     uint32_t format,
					     uint64_t modifier)
{
	const struct drm_format_info *info = drm_format_info(format);

	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return true;

	if (info->num_planes == 1)
		return true;

	return false;
}

const struct drm_plane_funcs tegra_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = tegra_plane_destroy,
	.reset = tegra_plane_reset,
	.atomic_duplicate_state = tegra_plane_atomic_duplicate_state,
	.atomic_destroy_state = tegra_plane_atomic_destroy_state,
	.format_mod_supported = tegra_plane_format_mod_supported,
};

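/*
 * Pin the framebuffer's buffer objects for scanout. Depending on whether the
 * display controller uses an IOMMU through the DMA API, either the address
 * reported by host1x_bo_pin() is used directly or the returned SG table is
 * mapped, in which case it must resolve to a single contiguous chunk of I/O
 * virtual memory.
 */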
static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dc->dev);
	unsigned int i;
	int err;

	for (i = 0; i < state->base.fb->format->num_planes; i++) {
		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
		dma_addr_t phys_addr, *phys;
		struct sg_table *sgt;

		if (!domain || dc->client.group)
			phys = &phys_addr;
		else
			phys = NULL;

		sgt = host1x_bo_pin(dc->dev, &bo->base, phys);
		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto unpin;
		}

		if (sgt) {
			err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents,
					 DMA_TO_DEVICE);
			if (err == 0) {
				err = -ENOMEM;
				goto unpin;
			}

			/*
			 * The display controller needs contiguous memory, so
			 * fail if the buffer is discontiguous and we fail to
			 * map its SG table to a single contiguous chunk of
			 * I/O virtual memory.
			 */
			if (err > 1) {
				err = -EINVAL;
				goto unpin;
			}

			state->iova[i] = sg_dma_address(sgt->sgl);
			state->sgt[i] = sgt;
		} else {
			state->iova[i] = phys_addr;
		}
	}

	return 0;

unpin:
	dev_err(dc->dev, "failed to map plane %u: %d\n", i, err);

	while (i--) {
		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
		struct sg_table *sgt = state->sgt[i];

		if (sgt)
			dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
				     DMA_TO_DEVICE);

		host1x_bo_unpin(dc->dev, &bo->base, sgt);
		state->iova[i] = DMA_MAPPING_ERROR;
		state->sgt[i] = NULL;
	}

	return err;
}

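/*
 * Reverse tegra_dc_pin(): unmap and unpin all buffer objects and reset the
 * per-plane IOVA / SG table bookkeeping in the plane state.
 */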
static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
{
	unsigned int i;

	for (i = 0; i < state->base.fb->format->num_planes; i++) {
		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
		struct sg_table *sgt = state->sgt[i];

		if (sgt)
			dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
				     DMA_TO_DEVICE);

		host1x_bo_unpin(dc->dev, &bo->base, sgt);
		state->iova[i] = DMA_MAPPING_ERROR;
		state->sgt[i] = NULL;
	}
}

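/*
 * Plane helper .prepare_fb() hook: attach the framebuffer's implicit fence
 * via drm_gem_fb_prepare_fb() and pin the buffers into the display
 * controller's address space before the commit programs the hardware.
 */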
int tegra_plane_prepare_fb(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct tegra_dc *dc = to_tegra_dc(state->crtc);

	if (!state->fb)
		return 0;

	drm_gem_fb_prepare_fb(plane, state);

	return tegra_dc_pin(dc, to_tegra_plane_state(state));
}

void tegra_plane_cleanup_fb(struct drm_plane *plane,
			    struct drm_plane_state *state)
{
	struct tegra_dc *dc = to_tegra_dc(state->crtc);

	if (dc)
		tegra_dc_unpin(dc, to_tegra_plane_state(state));
}

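/*
 * Validate the plane state against its CRTC (visibility and clipping) and
 * record the corresponding hardware window in the Tegra CRTC state's plane
 * mask.
 */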
int tegra_plane_state_add(struct tegra_plane *plane,
			  struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct tegra_dc_state *tegra;
	int err;

	/* Propagate errors from allocation or locking failures. */
	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/* Check plane state for visibility and calculate clipping bounds */
	err = drm_atomic_helper_check_plane_state(state, crtc_state,
						  0, INT_MAX, true, true);
	if (err < 0)
		return err;

	tegra = to_dc_state(crtc_state);

	tegra->planes |= WIN_A_ACT_REQ << plane->index;

	return 0;
}

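/*
 * Translate a DRM fourcc into the display controller's window color depth
 * and, where required, a byte-swap setting. Returns -EINVAL for formats the
 * hardware cannot scan out directly.
 */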
int tegra_plane_format(u32 fourcc, u32 *format, u32 *swap)
{
	/* assume no swapping of fetched data */
	if (swap)
		*swap = BYTE_SWAP_NOSWAP;

	switch (fourcc) {
	case DRM_FORMAT_ARGB4444:
		*format = WIN_COLOR_DEPTH_B4G4R4A4;
		break;

	case DRM_FORMAT_ARGB1555:
		*format = WIN_COLOR_DEPTH_B5G5R5A1;
		break;

	case DRM_FORMAT_RGB565:
		*format = WIN_COLOR_DEPTH_B5G6R5;
		break;

	case DRM_FORMAT_RGBA5551:
		*format = WIN_COLOR_DEPTH_A1B5G5R5;
		break;

	case DRM_FORMAT_ARGB8888:
		*format = WIN_COLOR_DEPTH_B8G8R8A8;
		break;

	case DRM_FORMAT_ABGR8888:
		*format = WIN_COLOR_DEPTH_R8G8B8A8;
		break;

	case DRM_FORMAT_ABGR4444:
		*format = WIN_COLOR_DEPTH_R4G4B4A4;
		break;

	case DRM_FORMAT_ABGR1555:
		*format = WIN_COLOR_DEPTH_R5G5B5A;
		break;

	case DRM_FORMAT_BGRA5551:
		*format = WIN_COLOR_DEPTH_AR5G5B5;
		break;

	case DRM_FORMAT_XRGB1555:
		*format = WIN_COLOR_DEPTH_B5G5R5X1;
		break;

	case DRM_FORMAT_RGBX5551:
		*format = WIN_COLOR_DEPTH_X1B5G5R5;
		break;

	case DRM_FORMAT_XBGR1555:
		*format = WIN_COLOR_DEPTH_R5G5B5X1;
		break;

	case DRM_FORMAT_BGRX5551:
		*format = WIN_COLOR_DEPTH_X1R5G5B5;
		break;

	case DRM_FORMAT_BGR565:
		*format = WIN_COLOR_DEPTH_R5G6B5;
		break;

	case DRM_FORMAT_BGRA8888:
		*format = WIN_COLOR_DEPTH_A8R8G8B8;
		break;

	case DRM_FORMAT_RGBA8888:
		*format = WIN_COLOR_DEPTH_A8B8G8R8;
		break;

	case DRM_FORMAT_XRGB8888:
		*format = WIN_COLOR_DEPTH_B8G8R8X8;
		break;

	case DRM_FORMAT_XBGR8888:
		*format = WIN_COLOR_DEPTH_R8G8B8X8;
		break;

	case DRM_FORMAT_UYVY:
		*format = WIN_COLOR_DEPTH_YCbCr422;
		break;

	case DRM_FORMAT_YUYV:
		if (!swap)
			return -EINVAL;

		*format = WIN_COLOR_DEPTH_YCbCr422;
		*swap = BYTE_SWAP_SWAP2;
		break;

	case DRM_FORMAT_YUV420:
		*format = WIN_COLOR_DEPTH_YCbCr420P;
		break;

	case DRM_FORMAT_YUV422:
		*format = WIN_COLOR_DEPTH_YCbCr422P;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

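/*
 * Report whether a window color depth is a YUV format and, optionally,
 * whether it is planar (separate luma/chroma planes) rather than packed.
 */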
bool tegra_plane_format_is_yuv(unsigned int format, bool *planar)
{
	switch (format) {
	case WIN_COLOR_DEPTH_YCbCr422:
	case WIN_COLOR_DEPTH_YUV422:
		if (planar)
			*planar = false;

		return true;

	case WIN_COLOR_DEPTH_YCbCr420P:
	case WIN_COLOR_DEPTH_YUV420P:
	case WIN_COLOR_DEPTH_YCbCr422P:
	case WIN_COLOR_DEPTH_YUV422P:
	case WIN_COLOR_DEPTH_YCbCr422R:
	case WIN_COLOR_DEPTH_YUV422R:
	case WIN_COLOR_DEPTH_YCbCr422RA:
	case WIN_COLOR_DEPTH_YUV422RA:
		if (planar)
			*planar = true;

		return true;
	}

	if (planar)
		*planar = false;

	return false;
}

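/*
 * Subset of DRM formats that carry an alpha component, as consulted by the
 * window blending setup below.
 */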
static bool __drm_format_has_alpha(u32 format)
{
	switch (format) {
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_RGBA5551:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
		return true;
	}

	return false;
}

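/*
 * Map an opaque (X channel) window format to its alpha-capable equivalent.
 * YUV formats and B5G6R5 are passed through unchanged since they have no
 * alpha channel to substitute.
 */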
static int tegra_plane_format_get_alpha(unsigned int opaque,
					unsigned int *alpha)
{
	if (tegra_plane_format_is_yuv(opaque, NULL)) {
		*alpha = opaque;
		return 0;
	}

	switch (opaque) {
	case WIN_COLOR_DEPTH_B5G5R5X1:
		*alpha = WIN_COLOR_DEPTH_B5G5R5A1;
		return 0;

	case WIN_COLOR_DEPTH_X1B5G5R5:
		*alpha = WIN_COLOR_DEPTH_A1B5G5R5;
		return 0;

	case WIN_COLOR_DEPTH_R8G8B8X8:
		*alpha = WIN_COLOR_DEPTH_R8G8B8A8;
		return 0;

	case WIN_COLOR_DEPTH_B8G8R8X8:
		*alpha = WIN_COLOR_DEPTH_B8G8R8A8;
		return 0;

	case WIN_COLOR_DEPTH_B5G6R5:
		*alpha = opaque;
		return 0;
	}

	return -EINVAL;
}

/*
 * This only applies to Tegra20 and Tegra30, where opaque formats are
 * emulated using the corresponding alpha formats with alpha blending
 * disabled.
 */
static int tegra_plane_setup_opacity(struct tegra_plane *tegra,
				     struct tegra_plane_state *state)
{
	unsigned int format;
	int err;

	switch (state->format) {
	case WIN_COLOR_DEPTH_B5G5R5A1:
	case WIN_COLOR_DEPTH_A1B5G5R5:
	case WIN_COLOR_DEPTH_R8G8B8A8:
	case WIN_COLOR_DEPTH_B8G8R8A8:
		state->opaque = false;
		break;

	default:
		err = tegra_plane_format_get_alpha(state->format, &format);
		if (err < 0)
			return err;

		state->format = format;
		state->opaque = true;
		break;
	}

	return 0;
}

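/*
 * Check whether the plane's z-position or opacity changed in this commit.
 * If so, pull the state of all sibling planes on the same CRTC into the
 * atomic commit so their blending configuration can be recomputed; returns
 * 1 in that case, 0 if nothing changed, or a negative error code.
 */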
static int tegra_plane_check_transparency(struct tegra_plane *tegra,
					  struct tegra_plane_state *state)
{
	struct drm_plane_state *old, *plane_state;
	struct drm_plane *plane;

	old = drm_atomic_get_old_plane_state(state->base.state, &tegra->base);

	/* check if zpos / transparency changed */
	if (old->normalized_zpos == state->base.normalized_zpos &&
	    to_tegra_plane_state(old)->opaque == state->opaque)
		return 0;

	/* include all sibling planes into this commit */
	drm_for_each_plane(plane, tegra->base.dev) {
		struct tegra_plane *p = to_tegra_plane(plane);

		/* skip this plane and planes on different CRTCs */
		if (p == tegra || p->dc != tegra->dc)
			continue;

		plane_state = drm_atomic_get_plane_state(state->base.state,
							 plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 1;
}

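/*
 * Return the position of "other" among the (up to) two windows that can
 * overlap "plane", i.e. the index to use in the plane state's blending[]
 * array.
 */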
static unsigned int tegra_plane_get_overlap_index(struct tegra_plane *plane,
						  struct tegra_plane *other)
{
	unsigned int index = 0, i;

	WARN_ON(plane == other);

	for (i = 0; i < 3; i++) {
		if (i == plane->index)
			continue;

		if (i == other->index)
			break;

		index++;
	}

	return index;
}

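/*
 * For every other plane in the commit that shares this plane's CRTC, record
 * in the blending state whether that window uses a format with alpha and
 * whether it is stacked above this plane.
 */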
static void tegra_plane_update_transparency(struct tegra_plane *tegra,
					    struct tegra_plane_state *state)
{
	struct drm_plane_state *new;
	struct drm_plane *plane;
	unsigned int i;

	for_each_new_plane_in_state(state->base.state, plane, new, i) {
		struct tegra_plane *p = to_tegra_plane(plane);
		unsigned int index;

		/* skip this plane and planes on different CRTCs */
		if (p == tegra || p->dc != tegra->dc)
			continue;

		index = tegra_plane_get_overlap_index(tegra, p);

		if (new->fb && __drm_format_has_alpha(new->fb->format->format))
			state->blending[index].alpha = true;
		else
			state->blending[index].alpha = false;

		if (new->normalized_zpos > state->base.normalized_zpos)
			state->blending[index].top = true;
		else
			state->blending[index].top = false;

		/*
		 * A missing framebuffer means the plane is disabled. In that
		 * case mark the B / C window as being on top so that the
		 * window index order can still be distinguished with respect
		 * to the z-position when programming the middle window's
		 * X / Y registers.
		 */
		if (!new->fb)
			state->blending[index].top = (index == 1);
	}
}

static int tegra_plane_setup_transparency(struct tegra_plane *tegra,
					  struct tegra_plane_state *state)
{
	struct tegra_plane_state *tegra_state;
	struct drm_plane_state *new;
	struct drm_plane *plane;
	int err;

	/*
	 * If the plane's zpos or transparency changed, the blending state of
	 * its sibling planes may need adjusting, in which case they are
	 * pulled into this atomic commit; otherwise the blending state is
	 * left unchanged.
	 */
	err = tegra_plane_check_transparency(tegra, state);
	if (err <= 0)
		return err;

	/*
	 * All planes are now part of the atomic state; walk over them and
	 * update the transparency state of each one.
	 */
	drm_for_each_plane(plane, tegra->base.dev) {
		struct tegra_plane *p = to_tegra_plane(plane);

		/* skip planes on different CRTCs */
		if (p->dc != tegra->dc)
			continue;

		new = drm_atomic_get_new_plane_state(state->base.state, plane);
		tegra_state = to_tegra_plane_state(new);

		/*
		 * There is no need to update the blending state of a
		 * disabled plane.
		 */
		if (new->fb)
			tegra_plane_update_transparency(p, tegra_state);
	}

	return 0;
}

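/*
 * Legacy (Tegra20/Tegra30) window setup: emulate opaque formats through
 * their alpha-capable equivalents and recompute blending against any
 * overlapping windows.
 */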
int tegra_plane_setup_legacy_state(struct tegra_plane *tegra,
				   struct tegra_plane_state *state)
{
	int err;

	err = tegra_plane_setup_opacity(tegra, state);
	if (err < 0)
		return err;

	err = tegra_plane_setup_transparency(tegra, state);
	if (err < 0)
		return err;

	return 0;
}