// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 Free Electrons
 * Copyright (C) 2015 NextThing Co
 *
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/component.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>

#include "sun4i_backend.h"
#include "sun4i_drv.h"
#include "sun4i_frontend.h"
#include "sun4i_layer.h"
#include "sunxi_engine.h"

struct sun4i_backend_quirks {
	/* backend <-> TCON muxing selection done in backend */
	bool needs_output_muxing;

	/* alpha at the lowest z position is not always supported */
	bool supports_lowest_plane_alpha;
};

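/*
 * RGB to YUV output color-correction coefficients, in the register layout
 * expected by the OCRCOEF registers written below.
 */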
static const u32 sunxi_rgb2yuv_coef[12] = {
	0x00000107, 0x00000204, 0x00000064, 0x00000108,
	0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808,
	0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808
};

static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
{
	int i;

	DRM_DEBUG_DRIVER("Applying RGB to YUV color correction\n");

	/* Set color correction */
	regmap_write(engine->regs, SUN4I_BACKEND_OCCTL_REG,
		     SUN4I_BACKEND_OCCTL_ENABLE);

	for (i = 0; i < 12; i++)
		regmap_write(engine->regs, SUN4I_BACKEND_OCRCOEF_REG(i),
			     sunxi_rgb2yuv_coef[i]);
}

static void sun4i_backend_disable_color_correction(struct sunxi_engine *engine)
{
	DRM_DEBUG_DRIVER("Disabling color correction\n");

	/* Disable color correction */
	regmap_update_bits(engine->regs, SUN4I_BACKEND_OCCTL_REG,
			   SUN4I_BACKEND_OCCTL_ENABLE, 0);
}

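/*
 * Ask the hardware to latch the new register values. The committed
 * configuration is fetched by the backend at the next vblank;
 * sun4i_backend_atomic_begin() polls the same LOADCTL bit to make sure a
 * pending commit has completed before new updates are written.
 */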
static void sun4i_backend_commit(struct sunxi_engine *engine)
{
	DRM_DEBUG_DRIVER("Committing changes\n");

	regmap_write(engine->regs, SUN4I_BACKEND_REGBUFFCTL_REG,
		     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS |
		     SUN4I_BACKEND_REGBUFFCTL_LOADCTL);
}

void sun4i_backend_layer_enable(struct sun4i_backend *backend,
				int layer, bool enable)
{
	u32 val;

	DRM_DEBUG_DRIVER("%sabling layer %d\n", enable ? "En" : "Dis",
			 layer);

	if (enable)
		val = SUN4I_BACKEND_MODCTL_LAY_EN(layer);
	else
		val = 0;

	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
			   SUN4I_BACKEND_MODCTL_LAY_EN(layer), val);
}

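/*
 * Translate a DRM fourcc into the value expected by the layer's FBFMT
 * field. Returns -EINVAL for formats the backend cannot scan out directly.
 */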
static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
{
	switch (format) {
	case DRM_FORMAT_ARGB8888:
		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
		break;

	case DRM_FORMAT_ARGB4444:
		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB4444;
		break;

	case DRM_FORMAT_ARGB1555:
		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB1555;
		break;

	case DRM_FORMAT_RGBA5551:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGBA5551;
		break;

	case DRM_FORMAT_RGBA4444:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGBA4444;
		break;

	case DRM_FORMAT_XRGB8888:
		*mode = SUN4I_BACKEND_LAY_FBFMT_XRGB8888;
		break;

	case DRM_FORMAT_RGB888:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGB888;
		break;

	case DRM_FORMAT_RGB565:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGB565;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static const uint32_t sun4i_backend_formats[] = {
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_RGBA4444,
	DRM_FORMAT_RGBA5551,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
};

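/* The backend can only scan out linear buffers in one of the formats above. */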
bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier)
{
	unsigned int i;

	if (modifier != DRM_FORMAT_MOD_LINEAR)
		return false;

	for (i = 0; i < ARRAY_SIZE(sun4i_backend_formats); i++)
		if (sun4i_backend_formats[i] == fmt)
			return true;

	return false;
}

int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
				     int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;

	DRM_DEBUG_DRIVER("Updating layer %d\n", layer);

	if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
		DRM_DEBUG_DRIVER("Primary layer, updating global size W: %u H: %u\n",
				 state->crtc_w, state->crtc_h);
		regmap_write(backend->engine.regs, SUN4I_BACKEND_DISSIZE_REG,
			     SUN4I_BACKEND_DISSIZE(state->crtc_w,
						   state->crtc_h));
	}

	/* Set height and width */
	DRM_DEBUG_DRIVER("Layer size W: %u H: %u\n",
			 state->crtc_w, state->crtc_h);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYSIZE_REG(layer),
		     SUN4I_BACKEND_LAYSIZE(state->crtc_w,
					   state->crtc_h));

	/* Set base coordinates */
	DRM_DEBUG_DRIVER("Layer coordinates X: %d Y: %d\n",
			 state->crtc_x, state->crtc_y);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYCOOR_REG(layer),
		     SUN4I_BACKEND_LAYCOOR(state->crtc_x,
					   state->crtc_y));

	return 0;
}

static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
					   int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	const struct drm_format_info *format = fb->format;
	const uint32_t fmt = format->format;
	u32 val = SUN4I_BACKEND_IYUVCTL_EN;
	int i;

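	/* Load the BT.601 YUV to RGB coefficient table into the YGCOEF registers. */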
	for (i = 0; i < ARRAY_SIZE(sunxi_bt601_yuv2rgb_coef); i++)
		regmap_write(backend->engine.regs,
			     SUN4I_BACKEND_YGCOEF_REG(i),
			     sunxi_bt601_yuv2rgb_coef[i]);

	/*
	 * We should do that only for a single plane, but the
	 * framebuffer's atomic_check has our back on this.
	 */
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN);

	/* TODO: Add support for the multi-planar YUV formats */
	if (drm_format_info_is_yuv_packed(format) &&
	    drm_format_info_is_yuv_sampling_422(format))
		val |= SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV422;
	else
		DRM_DEBUG_DRIVER("Unsupported YUV format (0x%x)\n", fmt);

	/*
	 * Allwinner seems to list the pixel sequence from right to left, while
	 * DRM lists it from left to right.
	 */
	switch (fmt) {
	case DRM_FORMAT_YUYV:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_VYUY;
		break;
	case DRM_FORMAT_YVYU:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_UYVY;
		break;
	case DRM_FORMAT_UYVY:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_YVYU;
		break;
	case DRM_FORMAT_VYUY:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_YUYV;
		break;
	default:
		DRM_DEBUG_DRIVER("Unsupported YUV pixel sequence (0x%x)\n",
				 fmt);
	}

	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVCTL_REG, val);

	return 0;
}

int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
				       int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	bool interlaced = false;
	u32 val;
	int ret;

	/* Clear the YUV mode */
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);

	if (plane->state->crtc)
		interlaced = plane->state->crtc->state->adjusted_mode.flags
			& DRM_MODE_FLAG_INTERLACE;

	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
			   SUN4I_BACKEND_MODCTL_ITLMOD_EN,
			   interlaced ? SUN4I_BACKEND_MODCTL_ITLMOD_EN : 0);

	DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
			 interlaced ? "on" : "off");

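	/*
	 * Program the layer's global alpha from the plane state, and only
	 * enable it when the plane isn't fully opaque.
	 */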
	val = SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA(state->alpha >> 8);
	if (state->alpha != DRM_BLEND_ALPHA_OPAQUE)
		val |= SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN;
	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_MASK |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN,
			   val);

	if (fb->format->is_yuv)
		return sun4i_backend_update_yuv_format(backend, layer, plane);

	ret = sun4i_backend_drm_format_to_layer(fb->format->format, &val);
	if (ret) {
		DRM_DEBUG_DRIVER("Invalid format\n");
		return ret;
	}

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG1(layer),
			   SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);

	return 0;
}

int sun4i_backend_update_layer_frontend(struct sun4i_backend *backend,
					int layer, uint32_t fmt)
{
	u32 val;
	int ret;

	ret = sun4i_backend_drm_format_to_layer(fmt, &val);
	if (ret) {
		DRM_DEBUG_DRIVER("Invalid format\n");
		return ret;
	}

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN);

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG1(layer),
			   SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);

	return 0;
}

static int sun4i_backend_update_yuv_buffer(struct sun4i_backend *backend,
					   struct drm_framebuffer *fb,
					   dma_addr_t paddr)
{
	/* TODO: Add support for the multi-planar YUV formats */
	DRM_DEBUG_DRIVER("Setting packed YUV buffer address to %pad\n", &paddr);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVADD_REG(0), paddr);

	DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVLINEWIDTH_REG(0),
		     fb->pitches[0] * 8);

	return 0;
}

int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
				      int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	u32 lo_paddr, hi_paddr;
	dma_addr_t paddr;

	/* Set the line width */
	DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
	regmap_write(backend->engine.regs,
		     SUN4I_BACKEND_LAYLINEWIDTH_REG(layer),
		     fb->pitches[0] * 8);

	/* Get the start of the displayed memory */
	paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
	DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);

	if (fb->format->is_yuv)
		return sun4i_backend_update_yuv_buffer(backend, fb, paddr);

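	/*
	 * The hardware expects the framebuffer address in bits rather than
	 * bytes, split across two registers: the 32 low bits of (paddr << 3)
	 * go into LAYFB_L32ADD, the remaining high bits into this layer's
	 * field of LAYFB_H4ADD.
	 */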
	/* Write the 32 lower bits of the address (in bits) */
	lo_paddr = paddr << 3;
	DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
	regmap_write(backend->engine.regs,
		     SUN4I_BACKEND_LAYFB_L32ADD_REG(layer),
		     lo_paddr);

	/* And the upper bits */
	hi_paddr = paddr >> 29;
	DRM_DEBUG_DRIVER("Setting address high bits to 0x%x\n", hi_paddr);
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_LAYFB_H4ADD_REG,
			   SUN4I_BACKEND_LAYFB_H4ADD_MSK(layer),
			   SUN4I_BACKEND_LAYFB_H4ADD(layer, hi_paddr));

	return 0;
}

int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend, int layer,
				    struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct sun4i_layer_state *p_state = state_to_sun4i_layer_state(state);
	unsigned int priority = state->normalized_zpos;
	unsigned int pipe = p_state->pipe;

	DRM_DEBUG_DRIVER("Setting layer %d's priority to %d and pipe %d\n",
			 layer, priority, pipe);
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(p_state->pipe) |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(priority));

	return 0;
}

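/*
 * Clear the layer's frontend (VDOEN) and YUV (YUVEN) input selection;
 * sun4i_backend_update_layer_formats() and
 * sun4i_backend_update_layer_frontend() will set them again as needed.
 */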
void sun4i_backend_cleanup_layer(struct sun4i_backend *backend,
				 int layer)
{
	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);
}

static bool sun4i_backend_plane_uses_scaler(struct drm_plane_state *state)
{
	u16 src_h = state->src_h >> 16;
	u16 src_w = state->src_w >> 16;

	DRM_DEBUG_DRIVER("Input size %dx%d, output size %dx%d\n",
			 src_w, src_h, state->crtc_w, state->crtc_h);

	if ((state->crtc_h != src_h) || (state->crtc_w != src_w))
		return true;

	return false;
}

static bool sun4i_backend_plane_uses_frontend(struct drm_plane_state *state)
{
	struct sun4i_layer *layer = plane_to_sun4i_layer(state->plane);
	struct sun4i_backend *backend = layer->backend;
	uint32_t format = state->fb->format->format;
	uint64_t modifier = state->fb->modifier;

	if (IS_ERR(backend->frontend))
		return false;

	if (!sun4i_frontend_format_is_supported(format, modifier))
		return false;

	if (!sun4i_backend_format_is_supported(format, modifier))
		return true;

	/*
	 * TODO: The backend alone allows 2x and 4x integer scaling, including
	 * support for an alpha component (which the frontend doesn't support).
	 * Use the backend directly instead of the frontend in this case, with
	 * another test to return false.
	 */

	if (sun4i_backend_plane_uses_scaler(state))
		return true;

	/*
	 * Here the format is supported by both the frontend and the backend
	 * and no frontend scaling is required, so use the backend directly.
	 */
	return false;
}

static bool sun4i_backend_plane_is_supported(struct drm_plane_state *state,
					     bool *uses_frontend)
{
	if (sun4i_backend_plane_uses_frontend(state)) {
		*uses_frontend = true;
		return true;
	}

	*uses_frontend = false;

	/* Scaling is not supported without the frontend. */
	if (sun4i_backend_plane_uses_scaler(state))
		return false;

	return true;
}

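/*
 * Wait for any pending register load to complete (LOADCTL cleared) before
 * the new state is written, so we never modify registers that are still
 * being latched by the hardware.
 */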
static void sun4i_backend_atomic_begin(struct sunxi_engine *engine,
				       struct drm_crtc_state *old_state)
{
	u32 val;

	WARN_ON(regmap_read_poll_timeout(engine->regs,
					 SUN4I_BACKEND_REGBUFFCTL_REG,
					 val, !(val & SUN4I_BACKEND_REGBUFFCTL_LOADCTL),
					 100, 50000));
}

static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
				      struct drm_crtc_state *crtc_state)
{
	struct drm_plane_state *plane_states[SUN4I_BACKEND_NUM_LAYERS] = { 0 };
	struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_device *drm = state->dev;
	struct drm_plane *plane;
	unsigned int num_planes = 0;
	unsigned int num_alpha_planes = 0;
	unsigned int num_frontend_planes = 0;
	unsigned int num_alpha_planes_max = 1;
	unsigned int num_yuv_planes = 0;
	unsigned int current_pipe = 0;
	unsigned int i;

	DRM_DEBUG_DRIVER("Starting to check our planes\n");

	if (!crtc_state->planes_changed)
		return 0;

	drm_for_each_plane_mask(plane, drm, crtc_state->plane_mask) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);
		struct sun4i_layer_state *layer_state =
			state_to_sun4i_layer_state(plane_state);
		struct drm_framebuffer *fb = plane_state->fb;

		if (!sun4i_backend_plane_is_supported(plane_state,
						      &layer_state->uses_frontend))
			return -EINVAL;

		if (layer_state->uses_frontend) {
			DRM_DEBUG_DRIVER("Using the frontend for plane %d\n",
					 plane->index);
			num_frontend_planes++;
		} else {
			if (fb->format->is_yuv) {
				DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
				num_yuv_planes++;
			}
		}

		DRM_DEBUG_DRIVER("Plane FB format is %p4cc\n",
				 &fb->format->format);
		if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
			num_alpha_planes++;

		DRM_DEBUG_DRIVER("Plane zpos is %d\n",
				 plane_state->normalized_zpos);

		/* Sort our planes by Zpos */
		plane_states[plane_state->normalized_zpos] = plane_state;

		num_planes++;
	}

	/* All our planes were disabled, bail out */
	if (!num_planes)
		return 0;

	/*
	 * The hardware is a bit unusual here.
	 *
	 * Even though it supports 4 layers, it does the composition
	 * in two separate steps.
	 *
	 * The first one is assigning a layer to one of its two
	 * pipes. If more than one layer is assigned to the same pipe
	 * and their pixels overlap, the pipe will take the pixel from
	 * the layer with the highest priority.
	 *
	 * The second step is the actual alpha blending, which takes
	 * the two pipes as input and uses the potential alpha
	 * component to do the transparency between the two.
	 *
	 * This two-step scenario makes us unable to guarantee a
	 * robust alpha blending between the 4 layers in all
	 * situations, since this means that we need to have one layer
	 * with alpha at the lowest position of our two pipes.
	 *
	 * However, we cannot even do that on every platform, since
	 * the hardware has a bug where the lowest plane of the lowest
	 * pipe (pipe 0, priority 0), if it has any alpha, will
	 * discard the pixel data entirely and just display the pixels
	 * in the background color (black by default).
	 *
	 * This means that on the affected platforms, we effectively
	 * have only three valid configurations with alpha, all of
	 * them with the alpha being on pipe 1 with the lowest
	 * position, which can be 1, 2 or 3 depending on the number of
	 * planes and their zpos.
	 */

	/* For platforms that are not affected by the issue described above. */
	if (backend->quirks->supports_lowest_plane_alpha)
		num_alpha_planes_max++;

	if (num_alpha_planes > num_alpha_planes_max) {
		DRM_DEBUG_DRIVER("Too many planes with alpha, rejecting...\n");
		return -EINVAL;
	}

	/* We can't have an alpha plane at the lowest position */
	if (!backend->quirks->supports_lowest_plane_alpha &&
	    (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
		return -EINVAL;

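	/*
	 * Assign planes to pipes in zpos order: the first plane with alpha
	 * (and every plane above it) is pushed to the next pipe, so the
	 * alpha blending happens between the two pipes.
	 */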
	for (i = 1; i < num_planes; i++) {
		struct drm_plane_state *p_state = plane_states[i];
		struct drm_framebuffer *fb = p_state->fb;
		struct sun4i_layer_state *s_state = state_to_sun4i_layer_state(p_state);

		/*
		 * The only alpha position is the lowest plane of the
		 * second pipe.
		 */
		if (fb->format->has_alpha || (p_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
			current_pipe++;

		s_state->pipe = current_pipe;
	}

	/* We can only have a single YUV plane at a time */
	if (num_yuv_planes > SUN4I_BACKEND_NUM_YUV_PLANES) {
		DRM_DEBUG_DRIVER("Too many planes with YUV, rejecting...\n");
		return -EINVAL;
	}

	if (num_frontend_planes > SUN4I_BACKEND_NUM_FRONTEND_LAYERS) {
		DRM_DEBUG_DRIVER("Too many planes going through the frontend, rejecting\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("State valid with %u planes, %u alpha, %u video, %u YUV\n",
			 num_planes, num_alpha_planes, num_frontend_planes,
			 num_yuv_planes);

	return 0;
}

static void sun4i_backend_vblank_quirk(struct sunxi_engine *engine)
{
	struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
	struct sun4i_frontend *frontend = backend->frontend;

	if (!frontend)
		return;

	/*
	 * In a teardown scenario with the frontend involved, we have
	 * to keep the frontend enabled until the next vblank, and
	 * only then disable it.
	 *
	 * This is due to the fact that the backend will not take into
	 * account the new configuration (with the plane that used to
	 * be fed by the frontend now disabled) until we write to the
	 * commit bit and the hardware fetches the new configuration
	 * during the next vblank.
	 *
	 * So we keep the frontend around in order to prevent any
	 * visual artifacts.
	 */
	spin_lock(&backend->frontend_lock);
	if (backend->frontend_teardown) {
		sun4i_frontend_exit(frontend);
		backend->frontend_teardown = false;
	}
	spin_unlock(&backend->frontend_lock);
}

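/*
 * The A33 backend comes with an extra SAT block that has its own reset
 * line and clock; bring both up here and tear them down again in
 * sun4i_backend_free_sat().
 */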
static int sun4i_backend_init_sat(struct device *dev)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);
	int ret;

	backend->sat_reset = devm_reset_control_get(dev, "sat");
	if (IS_ERR(backend->sat_reset)) {
		dev_err(dev, "Couldn't get the SAT reset line\n");
		return PTR_ERR(backend->sat_reset);
	}

	ret = reset_control_deassert(backend->sat_reset);
	if (ret) {
		dev_err(dev, "Couldn't deassert the SAT reset line\n");
		return ret;
	}

	backend->sat_clk = devm_clk_get(dev, "sat");
	if (IS_ERR(backend->sat_clk)) {
		dev_err(dev, "Couldn't get our SAT clock\n");
		ret = PTR_ERR(backend->sat_clk);
		goto err_assert_reset;
	}

	ret = clk_prepare_enable(backend->sat_clk);
	if (ret) {
		dev_err(dev, "Couldn't enable the SAT clock\n");
		goto err_assert_reset;
	}

	return 0;

err_assert_reset:
	reset_control_assert(backend->sat_reset);
	return ret;
}

static int sun4i_backend_free_sat(struct device *dev)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);

	clk_disable_unprepare(backend->sat_clk);
	reset_control_assert(backend->sat_reset);

	return 0;
}

/*
 * The display backend can take video output from the display frontend, or
 * the display enhancement unit on the A80, as input for one of its layers.
 * This relationship within the display pipeline is encoded in the device
 * tree with of_graph, and we use it here to figure out which backend, if
 * there are 2 or more, we are currently probing. The number would be in
 * the "reg" property of the upstream output port endpoint.
 */
static int sun4i_backend_of_get_id(struct device_node *node)
{
	struct device_node *ep, *remote;
	struct of_endpoint of_ep;

	/* Input port is 0, and we want the first endpoint. */
	ep = of_graph_get_endpoint_by_regs(node, 0, -1);
	if (!ep)
		return -EINVAL;

	remote = of_graph_get_remote_endpoint(ep);
	of_node_put(ep);
	if (!remote)
		return -EINVAL;

	of_graph_parse_endpoint(remote, &of_ep);
	of_node_put(remote);
	return of_ep.id;
}

/* TODO: This needs to take multiple pipelines into account */
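/*
 * Walk the endpoints of the backend's input port (port 0) and match each
 * remote parent node against the frontends already registered with the
 * driver; the first match is the frontend feeding this backend.
 */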
static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
							  struct device_node *node)
{
	struct device_node *port, *ep, *remote;
	struct sun4i_frontend *frontend;

	port = of_graph_get_port_by_id(node, 0);
	if (!port)
		return ERR_PTR(-EINVAL);

	for_each_available_child_of_node(port, ep) {
		remote = of_graph_get_remote_port_parent(ep);
		if (!remote)
			continue;
		of_node_put(remote);

		/* does this node match any registered engines? */
		list_for_each_entry(frontend, &drv->frontend_list, list) {
			if (remote == frontend->node) {
				of_node_put(port);
				of_node_put(ep);
				return frontend;
			}
		}
	}
	of_node_put(port);
	return ERR_PTR(-EINVAL);
}

static const struct sunxi_engine_ops sun4i_backend_engine_ops = {
	.atomic_begin			= sun4i_backend_atomic_begin,
	.atomic_check			= sun4i_backend_atomic_check,
	.commit				= sun4i_backend_commit,
	.layers_init			= sun4i_layers_init,
	.apply_color_correction		= sun4i_backend_apply_color_correction,
	.disable_color_correction	= sun4i_backend_disable_color_correction,
	.vblank_quirk			= sun4i_backend_vblank_quirk,
};

static const struct regmap_config sun4i_backend_regmap_config = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
	.max_register	= 0x5800,
};

static int sun4i_backend_bind(struct device *dev, struct device *master,
			      void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = data;
	struct sun4i_drv *drv = drm->dev_private;
	struct sun4i_backend *backend;
	const struct sun4i_backend_quirks *quirks;
	struct resource *res;
	void __iomem *regs;
	int i, ret;

	backend = devm_kzalloc(dev, sizeof(*backend), GFP_KERNEL);
	if (!backend)
		return -ENOMEM;
	dev_set_drvdata(dev, backend);
	spin_lock_init(&backend->frontend_lock);

	if (of_find_property(dev->of_node, "interconnects", NULL)) {
		/*
		 * This assumes we have the same DMA constraints for all the
		 * devices in our pipeline (all the backends, but also the
		 * frontends). This sounds bad, but it has always been the case
		 * for us, and DRM doesn't do per-device allocation either, so
		 * we would need to fix DRM first...
		 */
		ret = of_dma_configure(drm->dev, dev->of_node, true);
		if (ret)
			return ret;
	}

	backend->engine.node = dev->of_node;
	backend->engine.ops = &sun4i_backend_engine_ops;
	backend->engine.id = sun4i_backend_of_get_id(dev->of_node);
	if (backend->engine.id < 0)
		return backend->engine.id;

	backend->frontend = sun4i_backend_find_frontend(drv, dev->of_node);
	if (IS_ERR(backend->frontend))
		dev_warn(dev, "Couldn't find matching frontend, frontend features disabled\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	backend->reset = devm_reset_control_get(dev, NULL);
	if (IS_ERR(backend->reset)) {
		dev_err(dev, "Couldn't get our reset line\n");
		return PTR_ERR(backend->reset);
	}

	ret = reset_control_deassert(backend->reset);
	if (ret) {
		dev_err(dev, "Couldn't deassert our reset line\n");
		return ret;
	}

	backend->bus_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(backend->bus_clk)) {
		dev_err(dev, "Couldn't get the backend bus clock\n");
		ret = PTR_ERR(backend->bus_clk);
		goto err_assert_reset;
	}
	clk_prepare_enable(backend->bus_clk);

	backend->mod_clk = devm_clk_get(dev, "mod");
	if (IS_ERR(backend->mod_clk)) {
		dev_err(dev, "Couldn't get the backend module clock\n");
		ret = PTR_ERR(backend->mod_clk);
		goto err_disable_bus_clk;
	}

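	/*
	 * Run the module clock at 300 MHz and claim the rate exclusively so
	 * that no other clock consumer can change it behind our back.
	 */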
	ret = clk_set_rate_exclusive(backend->mod_clk, 300000000);
	if (ret) {
		dev_err(dev, "Couldn't set the module clock frequency\n");
		goto err_disable_bus_clk;
	}

	clk_prepare_enable(backend->mod_clk);

	backend->ram_clk = devm_clk_get(dev, "ram");
	if (IS_ERR(backend->ram_clk)) {
		dev_err(dev, "Couldn't get the backend RAM clock\n");
		ret = PTR_ERR(backend->ram_clk);
		goto err_disable_mod_clk;
	}
	clk_prepare_enable(backend->ram_clk);

	if (of_device_is_compatible(dev->of_node,
				    "allwinner,sun8i-a33-display-backend")) {
		ret = sun4i_backend_init_sat(dev);
		if (ret) {
			dev_err(dev, "Couldn't init SAT resources\n");
			goto err_disable_ram_clk;
		}
	}

	backend->engine.regs = devm_regmap_init_mmio(dev, regs,
						     &sun4i_backend_regmap_config);
	if (IS_ERR(backend->engine.regs)) {
		dev_err(dev, "Couldn't create the backend regmap\n");
		return PTR_ERR(backend->engine.regs);
	}

	list_add_tail(&backend->engine.list, &drv->engine_list);

	/*
	 * Many of the backend's layer configuration registers have
	 * undefined default values. This poses a risk as we use
	 * regmap_update_bits in some places, and don't overwrite
	 * the whole register.
	 *
	 * Clear the registers here to have something predictable.
	 */
	for (i = 0x800; i < 0x1000; i += 4)
		regmap_write(backend->engine.regs, i, 0);

	/* Disable registers autoloading */
	regmap_write(backend->engine.regs, SUN4I_BACKEND_REGBUFFCTL_REG,
		     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS);

	/* Enable the backend */
	regmap_write(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
		     SUN4I_BACKEND_MODCTL_DEBE_EN |
		     SUN4I_BACKEND_MODCTL_START_CTL);

	/* Set output selection if needed */
	quirks = of_device_get_match_data(dev);
	if (quirks->needs_output_muxing) {
		/*
		 * We assume there is no dynamic muxing of backends
		 * and TCONs, so we select the backend with same ID.
		 *
		 * While dynamic selection might be interesting, since
		 * the CRTC is tied to the TCON, while the layers are
		 * tied to the backends, this means, we will need to
		 * switch between groups of layers. There might not be
		 * a way to represent this constraint in DRM.
		 */
		regmap_update_bits(backend->engine.regs,
				   SUN4I_BACKEND_MODCTL_REG,
				   SUN4I_BACKEND_MODCTL_OUT_SEL,
				   (backend->engine.id
				    ? SUN4I_BACKEND_MODCTL_OUT_LCD1
				    : SUN4I_BACKEND_MODCTL_OUT_LCD0));
	}

	backend->quirks = quirks;

	return 0;

err_disable_ram_clk:
	clk_disable_unprepare(backend->ram_clk);
err_disable_mod_clk:
	clk_rate_exclusive_put(backend->mod_clk);
	clk_disable_unprepare(backend->mod_clk);
err_disable_bus_clk:
	clk_disable_unprepare(backend->bus_clk);
err_assert_reset:
	reset_control_assert(backend->reset);
	return ret;
}

static void sun4i_backend_unbind(struct device *dev, struct device *master,
				 void *data)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);

	list_del(&backend->engine.list);

	if (of_device_is_compatible(dev->of_node,
				    "allwinner,sun8i-a33-display-backend"))
		sun4i_backend_free_sat(dev);

	clk_disable_unprepare(backend->ram_clk);
	clk_rate_exclusive_put(backend->mod_clk);
	clk_disable_unprepare(backend->mod_clk);
	clk_disable_unprepare(backend->bus_clk);
	reset_control_assert(backend->reset);
}

static const struct component_ops sun4i_backend_ops = {
	.bind	= sun4i_backend_bind,
	.unbind	= sun4i_backend_unbind,
};

static int sun4i_backend_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &sun4i_backend_ops);
}

static int sun4i_backend_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &sun4i_backend_ops);

	return 0;
}

static const struct sun4i_backend_quirks sun4i_backend_quirks = {
	.needs_output_muxing = true,
};

static const struct sun4i_backend_quirks sun5i_backend_quirks = {
};

static const struct sun4i_backend_quirks sun6i_backend_quirks = {
};

static const struct sun4i_backend_quirks sun7i_backend_quirks = {
	.needs_output_muxing = true,
};

static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
	.supports_lowest_plane_alpha = true,
};

static const struct sun4i_backend_quirks sun9i_backend_quirks = {
};

static const struct of_device_id sun4i_backend_of_table[] = {
	{
		.compatible = "allwinner,sun4i-a10-display-backend",
		.data = &sun4i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun5i-a13-display-backend",
		.data = &sun5i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun6i-a31-display-backend",
		.data = &sun6i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun7i-a20-display-backend",
		.data = &sun7i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun8i-a23-display-backend",
		.data = &sun8i_a33_backend_quirks,
	},
	{
		.compatible = "allwinner,sun8i-a33-display-backend",
		.data = &sun8i_a33_backend_quirks,
	},
	{
		.compatible = "allwinner,sun9i-a80-display-backend",
		.data = &sun9i_backend_quirks,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);

static struct platform_driver sun4i_backend_platform_driver = {
	.probe		= sun4i_backend_probe,
	.remove		= sun4i_backend_remove,
	.driver		= {
		.name		= "sun4i-backend",
		.of_match_table	= sun4i_backend_of_table,
	},
};
module_platform_driver(sun4i_backend_platform_driver);

MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("Allwinner A10 Display Backend Driver");
MODULE_LICENSE("GPL");