// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>

#include "mdp5_kms.h"

struct mdp5_plane {
	struct drm_plane base;

	uint32_t nformats;
	uint32_t formats[32];
};
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)

static int mdp5_plane_mode_set(struct drm_plane *plane,
		struct drm_crtc *crtc, struct drm_framebuffer *fb,
		struct drm_rect *src, struct drm_rect *dest);

static struct mdp5_kms *get_kms(struct drm_plane *plane)
{
	struct msm_drm_private *priv = plane->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static bool plane_enabled(struct drm_plane_state *state)
{
	return state->visible;
}

static void mdp5_plane_destroy(struct drm_plane *plane)
{
	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);

	drm_plane_cleanup(plane);

	kfree(mdp5_plane);
}

/* helper to install properties which are common to planes and crtcs */
static void mdp5_plane_install_properties(struct drm_plane *plane,
		struct drm_mode_object *obj)
{
	drm_plane_create_rotation_property(plane,
					   DRM_MODE_ROTATE_0,
					   DRM_MODE_ROTATE_0 |
					   DRM_MODE_ROTATE_180 |
					   DRM_MODE_REFLECT_X |
					   DRM_MODE_REFLECT_Y);
	drm_plane_create_alpha_property(plane);
	drm_plane_create_blend_mode_property(plane,
			BIT(DRM_MODE_BLEND_PIXEL_NONE) |
			BIT(DRM_MODE_BLEND_PREMULTI) |
			BIT(DRM_MODE_BLEND_COVERAGE));
	drm_plane_create_zpos_property(plane, 1, 1, 255);
}

static void
mdp5_plane_atomic_print_state(struct drm_printer *p,
		const struct drm_plane_state *state)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
	struct mdp5_kms *mdp5_kms = get_kms(state->plane);

	drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
			pstate->hwpipe->name : "(null)");
	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
		drm_printf(p, "\tright-hwpipe=%s\n",
			   pstate->r_hwpipe ? pstate->r_hwpipe->name :
					      "(null)");
	drm_printf(p, "\tblend_mode=%u\n", pstate->base.pixel_blend_mode);
	drm_printf(p, "\tzpos=%u\n", pstate->base.zpos);
	drm_printf(p, "\tnormalized_zpos=%u\n", pstate->base.normalized_zpos);
	drm_printf(p, "\talpha=%u\n", pstate->base.alpha);
	drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
}

static void mdp5_plane_reset(struct drm_plane *plane)
{
	struct mdp5_plane_state *mdp5_state;

	if (plane->state)
		__drm_atomic_helper_plane_destroy_state(plane->state);

	kfree(to_mdp5_plane_state(plane->state));
	mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
	if (!mdp5_state)
		return;

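	/*
	 * Default stacking: the primary plane starts at the base blend
	 * stage, while other planes get a default zpos derived from their
	 * plane index.
	 */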
	if (plane->type == DRM_PLANE_TYPE_PRIMARY)
		mdp5_state->base.zpos = STAGE_BASE;
	else
		mdp5_state->base.zpos = STAGE0 + drm_plane_index(plane);
	mdp5_state->base.normalized_zpos = mdp5_state->base.zpos;

	__drm_atomic_helper_plane_reset(plane, &mdp5_state->base);
}

static struct drm_plane_state *
mdp5_plane_duplicate_state(struct drm_plane *plane)
{
	struct mdp5_plane_state *mdp5_state;

	if (WARN_ON(!plane->state))
		return NULL;

	mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
			sizeof(*mdp5_state), GFP_KERNEL);
	if (!mdp5_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);

	return &mdp5_state->base;
}

static void mdp5_plane_destroy_state(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);

	if (state->fb)
		drm_framebuffer_put(state->fb);

	kfree(pstate);
}

static const struct drm_plane_funcs mdp5_plane_funcs = {
		.update_plane = drm_atomic_helper_update_plane,
		.disable_plane = drm_atomic_helper_disable_plane,
		.destroy = mdp5_plane_destroy,
		.reset = mdp5_plane_reset,
		.atomic_duplicate_state = mdp5_plane_duplicate_state,
		.atomic_destroy_state = mdp5_plane_destroy_state,
		.atomic_print_state = mdp5_plane_atomic_print_state,
};

static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
				  struct drm_plane_state *old_state)
{
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	struct msm_kms *kms = &mdp5_kms->base.base;
	struct drm_framebuffer *fb = old_state->fb;

	if (!fb)
		return;

	DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
	msm_framebuffer_cleanup(fb, kms->aspace);
}

static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
					      struct drm_plane_state *state)
{
	struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
	struct drm_plane *plane = state->plane;
	struct drm_plane_state *old_state = plane->state;
	struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
	bool new_hwpipe = false;
	bool need_right_hwpipe = false;
	uint32_t max_width, max_height;
	bool out_of_bounds = false;
	uint32_t caps = 0;
	int min_scale, max_scale;
	int ret;

	DBG("%s: check (%d -> %d)", plane->name,
			plane_enabled(old_state), plane_enabled(state));

	max_width = config->hw->lm.max_width << 16;
	max_height = config->hw->lm.max_height << 16;

	/* Make sure source dimensions are within bounds. */
	if (state->src_h > max_height)
		out_of_bounds = true;

	if (state->src_w > max_width) {
		/* If source split is supported, we can go up to 2x
		 * the max LM width, but we'd need to stage another
		 * hwpipe to the right LM. So, the drm_plane would
		 * consist of 2 hwpipes.
		 */
		if (config->hw->mdp.caps & MDP_CAP_SRC_SPLIT &&
		    (state->src_w <= 2 * max_width))
			need_right_hwpipe = true;
		else
			out_of_bounds = true;
	}
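	/*
	 * Example (assuming a layer-mixer limit of 2048 pixels): a source
	 * wider than 2048 but no wider than 4096 can be staged across two
	 * hwpipes when MDP_CAP_SRC_SPLIT is set; anything wider is rejected
	 * below. Note that src_w/src_h and the limits above are compared in
	 * 16.16 fixed point.
	 */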

	if (out_of_bounds) {
		struct drm_rect src = drm_plane_state_src(state);
		DBG("Invalid source size "DRM_RECT_FP_FMT,
				DRM_RECT_FP_ARG(&src));
		return -ERANGE;
	}

	min_scale = FRAC_16_16(1, 8);
	max_scale = FRAC_16_16(8, 1);

	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  min_scale, max_scale,
						  true, true);
	if (ret)
		return ret;

	if (plane_enabled(state)) {
		unsigned int rotation;
		const struct mdp_format *format;
		struct mdp5_kms *mdp5_kms = get_kms(plane);
		uint32_t blkcfg = 0;

		format = to_mdp_format(msm_framebuffer_format(state->fb));
		if (MDP_FORMAT_IS_YUV(format))
			caps |= MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;

		if (((state->src_w >> 16) != state->crtc_w) ||
				((state->src_h >> 16) != state->crtc_h))
			caps |= MDP_PIPE_CAP_SCALE;

		rotation = drm_rotation_simplify(state->rotation,
						 DRM_MODE_ROTATE_0 |
						 DRM_MODE_REFLECT_X |
						 DRM_MODE_REFLECT_Y);

		if (rotation & DRM_MODE_REFLECT_X)
			caps |= MDP_PIPE_CAP_HFLIP;

		if (rotation & DRM_MODE_REFLECT_Y)
			caps |= MDP_PIPE_CAP_VFLIP;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			caps |= MDP_PIPE_CAP_CURSOR;

		/* (re)allocate hw pipe if we don't have one or caps-mismatch: */
		if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
			new_hwpipe = true;

		/*
		 * (re)allocate hw pipes if we're either requesting 2 hw pipes,
		 * or switching from 2 hw pipes to 1 because the new src_w can
		 * now be handled by a single hw pipe.
		 */
		if ((need_right_hwpipe && !mdp5_state->r_hwpipe) ||
		    (!need_right_hwpipe && mdp5_state->r_hwpipe))
			new_hwpipe = true;

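		/*
		 * On SMP-based hardware, the number of shared memory blocks a
		 * pipe needs depends on the format and fetch width; if that
		 * requirement changes, force a hwpipe (re)assignment so the
		 * block reservation can be updated as well.
		 */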
		if (mdp5_kms->smp) {
			const struct mdp_format *format =
				to_mdp_format(msm_framebuffer_format(state->fb));

			blkcfg = mdp5_smp_calculate(mdp5_kms->smp, format,
					state->src_w >> 16, false);

			if (mdp5_state->hwpipe && (mdp5_state->hwpipe->blkcfg != blkcfg))
				new_hwpipe = true;
		}

		/* (re)assign hwpipe if needed, otherwise keep old one: */
		if (new_hwpipe) {
			/* TODO maybe we want to re-assign the hwpipe sometimes
			 * when we no longer need some caps, to make it
			 * available for other planes?
			 */
			struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
			struct mdp5_hw_pipe *old_right_hwpipe =
							  mdp5_state->r_hwpipe;
			struct mdp5_hw_pipe *new_hwpipe = NULL;
			struct mdp5_hw_pipe *new_right_hwpipe = NULL;

			ret = mdp5_pipe_assign(state->state, plane, caps,
					       blkcfg, &new_hwpipe,
					       need_right_hwpipe ?
					       &new_right_hwpipe : NULL);
			if (ret) {
				DBG("%s: failed to assign hwpipe(s)!",
				    plane->name);
				return ret;
			}

			mdp5_state->hwpipe = new_hwpipe;
			if (need_right_hwpipe)
				mdp5_state->r_hwpipe = new_right_hwpipe;
			else
				/*
				 * set it to NULL so that the driver knows we
				 * don't have a right hwpipe when committing a
				 * new state
				 */
				mdp5_state->r_hwpipe = NULL;

			mdp5_pipe_release(state->state, old_hwpipe);
			mdp5_pipe_release(state->state, old_right_hwpipe);
		}
	} else {
		mdp5_pipe_release(state->state, mdp5_state->hwpipe);
		mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
		mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
	}

	return 0;
}

static int mdp5_plane_atomic_check(struct drm_plane *plane,
				   struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
										 plane);
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;

	crtc = new_plane_state->crtc ? new_plane_state->crtc : old_plane_state->crtc;
	if (!crtc)
		return 0;

	crtc_state = drm_atomic_get_existing_crtc_state(state,
							crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	return mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state);
}

static void mdp5_plane_atomic_update(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);

	DBG("%s: update", plane->name);

	if (plane_enabled(new_state)) {
		int ret;

		ret = mdp5_plane_mode_set(plane,
				new_state->crtc, new_state->fb,
				&new_state->src, &new_state->dst);
		/* atomic_check should have ensured that this doesn't fail */
		WARN_ON(ret < 0);
	}
}

static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
					 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(new_plane_state);
	struct drm_crtc_state *crtc_state;
	int min_scale, max_scale;
	int ret;

	crtc_state = drm_atomic_get_existing_crtc_state(state,
							new_plane_state->crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	if (!crtc_state->active)
		return -EINVAL;

	/* don't use the fast path if we don't have a hwpipe allocated yet */
	if (!mdp5_state->hwpipe)
		return -EINVAL;

	/* only allow changing of position (crtc x/y or src x/y) in fast path */
	if (plane->state->crtc != new_plane_state->crtc ||
	    plane->state->src_w != new_plane_state->src_w ||
	    plane->state->src_h != new_plane_state->src_h ||
	    plane->state->crtc_w != new_plane_state->crtc_w ||
	    plane->state->crtc_h != new_plane_state->crtc_h ||
	    !plane->state->fb ||
	    plane->state->fb != new_plane_state->fb)
		return -EINVAL;

	min_scale = FRAC_16_16(1, 8);
	max_scale = FRAC_16_16(8, 1);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  min_scale, max_scale,
						  true, true);
	if (ret)
		return ret;

	/*
	 * If the visibility of the plane changes (i.e., if the cursor is
	 * clipped out completely), we can't take the async path: the plane
	 * needs to be staged/unstaged on the Layer Mixer(s) and its
	 * hwpipe(s) assigned/unassigned, so fall back to the full commit
	 * path for both of these reasons.
	 */
	if (new_plane_state->visible != plane->state->visible)
		return -EINVAL;

	return 0;
}

static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_framebuffer *old_fb = plane->state->fb;

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;

	if (plane_enabled(new_state)) {
		struct mdp5_ctl *ctl;
		struct mdp5_pipeline *pipeline =
					mdp5_crtc_get_pipeline(new_state->crtc);
		int ret;

		ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
				&new_state->src, &new_state->dst);
		WARN_ON(ret < 0);

		ctl = mdp5_crtc_get_ctl(new_state->crtc);

		mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane), true);
	}

	*to_mdp5_plane_state(plane->state) =
		*to_mdp5_plane_state(new_state);

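	/*
	 * The new state's contents (including the new fb reference) now live
	 * in plane->state; hand the old fb back through the async state so
	 * its reference is dropped when that state is destroyed.
	 */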
	new_state->fb = old_fb;
}

static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
		.prepare_fb = msm_atomic_prepare_fb,
		.cleanup_fb = mdp5_plane_cleanup_fb,
		.atomic_check = mdp5_plane_atomic_check,
		.atomic_update = mdp5_plane_atomic_update,
		.atomic_async_check = mdp5_plane_atomic_async_check,
		.atomic_async_update = mdp5_plane_atomic_async_update,
};

static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
			       enum mdp5_pipe pipe,
			       struct drm_framebuffer *fb)
{
	struct msm_kms *kms = &mdp5_kms->base.base;

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
			MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
			MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
			MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
			MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
			msm_framebuffer_iova(fb, kms->aspace, 0));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
			msm_framebuffer_iova(fb, kms->aspace, 1));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
			msm_framebuffer_iova(fb, kms->aspace, 2));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
			msm_framebuffer_iova(fb, kms->aspace, 3));
}

/* Note: mdp5_plane->pipe_lock must be locked */
static void csc_disable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe)
{
	uint32_t value = mdp5_read(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe)) &
			 ~MDP5_PIPE_OP_MODE_CSC_1_EN;

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), value);
}

/* Note: mdp5_plane->pipe_lock must be locked */
static void csc_enable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
		struct csc_cfg *csc)
{
	uint32_t  i, mode = 0; /* RGB, no CSC */
	uint32_t *matrix;

	if (unlikely(!csc))
		return;

	if ((csc->type == CSC_YUV2RGB) || (CSC_YUV2YUV == csc->type))
		mode |= MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(DATA_FORMAT_YUV);
	if ((csc->type == CSC_RGB2YUV) || (CSC_YUV2YUV == csc->type))
		mode |= MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(DATA_FORMAT_YUV);
	mode |= MDP5_PIPE_OP_MODE_CSC_1_EN;
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), mode);

	matrix = csc->matrix;
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(pipe),
			MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(matrix[0]) |
			MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(matrix[1]));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(pipe),
			MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(matrix[2]) |
			MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(matrix[3]));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(pipe),
			MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(matrix[4]) |
			MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(matrix[5]));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(pipe),
			MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(matrix[6]) |
			MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(matrix[7]));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(pipe),
			MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(matrix[8]));

	for (i = 0; i < ARRAY_SIZE(csc->pre_bias); i++) {
		uint32_t *pre_clamp = csc->pre_clamp;
		uint32_t *post_clamp = csc->post_clamp;

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_CLAMP(pipe, i),
			MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(pre_clamp[2*i+1]) |
			MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(pre_clamp[2*i]));

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_CLAMP(pipe, i),
			MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(post_clamp[2*i+1]) |
			MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(post_clamp[2*i]));

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_BIAS(pipe, i),
			MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(csc->pre_bias[i]));

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_BIAS(pipe, i),
			MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(csc->post_bias[i]));
	}
}

#define PHASE_STEP_SHIFT	21
#define DOWN_SCALE_RATIO_MAX	32	/* 2^(26-21) */

static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase)
{
	uint32_t unit;

	if (src == 0 || dst == 0)
		return -EINVAL;

	/*
	 * PHASE_STEP_X/Y is coded on 26 bits (25:0),
	 * where 2^21 represents the unity "1" in fixed-point hardware design.
	 * This leaves 5 bits for the integer part (downscale case):
	 *	-> maximum downscale ratio = 0b1_1111 = 31
	 */
	if (src > (dst * DOWN_SCALE_RATIO_MAX))
		return -EOVERFLOW;

	unit = 1 << PHASE_STEP_SHIFT;
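	/*
	 * Worked example: downscaling src = 3840 to dst = 1920 gives
	 * *out_phase = (1 << 21) * 3840 / 1920 = 2 << 21, i.e. "2.0" in the
	 * hardware's 5.21 fixed-point format.
	 */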
	*out_phase = mult_frac(unit, src, dst);

	return 0;
}

static int calc_scalex_steps(struct drm_plane *plane,
		uint32_t pixel_format, uint32_t src, uint32_t dest,
		uint32_t phasex_steps[COMP_MAX])
{
	const struct drm_format_info *info = drm_format_info(pixel_format);
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	struct device *dev = mdp5_kms->dev->dev;
	uint32_t phasex_step;
	int ret;

	ret = calc_phase_step(src, dest, &phasex_step);
	if (ret) {
		DRM_DEV_ERROR(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
		return ret;
	}

	phasex_steps[COMP_0]   = phasex_step;
	phasex_steps[COMP_3]   = phasex_step;
	phasex_steps[COMP_1_2] = phasex_step / info->hsub;

	return 0;
}

static int calc_scaley_steps(struct drm_plane *plane,
		uint32_t pixel_format, uint32_t src, uint32_t dest,
		uint32_t phasey_steps[COMP_MAX])
{
	const struct drm_format_info *info = drm_format_info(pixel_format);
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	struct device *dev = mdp5_kms->dev->dev;
	uint32_t phasey_step;
	int ret;

	ret = calc_phase_step(src, dest, &phasey_step);
	if (ret) {
		DRM_DEV_ERROR(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
		return ret;
	}

	phasey_steps[COMP_0]   = phasey_step;
	phasey_steps[COMP_3]   = phasey_step;
	phasey_steps[COMP_1_2] = phasey_step / info->vsub;

	return 0;
}

static uint32_t get_scale_config(const struct mdp_format *format,
		uint32_t src, uint32_t dst, bool horz)
{
	const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
	bool scaling = format->is_yuv ? true : (src != dst);
	uint32_t sub;
	uint32_t ya_filter, uv_filter;
	bool yuv = format->is_yuv;

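	/*
	 * For YUV formats the scaler is always enabled: the subsampled
	 * chroma planes must be up-sampled to the luma resolution even when
	 * the plane is displayed 1:1 (hence "scaling" above is forced true).
	 */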
	if (!scaling)
		return 0;

	if (yuv) {
		sub = horz ? info->hsub : info->vsub;
		uv_filter = ((src / sub) <= dst) ?
				   SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
	}
	ya_filter = (src <= dst) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;

	if (horz)
		return  MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
			MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(ya_filter) |
			MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(ya_filter) |
			COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter));
	else
		return  MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
			MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(ya_filter) |
			MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(ya_filter) |
			COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter));
}

static void calc_pixel_ext(const struct mdp_format *format,
		uint32_t src, uint32_t dst, uint32_t phase_step[2],
		int pix_ext_edge1[COMP_MAX], int pix_ext_edge2[COMP_MAX],
		bool horz)
{
	bool scaling = format->is_yuv ? true : (src != dst);
	int i;

	/*
	 * Note:
	 * We assume here that:
	 *     1. PCMN filter is used for downscale
	 *     2. bilinear filter is used for upscale
	 *     3. we are in a single pipe configuration
	 */

	for (i = 0; i < COMP_MAX; i++) {
		pix_ext_edge1[i] = 0;
		pix_ext_edge2[i] = scaling ? 1 : 0;
	}
}

static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
	const struct mdp_format *format,
	uint32_t src_w, int pe_left[COMP_MAX], int pe_right[COMP_MAX],
	uint32_t src_h, int pe_top[COMP_MAX], int pe_bottom[COMP_MAX])
{
	const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
	uint32_t lr, tb, req;
	int i;

	for (i = 0; i < COMP_MAX; i++) {
		uint32_t roi_w = src_w;
		uint32_t roi_h = src_h;

		if (format->is_yuv && i == COMP_1_2) {
			roi_w /= info->hsub;
			roi_h /= info->vsub;
		}

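		/*
		 * Positive pixel-extension values are programmed as edge
		 * repeats (RPT), negative ones as overfetch (OVF).
		 */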
		lr  = (pe_left[i] >= 0) ?
			MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(pe_left[i]) :
			MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(pe_left[i]);

		lr |= (pe_right[i] >= 0) ?
			MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(pe_right[i]) :
			MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(pe_right[i]);

		tb  = (pe_top[i] >= 0) ?
			MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(pe_top[i]) :
			MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(pe_top[i]);

		tb |= (pe_bottom[i] >= 0) ?
			MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(pe_bottom[i]) :
			MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(pe_bottom[i]);

		req  = MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(roi_w +
				pe_left[i] + pe_right[i]);

		req |= MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(roi_h +
				pe_top[i] + pe_bottom[i]);

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_LR(pipe, i), lr);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_TB(pipe, i), tb);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(pipe, i), req);

		DBG("comp-%d (L/R): rpt=%d/%d, ovf=%d/%d, req=%d", i,
			FIELD(lr,  MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT),
			FIELD(lr,  MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT),
			FIELD(lr,  MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF),
			FIELD(lr,  MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF),
			FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT));

		DBG("comp-%d (T/B): rpt=%d/%d, ovf=%d/%d, req=%d", i,
			FIELD(tb,  MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT),
			FIELD(tb,  MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT),
			FIELD(tb,  MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF),
			FIELD(tb,  MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF),
			FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM));
	}
}

struct pixel_ext {
	int left[COMP_MAX];
	int right[COMP_MAX];
	int top[COMP_MAX];
	int bottom[COMP_MAX];
};

struct phase_step {
	u32 x[COMP_MAX];
	u32 y[COMP_MAX];
};

static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms,
				 struct mdp5_hw_pipe *hwpipe,
				 struct drm_framebuffer *fb,
				 struct phase_step *step,
				 struct pixel_ext *pe,
				 u32 scale_config, u32 hdecm, u32 vdecm,
				 bool hflip, bool vflip,
				 int crtc_x, int crtc_y,
				 unsigned int crtc_w, unsigned int crtc_h,
				 u32 src_img_w, u32 src_img_h,
				 u32 src_x, u32 src_y,
				 u32 src_w, u32 src_h)
{
	enum mdp5_pipe pipe = hwpipe->pipe;
	bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
	const struct mdp_format *format =
			to_mdp_format(msm_framebuffer_format(fb));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
			MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) |
			MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_img_h));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
			MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
			MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
			MDP5_PIPE_SRC_XY_X(src_x) |
			MDP5_PIPE_SRC_XY_Y(src_y));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
			MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
			MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
			MDP5_PIPE_OUT_XY_X(crtc_x) |
			MDP5_PIPE_OUT_XY_Y(crtc_y));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
			MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
			MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
			MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
			MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
			COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
			MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
			MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
			COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
			MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
			MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
			MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
			MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
			MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
			MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
			(hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
			(vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
			COND(has_pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
			MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));

	/* not using secure mode: */
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);

	if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
		mdp5_write_pixel_ext(mdp5_kms, pipe, format,
				src_w, pe->left, pe->right,
				src_h, pe->top, pe->bottom);

	if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
				step->x[COMP_0]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
				step->y[COMP_0]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
				step->x[COMP_1_2]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
				step->y[COMP_1_2]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
				MDP5_PIPE_DECIMATION_VERT(vdecm) |
				MDP5_PIPE_DECIMATION_HORZ(hdecm));
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
			   scale_config);
	}

	if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
		if (MDP_FORMAT_IS_YUV(format))
			csc_enable(mdp5_kms, pipe,
					mdp_get_default_csc_cfg(CSC_YUV2RGB));
		else
			csc_disable(mdp5_kms, pipe);
	}

	set_scanout_locked(mdp5_kms, pipe, fb);
}

static int mdp5_plane_mode_set(struct drm_plane *plane,
		struct drm_crtc *crtc, struct drm_framebuffer *fb,
		struct drm_rect *src, struct drm_rect *dest)
{
	struct drm_plane_state *pstate = plane->state;
	struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	enum mdp5_pipe pipe = hwpipe->pipe;
	struct mdp5_hw_pipe *right_hwpipe;
	const struct mdp_format *format;
	uint32_t nplanes, config = 0;
	struct phase_step step = { { 0 } };
	struct pixel_ext pe = { { 0 } };
	uint32_t hdecm = 0, vdecm = 0;
	uint32_t pix_format;
	unsigned int rotation;
	bool vflip, hflip;
	int crtc_x, crtc_y;
	unsigned int crtc_w, crtc_h;
	uint32_t src_x, src_y;
	uint32_t src_w, src_h;
	uint32_t src_img_w, src_img_h;
	int ret;

	nplanes = fb->format->num_planes;

	/* bad formats should already be rejected: */
	if (WARN_ON(nplanes > pipe2nclients(pipe)))
		return -EINVAL;

	format = to_mdp_format(msm_framebuffer_format(fb));
	pix_format = format->base.pixel_format;

	src_x = src->x1;
	src_y = src->y1;
	src_w = drm_rect_width(src);
	src_h = drm_rect_height(src);

	crtc_x = dest->x1;
	crtc_y = dest->y1;
	crtc_w = drm_rect_width(dest);
	crtc_h = drm_rect_height(dest);

	/* src values are in Q16 fixed point, convert to integer: */
	src_x = src_x >> 16;
	src_y = src_y >> 16;
	src_w = src_w >> 16;
	src_h = src_h >> 16;

	src_img_w = min(fb->width, src_w);
	src_img_h = min(fb->height, src_h);

	DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name,
			fb->base.id, src_x, src_y, src_w, src_h,
			crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);

	right_hwpipe = to_mdp5_plane_state(pstate)->r_hwpipe;
	if (right_hwpipe) {
		/*
		 * If the plane comprises 2 hw pipes, assume that the width is
		 * split equally across them. The only parameters that vary
		 * between the two pipes are src_x and crtc_x.
		 */
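		/*
		 * E.g. a 4096-wide source scanned out 4096 wide becomes two
		 * 2048-wide halves; the right pipe fetches from src_x + 2048
		 * and writes to crtc_x + 2048 (see the second
		 * mdp5_hwpipe_mode_set() call below).
		 */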
		crtc_w /= 2;
		src_w /= 2;
		src_img_w /= 2;
	}

	ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x);
	if (ret)
		return ret;

	ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, step.y);
	if (ret)
		return ret;

	if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
		calc_pixel_ext(format, src_w, crtc_w, step.x,
			       pe.left, pe.right, true);
		calc_pixel_ext(format, src_h, crtc_h, step.y,
			       pe.top, pe.bottom, false);
	}

	/* TODO calc hdecm, vdecm */

	/* SCALE is used to both scale and up-sample chroma components */
	config |= get_scale_config(format, src_w, crtc_w, true);
	config |= get_scale_config(format, src_h, crtc_h, false);
	DBG("scale config = %x", config);

	rotation = drm_rotation_simplify(pstate->rotation,
					 DRM_MODE_ROTATE_0 |
					 DRM_MODE_REFLECT_X |
					 DRM_MODE_REFLECT_Y);
	hflip = !!(rotation & DRM_MODE_REFLECT_X);
	vflip = !!(rotation & DRM_MODE_REFLECT_Y);

	mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe,
			     config, hdecm, vdecm, hflip, vflip,
			     crtc_x, crtc_y, crtc_w, crtc_h,
			     src_img_w, src_img_h,
			     src_x, src_y, src_w, src_h);
	if (right_hwpipe)
		mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe,
				     config, hdecm, vdecm, hflip, vflip,
				     crtc_x + crtc_w, crtc_y, crtc_w, crtc_h,
				     src_img_w, src_img_h,
				     src_x + src_w, src_y, src_w, src_h);

	return ret;
}

/*
 * Use this func and the one below only after the atomic state has been
 * successfully swapped
 */
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);

	if (WARN_ON(!pstate->hwpipe))
		return SSPP_NONE;

	return pstate->hwpipe->pipe;
}

enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);

	if (!pstate->r_hwpipe)
		return SSPP_NONE;

	return pstate->r_hwpipe->pipe;
}

uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
	u32 mask;

	if (WARN_ON(!pstate->hwpipe))
		return 0;

	mask = pstate->hwpipe->flush_mask;

	if (pstate->r_hwpipe)
		mask |= pstate->r_hwpipe->flush_mask;

	return mask;
}

/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
				  enum drm_plane_type type)
{
	struct drm_plane *plane = NULL;
	struct mdp5_plane *mdp5_plane;
	int ret;

	mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
	if (!mdp5_plane) {
		ret = -ENOMEM;
		goto fail;
	}

	plane = &mdp5_plane->base;

	mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
		ARRAY_SIZE(mdp5_plane->formats), false);

	ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
			mdp5_plane->formats, mdp5_plane->nformats,
			NULL, type, NULL);
	if (ret)
		goto fail;

	drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);

	mdp5_plane_install_properties(plane, &plane->base);

	drm_plane_enable_fb_damage_clips(plane);

	return plane;

fail:
	if (plane)
		mdp5_plane_destroy(plane);

	return ERR_PTR(ret);
}