// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/sort.h>

#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "mdp5_kms.h"

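/*
 * The layer-mixer (LM) hardware cursor handled by this driver is limited
 * to a 64x64 ARGB8888 image; larger requests are rejected by
 * mdp5_crtc_cursor_set().
 */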
#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64

struct mdp5_crtc {
	struct drm_crtc base;
	int id;
	bool enabled;

	spinlock_t lm_lock;     /* protect REG_MDP5_LM_* registers */

	/* if there is a pending flip, this will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits that were flushed at the last commit; used to decide
	 * whether a vsync has happened since the last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
	struct mdp_irq pp_done;

	struct completion pp_completion;

	bool lm_cursor_enabled;

	struct {
		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo */
		spinlock_t lock;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
		uint64_t iova;
		uint32_t width, height;
		int x, y;
	} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

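/*
 * The pending bits set here are consumed in mdp5_crtc_vblank_irq(), which
 * unregisters the vblank irq and then completes the flip and/or commits
 * the deferred cursor-bo unref work, depending on which bits were set.
 */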
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

static void request_pp_done_pending(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	reinit_completion(&mdp5_crtc->pp_completion);
}

static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	bool start = !mdp5_cstate->defer_start;

	mdp5_cstate->defer_start = false;

	DBG("%s: flush=%08x", crtc->name, flush_mask);

	return mdp5_ctl_commit(ctl, pipeline, flush_mask, start);
}

/*
 * flush updates, to make sure the hw is updated to the new scanout fb,
 * so that we can safely queue an unref of the current fb (i.e. by the
 * next vblank we know the hw is done with the previous scanout_fb).
 */
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_hw_mixer *mixer, *r_mixer;
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

	/* this should not happen: */
	if (WARN_ON(!mdp5_cstate->ctl))
		return 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (!plane->state->visible)
			continue;
		flush_mask |= mdp5_plane_get_flush(plane);
	}

	mixer = mdp5_cstate->pipeline.mixer;
	flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);

	r_mixer = mdp5_cstate->pipeline.r_mixer;
	if (r_mixer)
		flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	return crtc_flush(crtc, flush_mask);
}

/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		mdp5_crtc->event = NULL;
		DBG("%s: send event: %p", crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (ctl && !crtc->state->enable) {
		/* set STAGE_UNUSED for all layers */
		mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
		/* XXX: What to do here? */
		/* mdp5_crtc->ctl = NULL; */
	}
}

static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_cursor_work);
	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
	struct msm_kms *kms = &mdp5_kms->base.base;

	msm_gem_unpin_iova(val, kms->aspace);
	drm_gem_object_put(val);
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

	kfree(mdp5_crtc);
}

static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
{
	switch (stage) {
	case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
	case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
	case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
	case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
	case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
	case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
	case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
	default:
		return 0;
	}
}

/*
 * left/right pipe offsets for the stage array used in blend_setup()
 */
#define PIPE_LEFT	0
#define PIPE_RIGHT	1

/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * If no base layer is available, border will be enabled as the base layer.
 * Otherwise all layers will be blended based on their stage calculated
 * in mdp5_crtc_atomic_check.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
	const struct mdp_format *format;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	uint32_t lm = mixer->lm;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
	unsigned long flags;
	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	int i, plane_cnt = 0;
	bool bg_alpha_enabled = false;
	u32 mixer_op_mode = 0;
	u32 val;
#define blender(stage)	((stage) - STAGE0)

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl could be released already when we are shutting down: */
	/* XXX: Can this happen now? */
	if (!ctl)
		goto out;

	/* Collect all plane information */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp5_pipe right_pipe;

		if (!plane->state->visible)
			continue;

		pstate = to_mdp5_plane_state(plane->state);
		pstates[pstate->stage] = pstate;
		stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
		/*
		 * if we have a right mixer, stage the same pipe as we
		 * have on the left mixer
		 */
		if (r_mixer)
			r_stage[pstate->stage][PIPE_LEFT] =
						mdp5_plane_pipe(plane);
		/*
		 * if we have a right pipe (i.e. the plane comprises two
		 * hwpipes), then stage the right pipe on the right side of
		 * both the layer mixers
		 */
		right_pipe = mdp5_plane_right_pipe(plane);
		if (right_pipe) {
			stage[pstate->stage][PIPE_RIGHT] = right_pipe;
			r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
		}

		plane_cnt++;
	}

	if (!pstates[STAGE_BASE]) {
		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
		DBG("Border Color is enabled");
	} else if (plane_cnt) {
		format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));

		if (format->alpha_enable)
			bg_alpha_enabled = true;
	}

	/* Set up blending for each stage with an attached plane/layer */
	for (i = STAGE0; i <= STAGE_MAX; i++) {
		if (!pstates[i])
			continue;

		format = to_mdp_format(
			msm_framebuffer_format(pstates[i]->base.fb));
		plane = pstates[i]->base.plane;
		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
		fg_alpha = pstates[i]->alpha;
		bg_alpha = 0xFF - pstates[i]->alpha;
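		/*
		 * With FG_CONST/BG_CONST the mixer blends with the constant
		 * alpha values programmed below; a minimal sketch of the
		 * blend equation, assuming 8-bit normalized alpha:
		 *   out = fg * fg_alpha / 255 + bg * bg_alpha / 255
		 * e.g. plane alpha 0x80 yields fg_alpha = 0x80, bg_alpha = 0x7f.
		 */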

		if (!format->alpha_enable && bg_alpha_enabled)
			mixer_op_mode = 0;
		else
			mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);

		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

		if (format->alpha_enable && pstates[i]->premultiplied) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		} else if (format->alpha_enable) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
				       MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		}

		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
				blender(i)), blend_op);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(i)), fg_alpha);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(i)), bg_alpha);
		if (r_mixer) {
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
					blender(i)), blend_op);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
					blender(i)), fg_alpha);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
					blender(i)), bg_alpha);
		}
	}

	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
		   val | mixer_op_mode);
	if (r_mixer) {
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
			   val | mixer_op_mode);
	}

	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
		       ctl_blend_flags);
out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
	struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
	uint32_t lm = mixer->lm;
	u32 mixer_width, val;
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: " DRM_MODE_FMT, crtc->name, DRM_MODE_ARG(mode));

	mixer_width = mode->hdisplay;
	if (r_mixer)
		mixer_width /= 2;

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
			MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

	/* Assign mixer to LEFT side in source split mode */
	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);

	if (r_mixer) {
		u32 r_lm = r_mixer->lm;

		mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
			   MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			   MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

		/* Assign mixer to RIGHT side in source split mode */
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
	}

	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}

static bool mdp5_crtc_get_scanout_position(struct drm_crtc *crtc,
					   bool in_vblank_irq,
					   int *vpos, int *hpos,
					   ktime_t *stime, ktime_t *etime,
					   const struct drm_display_mode *mode)
{
	unsigned int pipe = crtc->index;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return false;
	}

	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
	 * the end of VFP. Translate the porch values relative to the line
	 * counter positions.
	 */

	vactive_start = vsw + vbp + 1;

	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;
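
	/*
	 * Worked example: for a mode with vsync_end - vsync_start == 4,
	 * vtotal - vsync_end == 10 and vdisplay == 1080, vactive_start is
	 * 4 + 10 + 1 == 15 and vactive_end is 15 + 1080 == 1095, so a raw
	 * line count of 15 maps to vpos 0 (the first active line).
	 */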

	if (stime)
		*stime = ktime_get();

	line = mdp5_encoder_get_linecount(encoder);

	if (line < vactive_start)
		line -= vactive_start;
	else if (line > vactive_end)
		line = line - vfp_end - vactive_start;
	else
		line -= vactive_start;

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}

static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder)
		return 0;

	return mdp5_encoder_get_framecount(encoder);
}

static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;
	unsigned long flags;

	DBG("%s", crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	/* Disable/save vblank irq handling before power is disabled */
	drm_crtc_vblank_off(crtc);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	pm_runtime_put_sync(dev);

	if (crtc->state->event && !crtc->state->active) {
		WARN_ON(mdp5_crtc->event);
		spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags);
	}

	mdp5_crtc->enabled = false;
}

static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
	u32 count;

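	/*
	 * In DSI command mode there is no usable hw frame counter for
	 * vblanks, so a max vblank count of zero tells the drm core to
	 * fall back to timestamp-based vblank accounting.
	 */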
	count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
	drm_crtc_set_max_vblank_count(crtc, count);

	drm_crtc_vblank_on(crtc);
}

static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;

	DBG("%s", crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	pm_runtime_get_sync(dev);

	if (mdp5_crtc->lm_cursor_enabled) {
		/*
		 * Restore LM cursor state, as it might have been lost
		 * with suspend:
		 */
		if (mdp5_crtc->cursor.iova) {
			unsigned long flags;

			spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
			mdp5_crtc_restore_cursor(crtc);
			spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, true);
		} else {
			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, false);
		}
	}

	/* Restore vblank irq handling after power is enabled */
	mdp5_crtc_vblank_on(crtc);

	mdp5_crtc_mode_set_nofb(crtc);

	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}

int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
			     struct drm_crtc_state *new_crtc_state,
			     bool need_right_mixer)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_interface *intf;
	bool new_mixer = false;

	new_mixer = !pipeline->mixer;

	if ((need_right_mixer && !pipeline->r_mixer) ||
	    (!need_right_mixer && pipeline->r_mixer))
		new_mixer = true;

	if (new_mixer) {
		struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
		struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
		u32 caps;
		int ret;

		caps = MDP_LM_CAP_DISPLAY;
		if (need_right_mixer)
			caps |= MDP_LM_CAP_PAIR;

		ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
					&pipeline->mixer, need_right_mixer ?
					&pipeline->r_mixer : NULL);
		if (ret)
			return ret;

		mdp5_mixer_release(new_crtc_state->state, old_mixer);
		if (old_r_mixer) {
			mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
			if (!need_right_mixer)
				pipeline->r_mixer = NULL;
		}
	}

	/*
	 * these should have been already set up in the encoder's atomic
	 * check (called by drm_atomic_helper_check_modeset)
	 */
	intf = pipeline->intf;

	mdp5_cstate->err_irqmask = intf2err(intf->num);
	mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);

	if ((intf->type == INTF_DSI) &&
	    (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
		mdp5_cstate->cmd_mode = true;
	} else {
		mdp5_cstate->pp_done_irqmask = 0;
		mdp5_cstate->cmd_mode = false;
	}

	return 0;
}

struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};

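/*
 * sort() comparator: ascending zpos, so after sorting pstates[0] is the
 * bottom-most layer and pstates[cnt - 1] the top-most (expected to be
 * the cursor plane, when one is attached).
 */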
static int pstate_cmp(const void *a, const void *b)
{
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;
	return pa->state->zpos - pb->state->zpos;
}

/* is there a helper for this? */
static bool is_fullscreen(struct drm_crtc_state *cstate,
		struct drm_plane_state *pstate)
{
	return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
		((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
		((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}

static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
					struct drm_crtc_state *new_crtc_state,
					struct drm_plane_state *bpstate)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);

	/*
	 * if we're in source split mode, it's mandatory to have
	 * border out on the base stage
	 */
	if (mdp5_cstate->pipeline.r_mixer)
		return STAGE0;

	/* if the bottom-most layer is not fullscreen, we need to reserve
	 * the base stage for border/solid color:
	 */
	if (!is_fullscreen(new_crtc_state, bpstate))
		return STAGE0;

	return STAGE_BASE;
}

static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE_MAX + 1];
	const struct mdp5_cfg_hw *hw_cfg;
	const struct drm_plane_state *pstate;
	const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
	bool cursor_plane = false;
	bool need_right_mixer = false;
	int cnt = 0, i;
	int ret;
	enum mdp_mixer_stage_id start;

	DBG("%s: check", crtc->name);

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		if (!pstate->visible)
			continue;

		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		/*
		 * if any plane on this crtc uses 2 hwpipes, then we need
		 * the crtc to have a right hwmixer.
		 */
		if (pstates[cnt].state->r_hwpipe)
			need_right_mixer = true;
		cnt++;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor_plane = true;
	}

	/* bail out early if there aren't any planes */
	if (!cnt)
		return 0;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/*
	 * we need a right hwmixer if the mode's width is greater than a single
	 * LM's max width
	 */
	if (mode->hdisplay > hw_cfg->lm.max_width)
		need_right_mixer = true;

	ret = mdp5_crtc_setup_pipeline(crtc, crtc_state, need_right_mixer);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
		return ret;
	}

	/* assign a stage based on sorted zpos property */
	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	/* trigger a warning if cursor isn't the highest zorder */
	WARN_ON(cursor_plane &&
		(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));

	start = get_start_stage(crtc, crtc_state, &pstates[0].state->base);

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
		DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
			cnt, start);
		return -EINVAL;
	}

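	/*
	 * e.g. with three planes and start == STAGE_BASE, the planes get
	 * stages STAGE_BASE, STAGE_BASE + 1 and STAGE_BASE + 2; a cursor
	 * plane is instead pinned to the top-most stage (nb_stages).
	 */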
	for (i = 0; i < cnt; i++) {
		if (cursor_plane && (i == (cnt - 1)))
			pstates[i].state->stage = hw_cfg->lm.nb_stages;
		else
			pstates[i].state->stage = start + i;
		DBG("%s: assign pipe %s on stage=%d", crtc->name,
				pstates[i].plane->name,
				pstates[i].state->stage);
	}

	return 0;
}

static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	DBG("%s: begin", crtc->name);
}

static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	/* XXX: Can this happen now? */
	if (unlikely(!mdp5_cstate->ctl))
		return;

	blend_setup(crtc);

	/* The PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before the FLUSH and START
	 * triggers, to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before the SW
	 * trigger in command mode.
	 */
	if (mdp5_cstate->cmd_mode)
		request_pp_done_pending(crtc);

	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

	/* XXX are we leaking out state here? */
	mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
	mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
	mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;

	request_pending(crtc, PENDING_FLIP);
}

static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;

	/*
	 * The cursor Region Of Interest (ROI) is the part of the cursor
	 * buffer that is actually read and rendered. The ROI is determined
	 * by the visibility of the cursor point; in the default cursor
	 * image the cursor point is at the top left of the cursor image.
	 *
	 * Without rotation:
	 * If the cursor point reaches the right (xres - x < cursor.width) or
	 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
	 * width and ROI height need to be evaluated to crop the cursor image
	 * accordingly.
	 * (xres - x) will be the new cursor width when x > (xres - cursor.width)
	 * (yres - y) will be the new cursor height when y > (yres - cursor.height)
	 *
	 * With rotation:
	 * We get negative x and/or y coordinates.
	 * (cursor.width - abs(x)) will be the new cursor width when x < 0
	 * (cursor.height - abs(y)) will be the new cursor height when y < 0
	 */
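	/*
	 * e.g. a 64x64 cursor at x == xres - 16 yields roi_w == 16, while
	 * the same cursor at x == -16 yields roi_w == 64 - 16 == 48.
	 */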
	if (mdp5_crtc->cursor.x >= 0)
		*roi_w = min(mdp5_crtc->cursor.width, xres -
			mdp5_crtc->cursor.x);
	else
		*roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
	if (mdp5_crtc->cursor.y >= 0)
		*roi_h = min(mdp5_crtc->cursor.height, yres -
			mdp5_crtc->cursor.y);
	else
		*roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
}

static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_ARGB8888);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t blendcfg, stride;
	uint32_t x, y, src_x, src_y, width, height;
	uint32_t roi_w, roi_h;
	int lm;

	assert_spin_locked(&mdp5_crtc->cursor.lock);

	lm = mdp5_cstate->pipeline.mixer->lm;

	x = mdp5_crtc->cursor.x;
	y = mdp5_crtc->cursor.y;
	width = mdp5_crtc->cursor.width;
	height = mdp5_crtc->cursor.height;

	stride = width * info->cpp[0];
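	/* ARGB8888 is 4 bytes/pixel (info->cpp[0]), so e.g. a 64 pixel
	 * wide cursor ends up with a 256-byte stride.
	 */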

	get_roi(crtc, &roi_w, &roi_h);

	/* If the cursor buffer overlaps the upper or left screen border
	 * due to rotation, the pixel offset of the ROI inside the cursor
	 * buffer is the positive overlap distance.
	 */
	if (mdp5_crtc->cursor.x < 0) {
		src_x = abs(mdp5_crtc->cursor.x);
		x = 0;
	} else {
		src_x = 0;
	}
	if (mdp5_crtc->cursor.y < 0) {
		src_y = abs(mdp5_crtc->cursor.y);
		y = 0;
	} else {
		src_y = 0;
	}
	DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
		crtc->name, x, y, roi_w, roi_h, src_x, src_y);

	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
			MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
			MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
			MDP5_LM_CURSOR_START_XY_Y_START(y) |
			MDP5_LM_CURSOR_START_XY_X_START(x));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
			mdp5_crtc->cursor.iova);

	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
}

static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct platform_device *pdev = mdp5_kms->pdev;
	struct msm_kms *kms = &mdp5_kms->base.base;
	struct drm_gem_object *cursor_bo, *old_bo = NULL;
	struct mdp5_ctl *ctl;
	int ret;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	bool cursor_enable = true;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_set is deprecated with cursor planes\n");
		return -EINVAL;
	}

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	ctl = mdp5_cstate->ctl;
	if (!ctl)
		return -EINVAL;

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	if (!handle) {
		DBG("Cursor off");
		cursor_enable = false;
		mdp5_crtc->cursor.iova = 0;
		pm_runtime_get_sync(&pdev->dev);
		goto set_cursor;
	}

	cursor_bo = drm_gem_object_lookup(file, handle);
	if (!cursor_bo)
		return -ENOENT;

	ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
			&mdp5_crtc->cursor.iova);
	if (ret) {
		/* drop the reference taken by drm_gem_object_lookup(): */
		drm_gem_object_put(cursor_bo);
		return -EINVAL;
	}

	pm_runtime_get_sync(&pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	old_bo = mdp5_crtc->cursor.scanout_bo;

	mdp5_crtc->cursor.scanout_bo = cursor_bo;
	mdp5_crtc->cursor.width = width;
	mdp5_crtc->cursor.height = height;

	mdp5_crtc_restore_cursor(crtc);

	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

set_cursor:
	ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
				cursor_enable ? "en" : "dis", ret);
		goto end;
	}

	crtc_flush(crtc, flush_mask);

end:
	pm_runtime_put_sync(&pdev->dev);
	if (old_bo) {
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
		/* enable vblank to complete cursor work: */
		request_pending(crtc, PENDING_CURSOR);
	}
	return ret;
}

static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	struct drm_device *dev = crtc->dev;
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_move is deprecated with cursor planes\n");
		return -EINVAL;
	}

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	/* In case the CRTC is disabled, just drop the cursor update */
	if (unlikely(!crtc->state->enable))
		return 0;

	/* accept negative x/y coordinates up to maximum cursor overlap */
	mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width);
	mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height);

	get_roi(crtc, &roi_w, &roi_h);

	pm_runtime_get_sync(&mdp5_kms->pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_crtc_restore_cursor(crtc);
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	pm_runtime_put_sync(&mdp5_kms->pdev->dev);

	return 0;
}

static void
mdp5_crtc_atomic_print_state(struct drm_printer *p,
			     const struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(state->crtc);

	if (WARN_ON(!pipeline))
		return;

	if (mdp5_cstate->ctl)
		drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl));

	drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
			pipeline->mixer->name : "(null)");

	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
		drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
			   pipeline->r_mixer->name : "(null)");

	drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
}

static struct drm_crtc_state *
mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc->state))
		return NULL;

	mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
			      sizeof(*mdp5_cstate), GFP_KERNEL);
	if (!mdp5_cstate)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);

	return &mdp5_cstate->base;
}

static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(mdp5_cstate);
}

static void mdp5_crtc_reset(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate =
		kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);

	if (crtc->state)
		mdp5_crtc_destroy_state(crtc, crtc->state);

	/* kzalloc() can fail: don't dereference a NULL state */
	if (mdp5_cstate)
		__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
	.get_vblank_counter = mdp5_crtc_get_vblank_counter,
	.enable_vblank  = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
	.atomic_enable = mdp5_crtc_atomic_enable,
	.atomic_disable = mdp5_crtc_atomic_disable,
	.get_scanout_position = mdp5_crtc_get_scanout_position,
};

static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP)
		complete_flip(crtc, NULL);

	if (pending & PENDING_CURSOR)
		drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
}

static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
								pp_done);

	complete(&mdp5_crtc->pp_completion);
}

static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	int ret;

	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
						msecs_to_jiffies(50));
	if (ret == 0)
		dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n",
				     mdp5_cstate->pipeline.mixer->lm);
}

static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	int ret;

	/* Should not call this function if crtc is disabled. */
	if (!ctl)
		return;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

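	/*
	 * The CTL commit status bits clear as the hw latches the flushed
	 * registers, so wait (at most 50ms) until none of the bits flushed
	 * at the last commit are still pending.
	 */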
	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		((mdp5_ctl_get_commit_status(ctl) &
		mdp5_crtc->flushed_mask) == 0),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

	mdp5_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	/* should this be done elsewhere? */
	mdp_irq_update(&mdp5_kms->base);

	mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
}

struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return mdp5_cstate->ctl;
}

struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
		ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
}

struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return &mdp5_cstate->pipeline;
}

void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	if (mdp5_cstate->cmd_mode)
		mdp5_crtc_wait_for_pp_done(crtc);
	else
		mdp5_crtc_wait_for_flush_done(crtc);
}

/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
				struct drm_plane *plane,
				struct drm_plane *cursor_plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;

	spin_lock_init(&mdp5_crtc->lm_lock);
	spin_lock_init(&mdp5_crtc->cursor.lock);
	init_completion(&mdp5_crtc->pp_completion);

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
	mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;

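	/*
	 * When a cursor drm_plane is provided, it is used instead of the
	 * LM hardware cursor, and the legacy cursor_set()/cursor_move()
	 * entry points reject updates.
	 */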
	mdp5_crtc->lm_cursor_enabled = !cursor_plane;

	drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
				  &mdp5_crtc_funcs, NULL);

	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);

	return crtc;
}