xref: /openbmc/linux/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c (revision 05cf4fe738242183f1237f1b3a28b4479348c0a1)
/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_rect.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_power_handle.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

#define DPU_DRM_BLEND_OP_NOT_DEFINED    0
#define DPU_DRM_BLEND_OP_OPAQUE         1
#define DPU_DRM_BLEND_OP_PREMULTIPLIED  2
#define DPU_DRM_BLEND_OP_COVERAGE       3
#define DPU_DRM_BLEND_OP_MAX            4

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

static inline int _dpu_crtc_get_mixer_width(struct dpu_crtc_state *cstate,
					    struct drm_display_mode *mode)
{
	return mode->hdisplay / cstate->num_mixers;
}
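
/*
 * Example (illustrative): for a 1920x1080 adjusted mode driven by two
 * layer mixers (num_mixers == 2), each LM is programmed 960 pixels wide;
 * LEFT_MIXER covers x = [0, 960) and RIGHT_MIXER covers x = [960, 1920).
 */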

static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;

	DPU_DEBUG("\n");

	if (!crtc)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	dpu_crtc->phandle = NULL;

	drm_crtc_cleanup(crtc);
	mutex_destroy(&dpu_crtc->crtc_lock);
	kfree(dpu_crtc);
}

static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	struct drm_format_name_buf format_name;

	/* default to opaque blending */
	blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
		DPU_BLEND_BG_ALPHA_BG_CONST;

	if (format->alpha_enable) {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_INV_ALPHA;
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				0xFF, 0, blend_op);

	DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
		drm_get_format_name(format->base.pixel_format, &format_name),
		format->alpha_enable, blend_op);
}
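
/*
 * Note (illustrative): with alpha disabled, the mixer blends constant
 * weights (fg alpha 0xFF, bg alpha 0), i.e. the foreground plane fully
 * replaces the background. With alpha enabled, both terms are weighted
 * by the per-pixel foreground alpha, i.e. coverage blending:
 * out = fg * alpha + bg * (1 - alpha).
 */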

static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	dpu_crtc = to_dpu_crtc(crtc);
	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);

		DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	_dpu_crtc_program_lm_output_roi(crtc);
}
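
/*
 * Illustrative stage_cfg layout after the walk above, assuming two planes
 * staged at DPU_STAGE_0: stage[DPU_STAGE_0][0] holds the first plane's
 * SSPP id and stage[DPU_STAGE_0][1] the second one's; zpos_cnt[] supplies
 * the per-stage second index.
 */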

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DPU_DEBUG("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
			DPU_ERROR("invalid lm or ctl assigned to mixer\n");
			return;
		}
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * @crtc: Pointer to drm crtc structure
 *
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However, PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events that were requested through
 * DRM_IOCTL_MODE_PAGE_FLIP and cached in dpu_crtc->event.
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	if (!crtc || !crtc->dev) {
		DPU_ERROR("invalid crtc\n");
		return INTF_MODE_NONE;
	}

	drm_for_each_encoder(encoder, crtc->dev)
		if (encoder->crtc == crtc)
			return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

static void dpu_crtc_vblank_cb(void *data)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;
	_dpu_crtc_complete_flip(crtc);
	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* this should not happen */
			DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
					crtc->base.id,
					fevent->event,
					ktime_to_ns(fevent->ts),
					atomic_read(&dpu_crtc->frame_pending));
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
			dpu_core_perf_crtc_update(crtc, 0, false);

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. The CRTC module
 * registers this callback with the encoder for all frame events such as
 * frame_error, frame_done and idle_timeout. The encoder may invoke it from
 * different contexts - IRQ, user thread, commit_thread, etc. Each event
 * should be carefully reviewed and should be processed in the proper task
 * context to avoid scheduling delay or to properly manage the irq context's
 * bottom half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	trace_dpu_crtc_complete_commit(DRMID(crtc));
}

static void _dpu_crtc_setup_mixer_for_encoder(
		struct drm_crtc *crtc,
		struct drm_encoder *enc)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_rm *rm = &dpu_kms->rm;
	struct dpu_crtc_mixer *mixer;
	struct dpu_hw_ctl *last_valid_ctl = NULL;
	int i;
	struct dpu_rm_hw_iter lm_iter, ctl_iter;

	dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
	dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);

	/* Set up all the mixers and ctls reserved by this encoder */
	for (i = cstate->num_mixers; i < ARRAY_SIZE(cstate->mixers); i++) {
		mixer = &cstate->mixers[i];

		if (!dpu_rm_get_hw(rm, &lm_iter))
			break;
		mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;

		/*
		 * There may be fewer CTLs than LMs; if so, multiple LMs are
		 * driven by one CTL.
		 */
		if (!dpu_rm_get_hw(rm, &ctl_iter)) {
			DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
					mixer->hw_lm->idx - LM_0);
			mixer->lm_ctl = last_valid_ctl;
		} else {
			mixer->lm_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
			last_valid_ctl = mixer->lm_ctl;
		}

		/* Shouldn't happen: there are always at least as many LMs as CTLs */
		if (!mixer->lm_ctl) {
			DPU_ERROR("no valid ctls found for lm %d\n",
					mixer->hw_lm->idx - LM_0);
			return;
		}

		mixer->encoder = enc;

		cstate->num_mixers++;
		DPU_DEBUG("setup mixer %d: lm %d\n",
				i, mixer->hw_lm->idx - LM_0);
		DPU_DEBUG("setup mixer %d: ctl %d\n",
				i, mixer->lm_ctl->idx - CTL_0);
	}
}
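
/*
 * Illustrative: in a dual-LM topology where the resource manager reserved
 * two LMs but only one CTL for the encoder, the second loop iteration
 * above falls back to last_valid_ctl, so mixers[0] and mixers[1] end up
 * sharing the same lm_ctl.
 */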

static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	mutex_lock(&dpu_crtc->crtc_lock);
	/* Check for mixers on all encoders attached to this crtc */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		if (enc->crtc != crtc)
			continue;

		_dpu_crtc_setup_mixer_for_encoder(crtc, enc);
	}

	mutex_unlock(&dpu_crtc->crtc_lock);
}

static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = _dpu_crtc_get_mixer_width(cstate, adj_mode);
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}

	drm_mode_debug_printmodeline(adj_mode);
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_encoder *encoder;
	struct drm_device *dev;
	unsigned long flags;
	struct dpu_crtc_smmu_state_data *smmu_state;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	smmu_state = &dpu_crtc->smmu_state;

	if (!cstate->num_mixers) {
		_dpu_crtc_setup_mixers(crtc);
		_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
	}

	if (dpu_crtc->event) {
		WARN_ON(dpu_crtc->event);
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;

		/* encoder will trigger pending mask now */
		dpu_encoder_trigger_kickoff_pending(encoder);
	}

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before the FLUSH and START triggers
	 * to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before the SW trigger
	 * in command mode.
	 */
}

static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	struct msm_drm_thread *event_thread;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	event_thread = &priv->event_thread[crtc->index];

	if (dpu_crtc->event) {
		DPU_DEBUG("already received dpu_crtc->event\n");
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/*
	 * For planes without a commit update, the drm framework will not add
	 * those planes to the current state since a hardware update is not
	 * required. However, if those planes were power collapsed since the
	 * last commit cycle, the driver has to restore their hardware state
	 * explicitly here prior to the plane flush.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc)
		dpu_plane_restore(plane);

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 *                      required writes/flushing before crtc's "flush
	 *                      everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;

	if (!crtc || !state) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DPU_DEBUG("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev = crtc->dev;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	int ret;

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct dpu_encoder_kickoff_params params = { 0 };

		if (encoder->crtc != crtc)
			continue;

		/*
		 * Encoder will flush/start now, unless it has a tx pending.
		 * If so, it may delay and flush at an irq event (e.g. ppdone)
		 */
		dpu_encoder_prepare_for_kickoff(encoder, &params);
	}

	/* wait for frame_event_done completion */
	DPU_ATRACE_BEGIN("wait_for_frame_done_event");
	ret = _dpu_crtc_wait_for_frame_done(crtc);
	DPU_ATRACE_END("wait_for_frame_done_event");
	if (ret) {
		DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));
		goto end;
	}

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
	} else {
		DPU_DEBUG("crtc%d commit\n", crtc->base.id);
	}

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;

		dpu_encoder_kickoff(encoder);
	}

end:
	reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}
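
/*
 * Summary (illustrative) of the kickoff sequence above: each attached
 * encoder is prepared via dpu_encoder_prepare_for_kickoff(), the CRTC
 * waits for any outstanding frame_event_done, bumps frame_pending,
 * clears VBIF errors and finally calls dpu_encoder_kickoff() so the
 * hardware latches the new configuration.
 */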

/**
 * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
 * @dpu_crtc: Pointer to dpu crtc structure
 * @enable: Whether to enable/disable vblanks
 */
static void _dpu_crtc_vblank_enable_no_lock(
		struct dpu_crtc *dpu_crtc, bool enable)
{
	struct drm_crtc *crtc = &dpu_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *enc;

	if (enable) {
		/* drop lock since power crtc cb may try to re-acquire lock */
		mutex_unlock(&dpu_crtc->crtc_lock);
		pm_runtime_get_sync(dev->dev);
		mutex_lock(&dpu_crtc->crtc_lock);

		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
			if (enc->crtc != crtc)
				continue;

			trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
						     DRMID(enc), enable,
						     dpu_crtc);

			dpu_encoder_register_vblank_callback(enc,
					dpu_crtc_vblank_cb, (void *)crtc);
		}
	} else {
		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
			if (enc->crtc != crtc)
				continue;

			trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
						     DRMID(enc), enable,
						     dpu_crtc);

			dpu_encoder_register_vblank_callback(enc, NULL, NULL);
		}

		/* drop lock since power crtc cb may try to re-acquire lock */
		mutex_unlock(&dpu_crtc->crtc_lock);
		pm_runtime_put_sync(dev->dev);
		mutex_lock(&dpu_crtc->crtc_lock);
	}
}

/**
 * _dpu_crtc_set_suspend - notify crtc of suspend enable/disable
 * @crtc: Pointer to drm crtc object
 * @enable: true to enable suspend, false to indicate resume
 */
static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);

	mutex_lock(&dpu_crtc->crtc_lock);

	/*
	 * If the vblank is enabled, release a power reference on suspend
	 * and take it back during resume (if it is still enabled).
	 */
	trace_dpu_crtc_set_suspend(DRMID(&dpu_crtc->base), enable, dpu_crtc);
	if (dpu_crtc->suspend == enable) {
		DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
				crtc->base.id, enable);
	} else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
		_dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
	}

	dpu_crtc->suspend = enable;
	mutex_unlock(&dpu_crtc->crtc_lock);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 *
 * Return: Pointer to new drm_crtc_state structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate, *old_cstate;

	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid argument(s)\n");
		return NULL;
	}

	dpu_crtc = to_dpu_crtc(crtc);
	old_cstate = to_dpu_crtc_state(crtc->state);
	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

/**
 * dpu_crtc_reset - reset hook for CRTCs
 * @crtc: Pointer to drm crtc structure
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	/* revert suspend actions, if necessary */
	if (dpu_kms_is_suspend_state(crtc->dev))
		_dpu_crtc_set_suspend(crtc, false);

	/* remove previous state, if present */
	if (crtc->state) {
		dpu_crtc_destroy_state(crtc, crtc->state);
		crtc->state = NULL;
	}

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return;
	}

	cstate->base.crtc = crtc;
	crtc->state = &cstate->base;
}

static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
{
	struct drm_crtc *crtc = arg;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *encoder;

	mutex_lock(&dpu_crtc->crtc_lock);

	trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);

	/* restore encoder; crtc will be programmed during commit */
	drm_for_each_encoder(encoder, crtc->dev) {
		if (encoder->crtc != crtc)
			continue;

		dpu_encoder_virt_restore(encoder);
	}

	mutex_unlock(&dpu_crtc->crtc_lock);
}

static void dpu_crtc_disable(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_display_mode *mode;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;
	unsigned long flags;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	mode = &cstate->base.adjusted_mode;
	priv = crtc->dev->dev_private;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	if (dpu_kms_is_suspend_state(crtc->dev))
		_dpu_crtc_set_suspend(crtc, true);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	mutex_lock(&dpu_crtc->crtc_lock);

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	if (dpu_crtc->enabled && !dpu_crtc->suspend &&
			dpu_crtc->vblank_requested) {
		_dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
	}
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				     atomic_read(&dpu_crtc->frame_pending));
		dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder(encoder, crtc->dev) {
		if (encoder->crtc != crtc)
			continue;
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
	}

	if (dpu_crtc->power_event)
		dpu_power_handle_unregister_event(dpu_crtc->phandle,
				dpu_crtc->power_event);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	mutex_unlock(&dpu_crtc->crtc_lock);

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	priv = crtc->dev->dev_private;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
	dpu_crtc = to_dpu_crtc(crtc);

	drm_for_each_encoder(encoder, crtc->dev) {
		if (encoder->crtc != crtc)
			continue;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	mutex_lock(&dpu_crtc->crtc_lock);
	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
			dpu_crtc->vblank_requested) {
		_dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
	}
	dpu_crtc->enabled = true;

	mutex_unlock(&dpu_crtc->crtc_lock);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);

	dpu_crtc->power_event = dpu_power_handle_register_event(
		dpu_crtc->phandle, DPU_POWER_EVENT_ENABLE,
		dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
}

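/*
 * Helper bundle used by dpu_crtc_atomic_check() below: it ties a plane's
 * dpu and drm state to the blend stage and SSPP pipe id assigned to it
 * while the CRTC state is validated.
 */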
struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct plane_state *pstates;
	struct dpu_crtc_state *cstate;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}

	pstates = kcalloc(DPU_STAGE_MAX * 4, sizeof(*pstates), GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	if (!state->enable || !state->active) {
		DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, state->enable, state->active);
		goto end;
	}

	mode = &state->adjusted_mode;
	DPU_DEBUG("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (state->active_changed)
		state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	mixer_width = _dpu_crtc_get_mixer_width(cstate, mode);

	_dpu_crtc_setup_lm_bounds(crtc, state);

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
					DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}
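
	/*
	 * Illustrative: with mixer_width == 960 on a 1920-wide dual-LM CRTC,
	 * a plane at crtc_x == 0 counts against left_zpos_cnt and one at
	 * crtc_x == 960 against right_zpos_cnt, so at most two planes may
	 * share a blend stage on each half.
	 */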

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	rc = dpu_core_perf_crtc_check(crtc, state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/*
	 * validate source split:
	 * use pstates sorted by stage to check planes on the same stage;
	 * we assume that all pipes are in source split, so it's valid to
	 * compare without taking left/right mixer placement into account
	 */
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}

int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}
	dpu_crtc = to_dpu_crtc(crtc);

	mutex_lock(&dpu_crtc->crtc_lock);
	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
	if (dpu_crtc->enabled && !dpu_crtc->suspend) {
		_dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
	}
	dpu_crtc->vblank_requested = en;
	mutex_unlock(&dpu_crtc->crtc_lock);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	if (!s || !s->private)
		return -EINVAL;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mutex_lock(&dpu_crtc->crtc_lock);
	mode = &crtc->state->adjusted_mode;
	out_width = _dpu_crtc_get_mixer_width(cstate, mode);

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		if (!m->hw_lm)
			seq_printf(s, "\tmixer[%d] has no lm\n", i);
		else if (!m->lm_ctl)
			seq_printf(s, "\tmixer[%d] has no ctl\n", i);
		else
			seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
				m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
				out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst_x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%lldms total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);

	mutex_unlock(&dpu_crtc->crtc_lock);
	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, _dpu_debugfs_status_show, inode->i_private);
}

#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}
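
/*
 * Usage note (illustrative): DEFINE_DPU_DEBUGFS_SEQ_FOPS(foo) expects a
 * matching foo_show(struct seq_file *, void *) and expands to a foo_open()
 * plus a foo_fops suitable for debugfs_create_file(); see
 * dpu_crtc_debugfs_state below for the one user in this file.
 */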

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int i;

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
			i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
		seq_printf(s, "bw_ctl[%s]: %llu\n",
				dpu_power_handle_get_dbus_name(i),
				dpu_crtc->cur_perf.bw_ctl[i]);
		seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
				dpu_power_handle_get_dbus_name(i),
				dpu_crtc->cur_perf.max_per_pipe_ib[i]);
	}

	return 0;
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;

	static const struct file_operations debugfs_status_fops = {
		.open =		_dpu_debugfs_status_open,
		.read =		seq_read,
		.llseek =	seq_lseek,
		.release =	single_release,
	};

	if (!crtc)
		return -EINVAL;
	dpu_crtc = to_dpu_crtc(crtc);

	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);
	if (!dpu_crtc->debugfs_root)
		return -ENOMEM;

	/* don't error check these */
	debugfs_create_file("status", 0400,
			dpu_crtc->debugfs_root,
			dpu_crtc, &debugfs_status_fops);
	debugfs_create_file("state", 0600,
			dpu_crtc->debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}

static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;

	if (!crtc)
		return;
	dpu_crtc = to_dpu_crtc(crtc);
	debugfs_remove_recursive(dpu_crtc->debugfs_root);
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}

static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
{
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	_dpu_crtc_destroy_debugfs(crtc);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
};

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	struct msm_drm_private *priv = NULL;
	struct dpu_kms *kms = NULL;
	int i;

	priv = dev->dev_private;
	kms = to_dpu_kms(priv->kms);

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	mutex_init(&dpu_crtc->crtc_lock);
	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	/* save user-friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	dpu_crtc->phandle = &kms->phandle;

	DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}
1609