/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

#define DPU_DRM_BLEND_OP_NOT_DEFINED    0
#define DPU_DRM_BLEND_OP_OPAQUE         1
#define DPU_DRM_BLEND_OP_PREMULTIPLIED  2
#define DPU_DRM_BLEND_OP_COVERAGE       3
#define DPU_DRM_BLEND_OP_MAX            4

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

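/**
 * _dpu_crtc_get_kms - retrieve the dpu_kms instance backing this crtc
 * @crtc: Pointer to drm crtc structure
 */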
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	DPU_DEBUG("\n");

	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

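/**
 * _dpu_crtc_setup_blend_cfg - configure the blend rule for a plane's stage
 * @mixer: Mixer to program
 * @pstate: Plane state of the staged plane
 * @format: Format of the plane's framebuffer
 *
 * Formats without an alpha channel use opaque (constant alpha) blending;
 * formats with alpha use per-pixel coverage blending.
 */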
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	struct drm_format_name_buf format_name;

	/* default to opaque blending */
	blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
		DPU_BLEND_BG_ALPHA_BG_CONST;

	if (format->alpha_enable) {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_INV_ALPHA;
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				0xFF, 0, blend_op);

	DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
		drm_get_format_name(format->base.pixel_format, &format_name),
		format->alpha_enable, blend_op);
}

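/**
 * _dpu_crtc_program_lm_output_roi - program each mixer's output rectangle
 * @crtc: Pointer to drm crtc structure
 *
 * Walks the visible mixer ROIs and hands every mixer its width, height and
 * left-to-right position within the (possibly split) display.
 */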
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	dpu_crtc = to_dpu_crtc(crtc);
	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

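/**
 * _dpu_crtc_blend_setup_mixer - build stage and blend config from the planes
 * @crtc: Pointer to drm crtc structure
 * @dpu_crtc: Pointer to dpu crtc structure
 * @mixer: Mixer array that the staged planes are blended on
 *
 * Populates the shared stage_cfg from every staged plane and accumulates
 * each mixer's blend op mode and flush mask.
 */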
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);

		DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DPU_DEBUG("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
			DPU_ERROR("invalid lm or ctl assigned to mixer\n");
			return;
		}
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * @crtc: Pointer to drm crtc structure
 *
 * Pending vblank events are normally added to the vblank_event_list so that
 * the next vblank interrupt signals them. PAGE_FLIP events, however, are not
 * handled through that list: this function signals any pending PAGE_FLIP
 * event that was requested through DRM_IOCTL_MODE_PAGE_FLIP and cached in
 * dpu_crtc->event.
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	if (!crtc || !crtc->dev) {
		DPU_ERROR("invalid crtc\n");
		return INTF_MODE_NONE;
	}

	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;
	_dpu_crtc_complete_flip(crtc);
	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

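/*
 * Release the crtc's allocated bandwidth while holding the modeset locks.
 * DRM_MODESET_LOCK_ALL_BEGIN/END retry internally on deadlock, so a non-zero
 * ret here means the locks could not be acquired at all.
 */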
static void dpu_crtc_release_bw_unlocked(struct drm_crtc *crtc)
{
	int ret = 0;
	struct drm_modeset_acquire_ctx ctx;

	DRM_MODESET_LOCK_ALL_BEGIN(crtc->dev, ctx, 0, ret);
	dpu_core_perf_crtc_release_bw(crtc);
	DRM_MODESET_LOCK_ALL_END(ctx, ret);
	if (ret)
		DRM_ERROR("Failed to acquire modeset locks to release bw, %d\n",
			  ret);
}

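/*
 * Worker that runs in the crtc's event kthread for each frame event queued
 * by dpu_crtc_frame_event_cb(). It retires one pending frame (releasing
 * bandwidth once the pending count drops to zero), completes frame_done
 * waiters, and returns the event object to the free list.
 */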
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* this should not happen */
			DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
					crtc->base.id,
					fevent->event,
					ktime_to_ns(fevent->ts),
					atomic_read(&dpu_crtc->frame_pending));
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_crtc_release_bw_unlocked(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
			dpu_core_perf_crtc_update(crtc, 0, false);

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. The CRTC module
 * registers this callback with the encoder for all frame event callbacks
 * such as frame_error, frame_done and idle_timeout. The encoder may invoke
 * it from different contexts - IRQ, user thread, commit_thread, etc. Each
 * event should therefore be carefully reviewed and processed in the proper
 * task context to avoid scheduling delay or to properly manage the irq
 * context's bottom-half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	trace_dpu_crtc_complete_commit(DRMID(crtc));
}

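/*
 * Append the layer mixers and CTL blocks that the resource manager reserved
 * for this encoder to the crtc state. When fewer CTLs than mixers were
 * reserved, the last valid CTL also drives the remaining mixers.
 */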
static void _dpu_crtc_setup_mixer_for_encoder(
		struct drm_crtc *crtc,
		struct drm_encoder *enc)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_rm *rm = &dpu_kms->rm;
	struct dpu_crtc_mixer *mixer;
	struct dpu_hw_ctl *last_valid_ctl = NULL;
	int i;
	struct dpu_rm_hw_iter lm_iter, ctl_iter;

	dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
	dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);

	/* Set up all the mixers and ctls reserved by this encoder */
	for (i = cstate->num_mixers; i < ARRAY_SIZE(cstate->mixers); i++) {
		mixer = &cstate->mixers[i];

		if (!dpu_rm_get_hw(rm, &lm_iter))
			break;
		mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;

		/* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
		if (!dpu_rm_get_hw(rm, &ctl_iter)) {
			DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
					mixer->hw_lm->idx - LM_0);
			mixer->lm_ctl = last_valid_ctl;
		} else {
			mixer->lm_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
			last_valid_ctl = mixer->lm_ctl;
		}

		/* Shouldn't happen, mixers are always >= ctls */
		if (!mixer->lm_ctl) {
			DPU_ERROR("no valid ctls found for lm %d\n",
					mixer->hw_lm->idx - LM_0);
			return;
		}

		cstate->num_mixers++;
		DPU_DEBUG("setup mixer %d: lm %d\n",
				i, mixer->hw_lm->idx - LM_0);
		DPU_DEBUG("setup mixer %d: ctl %d\n",
				i, mixer->lm_ctl->idx - CTL_0);
	}
}

static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
{
	struct drm_encoder *enc;

	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* Check for mixers on all encoders attached to this crtc */
	drm_for_each_encoder_mask(enc, crtc->dev, crtc->state->encoder_mask)
		_dpu_crtc_setup_mixer_for_encoder(crtc, enc);
}

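/*
 * Split the adjusted mode evenly across the allocated layer mixers: each
 * mixer gets an equal-width vertical strip of the display as its bounds.
 */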
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}

	drm_mode_debug_printmodeline(adj_mode);
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_encoder *encoder;
	struct drm_device *dev;
	unsigned long flags;
	struct dpu_crtc_smmu_state_data *smmu_state;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	smmu_state = &dpu_crtc->smmu_state;

	if (!cstate->num_mixers) {
		_dpu_crtc_setup_mixers(crtc);
		_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
	}

	if (dpu_crtc->event) {
		WARN_ON(dpu_crtc->event);
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}

static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	struct msm_drm_thread *event_thread;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	event_thread = &priv->event_thread[crtc->index];

	if (dpu_crtc->event) {
		DPU_DEBUG("already received dpu_crtc->event\n");
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/*
	 * For planes without a commit update, the drm framework will not add
	 * those planes to the current state since no hardware update is
	 * required. However, if those planes were power collapsed since the
	 * last commit cycle, the driver has to restore their hardware state
	 * explicitly here prior to plane flush.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc)
		dpu_plane_restore(plane);

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 *                      required writes/flushing before crtc's "flush
	 *                      everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;

	if (!crtc || !state) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

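/*
 * Wait for the encoder to signal frame_done for the in-flight commit.
 * Returns 0 immediately if no frame is pending, otherwise 0 on completion
 * or -ETIMEDOUT if nothing arrives within DPU_FRAME_DONE_TIMEOUT.
 */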
static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DPU_DEBUG("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

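/**
 * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
 * @crtc: Pointer to drm crtc structure
 * @async: true if this is an asynchronous commit, which skips the frame_done
 *	   wait and the frame_pending accounting
 */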
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	int ret;

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder, async);

	if (!async) {
		/* wait for frame_event_done completion */
		DPU_ATRACE_BEGIN("wait_for_frame_done_event");
		ret = _dpu_crtc_wait_for_frame_done(crtc);
		DPU_ATRACE_END("wait_for_frame_done_event");
		if (ret) {
			DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
					crtc->base.id,
					atomic_read(&dpu_crtc->frame_pending));
			goto end;
		}

		if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
			/* acquire bandwidth and other resources */
			DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
		} else
			DPU_DEBUG("crtc%d commit\n", crtc->base.id);

		dpu_crtc->play_count++;
	}

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder, async);

end:
	if (!async)
		reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}

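/*
 * Reset hook: free any existing crtc state and install a fresh, zeroed
 * dpu_crtc_state so the next atomic commit starts from a clean slate.
 */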
static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate;

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	crtc->state = kzalloc(sizeof(*cstate), GFP_KERNEL);
	if (crtc->state)
		crtc->state->crtc = crtc;
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 *
 * Returns: Pointer to new drm_crtc_state structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate, *old_cstate;

	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid argument(s)\n");
		return NULL;
	}

	dpu_crtc = to_dpu_crtc(crtc);
	old_cstate = to_dpu_crtc_state(crtc->state);
	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_disable(struct drm_crtc *crtc,
			     struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_display_mode *mode;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;
	unsigned long flags;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	mode = &cstate->base.adjusted_mode;
	priv = crtc->dev->dev_private;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, NULL);

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				     atomic_read(&dpu_crtc->frame_pending));
		dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	priv = crtc->dev->dev_private;

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
	dpu_crtc = to_dpu_crtc(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, crtc);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

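/* bookkeeping for each plane staged on the crtc during atomic_check */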
struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct plane_state *pstates;
	struct dpu_crtc_state *cstate;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}

	pstates = kcalloc(DPU_STAGE_MAX * 4, sizeof(*pstates), GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	if (!state->enable || !state->active) {
		DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, state->enable, state->active);
		goto end;
	}

	mode = &state->adjusted_mode;
	DPU_DEBUG("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (state->active_changed)
		state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	mixer_width = mode->hdisplay / cstate->num_mixers;

	_dpu_crtc_setup_lm_bounds(crtc, state);

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
					DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	rc = dpu_core_perf_crtc_check(crtc, state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/*
	 * Validate source split:
	 * use pstates sorted by stage to check planes on the same stage;
	 * we assume that all pipes are in source split, so it is valid to
	 * compare without taking left/right mixer placement into account.
	 */
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}

int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		if (!m->hw_lm)
			seq_printf(s, "\tmixer[%d] has no lm\n", i);
		else if (!m->lm_ctl)
			seq_printf(s, "\tmixer[%d] has no ctl\n", i);
		else
			seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
				m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
				out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, _dpu_debugfs_status_show, inode->i_private);
}

#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                          \
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int i;

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
			i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
		seq_printf(s, "bw_ctl[%d]: %llu\n", i,
				dpu_crtc->cur_perf.bw_ctl[i]);
		seq_printf(s, "max_per_pipe_ib[%d]: %llu\n", i,
				dpu_crtc->cur_perf.max_per_pipe_ib[i]);
	}

	return 0;
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	static const struct file_operations debugfs_status_fops = {
		.open =		_dpu_debugfs_status_open,
		.read =		seq_read,
		.llseek =	seq_lseek,
		.release =	single_release,
	};

	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);
	if (!dpu_crtc->debugfs_root)
		return -ENOMEM;

	/* don't error check these */
	debugfs_create_file("status", 0400,
			dpu_crtc->debugfs_root,
			dpu_crtc, &debugfs_status_fops);
	debugfs_create_file("state", 0600,
			dpu_crtc->debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_remove_recursive(dpu_crtc->debugfs_root);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
};

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	struct msm_drm_private *priv = NULL;
	struct dpu_kms *kms = NULL;
	int i;

	priv = dev->dev_private;
	kms = to_dpu_kms(priv->kms);

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}