// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/bits.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

#define DPU_DRM_BLEND_OP_NOT_DEFINED    0
#define DPU_DRM_BLEND_OP_OPAQUE         1
#define DPU_DRM_BLEND_OP_PREMULTIPLIED  2
#define DPU_DRM_BLEND_OP_COVERAGE       3
#define DPU_DRM_BLEND_OP_MAX            4

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60

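/*
 * Convert a DRM CTM coefficient, documented in the uAPI as S31.32
 * sign-magnitude fixed point, into the 18-bit value the PCC block takes:
 * strip the sign bit (bit 63), then drop the 17 least significant
 * fraction bits. Going by the macro name, the result is presumably a
 * 3.15 fixed-point magnitude; e.g. a coefficient of 1.0 (1ULL << 32)
 * converts to ((1ULL << 32) >> 17) & GENMASK(17, 0) == 0x8000.
 */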
#define	CONVERT_S3_15(val) \
	(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))

static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;

	DPU_DEBUG("\n");

	/* validate the crtc before looking up the dpu_crtc it embeds */
	if (!crtc)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	struct drm_format_name_buf format_name;

	/* default to opaque blending */
	blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
		DPU_BLEND_BG_ALPHA_BG_CONST;

	if (format->alpha_enable) {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_INV_ALPHA;
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				0xFF, 0, blend_op);

	DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
		drm_get_format_name(format->base.pixel_format, &format_name),
		format->alpha_enable, blend_op);
}

static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);

		DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DPU_DEBUG("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * @crtc: Pointer to drm crtc structure
 *
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events that were requested through
 * DRM_IOCTL_MODE_PAGE_FLIP and are cached in dpu_crtc->event.
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

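/**
 * dpu_crtc_get_intf_mode - query the interface mode used by this crtc
 * @crtc: Pointer to drm crtc structure
 *
 * Return: intf mode of the first encoder attached to the crtc, or
 * INTF_MODE_NONE if the crtc is invalid or has no encoder
 */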
enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return INTF_MODE_NONE;
	}

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how the atomic/modeset locks work (<- and this is
	 * probably not possible)). So we'll keep the WARN_ON here for now, but
	 * really we need to figure out a better way to track our operating mode
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

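/**
 * dpu_crtc_vblank_callback - called on every vblank irq to update the
 *	vblank statistics and to signal any pending page flip event
 * @crtc: Pointer to drm crtc structure
 */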
void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;
	_dpu_crtc_complete_flip(crtc);
	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
			dpu_core_perf_crtc_update(crtc, 0, false);

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
 * registers this API to encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. Encoder may invoke this
 * callback from different contexts - IRQ, user thread, commit_thread, etc.
 * Each event should be carefully reviewed and processed in the proper task
 * context to avoid scheduling delay or to properly manage the irq context's
 * bottom half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}

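/**
 * dpu_crtc_complete_commit - commit-completion hook; currently it only
 *	emits a trace event
 * @crtc: Pointer to drm crtc structure
 */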
void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
}

static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}

	drm_mode_debug_printmodeline(adj_mode);
}

static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
		struct dpu_hw_pcc_cfg *cfg)
{
	struct drm_color_ctm *ctm;

	memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));

	ctm = (struct drm_color_ctm *)state->ctm->data;

	if (!ctm)
		return;

	/*
	 * drm_color_ctm is a row-major 3x3 matrix in S31.32 sign-magnitude
	 * fixed point: matrix[0..2] is the red output row and maps to
	 * cfg->r.r/cfg->g.r/cfg->b.r, and likewise for green and blue.
	 */
	cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
	cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
	cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);

	cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
	cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
	cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);

	cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
	cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
	cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
}

static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state = crtc->state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_pcc_cfg cfg;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	struct dpu_hw_dspp *dspp;
	int i;

	if (!state->color_mgmt_changed)
		return;

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;
		dspp = mixer[i].hw_dspp;

		if (!dspp || !dspp->ops.setup_pcc)
			continue;

		if (!state->ctm) {
			dspp->ops.setup_pcc(dspp, NULL);
		} else {
			_dpu_crtc_get_pcc_coeff(state, &cfg);
			dspp->ops.setup_pcc(dspp, &cfg);
		}

		mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
			mixer[i].hw_dspp->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);
	}
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_encoder *encoder;
	struct drm_device *dev;
	unsigned long flags;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	if (dpu_crtc->event) {
		WARN_ON(dpu_crtc->event);
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	_dpu_crtc_setup_cp_blocks(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}

static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	if (dpu_crtc->event) {
		DPU_DEBUG("already received dpu_crtc->event\n");
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/*
	 * For planes without commit update, drm framework will not add
	 * those planes to current state since hardware update is not
	 * required. However, if those planes were power collapsed since
	 * last commit cycle, driver has to restore the hardware state
	 * of those planes explicitly here prior to plane flush.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc)
		dpu_plane_restore(plane);

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 *                      required writes/flushing before crtc's "flush
	 *                      everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate;

	if (!crtc || !state) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	cstate = to_dpu_crtc_state(state);

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DPU_DEBUG("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

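/**
 * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
 * @crtc: Pointer to drm crtc structure
 */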
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder);

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
	} else {
		DPU_DEBUG("crtc%d commit\n", crtc->base.id);
	}

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder);

	reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	/* the reset helper tolerates a NULL state if the allocation failed */
	if (cstate)
		__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate, *old_cstate;

	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid argument(s)\n");
		return NULL;
	}

	old_cstate = to_dpu_crtc_state(crtc->state);
	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_disable(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_encoder *encoder;
	unsigned long flags;
	bool release_bandwidth = false;

	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;
		dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				     atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_encoder *encoder;
	bool request_bandwidth = false;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
	dpu_crtc = to_dpu_crtc(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			request_bandwidth = true;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	if (request_bandwidth)
		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, crtc);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

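/**
 * struct plane_state - scratch bookkeeping used during atomic check
 * @dpu_pstate: dpu plane state backing @drm_pstate
 * @drm_pstate: drm plane state being validated
 * @stage: blend stage, taken from the plane's normalized zpos
 * @pipe_id: SSPP pipe feeding the plane
 */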
struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct plane_state *pstates;
	struct dpu_crtc_state *cstate;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}

	pstates = kcalloc(DPU_STAGE_MAX * 4, sizeof(*pstates), GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	if (!state->enable || !state->active) {
		DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, state->enable, state->active);
		goto end;
	}

	mode = &state->adjusted_mode;
	DPU_DEBUG("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (state->active_changed)
		state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	mixer_width = mode->hdisplay / cstate->num_mixers;

	_dpu_crtc_setup_lm_bounds(crtc, state);

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
					DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/* validate source split:
	 * use pstates sorted by stage to check planes on same stage
	 * we assume that all pipes are in source split so it's valid to
	 * compare without taking into account left/right mixer placement
	 */
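	/*
	 * For example, two planes sharing a stage with destinations
	 * (0,0)-(960,1080) and (960,0)-(1920,1080) form a valid split:
	 * contiguous in x, same y1 and height, and the left plane must
	 * carry the lower drm id.
	 */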
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}

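/**
 * dpu_crtc_vblank - enable or disable vblank irq delivery for this crtc
 * @crtc: Pointer to drm crtc structure
 * @en: true to enable vblank irqs, false to disable them
 *
 * Return: always 0
 */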
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
			m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
			out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%lldms total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, _dpu_debugfs_status_show, inode->i_private);
}

#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                          \
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}
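
/*
 * DEFINE_DPU_DEBUGFS_SEQ_FOPS(foo) pairs with a foo_show() seq_file
 * handler and emits foo_open() plus the foo_fops file_operations that
 * get passed to debugfs_create_file(); see dpu_crtc_debugfs_state below.
 */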

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
	seq_printf(s, "max_per_pipe_ib: %llu\n",
				dpu_crtc->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	static const struct file_operations debugfs_status_fops = {
		.open =		_dpu_debugfs_status_open,
		.read =		seq_read,
		.llseek =	seq_lseek,
		.release =	single_release,
	};

	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);

	debugfs_create_file("status", 0400,
			dpu_crtc->debugfs_root,
			dpu_crtc, &debugfs_status_fops);
	debugfs_create_file("state", 0600,
			dpu_crtc->debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_remove_recursive(dpu_crtc->debugfs_root);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
	.enable_vblank  = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
};

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	int i;

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	drm_crtc_enable_color_mgmt(crtc, 0, true, 0);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}