// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/bits.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

#define DPU_DRM_BLEND_OP_NOT_DEFINED    0
#define DPU_DRM_BLEND_OP_OPAQUE         1
#define DPU_DRM_BLEND_OP_PREMULTIPLIED  2
#define DPU_DRM_BLEND_OP_COVERAGE       3
#define DPU_DRM_BLEND_OP_MAX            4

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60

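/*
 * drm_color_ctm entries are S31.32 sign-magnitude fixed point with the
 * sign in bit 63. Drop the sign bit, shift away 17 of the 32 fractional
 * bits (leaving 15) and mask to 18 bits for the DSPP PCC block. For
 * example, a coefficient of 1.0 (1ULL << 32) converts to 1 << 15 == 0x8000.
 */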
#define	CONVERT_S3_15(val) \
	(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))

static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;

	DPU_DEBUG("\n");

	if (!crtc)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

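/*
 * Program the blend rule for one plane on one layer mixer: constant
 * (opaque) alpha by default, or per-pixel foreground alpha with inverted
 * background alpha ("coverage") when the plane's format carries an alpha
 * channel.
 */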
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;

	/* default to opaque blending */
	blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
		DPU_BLEND_BG_ALPHA_BG_CONST;

	if (format->alpha_enable) {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_INV_ALPHA;
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				0xFF, 0, blend_op);

	DPU_DEBUG("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
		  &format->base.pixel_format, format->alpha_enable, blend_op);
}

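/*
 * Program each layer mixer's output rectangle. In a multi-mixer (source
 * split) configuration every LM owns one horizontal slice of the display,
 * enumerated left to right via lm_horiz_position.
 */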
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

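/*
 * Walk the planes attached to the CRTC and build the mixer stage
 * configuration: each plane's SSPP (and multirect index) is recorded at
 * its blend stage, and the per-mixer blend rule and flush mask are
 * updated to match.
 */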
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);

		DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DPU_DEBUG("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}

236 
237 /**
238  *  _dpu_crtc_complete_flip - signal pending page_flip events
239  * Any pending vblank events are added to the vblank_event_list
240  * so that the next vblank interrupt shall signal them.
241  * However PAGE_FLIP events are not handled through the vblank_event_list.
242  * This API signals any pending PAGE_FLIP events requested through
243  * DRM_IOCTL_MODE_PAGE_FLIP and are cached in the dpu_crtc->event.
244  * @crtc: Pointer to drm crtc structure
245  */
246 static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
247 {
248 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
249 	struct drm_device *dev = crtc->dev;
250 	unsigned long flags;
251 
252 	spin_lock_irqsave(&dev->event_lock, flags);
253 	if (dpu_crtc->event) {
254 		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
255 			      dpu_crtc->event);
256 		trace_dpu_crtc_complete_flip(DRMID(crtc));
257 		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
258 		dpu_crtc->event = NULL;
259 	}
260 	spin_unlock_irqrestore(&dev->event_lock, flags);
261 }
262 
enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how the atomic/modeset locks work (<- and this is
	 * probably not possible)). So we'll keep the WARN_ON here for now, but
	 * really we need to figure out a better way to track our operating mode
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;
	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
			dpu_core_perf_crtc_update(crtc, 0, false);

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
 * registers this API to encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. Encoder may call different events
 * from different context - IRQ, user thread, commit_thread, etc. Each event
 * should be carefully reviewed and should be processed in proper task context
 * to avoid scheduling delay or to properly manage the irq context's bottom
 * half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
	_dpu_crtc_complete_flip(crtc);
}

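/*
 * Split the adjusted mode evenly across the allocated layer mixers and
 * record each slice in cstate->lm_bounds. With two mixers on a 1920x1080
 * mode, for instance, each LM is bound to a 960x1080 rectangle.
 */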
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}
}

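/*
 * Convert the 3x3 drm_color_ctm attached to the CRTC state into the
 * DSPP PCC block's coefficient layout, one CONVERT_S3_15() entry per
 * matrix element.
 */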
static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
		struct dpu_hw_pcc_cfg *cfg)
{
	struct drm_color_ctm *ctm;

	memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));

	ctm = (struct drm_color_ctm *)state->ctm->data;

	if (!ctm)
		return;

	cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
	cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
	cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);

	cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
	cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
	cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);

	cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
	cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
	cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
}

static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state = crtc->state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_pcc_cfg cfg;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_dspp *dspp;
	int i;

	if (!state->color_mgmt_changed)
		return;

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		dspp = mixer[i].hw_dspp;

		if (!dspp || !dspp->ops.setup_pcc)
			continue;

		if (!state->ctm) {
			dspp->ops.setup_pcc(dspp, NULL);
		} else {
			_dpu_crtc_get_pcc_coeff(state, &cfg);
			dspp->ops.setup_pcc(dspp, &cfg);
		}

		mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
			mixer[i].hw_dspp->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);
	}
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	_dpu_crtc_setup_cp_blocks(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}

static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	WARN_ON(dpu_crtc->event);
	spin_lock_irqsave(&dev->event_lock, flags);
	dpu_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/*
	 * For planes without commit update, the drm framework will not add
	 * those planes to the current state since hardware update is not
	 * required. However, if those planes were power collapsed since
	 * the last commit cycle, the driver has to restore the hardware state
	 * of those planes explicitly here prior to plane flush.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc)
		dpu_plane_restore(plane, state);

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 *                      required writes/flushing before crtc's "flush
	 *                      everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

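/*
 * Wait for the encoder to signal frame-done for the most recent kickoff,
 * bounded by DPU_CRTC_FRAME_DONE_TIMEOUT_MS. The completion is signalled
 * from dpu_crtc_frame_event_work() when a frame-done or frame-error event
 * arrives.
 */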
static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DPU_DEBUG("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

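/*
 * Kick off the frame that atomic_flush just programmed: give each encoder
 * a chance to prepare, bump frame_pending for bandwidth accounting, clear
 * stale VBIF errors, then trigger the HW flush/start via
 * dpu_encoder_kickoff().
 */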
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder);

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
	} else {
		DPU_DEBUG("crtc%d commit\n", crtc->base.id);
	}

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder);

	reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	if (cstate)
		__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);

	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_disable(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;
	unsigned long flags;
	bool release_bandwidth = false;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;
		dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				     atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *encoder;
	bool request_bandwidth = false;

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			request_bandwidth = true;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	if (request_bandwidth)
		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, crtc);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

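/*
 * Scratch record used by dpu_crtc_atomic_check() to pair each DRM plane
 * state with its DPU plane state, blend stage and SSPP id while checking
 * z-order, multirect and source-split constraints.
 */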
struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);
	struct plane_state *pstates;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };

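	/*
	 * Worst-case bookkeeping: DPU_STAGE_MAX blend stages with up to four
	 * staged planes each (two per stage on the left half, two on the
	 * right), matching the left/right_zpos_cnt checks below.
	 */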
	pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	if (!crtc_state->enable || !crtc_state->active) {
		DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, crtc_state->enable,
				crtc_state->active);
		goto end;
	}

	mode = &crtc_state->adjusted_mode;
	DPU_DEBUG("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (crtc_state->active_changed)
		crtc_state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	if (cstate->num_mixers) {
		mixer_width = mode->hdisplay / cstate->num_mixers;

		_dpu_crtc_setup_lm_bounds(crtc, crtc_state);
	}

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
					DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, crtc_state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/* validate source split:
	 * use pstates sorted by stage to check planes on same stage
	 * we assume that all pipes are in source split so it's valid to compare
	 * without taking into account left/right mixer placement
	 */
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}

int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
			m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
			out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%lldms total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
	seq_printf(s, "max_per_pipe_ib: %llu\n",
				dpu_crtc->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);

	debugfs_create_file("status", 0400,
			dpu_crtc->debugfs_root,
			dpu_crtc, &_dpu_debugfs_status_fops);
	debugfs_create_file("state", 0600,
			dpu_crtc->debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_remove_recursive(dpu_crtc->debugfs_root);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
	.enable_vblank  = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
};

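/*
 * Typical call site sketch (hypothetical caller, assuming the KMS init
 * path already owns the primary and optional cursor planes):
 *
 *	crtc = dpu_crtc_init(dev, primary, cursor);
 *	if (IS_ERR(crtc))
 *		return PTR_ERR(crtc);
 */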
/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	int i;

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	drm_crtc_enable_color_mgmt(crtc, 0, true, 0);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}