// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>

#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

#define DPU_DRM_BLEND_OP_NOT_DEFINED    0
#define DPU_DRM_BLEND_OP_OPAQUE         1
#define DPU_DRM_BLEND_OP_PREMULTIPLIED  2
#define DPU_DRM_BLEND_OP_COVERAGE       3
#define DPU_DRM_BLEND_OP_MAX            4

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60

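/* Look up the dpu_kms instance that owns this CRTC through its drm_device. */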
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;

	if (!crtc)
		return;

	dpu_crtc = to_dpu_crtc(crtc);

	DPU_DEBUG("\n");

	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

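/**
 * _dpu_crtc_setup_blend_cfg - configure the blend rule for one staged plane
 * @mixer: Pointer to the crtc mixer being programmed
 * @pstate: Plane state carrying the blend stage of the plane
 * @format: Format of the plane's framebuffer
 *
 * Formats without an alpha channel are blended opaquely with constant
 * foreground/background alpha; formats with alpha use per-pixel coverage
 * blending.
 */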
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	struct drm_format_name_buf format_name;

	/* default to opaque blending */
	blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
		DPU_BLEND_BG_ALPHA_BG_CONST;

	if (format->alpha_enable) {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_INV_ALPHA;
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				0xFF, 0, blend_op);

	DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
		drm_get_format_name(format->base.pixel_format, &format_name),
		format->alpha_enable, blend_op);
}

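/*
 * Program each layer mixer's output rectangle from the per-mixer bounds
 * computed in _dpu_crtc_setup_lm_bounds(). Mixers are numbered left to
 * right across the CRTC.
 */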
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

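/*
 * Stage all planes attached to the crtc: record the pipe and multirect
 * index per blend stage, accumulate each mixer's flush mask and program
 * the blend configuration for every mixer the crtc owns.
 */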
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);

		DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DPU_DEBUG("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * @crtc: Pointer to drm crtc structure
 *
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events requested through
 * DRM_IOCTL_MODE_PAGE_FLIP that are cached in dpu_crtc->event.
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return INTF_MODE_NONE;
	}

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how the atomic/modeset locks work (<- and this is
	 * probably not possible)). So we'll keep the WARN_ON here for now, but
	 * really we need to figure out a better way to track our operating mode
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

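/*
 * Vblank interrupt path: update the vblank statistics exposed via debugfs,
 * signal any pending page-flip event and forward the vblank to the core.
 */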
void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;
	_dpu_crtc_complete_flip(crtc);
	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

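/*
 * Worker that consumes the frame events queued by dpu_crtc_frame_event_cb().
 * It drops the bandwidth vote once the last pending frame is done, wakes
 * frame-done waiters and returns the event node to the free list.
 */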
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
			dpu_core_perf_crtc_update(crtc, 0, false);

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
 * registers this API with the encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. The encoder may invoke these
 * events from different contexts - IRQ, user thread, commit_thread, etc.
 * Each event should be carefully reviewed and should be processed in the
 * proper task context to avoid scheduling delay or to properly manage the
 * irq context's bottom half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
}

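/* Commit-complete hook; currently only emits a trace event. */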
void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
}

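/*
 * Split the adjusted mode horizontally into equal-width strips, one per
 * layer mixer, and store the resulting bounds in the crtc state.
 */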
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}

	drm_mode_debug_printmodeline(adj_mode);
}

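/*
 * Begin an atomic commit on the crtc: latch the pending event, let the
 * encoders arm their pending-kickoff handling and program the mixer
 * blend setup for the new state.
 */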
static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_encoder *encoder;
	struct drm_device *dev;
	unsigned long flags;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	if (dpu_crtc->event) {
		WARN_ON(dpu_crtc->event);
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}

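/*
 * Flush the new state to hardware: restore planes that may have been
 * power collapsed, update the performance votes and flush every plane.
 * The actual kickoff is issued later from dpu_crtc_commit_kickoff().
 */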
static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	if (dpu_crtc->event) {
		DPU_DEBUG("already received dpu_crtc->event\n");
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/*
	 * For planes without commit update, drm framework will not add
	 * those planes to current state since hardware update is not
	 * required. However, if those planes were power collapsed since
	 * last commit cycle, driver has to restore the hardware state
	 * of those planes explicitly here prior to plane flush.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc)
		dpu_plane_restore(plane);

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 *                      required writes/flushing before crtc's "flush
	 *                      everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate;

	if (!crtc || !state) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	cstate = to_dpu_crtc_state(state);

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

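/*
 * Wait up to DPU_CRTC_FRAME_DONE_TIMEOUT_MS for the frame-done completion
 * when frames are still pending; returns -ETIMEDOUT if the wait times out.
 */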
static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DPU_DEBUG("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

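/*
 * Kick off the committed frame: have each encoder prepare and start the
 * transfer, account for the newly pending frame and clear stale VBIF
 * errors before the kickoff.
 */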
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder);

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
	} else {
		DPU_DEBUG("crtc%d commit\n", crtc->base.id);
	}

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder);

	reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}

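/* Reset hook: free the current software state and install a fresh one. */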
static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	if (cstate)
		__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 *
 * Return: Pointer to new drm_crtc_state structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate, *old_cstate;

	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid argument(s)\n");
		return NULL;
	}

	old_cstate = to_dpu_crtc_state(crtc->state);
	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

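/*
 * Disable path: detach the encoders, drain pending frames, drop the
 * bandwidth votes and clear the mixer assignment before releasing the
 * runtime PM reference taken in dpu_crtc_enable().
 */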
static void dpu_crtc_disable(struct drm_crtc *crtc,
			     struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_encoder *encoder;
	unsigned long flags;
	bool release_bandwidth = false;

	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;
		dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				     atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

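/*
 * Enable path: take a runtime PM reference, register for encoder frame
 * events, attach the encoders and re-enable vblank handling.
 */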
static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_encoder *encoder;
	bool request_bandwidth = false;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
	dpu_crtc = to_dpu_crtc(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			request_bandwidth = true;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	if (request_bandwidth)
		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, crtc);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

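/* Per-plane bookkeeping used by dpu_crtc_atomic_check() while validating. */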
struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

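/*
 * Validate the proposed crtc state: collect the staged plane states,
 * clip destinations against the crtc, enforce multirect and per-stage
 * plane limits, run the core performance check and verify source-split
 * placement constraints.
 */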
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct plane_state *pstates;
	struct dpu_crtc_state *cstate;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}

	pstates = kcalloc(DPU_STAGE_MAX * 4, sizeof(*pstates), GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	if (!state->enable || !state->active) {
		DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, state->enable, state->active);
		goto end;
	}

	mode = &state->adjusted_mode;
	DPU_DEBUG("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (state->active_changed)
		state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	mixer_width = mode->hdisplay / cstate->num_mixers;

	_dpu_crtc_setup_lm_bounds(crtc, state);

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
					DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/* validate source split:
	 * use pstates sorted by stage to check planes on same stage
	 * we assume that all pipes are in source split so it's valid to compare
	 * without taking into account left/right mixer placement
	 */
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}

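/*
 * Enable or disable vblank irqs by toggling each encoder currently
 * assigned to this crtc; see the comment in the body for why
 * encoder_mask cannot be used here.
 */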
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
			m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
			out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, _dpu_debugfs_status_show, inode->i_private);
}

1192 
1193 #define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                          \
1194 static int __prefix ## _open(struct inode *inode, struct file *file)	\
1195 {									\
1196 	return single_open(file, __prefix ## _show, inode->i_private);	\
1197 }									\
1198 static const struct file_operations __prefix ## _fops = {		\
1199 	.owner = THIS_MODULE,						\
1200 	.open = __prefix ## _open,					\
1201 	.release = single_release,					\
1202 	.read = seq_read,						\
1203 	.llseek = seq_lseek,						\
1204 }
1205 
1206 static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
1207 {
1208 	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
1209 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1210 
1211 	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
1212 	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
1213 	seq_printf(s, "core_clk_rate: %llu\n",
1214 			dpu_crtc->cur_perf.core_clk_rate);
1215 	seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
1216 	seq_printf(s, "max_per_pipe_ib: %llu\n",
1217 				dpu_crtc->cur_perf.max_per_pipe_ib);
1218 
1219 	return 0;
1220 }
1221 DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
1222 
1223 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
1224 {
1225 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1226 
1227 	static const struct file_operations debugfs_status_fops = {
1228 		.open =		_dpu_debugfs_status_open,
1229 		.read =		seq_read,
1230 		.llseek =	seq_lseek,
1231 		.release =	single_release,
1232 	};
1233 
1234 	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
1235 			crtc->dev->primary->debugfs_root);
1236 
1237 	debugfs_create_file("status", 0400,
1238 			dpu_crtc->debugfs_root,
1239 			dpu_crtc, &debugfs_status_fops);
1240 	debugfs_create_file("state", 0600,
1241 			dpu_crtc->debugfs_root,
1242 			&dpu_crtc->base,
1243 			&dpu_crtc_debugfs_state_fops);
1244 
1245 	return 0;
1246 }
1247 #else
1248 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
1249 {
1250 	return 0;
1251 }
1252 #endif /* CONFIG_DEBUG_FS */
1253 
static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_remove_recursive(dpu_crtc->debugfs_root);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
};

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	int i;

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}