// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>

#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

#define DPU_DRM_BLEND_OP_NOT_DEFINED    0
#define DPU_DRM_BLEND_OP_OPAQUE         1
#define DPU_DRM_BLEND_OP_PREMULTIPLIED  2
#define DPU_DRM_BLEND_OP_COVERAGE       3
#define DPU_DRM_BLEND_OP_MAX            4

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60
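/*
 * This bounds the wait in _dpu_crtc_wait_for_frame_done() below, e.g.:
 *
 *	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
 *			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
 *
 * where a timeout surfaces as -ETIMEDOUT in the CRTC disable path.
 */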

static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;

	DPU_DEBUG("\n");

	if (!crtc)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	struct drm_format_name_buf format_name;

	/* default to opaque blending */
	blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
		DPU_BLEND_BG_ALPHA_BG_CONST;

	if (format->alpha_enable) {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_INV_ALPHA;
	}

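	/*
	 * With the constant alphas passed below (fg = 0xFF, bg = 0), the
	 * resulting blend equations are roughly:
	 *   opaque:   out = fg
	 *   coverage: out = fg.alpha * fg + (1 - fg.alpha) * bg
	 */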
	lm->ops.setup_blend_config(lm, pstate->stage,
				0xFF, 0, blend_op);

	DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
		drm_get_format_name(format->base.pixel_format, &format_name),
		format->alpha_enable, blend_op);
}

static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	dpu_crtc = to_dpu_crtc(crtc);
	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
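		/* first visible ROI drives the left mixer (0), the next the right (1) */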
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);

		DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DPU_DEBUG("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
			DPU_ERROR("invalid lm or ctl assigned to mixer\n");
			return;
		}
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * @crtc: Pointer to drm crtc structure
 *
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events requested through
 * DRM_IOCTL_MODE_PAGE_FLIP that are cached in dpu_crtc->event.
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	if (!crtc || !crtc->dev) {
		DPU_ERROR("invalid crtc\n");
		return INTF_MODE_NONE;
	}

	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;
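	/*
	 * The debugfs "status" node later derives an approximate fps from
	 * these counters, roughly fps = vblank_cb_count * 1000 / elapsed_ms;
	 * see _dpu_debugfs_status_show().
	 */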
	_dpu_crtc_complete_flip(crtc);
	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
			dpu_core_perf_crtc_update(crtc, 0, false);

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. The CRTC module
 * registers this callback with the encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. The encoder may invoke it from
 * different contexts - IRQ, user thread, commit_thread, etc. Each event
 * should be carefully reviewed and processed in the proper task context
 * to avoid scheduling delay or to properly manage the IRQ context's
 * bottom-half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}
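	/* fevent came from the pre-allocated pool; the work handler returns
	 * it to frame_event_list in dpu_crtc_frame_event_work().
	 */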

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
}

static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

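	/*
	 * Each mixer covers an equal horizontal slice of the mode, e.g. a
	 * 1920-wide mode split across two mixers yields bounds of
	 * [0, 960) and [960, 1920).
	 */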
	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}

	drm_mode_debug_printmodeline(adj_mode);
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_encoder *encoder;
	struct drm_device *dev;
	unsigned long flags;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	if (dpu_crtc->event) {
		WARN_ON(dpu_crtc->event);
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}

static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	struct msm_drm_thread *event_thread;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	event_thread = &priv->event_thread[crtc->index];

	if (dpu_crtc->event) {
		DPU_DEBUG("already received dpu_crtc->event\n");
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/*
	 * For planes without commit update, drm framework will not add
	 * those planes to current state since hardware update is not
	 * required. However, if those planes were power collapsed since
	 * last commit cycle, driver has to restore the hardware state
	 * of those planes explicitly here prior to plane flush.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc)
		dpu_plane_restore(plane);

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 *                      required writes/flushing before crtc's "flush
	 *                      everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate;

	if (!crtc || !state) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	cstate = to_dpu_crtc_state(state);

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DPU_DEBUG("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder);

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
	} else {
		DPU_DEBUG("crtc%d commit\n", crtc->base.id);
	}

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder);

	reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	/* __drm_atomic_helper_crtc_reset() accepts a NULL state */
	if (cstate)
		__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 *
 * Returns: Pointer to new drm_crtc_state structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate, *old_cstate;

	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid argument(s)\n");
		return NULL;
	}

	old_cstate = to_dpu_crtc_state(crtc->state);
	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_disable(struct drm_crtc *crtc,
			     struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_display_mode *mode;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;
	unsigned long flags;
	bool release_bandwidth = false;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	mode = &cstate->base.adjusted_mode;
	priv = crtc->dev->dev_private;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;
		dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				     atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;
	bool request_bandwidth = false;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	priv = crtc->dev->dev_private;

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
	dpu_crtc = to_dpu_crtc(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			request_bandwidth = true;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	if (request_bandwidth)
		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, crtc);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

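/*
 * Scratch snapshot of a plane's state, used only by dpu_crtc_atomic_check()
 * below to validate per-stage zpos limits and source-split placement.
 */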
struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct plane_state *pstates;
	struct dpu_crtc_state *cstate;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}

	pstates = kcalloc(DPU_STAGE_MAX * 4, sizeof(*pstates), GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	if (!state->enable || !state->active) {
		DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, state->enable, state->active);
		goto end;
	}

	mode = &state->adjusted_mode;
	DPU_DEBUG("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (state->active_changed)
		state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	mixer_width = mode->hdisplay / cstate->num_mixers;

	_dpu_crtc_setup_lm_bounds(crtc, state);

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

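		/*
		 * Two planes backed by the same SSPP pipe are paired into a
		 * multirect candidate (r0/r1) and validated later via
		 * dpu_plane_validate_multirect_v2().
		 */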
		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
					DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/* validate source split:
	 * use pstates sorted by stage to check planes on same stage
	 * we assume that all pipes are in source split so it's valid to
	 * compare without taking into account left/right mixer placement
	 */
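	/*
	 * Example: at one stage, two 960-wide planes covering [0, 960) and
	 * [960, 1920) with equal y offset and height pass; any horizontal
	 * gap or y/height mismatch between them fails with -EINVAL below.
	 */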
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}

int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		if (!m->hw_lm)
			seq_printf(s, "\tmixer[%d] has no lm\n", i);
		else if (!m->lm_ctl)
			seq_printf(s, "\tmixer[%d] has no ctl\n", i);
		else
			seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
				m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
				out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, _dpu_debugfs_status_show, inode->i_private);
}

#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                          \
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}
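/*
 * Example (hypothetical "foo" user): given
 *
 *	static int foo_show(struct seq_file *s, void *v) { ... }
 *	DEFINE_DPU_DEBUGFS_SEQ_FOPS(foo);
 *
 * the macro emits foo_open() and a foo_fops suitable for passing to
 * debugfs_create_file().
 */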

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
	seq_printf(s, "max_per_pipe_ib: %llu\n",
				dpu_crtc->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	static const struct file_operations debugfs_status_fops = {
		.open =		_dpu_debugfs_status_open,
		.read =		seq_read,
		.llseek =	seq_lseek,
		.release =	single_release,
	};

	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);

	debugfs_create_file("status", 0400,
			dpu_crtc->debugfs_root,
			dpu_crtc, &debugfs_status_fops);
	debugfs_create_file("state", 0600,
			dpu_crtc->debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_remove_recursive(dpu_crtc->debugfs_root);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
};

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	int i;

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}