xref: /openbmc/linux/drivers/gpu/drm/msm/msm_atomic.c (revision ddb6e37a50e02736f3c9f1a9f8f873989b22af54)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_atomic_uapi.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_vblank.h>

#include "msm_atomic_trace.h"
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"

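/*
 * Prepare a plane's new framebuffer for display: hook up the fb's
 * implicit fences via drm_gem_plane_helper_prepare_fb() and pin its
 * backing memory into the kms address space.
 */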
int msm_atomic_prepare_fb(struct drm_plane *plane,
			  struct drm_plane_state *new_state)
{
	struct msm_drm_private *priv = plane->dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!new_state->fb)
		return 0;

	drm_gem_plane_helper_prepare_fb(plane, new_state);

	return msm_framebuffer_prepare(new_state->fb, kms->aspace);
}

/*
 * Helpers to control vblanks while we flush.. basically just to ensure
 * that vblank accounting is switched on, so we get valid seqn/timestamp
 * on pageflip events (if requested)
 */

static void vblank_get(struct msm_kms *kms, unsigned crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;
		drm_crtc_vblank_get(crtc);
	}
}

static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;
		drm_crtc_vblank_put(crtc);
	}
}

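/*
 * Per-crtc commit locks serialize the commit path against the async
 * commit worker.  Take them in crtc-index order (with a matching
 * lockdep nesting level) and release them in reverse order.
 */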
static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
	int crtc_index;
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
		crtc_index = drm_crtc_index(crtc);
		mutex_lock_nested(&kms->commit_lock[crtc_index], crtc_index);
	}
}

static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask_reverse(kms->dev, crtc, crtc_mask)
		mutex_unlock(&kms->commit_lock[drm_crtc_index(crtc)]);
}

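/*
 * Deferred flush of a single crtc, run from the pending timer's kthread
 * worker shortly before the expected vsync.  If the crtc's pending bit
 * has already been cleared (a later synchronous commit folded the flush
 * into its own), there is nothing left to do.
 */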
static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
{
	unsigned crtc_mask = BIT(crtc_idx);

	trace_msm_atomic_async_commit_start(crtc_mask);

	lock_crtcs(kms, crtc_mask);

	if (!(kms->pending_crtc_mask & crtc_mask)) {
		unlock_crtcs(kms, crtc_mask);
		goto out;
	}

	kms->pending_crtc_mask &= ~crtc_mask;

	kms->funcs->enable_commit(kms);

	vblank_get(kms, crtc_mask);

	/*
	 * Flush hardware updates:
	 */
	trace_msm_atomic_flush_commit(crtc_mask);
	kms->funcs->flush_commit(kms, crtc_mask);

	/*
	 * Wait for flush to complete:
	 */
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	vblank_put(kms, crtc_mask);

	kms->funcs->complete_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);
	kms->funcs->disable_commit(kms);

out:
	trace_msm_atomic_async_commit_finish(crtc_mask);
}

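/* hrtimer work, runs on the per-crtc kthread worker when the timer fires: */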
static void msm_atomic_pending_work(struct kthread_work *work)
{
	struct msm_pending_timer *timer = container_of(work,
			struct msm_pending_timer, work.work);

	msm_atomic_async_commit(timer->kms, timer->crtc_idx);
}

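/*
 * Set up the per-crtc machinery used to defer async flushes: a dedicated
 * SCHED_FIFO kthread worker plus an hrtimer-driven work item that runs
 * msm_atomic_pending_work() when the pending timer expires.
 */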
int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
		struct msm_kms *kms, int crtc_idx)
{
	timer->kms = kms;
	timer->crtc_idx = crtc_idx;

	timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
	if (IS_ERR(timer->worker)) {
		int ret = PTR_ERR(timer->worker);
		timer->worker = NULL;
		return ret;
	}
	sched_set_fifo(timer->worker->task);

	msm_hrtimer_work_init(&timer->work, timer->worker,
			      msm_atomic_pending_work,
			      CLOCK_MONOTONIC, HRTIMER_MODE_ABS);

	return 0;
}

void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer)
{
	if (timer->worker)
		kthread_destroy_worker(timer->worker);
}

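/*
 * An update can take the async (deferred flush) path only if it is a
 * legacy cursor or async plane update that touches no connectors,
 * needs no modeset, and is confined to a single crtc.
 */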
static bool can_do_async(struct drm_atomic_state *state,
		struct drm_crtc **async_crtc)
{
	struct drm_connector_state *connector_state;
	struct drm_connector *connector;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, num_crtcs = 0;

	if (!(state->legacy_cursor_update || state->async_update))
		return false;

	/* any connector change means slow path: */
	for_each_new_connector_in_state(state, connector, connector_state, i)
		return false;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			return false;
		if (++num_crtcs > 1)
			return false;
		*async_crtc = crtc;
	}

	return true;
}

/* Get bitmask of crtcs that will need to be flushed.  The bitmask
 * can be used with the for_each_crtc_mask() iterator, to iterate
 * affected crtcs without needing to preserve the atomic state.
 */
static unsigned get_crtc_mask(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned i, mask = 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		mask |= drm_crtc_mask(crtc);

	return mask;
}

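/*
 * Tail of the atomic commit: wait out any in-flight flush on the affected
 * crtcs, push the new state down to the hardware, and then either hand the
 * flush off to the per-crtc pending timer (async path) or flush and wait
 * for it synchronously.
 */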
void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct drm_crtc *async_crtc = NULL;
	unsigned crtc_mask = get_crtc_mask(state);
	bool async = kms->funcs->vsync_time &&
			can_do_async(state, &async_crtc);

	trace_msm_atomic_commit_tail_start(async, crtc_mask);

	kms->funcs->enable_commit(kms);

	/*
	 * Ensure any previous (potentially async) commit has
	 * completed:
	 */
	lock_crtcs(kms, crtc_mask);
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	/*
	 * Now that there is no in-progress flush, prepare the
	 * current update:
	 */
	kms->funcs->prepare_commit(kms, state);

	/*
	 * Push atomic updates down to hardware:
	 */
	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, state);

	if (async) {
		struct msm_pending_timer *timer =
			&kms->pending_timers[drm_crtc_index(async_crtc)];

		/* async updates are limited to single-crtc updates: */
		WARN_ON(crtc_mask != drm_crtc_mask(async_crtc));

		/*
		 * Start timer if we don't already have an update pending
		 * on this crtc:
		 */
		if (!(kms->pending_crtc_mask & crtc_mask)) {
			ktime_t vsync_time, wakeup_time;

			kms->pending_crtc_mask |= crtc_mask;

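			/*
			 * Wake the worker ~1ms before the expected vsync so
			 * the deferred flush lands just ahead of the frame:
			 */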
			vsync_time = kms->funcs->vsync_time(kms, async_crtc);
			wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));

			msm_hrtimer_queue_work(&timer->work, wakeup_time,
					HRTIMER_MODE_ABS);
		}

		kms->funcs->disable_commit(kms);
		unlock_crtcs(kms, crtc_mask);
		/*
		 * At this point, from drm core's perspective, we
		 * are done with the atomic update, so we can just
		 * go ahead and signal that it is done:
		 */
		drm_atomic_helper_commit_hw_done(state);
		drm_atomic_helper_cleanup_planes(dev, state);

		trace_msm_atomic_commit_tail_finish(async, crtc_mask);

		return;
	}

	/*
	 * If there is any async flush pending on updated crtcs, fold
	 * them into the current flush.
	 */
	kms->pending_crtc_mask &= ~crtc_mask;

	vblank_get(kms, crtc_mask);

	/*
	 * Flush hardware updates:
	 */
	trace_msm_atomic_flush_commit(crtc_mask);
	kms->funcs->flush_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);
	/*
	 * Wait for flush to complete:
	 */
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	vblank_put(kms, crtc_mask);

	lock_crtcs(kms, crtc_mask);
	kms->funcs->complete_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);
	kms->funcs->disable_commit(kms);

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(dev, state);

	trace_msm_atomic_commit_tail_finish(async, crtc_mask);
}