xref: /openbmc/linux/drivers/gpu/drm/msm/msm_atomic.c (revision 2d99ced787e3d0f251fa370d2aae83cf2085a8d9)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_atomic_uapi.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"

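/*
 * Prepare a plane's new framebuffer ahead of the commit: let the
 * drm_gem_fb helper set up the plane's fence, and pin the fb's backing
 * GEM objects into the kms address space.
 */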
int msm_atomic_prepare_fb(struct drm_plane *plane,
			  struct drm_plane_state *new_state)
{
	struct msm_drm_private *priv = plane->dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!new_state->fb)
		return 0;

	drm_gem_fb_prepare_fb(plane, new_state);

	return msm_framebuffer_prepare(new_state->fb, kms->aspace);
}

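/*
 * Flush a previously deferred async (cursor) update for the given crtc.
 * Runs from the pending-timer worker shortly before the expected vsync:
 * if the crtc still has an update pending, clear it from
 * pending_crtc_mask, flush the hw update, and wait for the flush to
 * complete.
 */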
static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
{
	unsigned crtc_mask = BIT(crtc_idx);

	mutex_lock(&kms->commit_lock);

	if (!(kms->pending_crtc_mask & crtc_mask)) {
		mutex_unlock(&kms->commit_lock);
		return;
	}

	kms->pending_crtc_mask &= ~crtc_mask;

	kms->funcs->enable_commit(kms);

	/*
	 * Flush hardware updates:
	 */
	DRM_DEBUG_ATOMIC("triggering async commit\n");
	kms->funcs->flush_commit(kms, crtc_mask);
	mutex_unlock(&kms->commit_lock);

	/*
	 * Wait for flush to complete:
	 */
	kms->funcs->wait_flush(kms, crtc_mask);

	mutex_lock(&kms->commit_lock);
	kms->funcs->complete_commit(kms, crtc_mask);
	mutex_unlock(&kms->commit_lock);
	kms->funcs->disable_commit(kms);
}

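/*
 * hrtimer callback for a deferred async commit.  The flush path sleeps
 * (it takes commit_lock), so don't flush from here; just punt the work
 * to the driver workqueue.
 */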
static enum hrtimer_restart msm_atomic_pending_timer(struct hrtimer *t)
{
	struct msm_pending_timer *timer = container_of(t,
			struct msm_pending_timer, timer);
	struct msm_drm_private *priv = timer->kms->dev->dev_private;

	queue_work(priv->wq, &timer->work);

	return HRTIMER_NORESTART;
}

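/*
 * Worker which performs the deferred flush for the timer's crtc.
 */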
static void msm_atomic_pending_work(struct work_struct *work)
{
	struct msm_pending_timer *timer = container_of(work,
			struct msm_pending_timer, work);

	msm_atomic_async_commit(timer->kms, timer->crtc_idx);
}

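/*
 * Set up the per-crtc pending timer used to defer async flushes until
 * just before vsync: record the kms/crtc it serves and initialize the
 * hrtimer and the work item it queues.
 */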
void msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
		struct msm_kms *kms, int crtc_idx)
{
	timer->kms = kms;
	timer->crtc_idx = crtc_idx;
	hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	timer->timer.function = msm_atomic_pending_timer;
	INIT_WORK(&timer->work, msm_atomic_pending_work);
}

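/*
 * Decide whether this update can take the async (deferred flush) path:
 * only legacy cursor updates or explicitly async updates qualify, and
 * only if they touch no connectors, need no modeset, and update at most
 * a single crtc (returned in *async_crtc).
 */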
static bool can_do_async(struct drm_atomic_state *state,
		struct drm_crtc **async_crtc)
{
	struct drm_connector_state *connector_state;
	struct drm_connector *connector;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, num_crtcs = 0;

	if (!(state->legacy_cursor_update || state->async_update))
		return false;

	/* any connector change means slow path: */
	for_each_new_connector_in_state(state, connector, connector_state, i)
		return false;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			return false;
		if (++num_crtcs > 1)
			return false;
		*async_crtc = crtc;
	}

	return true;
}

/* Get bitmask of crtcs that will need to be flushed.  The bitmask
 * can be used with the for_each_crtc_mask() iterator to iterate over
 * the affected crtcs without needing to preserve the atomic state.
 */
static unsigned get_crtc_mask(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned i, mask = 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		mask |= drm_crtc_mask(crtc);

	return mask;
}

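/*
 * Main commit path.  Once any previous flush on the affected crtcs has
 * completed, push the new state down to the hardware.  If the update
 * qualifies for the async path, the flush is deferred to the per-crtc
 * pending timer (which fires ~1ms before the expected vsync); otherwise
 * flush immediately and wait for completion before finishing the
 * commit.
 */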
void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct drm_crtc *async_crtc = NULL;
	unsigned crtc_mask = get_crtc_mask(state);
	bool async = kms->funcs->vsync_time &&
			can_do_async(state, &async_crtc);

	kms->funcs->enable_commit(kms);

	/*
	 * Ensure any previous (potentially async) commit has
	 * completed:
	 */
	kms->funcs->wait_flush(kms, crtc_mask);

	mutex_lock(&kms->commit_lock);

	/*
	 * Now that there is no in-progress flush, prepare the
	 * current update:
	 */
	kms->funcs->prepare_commit(kms, state);

	/*
	 * Push atomic updates down to hardware:
	 */
	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, state);

	if (async) {
		struct msm_pending_timer *timer =
			&kms->pending_timers[drm_crtc_index(async_crtc)];

		/* async updates are limited to single-crtc updates: */
		WARN_ON(crtc_mask != drm_crtc_mask(async_crtc));

		/*
		 * Start timer if we don't already have an update pending
		 * on this crtc:
		 */
		if (!(kms->pending_crtc_mask & crtc_mask)) {
			ktime_t vsync_time, wakeup_time;

			kms->pending_crtc_mask |= crtc_mask;

			vsync_time = kms->funcs->vsync_time(kms, async_crtc);
			wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));

			hrtimer_start(&timer->timer, wakeup_time,
					HRTIMER_MODE_ABS);
		}

		kms->funcs->disable_commit(kms);
		mutex_unlock(&kms->commit_lock);

		/*
		 * At this point, from drm core's perspective, we
		 * are done with the atomic update, so we can just
		 * go ahead and signal that it is done:
		 */
		drm_atomic_helper_commit_hw_done(state);
		drm_atomic_helper_cleanup_planes(dev, state);

		return;
	}

	/*
	 * If there are any async flushes pending on the updated crtcs,
	 * fold them into the current flush.
	 */
	kms->pending_crtc_mask &= ~crtc_mask;

	/*
	 * Flush hardware updates:
	 */
	if (kms->funcs->commit) {
		DRM_DEBUG_ATOMIC("triggering commit\n");
		kms->funcs->commit(kms, state);
	}
	kms->funcs->flush_commit(kms, crtc_mask);
	mutex_unlock(&kms->commit_lock);

	/*
	 * Wait for flush to complete:
	 */
	kms->funcs->wait_flush(kms, crtc_mask);

	mutex_lock(&kms->commit_lock);
	kms->funcs->complete_commit(kms, crtc_mask);
	mutex_unlock(&kms->commit_lock);
	kms->funcs->disable_commit(kms);

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(dev, state);
}