xref: /openbmc/linux/drivers/gpu/drm/msm/msm_atomic.c (revision 70db18dca4e0130acb0600ad51c33176b6162ccc)
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
#include "msm_fence.h"

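/*
 * Wait, via the kms backend's wait_for_crtc_commit_done() hook, until the
 * hw has latched the commit on each CRTC that is still active in the new
 * state.
 */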
static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
		struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct msm_drm_private *priv = old_state->dev->dev_private;
	struct msm_kms *kms = priv->kms;
	int i;

	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->active)
			continue;

		kms->funcs->wait_for_crtc_commit_done(kms, crtc);
	}
}

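/*
 * Implicit sync: take the exclusive fence from the reservation object of
 * the fb's backing GEM buffer and attach it to the plane state, so the
 * commit machinery waits for in-flight rendering before scanning the
 * buffer out, then pin the fb into the kms address space.
 */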
int msm_atomic_prepare_fb(struct drm_plane *plane,
			  struct drm_plane_state *new_state)
{
	struct msm_drm_private *priv = plane->dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;
	struct dma_fence *fence;

	if (!new_state->fb)
		return 0;

	obj = msm_framebuffer_bo(new_state->fb, 0);
	msm_obj = to_msm_bo(obj);
	fence = reservation_object_get_excl_rcu(msm_obj->resv);

	drm_atomic_set_fence_for_plane(new_state, fence);

	return msm_framebuffer_prepare(new_state->fb, kms->aspace);
}
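
/*
 * Usage: the plane code hooks this up as its ->prepare_fb() plane helper.
 * A minimal sketch of the wiring (the hook names besides .prepare_fb are
 * illustrative; the real tables live in the plane drivers):
 *
 *	static const struct drm_plane_helper_funcs plane_helper_funcs = {
 *		.prepare_fb = msm_atomic_prepare_fb,
 *		.cleanup_fb = plane_cleanup_fb,
 *		.atomic_check = plane_atomic_check,
 *		.atomic_update = plane_atomic_update,
 *	};
 */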
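/*
 * Push the new state to the hw in the standard atomic-helper order
 * (modeset disables, planes, modeset enables), bracketed by the kms
 * backend's prepare_commit()/complete_commit() hooks.
 */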
static void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->prepare_commit(kms, state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* NOTE: _wait_for_vblanks() only waits for vblank on
	 * enabled CRTCs.  So we end up faulting when disabling
	 * due to (potentially) unref'ing the outgoing fbs
	 * before the vblank when the disable has latched.
	 *
	 * But if it did wait on disabled (or newly disabled)
	 * CRTCs, that would be racy (i.e. we could have missed
	 * the irq).  We need some way to poll for pipe shut
	 * down.  Or just live with occasionally hitting the
	 * timeout in the CRTC disable path (which really should
	 * not be critical path).
	 */

	msm_atomic_wait_for_commit_done(dev, state);

	kms->funcs->complete_commit(kms, state);

	drm_atomic_helper_wait_for_vblanks(dev, state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_cleanup_planes(dev, state);
}

/* The (potentially) asynchronous part of the commit.  At this point
 * nothing can fail short of armageddon.
 */
static void commit_tail(struct drm_atomic_state *state)
{
	drm_atomic_helper_wait_for_fences(state->dev, state, false);

	drm_atomic_helper_wait_for_dependencies(state);

	msm_atomic_commit_tail(state);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);
}

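/* Workqueue callback for nonblocking commits: recover the atomic state
 * from its embedded work item and run the commit tail from there.
 */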
static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	commit_tail(state);
}

/**
 * msm_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 * reservation fails.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int msm_atomic_commit(struct drm_device *dev,
		struct drm_atomic_state *state, bool nonblock)
{
	int ret;

	/*
	 * Note that plane->atomic_async_check() should fail if we need
	 * to re-assign hwpipe or anything that touches global atomic
	 * state, so we'll never go down the async update path in those
	 * cases.
	 */
	if (state->async_update) {
		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret)
			return ret;

		drm_atomic_helper_async_commit(dev, state);
		drm_atomic_helper_cleanup_planes(dev, state);
		return 0;
	}

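	/*
	 * Set up the drm_crtc_commit tracking objects that the helper
	 * wait/cleanup calls in commit_tail() rely on.
	 */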
	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret)
			goto error;
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 *
	 * swap driver private state while still holding state_lock
	 */
	BUG_ON(drm_atomic_helper_swap_state(state, true) < 0);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

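	/*
	 * Hold an extra reference on the state across the (potentially
	 * asynchronous) tail; commit_tail() drops it with
	 * drm_atomic_state_put() when it is done with the state.
	 */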
	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		commit_tail(state);

	return 0;

error:
	drm_atomic_helper_cleanup_planes(dev, state);
	return ret;
}