/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
#include "msm_fence.h"

struct msm_commit {
	struct drm_device *dev;
	struct drm_atomic_state *state;
	struct work_struct work;
	uint32_t crtc_mask;
};

static void commit_worker(struct work_struct *work);

/* block until specified crtcs are no longer pending update, and
 * atomically mark them as pending update
 */
static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	int ret;

	spin_lock(&priv->pending_crtcs_event.lock);
	ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
			!(priv->pending_crtcs & crtc_mask));
	if (ret == 0) {
		DBG("start: %08x", crtc_mask);
		priv->pending_crtcs |= crtc_mask;
	}
	spin_unlock(&priv->pending_crtcs_event.lock);

	return ret;
}

/* clear specified crtcs (no longer pending update)
 */
static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	spin_lock(&priv->pending_crtcs_event.lock);
	DBG("end: %08x", crtc_mask);
	priv->pending_crtcs &= ~crtc_mask;
	wake_up_all_locked(&priv->pending_crtcs_event);
	spin_unlock(&priv->pending_crtcs_event.lock);
}

static struct msm_commit *commit_init(struct drm_atomic_state *state)
{
	struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->dev = state->dev;
	c->state = state;

	INIT_WORK(&c->work, commit_worker);

	return c;
}

static void commit_destroy(struct msm_commit *c)
{
	end_atomic(c->dev->dev_private, c->crtc_mask);
	kfree(c);
}

static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
		struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct msm_drm_private *priv = old_state->dev->dev_private;
	struct msm_kms *kms = priv->kms;
	int i;

	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->active)
			continue;

		kms->funcs->wait_for_crtc_commit_done(kms, crtc);
	}
}

/* Attach the exclusive (implicit sync) fence from the bo's reservation
 * object to the plane state, and pin the fb into the kms address space.
 */
int msm_atomic_prepare_fb(struct drm_plane *plane,
		struct drm_plane_state *new_state)
{
	struct msm_drm_private *priv = plane->dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;
	struct dma_fence *fence;

	if (!new_state->fb)
		return 0;

	obj = msm_framebuffer_bo(new_state->fb, 0);
	msm_obj = to_msm_bo(obj);
	fence = reservation_object_get_excl_rcu(msm_obj->resv);

	drm_atomic_set_fence_for_plane(new_state, fence);

	return msm_framebuffer_prepare(new_state->fb, kms->aspace);
}

static void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->prepare_commit(kms, state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* NOTE: _wait_for_vblanks() only waits for vblank on
	 * enabled CRTCs.  So we end up faulting when disabling
	 * due to (potentially) unref'ing the outgoing fb's
	 * before the vblank when the disable has latched.
	 *
	 * But if it did wait on disabled (or newly disabled)
	 * CRTCs, that would be racy (ie. we could have missed
	 * the irq).  We need some way to poll for pipe shut
	 * down.  Or just live with occasionally hitting the
	 * timeout in the CRTC disable path (which really should
	 * not be a critical path)
	 */

	msm_atomic_wait_for_commit_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	kms->funcs->complete_commit(kms, state);
}

/* The (potentially) asynchronous part of the commit.  At this point
 * nothing can fail short of armageddon.
 */
static void complete_commit(struct msm_commit *c)
{
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;

	drm_atomic_helper_wait_for_fences(dev, state, false);

	msm_atomic_commit_tail(state);

	drm_atomic_state_put(state);

	commit_destroy(c);
}

static void commit_worker(struct work_struct *work)
{
	complete_commit(container_of(work, struct msm_commit, work));
}

/**
 * msm_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check().  This can still fail when e.g. the framebuffer
 * reservation fails.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int msm_atomic_commit(struct drm_device *dev,
		struct drm_atomic_state *state, bool nonblock)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_commit *c;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i, ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	/*
	 * Note that plane->atomic_async_check() should fail if we need
	 * to re-assign hwpipe or anything that touches global atomic
	 * state, so we'll never go down the async update path in those
	 * cases.
	 */
	if (state->async_update) {
		drm_atomic_helper_async_commit(dev, state);
		drm_atomic_helper_cleanup_planes(dev, state);
		return 0;
	}

	c = commit_init(state);
	if (!c) {
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Figure out what crtcs we have:
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		c->crtc_mask |= drm_crtc_mask(crtc);

	/*
	 * Figure out what fence to wait for:
	 */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		if ((new_plane_state->fb != old_plane_state->fb) && new_plane_state->fb) {
			struct drm_gem_object *obj = msm_framebuffer_bo(new_plane_state->fb, 0);
			struct msm_gem_object *msm_obj = to_msm_bo(obj);
			struct dma_fence *fence = reservation_object_get_excl_rcu(msm_obj->resv);

			drm_atomic_set_fence_for_plane(new_plane_state, fence);
		}
	}

	/*
	 * Wait for pending updates on any of the same crtc's and then
	 * mark our set of crtc's as busy:
	 */
	ret = start_atomic(dev->dev_private, c->crtc_mask);
	if (ret)
		goto err_free;

	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_state_get(state);
	if (nonblock) {
		queue_work(priv->atomic_wq, &c->work);
		return 0;
	}

	complete_commit(c);

	return 0;

err_free:
	kfree(c);
error:
	drm_atomic_helper_cleanup_planes(dev, state);
	return ret;
}
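
/*
 * For reference, a sketch (not part of this file) of how the entry points
 * above are wired into the DRM core.  The real registration lives in
 * msm_drv.c and the per-plane code, so the struct instances named here are
 * illustrative assumptions, not the driver's literal tables:
 *
 *	static const struct drm_mode_config_funcs mode_config_funcs = {
 *		.fb_create = msm_framebuffer_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = msm_atomic_commit,
 *	};
 *
 *	static const struct drm_plane_helper_funcs plane_helper_funcs = {
 *		.prepare_fb = msm_atomic_prepare_fb,
 *		...
 *	};
 */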