/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"

struct msm_commit {
	struct drm_atomic_state *state;
	uint32_t fence;
	struct msm_fence_cb fence_cb;
	uint32_t crtc_mask;
};

static void fence_cb(struct msm_fence_cb *cb);

/* block until specified crtcs are no longer pending update, and
 * atomically mark them as pending update
 */
static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	int ret;

	spin_lock(&priv->pending_crtcs_event.lock);
	ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
			!(priv->pending_crtcs & crtc_mask));
	if (ret == 0) {
		DBG("start: %08x", crtc_mask);
		priv->pending_crtcs |= crtc_mask;
	}
	spin_unlock(&priv->pending_crtcs_event.lock);

	return ret;
}

/* clear specified crtcs (no longer pending update)
 */
static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	spin_lock(&priv->pending_crtcs_event.lock);
	DBG("end: %08x", crtc_mask);
	priv->pending_crtcs &= ~crtc_mask;
	wake_up_all_locked(&priv->pending_crtcs_event);
	spin_unlock(&priv->pending_crtcs_event.lock);
}

static struct msm_commit *new_commit(struct drm_atomic_state *state)
{
	struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->state = state;
	/* TODO we might need a way to indicate to run the cb on a
	 * different wq so wait_for_vblanks() doesn't block retiring
	 * bo's..
	 */
	INIT_FENCE_CB(&c->fence_cb, fence_cb);

	return c;
}

/* The (potentially) asynchronous part of the commit.  At this point
 * nothing can fail short of armageddon.
 */
static void complete_commit(struct msm_commit *c)
{
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;

	drm_atomic_helper_commit_pre_planes(dev, state);

	drm_atomic_helper_commit_planes(dev, state);

	drm_atomic_helper_commit_post_planes(dev, state);

	/* NOTE: _wait_for_vblanks() only waits for vblank on
	 * enabled CRTCs.  So we end up faulting when disabling
	 * due to (potentially) unref'ing the outgoing fb's
	 * before the vblank when the disable has latched.
	 *
	 * But if it did wait on disabled (or newly disabled)
	 * CRTCs, that would be racy (ie. we could have missed
	 * the irq).  We need some way to poll for pipe shut
	 * down.  Or just live with occasionally hitting the
	 * timeout in the CRTC disable path (which really should
	 * not be critical path)
	 */

	drm_atomic_helper_wait_for_vblanks(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_state_free(state);

	end_atomic(dev->dev_private, c->crtc_mask);

	kfree(c);
}

static void fence_cb(struct msm_fence_cb *cb)
{
	struct msm_commit *c =
			container_of(cb, struct msm_commit, fence_cb);
	complete_commit(c);
}

static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
{
	struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
	c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
}


int msm_atomic_check(struct drm_device *dev,
		struct drm_atomic_state *state)
{
	int ret;

	/*
	 * msm ->atomic_check can update ->mode_changed for pixel format
	 * changes, hence must be run before we check the modeset changes.
	 */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	return ret;
}

/**
 * msm_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @async: asynchronous commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check().  This can still fail when e.g. the framebuffer
 * reservation fails.  An asynchronous commit is completed later from the
 * fence callback once the relevant fence has signalled.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int msm_atomic_commit(struct drm_device *dev,
		struct drm_atomic_state *state, bool async)
{
	int nplanes = dev->mode_config.num_total_plane;
	int ncrtcs = dev->mode_config.num_crtc;
	struct msm_commit *c;
	int i, ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	c = new_commit(state);
	if (!c)
		return -ENOMEM;

	/*
	 * Figure out what crtcs we have:
	 */
	for (i = 0; i < ncrtcs; i++) {
		struct drm_crtc *crtc = state->crtcs[i];
		if (!crtc)
			continue;
		c->crtc_mask |= (1 << drm_crtc_index(crtc));
	}

	/*
	 * Figure out what fence to wait for:
	 */
	for (i = 0; i < nplanes; i++) {
		struct drm_plane *plane = state->planes[i];
		struct drm_plane_state *new_state = state->plane_states[i];

		if (!plane)
			continue;

		if ((plane->state->fb != new_state->fb) && new_state->fb)
			add_fb(c, new_state->fb);
	}

	/*
	 * Wait for pending updates on any of the same crtc's and then
	 * mark our set of crtc's as busy:
	 */
	ret = start_atomic(dev->dev_private, c->crtc_mask);
	if (ret) {
		kfree(c);
		return ret;
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	drm_atomic_helper_swap_state(dev, state);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	if (async) {
		msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
		return 0;
	}

	ret = msm_wait_fence_interruptable(dev, c->fence, NULL);
	if (ret) {
		WARN_ON(ret);	// TODO unswap state back?  or??
		kfree(c);
		return ret;
	}

	complete_commit(c);

	return 0;
}
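
/*
 * Usage sketch: msm_atomic_check() and msm_atomic_commit() are not called
 * directly; they are wired up elsewhere in the driver through the device's
 * drm_mode_config_funcs at modeset init.  Roughly, with the fb_create hook
 * shown only as an assumed example:
 *
 *	static const struct drm_mode_config_funcs mode_config_funcs = {
 *		.fb_create = msm_framebuffer_create,
 *		.atomic_check = msm_atomic_check,
 *		.atomic_commit = msm_atomic_commit,
 *	};
 *
 *	...
 *	dev->mode_config.funcs = &mode_config_funcs;
 */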