/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef DRM_MODESET_LOCK_H_
#define DRM_MODESET_LOCK_H_

#include <linux/types.h> /* stackdepot.h is not self-contained */
#include <linux/stackdepot.h>
#include <linux/ww_mutex.h>

struct drm_modeset_lock;

/**
 * struct drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx)
 * @ww_ctx: base acquire ctx
 * @contended: used internally for -EDEADLK handling
 * @stack_depot: used internally for contention debugging
 * @locked: list of held locks
 * @trylock_only: trylock mode used in atomic contexts/panic notifiers
 * @interruptible: whether interruptible locking should be used
 *
 * Each thread competing for a set of locks must use one acquire ctx, and if
 * any lock function returns -EDEADLK it must back off and retry (see the
 * example sketch after this struct).
 */
struct drm_modeset_acquire_ctx {

	struct ww_acquire_ctx ww_ctx;

	/*
	 * Contended lock: if a lock is contended you should only call
	 * drm_modeset_backoff() which drops locks and slow-locks the
	 * contended lock.
	 */
	struct drm_modeset_lock *contended;

	/*
	 * Stack depot for debugging when a contended lock was not backed off
	 * from.
	 */
	depot_stack_handle_t stack_depot;

	/*
	 * list of held locks (drm_modeset_lock)
	 */
	struct list_head locked;

	/*
	 * Trylock mode, use only for panic handlers!
	 */
	bool trylock_only;

	/* Perform interruptible waits on this context. */
	bool interruptible;
};
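
/*
 * Example: a minimal sketch of the acquire/backoff/retry dance described
 * above. The locks "lock_a" and "lock_b" are hypothetical; the drm_modeset_*
 * calls are the ones declared further down in this header.
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	int ret;
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 * retry:
 *	ret = drm_modeset_lock(&lock_a, &ctx);
 *	if (!ret)
 *		ret = drm_modeset_lock(&lock_b, &ctx);
 *	if (ret == -EDEADLK) {
 *		ret = drm_modeset_backoff(&ctx);
 *		if (!ret)
 *			goto retry;
 *	}
 *	if (!ret) {
 *		... both locks are held, touch the protected state ...
 *	}
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 */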

/**
 * struct drm_modeset_lock - used for locking modeset resources.
 * @mutex: resource locking
 * @head: used to hold its place on the &drm_modeset_acquire_ctx.locked list
 *	when part of an atomic update
 *
 * Used for locking CRTCs and other modeset resources.
 */
struct drm_modeset_lock {
	/*
	 * modeset lock
	 */
	struct ww_mutex mutex;

	/*
	 * Resources that are locked as part of an atomic update are added
	 * to a list (so we know what to unlock at the end).
	 */
	struct list_head head;
};

#define DRM_MODESET_ACQUIRE_INTERRUPTIBLE BIT(0)

void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
		uint32_t flags);
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx);
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx);
int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx);

void drm_modeset_lock_init(struct drm_modeset_lock *lock);

/**
 * drm_modeset_lock_fini - cleanup lock
 * @lock: lock to cleanup
 */
static inline void drm_modeset_lock_fini(struct drm_modeset_lock *lock)
{
	WARN_ON(!list_empty(&lock->head));
}

/**
 * drm_modeset_is_locked - equivalent to mutex_is_locked()
 * @lock: lock to check
 */
static inline bool drm_modeset_is_locked(struct drm_modeset_lock *lock)
{
	return ww_mutex_is_locked(&lock->mutex);
}

/**
 * drm_modeset_lock_assert_held - equivalent to lockdep_assert_held()
 * @lock: lock to check
 */
static inline void drm_modeset_lock_assert_held(struct drm_modeset_lock *lock)
{
	lockdep_assert_held(&lock->mutex.base);
}

int drm_modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx);
int __must_check drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock);
void drm_modeset_unlock(struct drm_modeset_lock *lock);

struct drm_device;
struct drm_crtc;
struct drm_plane;

void drm_modeset_lock_all(struct drm_device *dev);
void drm_modeset_unlock_all(struct drm_device *dev);
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);

int drm_modeset_lock_all_ctx(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx);

/**
 * DRM_MODESET_LOCK_ALL_BEGIN - Helper to acquire modeset locks
 * @dev: drm device
 * @ctx: local modeset acquire context, will be dereferenced
 * @flags: DRM_MODESET_ACQUIRE_* flags to pass to drm_modeset_acquire_init()
 * @ret: local ret/err/etc variable to track error status
 *
 * Use these macros to simplify grabbing all modeset locks using a local
 * context. This has the advantage of reducing boilerplate while also properly
 * checking return values where appropriate. A usage sketch follows the
 * DRM_MODESET_LOCK_ALL_END() definition below.
 *
 * Any code run between BEGIN and END will be holding the modeset locks.
 *
 * This must be paired with DRM_MODESET_LOCK_ALL_END(). We will jump back and
 * forth between the labels on deadlock and error conditions.
 *
 * Drivers can acquire additional modeset locks. If any lock acquisition
 * fails, the control flow needs to jump to DRM_MODESET_LOCK_ALL_END() with
 * the @ret parameter containing the return value of drm_modeset_lock().
 *
 * Returns:
 * The only possible value of ret immediately after DRM_MODESET_LOCK_ALL_BEGIN()
 * is 0, so no error checking is necessary.
 */
#define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret)		\
	if (!drm_drv_uses_atomic_modeset(dev))				\
		mutex_lock(&dev->mode_config.mutex);			\
	drm_modeset_acquire_init(&ctx, flags);				\
modeset_lock_retry:							\
	ret = drm_modeset_lock_all_ctx(dev, &ctx);			\
	if (ret)							\
		goto modeset_lock_fail;

/**
 * DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks
 * @dev: drm device
 * @ctx: local modeset acquire context, will be dereferenced
 * @ret: local ret/err/etc variable to track error status
 *
 * The other side of DRM_MODESET_LOCK_ALL_BEGIN(). It will bounce back to
 * BEGIN if ret is -EDEADLK.
 *
 * It's important that you use the same ret variable for begin and end so
 * deadlock conditions are properly handled.
 *
 * Returns:
 * ret will be untouched unless it is -EDEADLK on entry. That means that if you
 * successfully acquire the locks, ret will be whatever your code sets it to. If
 * there is a deadlock or other failure with acquire or backoff, ret will be set
 * to that failure. In both of these cases the code between BEGIN/END will not
 * be run, so the failure will reflect the inability to grab the locks.
 */
#define DRM_MODESET_LOCK_ALL_END(dev, ctx, ret)				\
modeset_lock_fail:							\
	if (ret == -EDEADLK) {						\
		ret = drm_modeset_backoff(&ctx);			\
		if (!ret)						\
			goto modeset_lock_retry;			\
	}								\
	drm_modeset_drop_locks(&ctx);					\
	drm_modeset_acquire_fini(&ctx);					\
	if (!drm_drv_uses_atomic_modeset(dev))				\
		mutex_unlock(&dev->mode_config.mutex);
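
/*
 * Example: a minimal sketch of how the BEGIN/END helpers above are meant to
 * be paired. The wrapper function and its body are hypothetical; dev, ctx
 * and ret follow the macro parameters documented above.
 *
 *	int example_locked_op(struct drm_device *dev)
 *	{
 *		struct drm_modeset_acquire_ctx ctx;
 *		int ret;
 *
 *		DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
 *
 *		... code that runs with all modeset locks held and may
 *		    set ret on failure ...
 *
 *		DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
 *
 *		return ret;
 *	}
 */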

#endif /* DRM_MODESET_LOCK_H_ */