/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_lock.h>

/**
 * DOC: kms locking
 *
 * As KMS moves toward more fine-grained locking, and the atomic ioctl where
 * userspace can indirectly control the locking order, it becomes necessary
 * to use ww_mutex and acquire contexts to avoid deadlocks. But because the
 * locking is more distributed around the driver code, we want a bit of extra
 * utility/tracking out of our acquire context. This is provided by
 * drm_modeset_lock / drm_modeset_acquire_ctx.
 *
 * For the basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
 *
 * The basic usage pattern is to:
 *
 *     drm_modeset_acquire_init(&ctx)
 *     retry:
 *     foreach (lock in random_ordered_set_of_locks) {
 *         ret = drm_modeset_lock(lock, &ctx)
 *         if (ret == -EDEADLK) {
 *             drm_modeset_backoff(&ctx);
 *             goto retry;
 *         }
 *     }
 *
 *     ... do stuff ...
 *
 *     drm_modeset_drop_locks(&ctx);
 *     drm_modeset_acquire_fini(&ctx);
 */
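
/*
 * As a slightly fuller sketch of the same pattern (the crtc pointer and the
 * "update the hardware" step are stand-ins for driver code; only the
 * drm_modeset_* calls below are part of this API), using the interruptible
 * variants and cleaning up on errors other than -EDEADLK:
 *
 *     struct drm_modeset_acquire_ctx ctx;
 *     int ret;
 *
 *     drm_modeset_acquire_init(&ctx, 0);
 * retry:
 *     ret = drm_modeset_lock_interruptible(&crtc->mutex, &ctx);
 *     if (ret == -EDEADLK) {
 *         ret = drm_modeset_backoff_interruptible(&ctx);
 *         if (!ret)
 *             goto retry;
 *     }
 *     if (ret)
 *         goto out;
 *
 *     ... update the hardware ...
 *
 * out:
 *     drm_modeset_drop_locks(&ctx);
 *     drm_modeset_acquire_fini(&ctx);
 */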

/**
 * __drm_modeset_lock_all - internal helper to grab all modeset locks
 * @dev: DRM device
 * @trylock: trylock mode for atomic contexts
 *
 * This is a special version of drm_modeset_lock_all() which can also be used
 * in atomic contexts; in that case @trylock must be set to true.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int __drm_modeset_lock_all(struct drm_device *dev,
			   bool trylock)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx),
		      trylock ? GFP_ATOMIC : GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	if (trylock) {
		if (!mutex_trylock(&config->mutex)) {
			/* don't leak the ctx if we can't get the lock */
			kfree(ctx);
			return -EBUSY;
		}
	} else {
		mutex_lock(&config->mutex);
	}

	drm_modeset_acquire_init(ctx, 0);
	ctx->trylock_only = trylock;

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail;
	ret = drm_modeset_lock_all_crtcs(dev, ctx);
	if (ret)
		goto fail;

	WARN_ON(config->acquire_ctx);

	/* now we hold the locks, so now that it is safe, stash the
	 * ctx for drm_modeset_unlock_all():
	 */
	config->acquire_ctx = ctx;

	drm_warn_on_modeset_not_all_locked(dev);

	return 0;

fail:
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return ret;
}
EXPORT_SYMBOL(__drm_modeset_lock_all);

/**
 * drm_modeset_lock_all - take all modeset locks
 * @dev: drm device
 *
 * This function takes all modeset locks, suitable where a more fine-grained
 * scheme isn't (yet) implemented. Locks must be dropped with
 * drm_modeset_unlock_all().
 */
void drm_modeset_lock_all(struct drm_device *dev)
{
	WARN_ON(__drm_modeset_lock_all(dev, false) != 0);
}
EXPORT_SYMBOL(drm_modeset_lock_all);

/**
 * drm_modeset_unlock_all - drop all modeset locks
 * @dev: device
 *
 * This function drops all modeset locks taken by drm_modeset_lock_all().
 */
void drm_modeset_unlock_all(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;

	if (WARN_ON(!ctx))
		return;

	config->acquire_ctx = NULL;
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);

	kfree(ctx);

	mutex_unlock(&config->mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock_all);

/**
 * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx
 * @crtc: drm crtc
 *
 * This function locks the given crtc using a hidden acquire context. This is
 * necessary so that drivers internally using the atomic interfaces can grab
 * further locks with the lock acquire context.
 */
void drm_modeset_lock_crtc(struct drm_crtc *crtc)
{
	struct drm_modeset_acquire_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (WARN_ON(!ctx))
		return;

	drm_modeset_acquire_init(ctx, 0);

retry:
	ret = drm_modeset_lock(&crtc->mutex, ctx);
	if (ret)
		goto fail;

	WARN_ON(crtc->acquire_ctx);

	/* now we hold the locks, so now that it is safe, stash the
	 * ctx for drm_modeset_unlock_crtc():
	 */
	crtc->acquire_ctx = ctx;

	return;

fail:
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}
}
EXPORT_SYMBOL(drm_modeset_lock_crtc);
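
/*
 * A minimal sketch of how the hidden-context helpers combine in a legacy
 * per-crtc path such as a page flip (my_driver_page_flip() stands in for the
 * driver's own code; only the drm_modeset_* and
 * drm_modeset_legacy_acquire_ctx() calls are part of this API). The driver
 * function may take further locks against the returned ctx, and
 * drm_modeset_unlock_crtc() drops all of them:
 *
 *     drm_modeset_lock_crtc(crtc);
 *     ctx = drm_modeset_legacy_acquire_ctx(crtc);
 *     ret = my_driver_page_flip(crtc, fb, ctx);
 *     drm_modeset_unlock_crtc(crtc);
 */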

/**
 * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
 * @crtc: drm crtc
 *
 * Legacy ioctl operations like cursor updates or page flips only have per-crtc
 * locking, and store the acquire ctx in the corresponding crtc. All other
 * legacy operations take all locks and use a global acquire context. This
 * function grabs the right one.
 */
struct drm_modeset_acquire_ctx *
drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
{
	if (crtc->acquire_ctx)
		return crtc->acquire_ctx;

	WARN_ON(!crtc->dev->mode_config.acquire_ctx);

	return crtc->dev->mode_config.acquire_ctx;
}
EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);

/**
 * drm_modeset_unlock_crtc - drop crtc lock
 * @crtc: drm crtc
 *
 * This drops the crtc lock acquired with drm_modeset_lock_crtc() and all other
 * locks acquired through the hidden context.
 */
void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
{
	struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;

	if (WARN_ON(!ctx))
		return;

	crtc->acquire_ctx = NULL;
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);

	kfree(ctx);
}
EXPORT_SYMBOL(drm_modeset_unlock_crtc);

/**
 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
 * @dev: device
 *
 * Useful as a debug assert.
 */
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	/* Locking is currently fubar in the panic handler. */
	if (oops_in_progress)
		return;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);

/**
 * drm_modeset_acquire_init - initialize acquire context
 * @ctx: the acquire context
 * @flags: for future use
 */
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
			      uint32_t flags)
{
	memset(ctx, 0, sizeof(*ctx));
	ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
	INIT_LIST_HEAD(&ctx->locked);
}
EXPORT_SYMBOL(drm_modeset_acquire_init);

/**
 * drm_modeset_acquire_fini - cleanup acquire context
 * @ctx: the acquire context
 */
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
{
	ww_acquire_fini(&ctx->ww_ctx);
}
EXPORT_SYMBOL(drm_modeset_acquire_fini);

/**
 * drm_modeset_drop_locks - drop all locks
 * @ctx: the acquire context
 *
 * Drop all locks currently held against this acquire context.
 */
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
{
	WARN_ON(ctx->contended);
	while (!list_empty(&ctx->locked)) {
		struct drm_modeset_lock *lock;

		lock = list_first_entry(&ctx->locked,
					struct drm_modeset_lock, head);

		drm_modeset_unlock(lock);
	}
}
EXPORT_SYMBOL(drm_modeset_drop_locks);

static inline int modeset_lock(struct drm_modeset_lock *lock,
			       struct drm_modeset_acquire_ctx *ctx,
			       bool interruptible, bool slow)
{
	int ret;

	WARN_ON(ctx->contended);

	if (ctx->trylock_only) {
		if (!ww_mutex_trylock(&lock->mutex))
			return -EBUSY;
		else
			return 0;
	} else if (interruptible && slow) {
		ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (interruptible) {
		ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (slow) {
		ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
		ret = 0;
	} else {
		ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
	}
	if (!ret) {
		WARN_ON(!list_empty(&lock->head));
		list_add(&lock->head, &ctx->locked);
	} else if (ret == -EALREADY) {
		/* we already hold the lock.. this is fine.  For atomic
		 * we will need to be able to drm_modeset_lock() things
		 * without having to keep track of what is already locked
		 * or not.
		 */
		ret = 0;
	} else if (ret == -EDEADLK) {
		ctx->contended = lock;
	}

	return ret;
}

static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
			   bool interruptible)
{
	struct drm_modeset_lock *contended = ctx->contended;

	ctx->contended = NULL;

	if (WARN_ON(!contended))
		return 0;

	drm_modeset_drop_locks(ctx);

	return modeset_lock(contended, ctx, interruptible, true);
}

/**
 * drm_modeset_backoff - deadlock avoidance backoff
 * @ctx: the acquire context
 *
 * If deadlock is detected (i.e. drm_modeset_lock() returns -EDEADLK),
 * you must call this function to drop all currently held locks and
 * block until the contended lock becomes available.
 */
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
{
	modeset_backoff(ctx, false);
}
EXPORT_SYMBOL(drm_modeset_backoff);

/**
 * drm_modeset_backoff_interruptible - deadlock avoidance backoff
 * @ctx: the acquire context
 *
 * Interruptible version of drm_modeset_backoff().
 */
int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
{
	return modeset_backoff(ctx, true);
}
EXPORT_SYMBOL(drm_modeset_backoff_interruptible);

/**
 * drm_modeset_lock - take modeset lock
 * @lock: lock to take
 * @ctx: acquire ctx
 *
 * If @ctx is not NULL, then its ww acquire context is used and the
 * lock will be tracked by the context and can be released by calling
 * drm_modeset_drop_locks(). If -EDEADLK is returned, this means a
 * deadlock scenario has been detected and it is an error to attempt
 * to take any more locks without first calling drm_modeset_backoff().
 */
int drm_modeset_lock(struct drm_modeset_lock *lock,
		     struct drm_modeset_acquire_ctx *ctx)
{
	if (ctx)
		return modeset_lock(lock, ctx, false, false);

	ww_mutex_lock(&lock->mutex, NULL);
	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock);

/**
 * drm_modeset_lock_interruptible - take modeset lock
 * @lock: lock to take
 * @ctx: acquire ctx
 *
 * Interruptible version of drm_modeset_lock().
 */
int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
				   struct drm_modeset_acquire_ctx *ctx)
{
	if (ctx)
		return modeset_lock(lock, ctx, true, false);

	return ww_mutex_lock_interruptible(&lock->mutex, NULL);
}
EXPORT_SYMBOL(drm_modeset_lock_interruptible);

/**
 * drm_modeset_unlock - drop modeset lock
 * @lock: lock to release
 */
void drm_modeset_unlock(struct drm_modeset_lock *lock)
{
	list_del_init(&lock->head);
	ww_mutex_unlock(&lock->mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock);

/* Temporary.. until we have sufficiently fine-grained locking, there
 * are a couple of scenarios where it is convenient to grab all crtc locks.
 * It is planned to remove this:
 */
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_crtc *crtc;
	int ret = 0;

	list_for_each_entry(crtc, &config->crtc_list, head) {
		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);