/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

/*
 * @owner: contains a 'struct task_struct *' pointer to the current lock
 * owner; NULL means not owned. Since task_struct pointers are aligned to
 * at least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * We set the HANDOFF bit, so we must make sure it doesn't live
		 * past the point where we acquire it. This would be possible
		 * if we (accidentally) set the bit on an unlocked mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}
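
/*
 * Illustrative sketch, not part of the original file: how a packed owner
 * word decomposes with the helpers above. The function below is
 * hypothetical and exists only to demonstrate the pointer-tagging scheme.
 */
static __maybe_unused void __mutex_owner_word_example(struct mutex *lock)
{
	unsigned long owner = atomic_long_read(&lock->owner);
	struct task_struct *task = __owner_task(owner);	/* high bits */
	unsigned long flags = __owner_flags(owner);	/* low three bits */

	if (task && (flags & MUTEX_FLAG_WAITERS)) {
		/* Locked with sleepers queued; unlock must issue a wakeup. */
	}
}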
/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void __sched
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock; the
 * __mutex_trylock() provides matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
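
/*
 * Illustrative sketch, not part of the original file: the canonical
 * lock/unlock pairing from process context. The structure and function
 * below are hypothetical.
 */
struct example_dev {
	struct mutex lock;	/* protects @count */
	int count;
};

static __maybe_unused void example_dev_inc(struct example_dev *dev)
{
	mutex_lock(&dev->lock);		/* may sleep; never from IRQ context */
	dev->count++;			/* exclusive access while held */
	mutex_unlock(&dev->lock);	/* must be the task that locked */
}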
/*
 * Wait-Die:
 *   The newer transactions are killed when:
 *     It (the new transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   The newer transactions are wounded when:
 *     An older transaction makes a request for a lock being held by
 *     the newer transaction.
 */

/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}

/*
 * Determine if context @a is 'after' context @b. IOW, @a is a younger
 * transaction than @b, and depending on the algorithm it either needs to
 * wait for @b or die.
 */
static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	return (signed long)(a->stamp - b->stamp) > 0;
}
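
/*
 * Illustrative sketch, not part of the original file: why the signed
 * subtraction above stays correct across stamp counter wrap-around,
 * whereas a plain unsigned '>' would not. Values are hypothetical.
 */
static __maybe_unused bool __ww_stamp_wrap_example(void)
{
	unsigned long older = ULONG_MAX - 1;	/* stamped just before wrap */
	unsigned long younger = 1;		/* stamped just after wrap */

	/*
	 * (signed long)(1 - (ULONG_MAX - 1)) == 3 > 0, so @younger is
	 * correctly ordered 'after' @older; note that the plain comparison
	 * younger > older would be false here.
	 */
	return (signed long)(younger - older) > 0;	/* true */
}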
/*
 * Wait-Die; wake a younger waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool __sched
__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	if (waiter->ww_ctx->acquired > 0 &&
			__ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
		debug_mutex_wake_waiter(lock, waiter);
		wake_up_process(waiter->task);
	}

	return true;
}

/*
 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with older transactions than
 * the lock holder. Even though multiple waiters may wound the lock holder,
 * it is sufficient that only one does.
 */
static bool __ww_mutex_wound(struct mutex *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __mutex_owner(lock);

	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Possible through __ww_mutex_add_waiter() when we race with
	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
	 * through __ww_mutex_check_waiters().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have FLAG_WAITERS set and hold
	 * wait_lock.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state()
		 * inserts sufficient barriers to make sure @owner either sees
		 * it is wounded in __ww_mutex_check_kill() or has a
		 * wakeup pending to re-read the wounded state.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}

/*
 * We just acquired @lock under @ww_ctx; if there are later contexts waiting
 * behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list,
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;

	lockdep_assert_held(&lock->wait_lock);

	list_for_each_entry(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}

/*
 * After acquiring the lock with the fastpath, where we do not hold wait_lock,
 * set the ctx and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx		[W] MUTEX_FLAG_WAITERS
	 *     MB			    MB
	 * [R] MUTEX_FLAG_WAITERS	[R] ww->ctx
	 *
	 * The memory barrier above pairs with the memory barrier in
	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
	 * and/or !empty list.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	spin_lock(&lock->base.wait_lock);
	__ww_mutex_check_waiters(&lock->base, ctx);
	spin_unlock(&lock->base.wait_lock);
}
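
/*
 * Illustrative sketch, not part of the original file: the store-buffering
 * pattern that the smp_mb() above (paired with the one in
 * __ww_mutex_add_waiter()) rules out, reduced to two hypothetical flags.
 * With both barriers in place, at least one side must observe the other's
 * store, so the wakeup/recheck cannot be missed by both.
 */
static unsigned long example_ctx_set;	/* stands in for ww->ctx */
static unsigned long example_waiters;	/* stands in for MUTEX_FLAG_WAITERS */

static __maybe_unused bool example_fastpath_side(void)
{
	WRITE_ONCE(example_ctx_set, 1);		/* [W] ww->ctx = ctx */
	smp_mb();				/* pairs with waiter side */
	return READ_ONCE(example_waiters);	/* [R] MUTEX_FLAG_WAITERS */
}

static __maybe_unused bool example_waiter_side(void)
{
	WRITE_ONCE(example_waiters, 1);		/* [W] MUTEX_FLAG_WAITERS */
	smp_mb();				/* pairs with fastpath side */
	return READ_ONCE(example_ctx_set);	/* [R] ww->ctx */
}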
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set, its contents are undefined; only by
	 * acquiring wait_lock is there a guarantee that they are
	 * valid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that lock->owner still matches owner. If that
		 * fails, owner might point to freed memory. If it still
		 * matches, the rcu_read_lock() ensures the memory stays
		 * valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption.
		 */
		if (!owner->on_cpu || need_resched() ||
				vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * Because of the lock holder preemption issue, we skip spinning
	 * if the task is not on a CPU or its CPU is preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}
/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using an MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take the OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;


fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	return false;
}
#endif
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
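
/*
 * Illustrative sketch, not part of the original file: the usual backoff
 * loop for taking two ww_mutexes under one acquire context.
 * 'example_class', @a and @b are hypothetical. On -EDEADLK the younger
 * context "dies": it drops what it holds, sleeps on the contended lock
 * with ww_mutex_lock_slow() until it is the oldest waiter, and retries.
 */
static __maybe_unused int example_lock_both(struct ww_mutex *a,
					    struct ww_mutex *b,
					    struct ww_class *example_class)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, example_class);

	ret = ww_mutex_lock(a, &ctx);
	if (ret)
		goto err;

	while ((ret = ww_mutex_lock(b, &ctx)) == -EDEADLK) {
		ww_mutex_unlock(a);		/* back off: drop everything */
		ww_mutex_lock_slow(b, &ctx);	/* sleep until we're oldest */
		swap(a, b);			/* now retake the other lock */
	}
	if (ret)
		goto err_unlock_a;

	ww_acquire_done(&ctx);	/* optional: no further locks will be taken */

	/* ... both objects are stably locked; do the work here ... */

	ww_mutex_unlock(b);
	ww_mutex_unlock(a);
	ww_acquire_fini(&ctx);
	return 0;

err_unlock_a:
	ww_mutex_unlock(a);
err:
	ww_acquire_fini(&ctx);
	return ret;
}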
static __always_inline int __sched
__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}


/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourselves.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 * context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int __sched
__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct mutex_waiter *cur;

	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must kill ourselves.
	 */
	cur = waiter;
	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}
/*
 * Add @waiter to the wait-list, keeping the wait-list ordered by stamp,
 * smallest first, such that older contexts are preferred to acquire the
 * lock over younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourselves immediately when possible (there
 * are older contexts already waiting) to avoid unnecessary waiting, and for
 * Wound-Wait ensure we wound the owning context when it is younger.
 */
static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
		      struct mutex *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos;
	bool is_wait_die;

	if (!ww_ctx) {
		__mutex_add_waiter(lock, waiter, &lock->wait_list);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them. Wait-Die waiters may die here. Wound-Wait waiters
	 * never die here, but they are sorted in stamp order and
	 * may wound the lock holder.
	 */
	pos = &lock->wait_list;
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: if we find an older context waiting, there
			 * is no point in queueing behind it, as we'd have to
			 * die the moment it would acquire the lock.
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = &cur->list;

		/* Wait-Die: ensure younger waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx);
	}

	__mutex_add_waiter(lock, waiter, pos);

	/*
	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
	 * wound it such that we may proceed.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * See ww_mutex_set_context_fastpath(). Orders setting
		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
		 * such that either we or the fastpath will wound @ww->ctx.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
	}

	return 0;
}
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	might_sleep();

	ww = container_of(lock, struct ww_mutex, base);
	if (use_ww_ctx && ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx && ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (use_ww_ctx && ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);

#ifdef CONFIG_DEBUG_MUTEXES
		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;

		waiter.ww_ctx = ww_ctx;
	}

	waiter.task = current;

	set_current_state(state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us; do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		/*
		 * ww_mutex needs to always recheck its position since its
		 * waiter list is not FIFO ordered.
		 */
		if ((use_ww_ctx && ww_ctx) || !first) {
			first = __mutex_waiter_is_first(lock, &waiter);
			if (first)
				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
			break;

		spin_lock(&lock->wait_lock);
	}
	spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	if (use_ww_ctx && ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	mutex_remove_waiter(lock, &waiter, current);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx && ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, current);
err_early_kill:
	spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
		struct lockdep_map *nest_lock, unsigned long ip,
		struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}
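
/*
 * Illustrative sketch, not part of the original file: annotating an
 * intended same-class nesting for lockdep with the _nested variant
 * defined below. 'example_parent' and 'example_child' are hypothetical;
 * the caller must guarantee a real ordering (here: parent before child).
 */
static __maybe_unused void example_lock_parent_child(struct mutex *example_parent,
						     struct mutex *example_child)
{
	mutex_lock(example_parent);
	/* Same lock class: tell lockdep this second acquire is intended. */
	mutex_lock_nested(example_child, SINGLE_DEPTH_NESTING);

	/* ... */

	mutex_unlock(example_child);
	mutex_unlock(example_parent);
}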
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif
/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_interruptible);

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

#endif
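
/*
 * Illustrative sketch, not part of the original file: handling -EINTR from
 * the interruptible variant in a hypothetical syscall/ioctl path, where a
 * signal should abort the wait instead of leaving the task blocked.
 */
static __maybe_unused int example_ioctl_body(struct mutex *lock)
{
	if (mutex_lock_interruptible(lock))
		return -ERESTARTSYS;	/* signal arrived while sleeping */

	/* ... critical section ... */

	mutex_unlock(lock);
	return 0;
}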
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked = __mutex_trylock(lock);

	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
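
/*
 * Illustrative sketch, not part of the original file: the typical
 * last-reference teardown pattern atomic_dec_and_mutex_lock() supports.
 * The object, list lock and free step are hypothetical.
 */
struct example_obj {
	atomic_t refs;
	/* ... */
};

static DEFINE_MUTEX(example_obj_lock);	/* protects the hypothetical list */

static __maybe_unused void example_obj_put(struct example_obj *obj)
{
	/* Returns 1, with the mutex held, only when the count hit zero. */
	if (atomic_dec_and_mutex_lock(&obj->refs, &example_obj_lock)) {
		/* last reference: unlink under the lock, then free @obj */
		mutex_unlock(&example_obj_lock);
	}
}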