1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * kernel/locking/mutex.c 4 * 5 * Mutexes: blocking mutual exclusion locks 6 * 7 * Started by Ingo Molnar: 8 * 9 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 10 * 11 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and 12 * David Howells for suggestions and improvements. 13 * 14 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline 15 * from the -rt tree, where it was originally implemented for rtmutexes 16 * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale 17 * and Sven Dietrich. 18 * 19 * Also see Documentation/locking/mutex-design.rst. 20 */ 21 #include <linux/mutex.h> 22 #include <linux/ww_mutex.h> 23 #include <linux/sched/signal.h> 24 #include <linux/sched/rt.h> 25 #include <linux/sched/wake_q.h> 26 #include <linux/sched/debug.h> 27 #include <linux/export.h> 28 #include <linux/spinlock.h> 29 #include <linux/interrupt.h> 30 #include <linux/debug_locks.h> 31 #include <linux/osq_lock.h> 32 33 #ifdef CONFIG_DEBUG_MUTEXES 34 # include "mutex-debug.h" 35 #else 36 # include "mutex.h" 37 #endif 38 39 void 40 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) 41 { 42 atomic_long_set(&lock->owner, 0); 43 spin_lock_init(&lock->wait_lock); 44 INIT_LIST_HEAD(&lock->wait_list); 45 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER 46 osq_lock_init(&lock->osq); 47 #endif 48 49 debug_mutex_init(lock, name, key); 50 } 51 EXPORT_SYMBOL(__mutex_init); 52 53 /* 54 * @owner: contains: 'struct task_struct *' to the current lock owner, 55 * NULL means not owned. Since task_struct pointers are aligned at 56 * at least L1_CACHE_BYTES, we have low bits to store extra state. 57 * 58 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup. 59 * Bit1 indicates unlock needs to hand the lock to the top-waiter 60 * Bit2 indicates handoff has been done and we're waiting for pickup. 61 */ 62 #define MUTEX_FLAG_WAITERS 0x01 63 #define MUTEX_FLAG_HANDOFF 0x02 64 #define MUTEX_FLAG_PICKUP 0x04 65 66 #define MUTEX_FLAGS 0x07 67 68 static inline struct task_struct *__owner_task(unsigned long owner) 69 { 70 return (struct task_struct *)(owner & ~MUTEX_FLAGS); 71 } 72 73 static inline unsigned long __owner_flags(unsigned long owner) 74 { 75 return owner & MUTEX_FLAGS; 76 } 77 78 /* 79 * Trylock variant that retuns the owning task on failure. 80 */ 81 static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock) 82 { 83 unsigned long owner, curr = (unsigned long)current; 84 85 owner = atomic_long_read(&lock->owner); 86 for (;;) { /* must loop, can race against a flag */ 87 unsigned long old, flags = __owner_flags(owner); 88 unsigned long task = owner & ~MUTEX_FLAGS; 89 90 if (task) { 91 if (likely(task != curr)) 92 break; 93 94 if (likely(!(flags & MUTEX_FLAG_PICKUP))) 95 break; 96 97 flags &= ~MUTEX_FLAG_PICKUP; 98 } else { 99 #ifdef CONFIG_DEBUG_MUTEXES 100 DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP); 101 #endif 102 } 103 104 /* 105 * We set the HANDOFF bit, we must make sure it doesn't live 106 * past the point where we acquire it. This would be possible 107 * if we (accidentally) set the bit on an unlocked mutex. 108 */ 109 flags &= ~MUTEX_FLAG_HANDOFF; 110 111 old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags); 112 if (old == owner) 113 return NULL; 114 115 owner = old; 116 } 117 118 return __owner_task(owner); 119 } 120 121 /* 122 * Actual trylock that will work on any unlocked state. 123 */ 124 static inline bool __mutex_trylock(struct mutex *lock) 125 { 126 return !__mutex_trylock_or_owner(lock); 127 } 128 129 #ifndef CONFIG_DEBUG_LOCK_ALLOC 130 /* 131 * Lockdep annotations are contained to the slow paths for simplicity. 132 * There is nothing that would stop spreading the lockdep annotations outwards 133 * except more code. 134 */ 135 136 /* 137 * Optimistic trylock that only works in the uncontended case. Make sure to 138 * follow with a __mutex_trylock() before failing. 139 */ 140 static __always_inline bool __mutex_trylock_fast(struct mutex *lock) 141 { 142 unsigned long curr = (unsigned long)current; 143 unsigned long zero = 0UL; 144 145 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) 146 return true; 147 148 return false; 149 } 150 151 static __always_inline bool __mutex_unlock_fast(struct mutex *lock) 152 { 153 unsigned long curr = (unsigned long)current; 154 155 if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr) 156 return true; 157 158 return false; 159 } 160 #endif 161 162 static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag) 163 { 164 atomic_long_or(flag, &lock->owner); 165 } 166 167 static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag) 168 { 169 atomic_long_andnot(flag, &lock->owner); 170 } 171 172 static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter) 173 { 174 return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter; 175 } 176 177 /* 178 * Add @waiter to a given location in the lock wait_list and set the 179 * FLAG_WAITERS flag if it's the first waiter. 180 */ 181 static void __sched 182 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, 183 struct list_head *list) 184 { 185 debug_mutex_add_waiter(lock, waiter, current); 186 187 list_add_tail(&waiter->list, list); 188 if (__mutex_waiter_is_first(lock, waiter)) 189 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS); 190 } 191 192 /* 193 * Give up ownership to a specific task, when @task = NULL, this is equivalent 194 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves 195 * WAITERS. Provides RELEASE semantics like a regular unlock, the 196 * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff. 197 */ 198 static void __mutex_handoff(struct mutex *lock, struct task_struct *task) 199 { 200 unsigned long owner = atomic_long_read(&lock->owner); 201 202 for (;;) { 203 unsigned long old, new; 204 205 #ifdef CONFIG_DEBUG_MUTEXES 206 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current); 207 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP); 208 #endif 209 210 new = (owner & MUTEX_FLAG_WAITERS); 211 new |= (unsigned long)task; 212 if (task) 213 new |= MUTEX_FLAG_PICKUP; 214 215 old = atomic_long_cmpxchg_release(&lock->owner, owner, new); 216 if (old == owner) 217 break; 218 219 owner = old; 220 } 221 } 222 223 #ifndef CONFIG_DEBUG_LOCK_ALLOC 224 /* 225 * We split the mutex lock/unlock logic into separate fastpath and 226 * slowpath functions, to reduce the register pressure on the fastpath. 227 * We also put the fastpath first in the kernel image, to make sure the 228 * branch is predicted by the CPU as default-untaken. 229 */ 230 static void __sched __mutex_lock_slowpath(struct mutex *lock); 231 232 /** 233 * mutex_lock - acquire the mutex 234 * @lock: the mutex to be acquired 235 * 236 * Lock the mutex exclusively for this task. If the mutex is not 237 * available right now, it will sleep until it can get it. 238 * 239 * The mutex must later on be released by the same task that 240 * acquired it. Recursive locking is not allowed. The task 241 * may not exit without first unlocking the mutex. Also, kernel 242 * memory where the mutex resides must not be freed with 243 * the mutex still locked. The mutex must first be initialized 244 * (or statically defined) before it can be locked. memset()-ing 245 * the mutex to 0 is not allowed. 246 * 247 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging 248 * checks that will enforce the restrictions and will also do 249 * deadlock debugging) 250 * 251 * This function is similar to (but not equivalent to) down(). 252 */ 253 void __sched mutex_lock(struct mutex *lock) 254 { 255 might_sleep(); 256 257 if (!__mutex_trylock_fast(lock)) 258 __mutex_lock_slowpath(lock); 259 } 260 EXPORT_SYMBOL(mutex_lock); 261 #endif 262 263 /* 264 * Wait-Die: 265 * The newer transactions are killed when: 266 * It (the new transaction) makes a request for a lock being held 267 * by an older transaction. 268 * 269 * Wound-Wait: 270 * The newer transactions are wounded when: 271 * An older transaction makes a request for a lock being held by 272 * the newer transaction. 273 */ 274 275 /* 276 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired 277 * it. 278 */ 279 static __always_inline void 280 ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx) 281 { 282 #ifdef CONFIG_DEBUG_MUTEXES 283 /* 284 * If this WARN_ON triggers, you used ww_mutex_lock to acquire, 285 * but released with a normal mutex_unlock in this call. 286 * 287 * This should never happen, always use ww_mutex_unlock. 288 */ 289 DEBUG_LOCKS_WARN_ON(ww->ctx); 290 291 /* 292 * Not quite done after calling ww_acquire_done() ? 293 */ 294 DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire); 295 296 if (ww_ctx->contending_lock) { 297 /* 298 * After -EDEADLK you tried to 299 * acquire a different ww_mutex? Bad! 300 */ 301 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww); 302 303 /* 304 * You called ww_mutex_lock after receiving -EDEADLK, 305 * but 'forgot' to unlock everything else first? 306 */ 307 DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0); 308 ww_ctx->contending_lock = NULL; 309 } 310 311 /* 312 * Naughty, using a different class will lead to undefined behavior! 313 */ 314 DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class); 315 #endif 316 ww_ctx->acquired++; 317 ww->ctx = ww_ctx; 318 } 319 320 /* 321 * Determine if context @a is 'after' context @b. IOW, @a is a younger 322 * transaction than @b and depending on algorithm either needs to wait for 323 * @b or die. 324 */ 325 static inline bool __sched 326 __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b) 327 { 328 329 return (signed long)(a->stamp - b->stamp) > 0; 330 } 331 332 /* 333 * Wait-Die; wake a younger waiter context (when locks held) such that it can 334 * die. 335 * 336 * Among waiters with context, only the first one can have other locks acquired 337 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and 338 * __ww_mutex_check_kill() wake any but the earliest context. 339 */ 340 static bool __sched 341 __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter, 342 struct ww_acquire_ctx *ww_ctx) 343 { 344 if (!ww_ctx->is_wait_die) 345 return false; 346 347 if (waiter->ww_ctx->acquired > 0 && 348 __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) { 349 debug_mutex_wake_waiter(lock, waiter); 350 wake_up_process(waiter->task); 351 } 352 353 return true; 354 } 355 356 /* 357 * Wound-Wait; wound a younger @hold_ctx if it holds the lock. 358 * 359 * Wound the lock holder if there are waiters with older transactions than 360 * the lock holders. Even if multiple waiters may wound the lock holder, 361 * it's sufficient that only one does. 362 */ 363 static bool __ww_mutex_wound(struct mutex *lock, 364 struct ww_acquire_ctx *ww_ctx, 365 struct ww_acquire_ctx *hold_ctx) 366 { 367 struct task_struct *owner = __mutex_owner(lock); 368 369 lockdep_assert_held(&lock->wait_lock); 370 371 /* 372 * Possible through __ww_mutex_add_waiter() when we race with 373 * ww_mutex_set_context_fastpath(). In that case we'll get here again 374 * through __ww_mutex_check_waiters(). 375 */ 376 if (!hold_ctx) 377 return false; 378 379 /* 380 * Can have !owner because of __mutex_unlock_slowpath(), but if owner, 381 * it cannot go away because we'll have FLAG_WAITERS set and hold 382 * wait_lock. 383 */ 384 if (!owner) 385 return false; 386 387 if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) { 388 hold_ctx->wounded = 1; 389 390 /* 391 * wake_up_process() paired with set_current_state() 392 * inserts sufficient barriers to make sure @owner either sees 393 * it's wounded in __ww_mutex_check_kill() or has a 394 * wakeup pending to re-read the wounded state. 395 */ 396 if (owner != current) 397 wake_up_process(owner); 398 399 return true; 400 } 401 402 return false; 403 } 404 405 /* 406 * We just acquired @lock under @ww_ctx, if there are later contexts waiting 407 * behind us on the wait-list, check if they need to die, or wound us. 408 * 409 * See __ww_mutex_add_waiter() for the list-order construction; basically the 410 * list is ordered by stamp, smallest (oldest) first. 411 * 412 * This relies on never mixing wait-die/wound-wait on the same wait-list; 413 * which is currently ensured by that being a ww_class property. 414 * 415 * The current task must not be on the wait list. 416 */ 417 static void __sched 418 __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx) 419 { 420 struct mutex_waiter *cur; 421 422 lockdep_assert_held(&lock->wait_lock); 423 424 list_for_each_entry(cur, &lock->wait_list, list) { 425 if (!cur->ww_ctx) 426 continue; 427 428 if (__ww_mutex_die(lock, cur, ww_ctx) || 429 __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx)) 430 break; 431 } 432 } 433 434 /* 435 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx 436 * and wake up any waiters so they can recheck. 437 */ 438 static __always_inline void 439 ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 440 { 441 ww_mutex_lock_acquired(lock, ctx); 442 443 /* 444 * The lock->ctx update should be visible on all cores before 445 * the WAITERS check is done, otherwise contended waiters might be 446 * missed. The contended waiters will either see ww_ctx == NULL 447 * and keep spinning, or it will acquire wait_lock, add itself 448 * to waiter list and sleep. 449 */ 450 smp_mb(); /* See comments above and below. */ 451 452 /* 453 * [W] ww->ctx = ctx [W] MUTEX_FLAG_WAITERS 454 * MB MB 455 * [R] MUTEX_FLAG_WAITERS [R] ww->ctx 456 * 457 * The memory barrier above pairs with the memory barrier in 458 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx 459 * and/or !empty list. 460 */ 461 if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS))) 462 return; 463 464 /* 465 * Uh oh, we raced in fastpath, check if any of the waiters need to 466 * die or wound us. 467 */ 468 spin_lock(&lock->base.wait_lock); 469 __ww_mutex_check_waiters(&lock->base, ctx); 470 spin_unlock(&lock->base.wait_lock); 471 } 472 473 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER 474 475 static inline 476 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, 477 struct mutex_waiter *waiter) 478 { 479 struct ww_mutex *ww; 480 481 ww = container_of(lock, struct ww_mutex, base); 482 483 /* 484 * If ww->ctx is set the contents are undefined, only 485 * by acquiring wait_lock there is a guarantee that 486 * they are not invalid when reading. 487 * 488 * As such, when deadlock detection needs to be 489 * performed the optimistic spinning cannot be done. 490 * 491 * Check this in every inner iteration because we may 492 * be racing against another thread's ww_mutex_lock. 493 */ 494 if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx)) 495 return false; 496 497 /* 498 * If we aren't on the wait list yet, cancel the spin 499 * if there are waiters. We want to avoid stealing the 500 * lock from a waiter with an earlier stamp, since the 501 * other thread may already own a lock that we also 502 * need. 503 */ 504 if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS)) 505 return false; 506 507 /* 508 * Similarly, stop spinning if we are no longer the 509 * first waiter. 510 */ 511 if (waiter && !__mutex_waiter_is_first(lock, waiter)) 512 return false; 513 514 return true; 515 } 516 517 /* 518 * Look out! "owner" is an entirely speculative pointer access and not 519 * reliable. 520 * 521 * "noinline" so that this function shows up on perf profiles. 522 */ 523 static noinline 524 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner, 525 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter) 526 { 527 bool ret = true; 528 529 rcu_read_lock(); 530 while (__mutex_owner(lock) == owner) { 531 /* 532 * Ensure we emit the owner->on_cpu, dereference _after_ 533 * checking lock->owner still matches owner. If that fails, 534 * owner might point to freed memory. If it still matches, 535 * the rcu_read_lock() ensures the memory stays valid. 536 */ 537 barrier(); 538 539 /* 540 * Use vcpu_is_preempted to detect lock holder preemption issue. 541 */ 542 if (!owner->on_cpu || need_resched() || 543 vcpu_is_preempted(task_cpu(owner))) { 544 ret = false; 545 break; 546 } 547 548 if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) { 549 ret = false; 550 break; 551 } 552 553 cpu_relax(); 554 } 555 rcu_read_unlock(); 556 557 return ret; 558 } 559 560 /* 561 * Initial check for entering the mutex spinning loop 562 */ 563 static inline int mutex_can_spin_on_owner(struct mutex *lock) 564 { 565 struct task_struct *owner; 566 int retval = 1; 567 568 if (need_resched()) 569 return 0; 570 571 rcu_read_lock(); 572 owner = __mutex_owner(lock); 573 574 /* 575 * As lock holder preemption issue, we both skip spinning if task is not 576 * on cpu or its cpu is preempted 577 */ 578 if (owner) 579 retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner)); 580 rcu_read_unlock(); 581 582 /* 583 * If lock->owner is not set, the mutex has been released. Return true 584 * such that we'll trylock in the spin path, which is a faster option 585 * than the blocking slow path. 586 */ 587 return retval; 588 } 589 590 /* 591 * Optimistic spinning. 592 * 593 * We try to spin for acquisition when we find that the lock owner 594 * is currently running on a (different) CPU and while we don't 595 * need to reschedule. The rationale is that if the lock owner is 596 * running, it is likely to release the lock soon. 597 * 598 * The mutex spinners are queued up using MCS lock so that only one 599 * spinner can compete for the mutex. However, if mutex spinning isn't 600 * going to happen, there is no point in going through the lock/unlock 601 * overhead. 602 * 603 * Returns true when the lock was taken, otherwise false, indicating 604 * that we need to jump to the slowpath and sleep. 605 * 606 * The waiter flag is set to true if the spinner is a waiter in the wait 607 * queue. The waiter-spinner will spin on the lock directly and concurrently 608 * with the spinner at the head of the OSQ, if present, until the owner is 609 * changed to itself. 610 */ 611 static __always_inline bool 612 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, 613 const bool use_ww_ctx, struct mutex_waiter *waiter) 614 { 615 if (!waiter) { 616 /* 617 * The purpose of the mutex_can_spin_on_owner() function is 618 * to eliminate the overhead of osq_lock() and osq_unlock() 619 * in case spinning isn't possible. As a waiter-spinner 620 * is not going to take OSQ lock anyway, there is no need 621 * to call mutex_can_spin_on_owner(). 622 */ 623 if (!mutex_can_spin_on_owner(lock)) 624 goto fail; 625 626 /* 627 * In order to avoid a stampede of mutex spinners trying to 628 * acquire the mutex all at once, the spinners need to take a 629 * MCS (queued) lock first before spinning on the owner field. 630 */ 631 if (!osq_lock(&lock->osq)) 632 goto fail; 633 } 634 635 for (;;) { 636 struct task_struct *owner; 637 638 /* Try to acquire the mutex... */ 639 owner = __mutex_trylock_or_owner(lock); 640 if (!owner) 641 break; 642 643 /* 644 * There's an owner, wait for it to either 645 * release the lock or go to sleep. 646 */ 647 if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter)) 648 goto fail_unlock; 649 650 /* 651 * The cpu_relax() call is a compiler barrier which forces 652 * everything in this loop to be re-loaded. We don't need 653 * memory barriers as we'll eventually observe the right 654 * values at the cost of a few extra spins. 655 */ 656 cpu_relax(); 657 } 658 659 if (!waiter) 660 osq_unlock(&lock->osq); 661 662 return true; 663 664 665 fail_unlock: 666 if (!waiter) 667 osq_unlock(&lock->osq); 668 669 fail: 670 /* 671 * If we fell out of the spin path because of need_resched(), 672 * reschedule now, before we try-lock the mutex. This avoids getting 673 * scheduled out right after we obtained the mutex. 674 */ 675 if (need_resched()) { 676 /* 677 * We _should_ have TASK_RUNNING here, but just in case 678 * we do not, make it so, otherwise we might get stuck. 679 */ 680 __set_current_state(TASK_RUNNING); 681 schedule_preempt_disabled(); 682 } 683 684 return false; 685 } 686 #else 687 static __always_inline bool 688 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, 689 const bool use_ww_ctx, struct mutex_waiter *waiter) 690 { 691 return false; 692 } 693 #endif 694 695 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip); 696 697 /** 698 * mutex_unlock - release the mutex 699 * @lock: the mutex to be released 700 * 701 * Unlock a mutex that has been locked by this task previously. 702 * 703 * This function must not be used in interrupt context. Unlocking 704 * of a not locked mutex is not allowed. 705 * 706 * This function is similar to (but not equivalent to) up(). 707 */ 708 void __sched mutex_unlock(struct mutex *lock) 709 { 710 #ifndef CONFIG_DEBUG_LOCK_ALLOC 711 if (__mutex_unlock_fast(lock)) 712 return; 713 #endif 714 __mutex_unlock_slowpath(lock, _RET_IP_); 715 } 716 EXPORT_SYMBOL(mutex_unlock); 717 718 /** 719 * ww_mutex_unlock - release the w/w mutex 720 * @lock: the mutex to be released 721 * 722 * Unlock a mutex that has been locked by this task previously with any of the 723 * ww_mutex_lock* functions (with or without an acquire context). It is 724 * forbidden to release the locks after releasing the acquire context. 725 * 726 * This function must not be used in interrupt context. Unlocking 727 * of a unlocked mutex is not allowed. 728 */ 729 void __sched ww_mutex_unlock(struct ww_mutex *lock) 730 { 731 /* 732 * The unlocking fastpath is the 0->1 transition from 'locked' 733 * into 'unlocked' state: 734 */ 735 if (lock->ctx) { 736 #ifdef CONFIG_DEBUG_MUTEXES 737 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired); 738 #endif 739 if (lock->ctx->acquired > 0) 740 lock->ctx->acquired--; 741 lock->ctx = NULL; 742 } 743 744 mutex_unlock(&lock->base); 745 } 746 EXPORT_SYMBOL(ww_mutex_unlock); 747 748 749 static __always_inline int __sched 750 __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx) 751 { 752 if (ww_ctx->acquired > 0) { 753 #ifdef CONFIG_DEBUG_MUTEXES 754 struct ww_mutex *ww; 755 756 ww = container_of(lock, struct ww_mutex, base); 757 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock); 758 ww_ctx->contending_lock = ww; 759 #endif 760 return -EDEADLK; 761 } 762 763 return 0; 764 } 765 766 767 /* 768 * Check the wound condition for the current lock acquire. 769 * 770 * Wound-Wait: If we're wounded, kill ourself. 771 * 772 * Wait-Die: If we're trying to acquire a lock already held by an older 773 * context, kill ourselves. 774 * 775 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to 776 * look at waiters before us in the wait-list. 777 */ 778 static inline int __sched 779 __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter, 780 struct ww_acquire_ctx *ctx) 781 { 782 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); 783 struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx); 784 struct mutex_waiter *cur; 785 786 if (ctx->acquired == 0) 787 return 0; 788 789 if (!ctx->is_wait_die) { 790 if (ctx->wounded) 791 return __ww_mutex_kill(lock, ctx); 792 793 return 0; 794 } 795 796 if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx)) 797 return __ww_mutex_kill(lock, ctx); 798 799 /* 800 * If there is a waiter in front of us that has a context, then its 801 * stamp is earlier than ours and we must kill ourself. 802 */ 803 cur = waiter; 804 list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) { 805 if (!cur->ww_ctx) 806 continue; 807 808 return __ww_mutex_kill(lock, ctx); 809 } 810 811 return 0; 812 } 813 814 /* 815 * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest 816 * first. Such that older contexts are preferred to acquire the lock over 817 * younger contexts. 818 * 819 * Waiters without context are interspersed in FIFO order. 820 * 821 * Furthermore, for Wait-Die kill ourself immediately when possible (there are 822 * older contexts already waiting) to avoid unnecessary waiting and for 823 * Wound-Wait ensure we wound the owning context when it is younger. 824 */ 825 static inline int __sched 826 __ww_mutex_add_waiter(struct mutex_waiter *waiter, 827 struct mutex *lock, 828 struct ww_acquire_ctx *ww_ctx) 829 { 830 struct mutex_waiter *cur; 831 struct list_head *pos; 832 bool is_wait_die; 833 834 if (!ww_ctx) { 835 __mutex_add_waiter(lock, waiter, &lock->wait_list); 836 return 0; 837 } 838 839 is_wait_die = ww_ctx->is_wait_die; 840 841 /* 842 * Add the waiter before the first waiter with a higher stamp. 843 * Waiters without a context are skipped to avoid starving 844 * them. Wait-Die waiters may die here. Wound-Wait waiters 845 * never die here, but they are sorted in stamp order and 846 * may wound the lock holder. 847 */ 848 pos = &lock->wait_list; 849 list_for_each_entry_reverse(cur, &lock->wait_list, list) { 850 if (!cur->ww_ctx) 851 continue; 852 853 if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) { 854 /* 855 * Wait-Die: if we find an older context waiting, there 856 * is no point in queueing behind it, as we'd have to 857 * die the moment it would acquire the lock. 858 */ 859 if (is_wait_die) { 860 int ret = __ww_mutex_kill(lock, ww_ctx); 861 862 if (ret) 863 return ret; 864 } 865 866 break; 867 } 868 869 pos = &cur->list; 870 871 /* Wait-Die: ensure younger waiters die. */ 872 __ww_mutex_die(lock, cur, ww_ctx); 873 } 874 875 __mutex_add_waiter(lock, waiter, pos); 876 877 /* 878 * Wound-Wait: if we're blocking on a mutex owned by a younger context, 879 * wound that such that we might proceed. 880 */ 881 if (!is_wait_die) { 882 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); 883 884 /* 885 * See ww_mutex_set_context_fastpath(). Orders setting 886 * MUTEX_FLAG_WAITERS vs the ww->ctx load, 887 * such that either we or the fastpath will wound @ww->ctx. 888 */ 889 smp_mb(); 890 __ww_mutex_wound(lock, ww_ctx, ww->ctx); 891 } 892 893 return 0; 894 } 895 896 /* 897 * Lock a mutex (possibly interruptible), slowpath: 898 */ 899 static __always_inline int __sched 900 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, 901 struct lockdep_map *nest_lock, unsigned long ip, 902 struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx) 903 { 904 struct mutex_waiter waiter; 905 bool first = false; 906 struct ww_mutex *ww; 907 int ret; 908 909 might_sleep(); 910 911 ww = container_of(lock, struct ww_mutex, base); 912 if (use_ww_ctx && ww_ctx) { 913 if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) 914 return -EALREADY; 915 916 /* 917 * Reset the wounded flag after a kill. No other process can 918 * race and wound us here since they can't have a valid owner 919 * pointer if we don't have any locks held. 920 */ 921 if (ww_ctx->acquired == 0) 922 ww_ctx->wounded = 0; 923 } 924 925 preempt_disable(); 926 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); 927 928 if (__mutex_trylock(lock) || 929 mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) { 930 /* got the lock, yay! */ 931 lock_acquired(&lock->dep_map, ip); 932 if (use_ww_ctx && ww_ctx) 933 ww_mutex_set_context_fastpath(ww, ww_ctx); 934 preempt_enable(); 935 return 0; 936 } 937 938 spin_lock(&lock->wait_lock); 939 /* 940 * After waiting to acquire the wait_lock, try again. 941 */ 942 if (__mutex_trylock(lock)) { 943 if (use_ww_ctx && ww_ctx) 944 __ww_mutex_check_waiters(lock, ww_ctx); 945 946 goto skip_wait; 947 } 948 949 debug_mutex_lock_common(lock, &waiter); 950 951 lock_contended(&lock->dep_map, ip); 952 953 if (!use_ww_ctx) { 954 /* add waiting tasks to the end of the waitqueue (FIFO): */ 955 __mutex_add_waiter(lock, &waiter, &lock->wait_list); 956 957 958 #ifdef CONFIG_DEBUG_MUTEXES 959 waiter.ww_ctx = MUTEX_POISON_WW_CTX; 960 #endif 961 } else { 962 /* 963 * Add in stamp order, waking up waiters that must kill 964 * themselves. 965 */ 966 ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx); 967 if (ret) 968 goto err_early_kill; 969 970 waiter.ww_ctx = ww_ctx; 971 } 972 973 waiter.task = current; 974 975 set_current_state(state); 976 for (;;) { 977 /* 978 * Once we hold wait_lock, we're serialized against 979 * mutex_unlock() handing the lock off to us, do a trylock 980 * before testing the error conditions to make sure we pick up 981 * the handoff. 982 */ 983 if (__mutex_trylock(lock)) 984 goto acquired; 985 986 /* 987 * Check for signals and kill conditions while holding 988 * wait_lock. This ensures the lock cancellation is ordered 989 * against mutex_unlock() and wake-ups do not go missing. 990 */ 991 if (signal_pending_state(state, current)) { 992 ret = -EINTR; 993 goto err; 994 } 995 996 if (use_ww_ctx && ww_ctx) { 997 ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx); 998 if (ret) 999 goto err; 1000 } 1001 1002 spin_unlock(&lock->wait_lock); 1003 schedule_preempt_disabled(); 1004 1005 /* 1006 * ww_mutex needs to always recheck its position since its waiter 1007 * list is not FIFO ordered. 1008 */ 1009 if ((use_ww_ctx && ww_ctx) || !first) { 1010 first = __mutex_waiter_is_first(lock, &waiter); 1011 if (first) 1012 __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF); 1013 } 1014 1015 set_current_state(state); 1016 /* 1017 * Here we order against unlock; we must either see it change 1018 * state back to RUNNING and fall through the next schedule(), 1019 * or we must see its unlock and acquire. 1020 */ 1021 if (__mutex_trylock(lock) || 1022 (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter))) 1023 break; 1024 1025 spin_lock(&lock->wait_lock); 1026 } 1027 spin_lock(&lock->wait_lock); 1028 acquired: 1029 __set_current_state(TASK_RUNNING); 1030 1031 if (use_ww_ctx && ww_ctx) { 1032 /* 1033 * Wound-Wait; we stole the lock (!first_waiter), check the 1034 * waiters as anyone might want to wound us. 1035 */ 1036 if (!ww_ctx->is_wait_die && 1037 !__mutex_waiter_is_first(lock, &waiter)) 1038 __ww_mutex_check_waiters(lock, ww_ctx); 1039 } 1040 1041 mutex_remove_waiter(lock, &waiter, current); 1042 if (likely(list_empty(&lock->wait_list))) 1043 __mutex_clear_flag(lock, MUTEX_FLAGS); 1044 1045 debug_mutex_free_waiter(&waiter); 1046 1047 skip_wait: 1048 /* got the lock - cleanup and rejoice! */ 1049 lock_acquired(&lock->dep_map, ip); 1050 1051 if (use_ww_ctx && ww_ctx) 1052 ww_mutex_lock_acquired(ww, ww_ctx); 1053 1054 spin_unlock(&lock->wait_lock); 1055 preempt_enable(); 1056 return 0; 1057 1058 err: 1059 __set_current_state(TASK_RUNNING); 1060 mutex_remove_waiter(lock, &waiter, current); 1061 err_early_kill: 1062 spin_unlock(&lock->wait_lock); 1063 debug_mutex_free_waiter(&waiter); 1064 mutex_release(&lock->dep_map, 1, ip); 1065 preempt_enable(); 1066 return ret; 1067 } 1068 1069 static int __sched 1070 __mutex_lock(struct mutex *lock, long state, unsigned int subclass, 1071 struct lockdep_map *nest_lock, unsigned long ip) 1072 { 1073 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false); 1074 } 1075 1076 static int __sched 1077 __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass, 1078 struct lockdep_map *nest_lock, unsigned long ip, 1079 struct ww_acquire_ctx *ww_ctx) 1080 { 1081 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true); 1082 } 1083 1084 #ifdef CONFIG_DEBUG_LOCK_ALLOC 1085 void __sched 1086 mutex_lock_nested(struct mutex *lock, unsigned int subclass) 1087 { 1088 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_); 1089 } 1090 1091 EXPORT_SYMBOL_GPL(mutex_lock_nested); 1092 1093 void __sched 1094 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) 1095 { 1096 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_); 1097 } 1098 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock); 1099 1100 int __sched 1101 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass) 1102 { 1103 return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_); 1104 } 1105 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested); 1106 1107 int __sched 1108 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass) 1109 { 1110 return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_); 1111 } 1112 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested); 1113 1114 void __sched 1115 mutex_lock_io_nested(struct mutex *lock, unsigned int subclass) 1116 { 1117 int token; 1118 1119 might_sleep(); 1120 1121 token = io_schedule_prepare(); 1122 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 1123 subclass, NULL, _RET_IP_, NULL, 0); 1124 io_schedule_finish(token); 1125 } 1126 EXPORT_SYMBOL_GPL(mutex_lock_io_nested); 1127 1128 static inline int 1129 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 1130 { 1131 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH 1132 unsigned tmp; 1133 1134 if (ctx->deadlock_inject_countdown-- == 0) { 1135 tmp = ctx->deadlock_inject_interval; 1136 if (tmp > UINT_MAX/4) 1137 tmp = UINT_MAX; 1138 else 1139 tmp = tmp*2 + tmp + tmp/2; 1140 1141 ctx->deadlock_inject_interval = tmp; 1142 ctx->deadlock_inject_countdown = tmp; 1143 ctx->contending_lock = lock; 1144 1145 ww_mutex_unlock(lock); 1146 1147 return -EDEADLK; 1148 } 1149 #endif 1150 1151 return 0; 1152 } 1153 1154 int __sched 1155 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 1156 { 1157 int ret; 1158 1159 might_sleep(); 1160 ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 1161 0, ctx ? &ctx->dep_map : NULL, _RET_IP_, 1162 ctx); 1163 if (!ret && ctx && ctx->acquired > 1) 1164 return ww_mutex_deadlock_injection(lock, ctx); 1165 1166 return ret; 1167 } 1168 EXPORT_SYMBOL_GPL(ww_mutex_lock); 1169 1170 int __sched 1171 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 1172 { 1173 int ret; 1174 1175 might_sleep(); 1176 ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 1177 0, ctx ? &ctx->dep_map : NULL, _RET_IP_, 1178 ctx); 1179 1180 if (!ret && ctx && ctx->acquired > 1) 1181 return ww_mutex_deadlock_injection(lock, ctx); 1182 1183 return ret; 1184 } 1185 EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible); 1186 1187 #endif 1188 1189 /* 1190 * Release the lock, slowpath: 1191 */ 1192 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip) 1193 { 1194 struct task_struct *next = NULL; 1195 DEFINE_WAKE_Q(wake_q); 1196 unsigned long owner; 1197 1198 mutex_release(&lock->dep_map, 1, ip); 1199 1200 /* 1201 * Release the lock before (potentially) taking the spinlock such that 1202 * other contenders can get on with things ASAP. 1203 * 1204 * Except when HANDOFF, in that case we must not clear the owner field, 1205 * but instead set it to the top waiter. 1206 */ 1207 owner = atomic_long_read(&lock->owner); 1208 for (;;) { 1209 unsigned long old; 1210 1211 #ifdef CONFIG_DEBUG_MUTEXES 1212 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current); 1213 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP); 1214 #endif 1215 1216 if (owner & MUTEX_FLAG_HANDOFF) 1217 break; 1218 1219 old = atomic_long_cmpxchg_release(&lock->owner, owner, 1220 __owner_flags(owner)); 1221 if (old == owner) { 1222 if (owner & MUTEX_FLAG_WAITERS) 1223 break; 1224 1225 return; 1226 } 1227 1228 owner = old; 1229 } 1230 1231 spin_lock(&lock->wait_lock); 1232 debug_mutex_unlock(lock); 1233 if (!list_empty(&lock->wait_list)) { 1234 /* get the first entry from the wait-list: */ 1235 struct mutex_waiter *waiter = 1236 list_first_entry(&lock->wait_list, 1237 struct mutex_waiter, list); 1238 1239 next = waiter->task; 1240 1241 debug_mutex_wake_waiter(lock, waiter); 1242 wake_q_add(&wake_q, next); 1243 } 1244 1245 if (owner & MUTEX_FLAG_HANDOFF) 1246 __mutex_handoff(lock, next); 1247 1248 spin_unlock(&lock->wait_lock); 1249 1250 wake_up_q(&wake_q); 1251 } 1252 1253 #ifndef CONFIG_DEBUG_LOCK_ALLOC 1254 /* 1255 * Here come the less common (and hence less performance-critical) APIs: 1256 * mutex_lock_interruptible() and mutex_trylock(). 1257 */ 1258 static noinline int __sched 1259 __mutex_lock_killable_slowpath(struct mutex *lock); 1260 1261 static noinline int __sched 1262 __mutex_lock_interruptible_slowpath(struct mutex *lock); 1263 1264 /** 1265 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals. 1266 * @lock: The mutex to be acquired. 1267 * 1268 * Lock the mutex like mutex_lock(). If a signal is delivered while the 1269 * process is sleeping, this function will return without acquiring the 1270 * mutex. 1271 * 1272 * Context: Process context. 1273 * Return: 0 if the lock was successfully acquired or %-EINTR if a 1274 * signal arrived. 1275 */ 1276 int __sched mutex_lock_interruptible(struct mutex *lock) 1277 { 1278 might_sleep(); 1279 1280 if (__mutex_trylock_fast(lock)) 1281 return 0; 1282 1283 return __mutex_lock_interruptible_slowpath(lock); 1284 } 1285 1286 EXPORT_SYMBOL(mutex_lock_interruptible); 1287 1288 /** 1289 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals. 1290 * @lock: The mutex to be acquired. 1291 * 1292 * Lock the mutex like mutex_lock(). If a signal which will be fatal to 1293 * the current process is delivered while the process is sleeping, this 1294 * function will return without acquiring the mutex. 1295 * 1296 * Context: Process context. 1297 * Return: 0 if the lock was successfully acquired or %-EINTR if a 1298 * fatal signal arrived. 1299 */ 1300 int __sched mutex_lock_killable(struct mutex *lock) 1301 { 1302 might_sleep(); 1303 1304 if (__mutex_trylock_fast(lock)) 1305 return 0; 1306 1307 return __mutex_lock_killable_slowpath(lock); 1308 } 1309 EXPORT_SYMBOL(mutex_lock_killable); 1310 1311 /** 1312 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O 1313 * @lock: The mutex to be acquired. 1314 * 1315 * Lock the mutex like mutex_lock(). While the task is waiting for this 1316 * mutex, it will be accounted as being in the IO wait state by the 1317 * scheduler. 1318 * 1319 * Context: Process context. 1320 */ 1321 void __sched mutex_lock_io(struct mutex *lock) 1322 { 1323 int token; 1324 1325 token = io_schedule_prepare(); 1326 mutex_lock(lock); 1327 io_schedule_finish(token); 1328 } 1329 EXPORT_SYMBOL_GPL(mutex_lock_io); 1330 1331 static noinline void __sched 1332 __mutex_lock_slowpath(struct mutex *lock) 1333 { 1334 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_); 1335 } 1336 1337 static noinline int __sched 1338 __mutex_lock_killable_slowpath(struct mutex *lock) 1339 { 1340 return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_); 1341 } 1342 1343 static noinline int __sched 1344 __mutex_lock_interruptible_slowpath(struct mutex *lock) 1345 { 1346 return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_); 1347 } 1348 1349 static noinline int __sched 1350 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 1351 { 1352 return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL, 1353 _RET_IP_, ctx); 1354 } 1355 1356 static noinline int __sched 1357 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock, 1358 struct ww_acquire_ctx *ctx) 1359 { 1360 return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL, 1361 _RET_IP_, ctx); 1362 } 1363 1364 #endif 1365 1366 /** 1367 * mutex_trylock - try to acquire the mutex, without waiting 1368 * @lock: the mutex to be acquired 1369 * 1370 * Try to acquire the mutex atomically. Returns 1 if the mutex 1371 * has been acquired successfully, and 0 on contention. 1372 * 1373 * NOTE: this function follows the spin_trylock() convention, so 1374 * it is negated from the down_trylock() return values! Be careful 1375 * about this when converting semaphore users to mutexes. 1376 * 1377 * This function must not be used in interrupt context. The 1378 * mutex must be released by the same task that acquired it. 1379 */ 1380 int __sched mutex_trylock(struct mutex *lock) 1381 { 1382 bool locked = __mutex_trylock(lock); 1383 1384 if (locked) 1385 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); 1386 1387 return locked; 1388 } 1389 EXPORT_SYMBOL(mutex_trylock); 1390 1391 #ifndef CONFIG_DEBUG_LOCK_ALLOC 1392 int __sched 1393 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 1394 { 1395 might_sleep(); 1396 1397 if (__mutex_trylock_fast(&lock->base)) { 1398 if (ctx) 1399 ww_mutex_set_context_fastpath(lock, ctx); 1400 return 0; 1401 } 1402 1403 return __ww_mutex_lock_slowpath(lock, ctx); 1404 } 1405 EXPORT_SYMBOL(ww_mutex_lock); 1406 1407 int __sched 1408 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 1409 { 1410 might_sleep(); 1411 1412 if (__mutex_trylock_fast(&lock->base)) { 1413 if (ctx) 1414 ww_mutex_set_context_fastpath(lock, ctx); 1415 return 0; 1416 } 1417 1418 return __ww_mutex_lock_interruptible_slowpath(lock, ctx); 1419 } 1420 EXPORT_SYMBOL(ww_mutex_lock_interruptible); 1421 1422 #endif 1423 1424 /** 1425 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 1426 * @cnt: the atomic which we are to dec 1427 * @lock: the mutex to return holding if we dec to 0 1428 * 1429 * return true and hold lock if we dec to 0, return false otherwise 1430 */ 1431 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) 1432 { 1433 /* dec if we can't possibly hit 0 */ 1434 if (atomic_add_unless(cnt, -1, 1)) 1435 return 0; 1436 /* we might hit 0, so take the lock */ 1437 mutex_lock(lock); 1438 if (!atomic_dec_and_test(cnt)) { 1439 /* when we actually did the dec, we didn't hit 0 */ 1440 mutex_unlock(lock); 1441 return 0; 1442 } 1443 /* we hit 0, and we hold the lock */ 1444 return 1; 1445 } 1446 EXPORT_SYMBOL(atomic_dec_and_mutex_lock); 1447