// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned to at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

/*
 * Internal helper function; C doesn't allow us to hide it :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}
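
/*
 * Illustrative example, not part of the original file: assuming a
 * task_struct at address 0xffff888012345600 owns the lock and the
 * wait_list is non-empty, lock->owner reads 0xffff888012345601 and the
 * helpers above decompose that value as:
 *
 *	__owner_task(0xffff888012345601UL)  == (struct task_struct *)0xffff888012345600
 *	__owner_flags(0xffff888012345601UL) == MUTEX_FLAG_WAITERS
 */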

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * We set the HANDOFF bit; we must make sure it doesn't live
		 * past the point where we acquire the lock. This would be
		 * possible if we (accidentally) set the bit on an unlocked
		 * mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	list_del(&waiter->list);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_remove_waiter(lock, waiter, current);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock; the
 * __mutex_trylock() provides matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif

/*
 * Wait-Die:
 *   The newer transactions are killed when:
 *     It (the new transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   The newer transactions are wounded when:
 *     An older transaction makes a request for a lock being held by
 *     the newer transaction.
 */
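
/*
 * Illustrative example, not part of the original file: consider two
 * hypothetical transactions with stamps 10 (older) and 20 (younger)
 * contending for the same ww_mutex.
 *
 * Wait-Die:   if 20 requests the lock while 10 holds it, 20 is killed
 *             (gets -EDEADLK) and must back off; if 10 requests the lock
 *             while 20 holds it, 10 simply waits.
 *
 * Wound-Wait: if 10 requests the lock while 20 holds it, 20 is wounded
 *             and will back off the next time it tries to take a lock;
 *             if 20 requests the lock while 10 holds it, 20 waits.
 */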

/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}

/*
 * Determine if context @a is 'after' context @b. IOW, @a is a younger
 * transaction than @b and depending on algorithm either needs to wait for
 * @b or die.
 */
static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	return (signed long)(a->stamp - b->stamp) > 0;
}

/*
 * Wait-Die; wake a younger waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool __sched
__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	if (waiter->ww_ctx->acquired > 0 &&
			__ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
		debug_mutex_wake_waiter(lock, waiter);
		wake_up_process(waiter->task);
	}

	return true;
}

/*
 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with older transactions than
 * the lock holder's. Even if multiple waiters may wound the lock holder,
 * it's sufficient that only one does.
 */
static bool __ww_mutex_wound(struct mutex *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __mutex_owner(lock);

	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Possible through __ww_mutex_add_waiter() when we race with
	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
	 * through __ww_mutex_check_waiters().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have FLAG_WAITERS set and hold
	 * wait_lock.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state()
		 * inserts sufficient barriers to make sure @owner either sees
		 * it's wounded in __ww_mutex_check_kill() or has a
		 * wakeup pending to re-read the wounded state.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}

/*
 * We just acquired @lock under @ww_ctx; if there are later contexts waiting
 * behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list;
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;

	lockdep_assert_held(&lock->wait_lock);

	list_for_each_entry(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}

/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed. A contended waiter will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
	 *     MB		        MB
	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
	 *
	 * The memory barrier above pairs with the memory barrier in
	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
	 * and/or !empty list.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	spin_lock(&lock->base.wait_lock);
	__ww_mutex_check_waiters(&lock->base, ctx);
	spin_unlock(&lock->base.wait_lock);
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set the contents are undefined; only by acquiring
	 * wait_lock is there a guarantee that they are valid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu, dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted() to detect lock holder preemption.
		 */
		if (!owner->on_cpu || need_resched() ||
		    vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop.
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * To cope with lock holder preemption, skip spinning if the owner
	 * task is not running on a CPU or its CPU has been preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;


fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);

static __always_inline int __sched
__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
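
/*
 * Illustrative sketch, not part of the original file: the backoff a caller
 * is expected to perform on -EDEADLK, shown for two hypothetical locks
 * lock_a and lock_b in the same ww_class:
 *
 *	ww_acquire_init(&ctx, &example_ww_class);
 *
 *	ret = ww_mutex_lock(&lock_a, &ctx);
 *	if (!ret)
 *		ret = ww_mutex_lock(&lock_b, &ctx);
 *	if (ret == -EDEADLK) {
 *		ww_mutex_unlock(&lock_a);
 *		ww_mutex_lock_slow(&lock_b, &ctx);
 *		... retry taking lock_a, repeating the backoff on -EDEADLK ...
 *	}
 *	...
 *	ww_acquire_fini(&ctx);
 */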

/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourselves.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int __sched
__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct mutex_waiter *cur;

	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must kill ourselves.
	 */
	cur = waiter;
	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}

/*
 * Add @waiter to the wait-list, keeping the wait-list ordered by stamp,
 * smallest first, such that older contexts are preferred to acquire the
 * lock over younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourselves immediately when possible (there
 * are older contexts already waiting) to avoid unnecessary waiting and for
 * Wound-Wait ensure we wound the owning context when it is younger.
 */
static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
		      struct mutex *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos;
	bool is_wait_die;

	if (!ww_ctx) {
		__mutex_add_waiter(lock, waiter, &lock->wait_list);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them. Wait-Die waiters may die here. Wound-Wait waiters
	 * never die here, but they are sorted in stamp order and
	 * may wound the lock holder.
	 */
	pos = &lock->wait_list;
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: if we find an older context waiting, there
			 * is no point in queueing behind it, as we'd have to
			 * die the moment it would acquire the lock.
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = &cur->list;

		/* Wait-Die: ensure younger waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx);
	}

	__mutex_add_waiter(lock, waiter, pos);

	/*
	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
	 * wound it so that we might proceed.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * See ww_mutex_set_context_fastpath(). Orders setting
		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
		 * such that either we or the fastpath will wound @ww->ctx.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
	}

	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
#endif

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);

#ifdef CONFIG_DEBUG_MUTEXES
		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;

		waiter.ww_ctx = ww_ctx;
	}

	waiter.task = current;

	set_current_state(state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us; do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		/*
		 * ww_mutex needs to always recheck its position since its
		 * waiter list is not FIFO ordered.
		 */
		if (ww_ctx || !first) {
			first = __mutex_waiter_is_first(lock, &waiter);
			if (first)
				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
			break;

		spin_lock(&lock->wait_lock);
	}
	spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	if (ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	__mutex_remove_waiter(lock, &waiter);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
		struct lockdep_map *nest_lock, unsigned long ip,
		struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
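
/*
 * Illustrative sketch, not part of the original file: the _nested variants
 * let lockdep accept taking two locks of the same class, e.g. for a
 * hypothetical pair of objects of the same type locked in a well-defined
 * order:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */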

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF; in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_interruptible);

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

#endif
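
/*
 * Illustrative sketch, not part of the original file: the interruptible and
 * killable variants above must have their return value checked, e.g. for a
 * hypothetical example_lock:
 *
 *	ret = mutex_lock_interruptible(&example_lock);
 *	if (ret)
 *		return ret;
 *	...
 *	mutex_unlock(&example_lock);
 */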

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
#endif

	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
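
/*
 * Illustrative sketch, not part of the original file: typical use of
 * atomic_dec_and_mutex_lock() for a hypothetical refcounted object whose
 * teardown must run under its mutex:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj->lock)) {
 *		... last reference gone, tear the object down under the lock ...
 *		mutex_unlock(&obj->lock);
 *	}
 */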