/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
/*
 * Must be 0 for the debug case so we do not do the unlock outside of the
 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
 * case.
 */
# undef __mutex_slowpath_needs_to_unlock
# define __mutex_slowpath_needs_to_unlock()	0
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
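
/*
 * Example -- a minimal usage sketch for the API above; 'my_cache' and
 * 'my_cache_insert' are made-up names, not part of this file:
 *
 *	struct my_cache {
 *		struct mutex lock;
 *		int nr_entries;
 *	};
 *
 *	static void my_cache_insert(struct my_cache *c)
 *	{
 *		mutex_lock(&c->lock);
 *		c->nr_entries++;
 *		mutex_unlock(&c->lock);
 *	}
 *
 * mutex_lock() may sleep, so this must not be called from interrupt
 * context, and the unlock must come from the same task.
 */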
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you acquired with ww_mutex_lock but
	 * released with a plain mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring lock with fastpath or when we lost out in contested
 * slowpath, set ctx and wake up any waiters so they can recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire the wait_lock, add
	 * themselves to the wait list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up.
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * After acquiring lock in the slowpath set ctx and wake up any
 * waiters so they can recheck.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;

	/*
	 * Give any possible sleeping processes the chance to wake up,
	 * so they can recheck if they have to back off.
	 */
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
}
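
/*
 * Example -- a sketch of the deadlock-avoidance pattern the helpers above
 * support; 'my_class' and 'lock_two' are made-up names. The first lock
 * taken under a context only ever blocks, it never fails, so only the
 * second acquisition needs the -EDEADLK backoff. See
 * Documentation/locking/ww-mutex-design.txt for the full pattern built
 * around ww_mutex_lock_slow():
 *
 *	static DEFINE_WW_CLASS(my_class);
 *
 *	static void lock_two(struct ww_mutex *a, struct ww_mutex *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *
 *		ww_acquire_init(&ctx, &my_class);
 *	retry:
 *		ww_mutex_lock(a, &ctx);
 *		if (ww_mutex_lock(b, &ctx) == -EDEADLK) {
 *			ww_mutex_unlock(a);
 *			swap(a, b);	(take the contended lock first)
 *			goto retry;
 *		}
 *		ww_acquire_done(&ctx);
 *
 *		(... both objects locked, do the work ...)
 *
 *		ww_mutex_unlock(b);
 *		ww_mutex_unlock(a);
 *		ww_acquire_fini(&ctx);
 *	}
 */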
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	bool ret = true;

	rcu_read_lock();
	while (lock->owner == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that lock->owner still matches owner. If that
		 * fails, owner might point to freed memory. If it still
		 * matches, the rcu_read_lock() ensures the memory stays
		 * valid.
		 */
		barrier();

		if (!owner->on_cpu || need_resched()) {
			ret = false;
			break;
		}

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = READ_ONCE(lock->owner);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, a task may have just acquired the
	 * mutex and not yet set the owner, or the mutex may have been
	 * released.
	 */
	return retval;
}

/*
 * Atomically try to take the lock when it is available
 */
static inline bool mutex_try_to_acquire(struct mutex *lock)
{
	return !mutex_is_locked(lock) &&
		(atomic_cmpxchg_acquire(&lock->count, 1, 0) == 1);
}
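
/*
 * The lock->count protocol that the trylock above and the paths below
 * rely on, summarized from the code (not normative documentation):
 *
 *	 1 - unlocked
 *	 0 - locked, no waiters
 *	-1 - locked, possible waiters; the unlock must take the slowpath
 *	     and wake somebody up
 *
 * So a trylock only succeeds on an atomic 1->0 transition, and a waiter
 * that goes to sleep first drags the count to -1 so that the eventual
 * unlock cannot skip the wakeup.
 */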
/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * Since this needs the lock owner, and this mutex implementation
 * doesn't track the owner atomically in the lock field, we need to
 * track it non-atomically.
 *
 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
 * to serialize everything.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 */
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;

	if (!mutex_can_spin_on_owner(lock))
		goto done;

	/*
	 * In order to avoid a stampede of mutex spinners trying to
	 * acquire the mutex all at once, the spinners need to take a
	 * MCS (queued) lock first before spinning on the owner field.
	 */
	if (!osq_lock(&lock->osq))
		goto done;

	while (true) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined, only
			 * by acquiring wait_lock there is a guarantee that
			 * they are not invalid when reading.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (READ_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = READ_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/*
		 * Try to acquire the mutex if it is unlocked. The
		 * lock_acquired() lockstat annotation is done by our
		 * caller, which has 'ip' in scope; we do not.
		 */
		if (mutex_try_to_acquire(lock)) {
			if (use_ww_ctx) {
				struct ww_mutex *ww;
				ww = container_of(lock, struct ww_mutex, base);

				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			osq_unlock(&lock->osq);
			return true;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}

	osq_unlock(&lock->osq);
done:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	return false;
}
#endif

__visible __used noinline
void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);

static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (unlikely(ctx == hold_ctx))
		return -EALREADY;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
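
/*
 * A worked example of the stamp check above, with made-up values. Stamps
 * are handed out from an increasing counter at ww_acquire_init() time, so
 * a smaller stamp means an older context:
 *
 *	hold_ctx->stamp == 5	(context currently holding the lock)
 *	ctx->stamp      == 7	(we are younger)
 *
 * ctx->stamp - hold_ctx->stamp == 2, which is <= LONG_MAX and the stamps
 * differ, so the younger context gets -EDEADLK and must back off. With
 * the values swapped, the unsigned difference wraps far above LONG_MAX,
 * so the older context simply keeps waiting. The 'ctx > hold_ctx'
 * pointer comparison only breaks ties between equal stamps.
 */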
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
		/* got the lock via spinning, yay! */
		lock_acquired(&lock->dep_map, ip);
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);

	/*
	 * Once more, try to acquire the lock. Only try-lock the mutex if
	 * it is unlocked to reduce unnecessary xchg() operations.
	 */
	if (!mutex_is_locked(lock) &&
	    (atomic_xchg_acquire(&lock->count, 0) == 1))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters. We only attempt the xchg if the count is
		 * non-negative in order to avoid unnecessary xchg operations:
		 */
		if (atomic_read(&lock->count) >= 0 &&
		    (atomic_xchg_acquire(&lock->count, -1) == 1))
			break;

		/*
		 * Got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	__set_task_state(task, TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, current_thread_info());
	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);
	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	mutex_set_owner(lock);

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		ww_mutex_set_context_slowpath(ww, ww_ctx);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}
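
/*
 * Example -- when the _nested variants below are needed: taking two locks
 * of the same lock class in a known order, which plain mutex_lock() would
 * make lockdep report as a possible deadlock. 'my_node' and its fields
 * are made-up names:
 *
 *	static void lock_parent_and_child(struct my_node *parent,
 *					  struct my_node *child)
 *	{
 *		mutex_lock(&parent->lock);
 *		mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *		...
 *		mutex_unlock(&child->lock);
 *		mutex_unlock(&parent->lock);
 *	}
 */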
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2; /* grow the interval by 3.5x */

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif
/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
{
	unsigned long flags;

	/*
	 * As a performance measure, release the lock before doing the other
	 * wakeup related duties that follow. This allows other tasks to
	 * acquire the lock sooner, while still handling cleanups in past
	 * unlock calls. This can be done as we do not enforce strict
	 * equivalence between the mutex counter and wait_list.
	 *
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here - as the lock counter is currently 0 or negative.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_unlock_common_slowpath(lock, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_interruptible);
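
/*
 * Example -- the conventional driver-side use of the interruptible
 * variant; 'my_dev' and 'my_ioctl' are made-up names. Returning
 * -ERESTARTSYS (rather than the raw -EINTR) lets the signal machinery
 * transparently restart the syscall where possible:
 *
 *	static long my_ioctl(struct my_dev *dev)
 *	{
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;
 *		...
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */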
int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	/* No need to trylock if the mutex is locked. */
	if (mutex_is_locked(lock))
		return 0;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg_acquire(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
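
/*
 * Example -- because of the spin_trylock()-style convention documented
 * above, a correct caller treats nonzero as success; 'dev' is a made-up
 * name:
 *
 *	if (mutex_trylock(&dev->lock)) {
 *		...		(got the lock, do the work)
 *		mutex_unlock(&dev->lock);
 *	} else {
 *		...		(contended, defer or fall back)
 *	}
 */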
#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return 1 and hold the lock if we dec to 0, return 0 otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
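
/*
 * Example -- the typical use of atomic_dec_and_mutex_lock() is refcounted
 * teardown, where only the final put may tear the object down under the
 * lock; 'my_put' and the fields are made-up names:
 *
 *	static void my_put(struct my_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refcnt, &obj->list_lock))
 *			return;		(not the last reference)
 *
 *		list_del(&obj->node);	(refcount hit 0 under the lock)
 *		mutex_unlock(&obj->list_lock);
 *		kfree(obj);
 *	}
 */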