/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include "mcs_spinlock.h"

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
/*
 * Must be 0 for the debug case so we do not do the unlock outside of the
 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
 * case.
 */
# undef __mutex_slowpath_needs_to_unlock
# define __mutex_slowpath_needs_to_unlock()	0
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	lock->osq = NULL;
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
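
/*
 * Typical usage of the API documented above, as a minimal sketch; the
 * my_data structure and my_data_lock are hypothetical and only serve to
 * illustrate the lock/unlock pairing rules:
 *
 *      static DEFINE_MUTEX(my_data_lock);
 *
 *      static void update_my_data(struct my_data *d, int val)
 *      {
 *              mutex_lock(&my_data_lock);
 *              d->val = val;
 *              mutex_unlock(&my_data_lock);
 *      }
 *
 * The task that called mutex_lock() must be the one to call mutex_unlock(),
 * the lock must not be taken recursively, and it must not still be held
 * when the task exits or the memory containing it is freed.
 */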

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners all trying to acquire the
 * mutex more or less simultaneously, the spinners need to take an MCS lock
 * first before spinning on the owner field.
 */

/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
	 * lock->owner still matches owner. If that fails, owner might
	 * point to free()d memory; if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changed, which is a sign for heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = ACCESS_ONCE(lock->owner);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the mutex owner may have just acquired
	 * it and not set the owner yet, or the mutex has been released.
	 */
	return retval;
}
#endif

__visible __used noinline
void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner prematurely;
	 * the slow path will always be taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
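
/*
 * For reference, a sketch of what the arch-provided fastpaths used by
 * mutex_lock()/mutex_unlock() above commonly look like; this is roughly
 * the include/asm-generic/mutex-dec.h flavour, and individual architectures
 * may instead use xchg or hand-written assembly:
 *
 *      static inline void
 *      __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 *      {
 *              if (unlikely(atomic_dec_return(count) < 0))
 *                      fail_fn(count);
 *      }
 *
 *      static inline void
 *      __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 *      {
 *              if (unlikely(atomic_inc_return(count) <= 0))
 *                      fail_fn(count);
 *      }
 *
 * That is: 1 means unlocked, 0 means locked with no waiters, and a negative
 * count means there are (or may be) waiters, which is what sends both the
 * lock and the unlock path into the slowpaths in this file.
 */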

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner prematurely;
	 * the slow path will always be taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);

static inline int __sched
__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (unlikely(ctx == hold_ctx))
		return -EALREADY;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
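
/*
 * A worked example of the stamp check above. Stamps are unsigned long
 * values handed out in increasing order by ww_acquire_init(), so a smaller
 * stamp means an older context:
 *
 *      ctx->stamp == 5, hold_ctx->stamp == 3:  5 - 3 == 2 <= LONG_MAX and
 *          the stamps differ, so we are the younger context and back off
 *          with -EDEADLK.
 *      ctx->stamp == 3, hold_ctx->stamp == 5:  3 - 5 wraps to ULONG_MAX - 1,
 *          which is > LONG_MAX, so we are the older context and keep waiting.
 *
 * The unsigned subtraction keeps this ordering correct even once the class
 * stamp counter wraps, as long as the live contexts span less than half the
 * counter range; equal stamps are broken by comparing context pointers.
 */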

static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring the lock with the fastpath, or when we lost out in the
 * contested slowpath, set ctx and wake up any waiters so they can recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}
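
/*
 * The caller-side picture of the wait/wound protocol that the ww_* helpers
 * above implement, as a minimal sketch with error handling trimmed; buf_a,
 * buf_b and my_ww_class are hypothetical, and the full API contract is in
 * Documentation/ww-mutex-design.txt:
 *
 *      static DEFINE_WW_CLASS(my_ww_class);
 *
 *      struct ww_acquire_ctx ctx;
 *      int ret;
 *
 *      ww_acquire_init(&ctx, &my_ww_class);
 *
 *      ww_mutex_lock(&buf_a->lock, &ctx);
 *      ret = ww_mutex_lock(&buf_b->lock, &ctx);
 *      if (ret == -EDEADLK) {
 *              (buf_b is held by an older context: drop what we hold, then
 *               sleep on the contended lock before retrying the rest)
 *              ww_mutex_unlock(&buf_a->lock);
 *              ww_mutex_lock_slow(&buf_b->lock, &ctx);
 *              ww_mutex_lock(&buf_a->lock, &ctx);
 *      }
 *      ww_acquire_done(&ctx);
 *      ...
 *      ww_mutex_unlock(&buf_a->lock);
 *      ww_mutex_unlock(&buf_b->lock);
 *      ww_acquire_fini(&ctx);
 *
 * Real code loops on -EDEADLK, since the retried ww_mutex_lock() calls can
 * themselves be told to back off again by an even older context.
 */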

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using the MCS lock so that only one
	 * spinner can compete for the mutex. However, if mutex spinning isn't
	 * going to happen, there is no point in going through the lock/unlock
	 * overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	if (!osq_lock(&lock->osq))
		goto slowpath;

	for (;;) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set, its contents are undefined; only
			 * by acquiring wait_lock is there a guarantee that
			 * they are not invalid when read.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (ACCESS_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			if (use_ww_ctx) {
				struct ww_mutex *ww;
				ww = container_of(lock, struct ww_mutex, base);

				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			osq_unlock(&lock->osq);
			preempt_enable();
			return 0;
		}

		/*
		 * When there's no owner, we might have been preempted between
		 * the owner acquiring the lock and setting the owner field.
		 * If we're an RT task, that will live-lock because we won't
		 * let the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
	osq_unlock(&lock->osq);
slowpath:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched())
		schedule_preempt_disabled();
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	/* once more, can we acquire the lock? */
	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);
	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	mutex_set_owner(lock);

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		struct mutex_waiter *cur;

		/*
		 * This branch gets optimized out for the common case,
		 * and is only important for ww_mutex_lock.
		 */
		ww_mutex_lock_acquired(ww, ww_ctx);
		ww->ctx = ww_ctx;

		/*
		 * Give any possible sleeping processes the chance to wake up,
		 * so they can recheck if they have to back off.
		 */
		list_for_each_entry(cur, &lock->wait_list, list) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif
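
/*
 * Usage note for the lockdep annotations above, as a sketch; "parent" and
 * "child" are hypothetical objects whose locks share one lock class. When
 * two mutexes of the same class must be held at once, the inner acquisition
 * is annotated with a distinct subclass so lockdep does not report a false
 * recursive deadlock:
 *
 *      mutex_lock(&parent->lock);
 *      mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *      ...
 *      mutex_unlock(&child->lock);
 *      mutex_unlock(&parent->lock);
 *
 * Without CONFIG_DEBUG_LOCK_ALLOC the mutex.h header maps
 * mutex_lock_nested() back to plain mutex_lock(), so the annotation has no
 * runtime cost in production builds.
 */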

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(): return 0 once the mutex has
 * been acquired, or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock, this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
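
/*
 * Typical caller pattern for the interruptible/killable variants, as a
 * sketch (my_dev is hypothetical): the error is propagated so that a
 * signal aborts or restarts the syscall instead of blocking forever:
 *
 *      if (mutex_lock_interruptible(&my_dev->lock))
 *              return -ERESTARTSYS;
 *      ...
 *      mutex_unlock(&my_dev->lock);
 *
 * mutex_lock_killable() is used the same way when only a fatal signal
 * should be allowed to interrupt the wait.
 */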

__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic count which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
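
/*
 * Usage sketch for atomic_dec_and_mutex_lock(); my_obj and my_obj_lock are
 * hypothetical. The mutex is only taken when the count might drop to zero,
 * e.g. to unlink an object before freeing it on the final put:
 *
 *      if (atomic_dec_and_mutex_lock(&obj->refcount, &my_obj_lock)) {
 *              list_del(&obj->node);
 *              mutex_unlock(&my_obj_lock);
 *              kfree(obj);
 *      }
 */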