// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifndef CONFIG_PREEMPT_RT
#include "mutex.h"

#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
#else
# define MUTEX_WARN_ON(cond)
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned to at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

/*
 * Internal helper function; C doesn't allow us to hide it :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}
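
/*
 * Illustrative sketch (not part of the implementation): decoding a raw owner
 * word with the helpers above; the local variable names are hypothetical.
 *
 *	unsigned long owner = atomic_long_read(&lock->owner);
 *	struct task_struct *task = __owner_task(owner);	// NULL means unlocked
 *	unsigned long flags = __owner_flags(owner);	// WAITERS/HANDOFF/PICKUP bits
 *
 * A set MUTEX_FLAG_WAITERS bit tells mutex_unlock() that it must take the
 * slowpath and wake up a waiter.
 */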

/*
 * Returns: __mutex_owner(lock) on failure or NULL on success.
 */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (flags & MUTEX_FLAG_PICKUP) {
				if (task != curr)
					break;
				flags &= ~MUTEX_FLAG_PICKUP;
			} else if (handoff) {
				if (flags & MUTEX_FLAG_HANDOFF)
					break;
				flags |= MUTEX_FLAG_HANDOFF;
			} else {
				break;
			}
		} else {
			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
			task = curr;
		}

		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
			if (task == curr)
				return NULL;
			break;
		}
	}

	return __owner_task(owner);
}

/*
 * Trylock or set HANDOFF
 */
static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
{
	return !__mutex_trylock_common(lock, handoff);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_common(lock, false);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	list_del(&waiter->list);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_remove_waiter(lock, waiter, current);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, and preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock; the matching
 * ACQUIRE semantics for the handoff are provided by __mutex_trylock().
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long new;

		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
			break;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging.)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
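
/*
 * Illustrative sketch (not part of this file): a hypothetical structure
 * protected by a mutex, accessed from process context. The structure and
 * function names below are made up.
 *
 *	struct example_dev {
 *		struct mutex lock;
 *		unsigned long count;
 *	};
 *
 *	static void example_dev_inc(struct example_dev *dev)
 *	{
 *		mutex_lock(&dev->lock);		// may sleep; process context only
 *		dev->count++;
 *		mutex_unlock(&dev->lock);	// must be released by the same task
 *	}
 */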

#include "ww_mutex.h"

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	return __mutex_trylock_common(lock, false);
}

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set the contents are undefined; only
	 * by acquiring wait_lock is there a guarantee that
	 * they are not invalid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed, optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	lockdep_assert_preemption_disabled();

	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. And we already
		 * disabled preemption, which is equivalent to the RCU
		 * read-side critical section in optimistic spinning code.
		 * Thus the task_struct structure won't go away during the
		 * spinning period.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issue.
		 */
		if (!owner->on_cpu || need_resched() ||
		    vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	lockdep_assert_preemption_disabled();

	if (need_resched())
		return 0;

	/*
	 * We already disabled preemption, which is equivalent to the RCU
	 * read-side critical section in optimistic spinning code. Thus the
	 * task_struct structure won't go away during the spinning period.
	 */
	owner = __mutex_owner(lock);

	/*
	 * Due to the lock holder preemption issue, we skip spinning if the
	 * task is not on a CPU or its CPU is preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using an MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take the OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take an
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so; otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	__ww_mutex_unlock(lock);
	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	struct ww_mutex *ww;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

	MUTEX_WARN_ON(lock->magic != lock);

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	raw_spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	waiter.task = current;
	if (use_ww_ctx)
		waiter.ww_ctx = ww_ctx;

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;
	}

	set_current_state(state);
	for (;;) {
		bool first;

		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us; do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		raw_spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		first = __mutex_waiter_is_first(lock, &waiter);

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock_or_handoff(lock, first) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
			break;

		raw_spin_lock(&lock->wait_lock);
	}
	raw_spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	if (ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	__mutex_remove_waiter(lock, &waiter);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	raw_spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	raw_spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}

/**
 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
 * @ww: mutex to lock
 * @ww_ctx: optional w/w acquire context
 *
 * Trylocks a mutex with the optional acquire context; no deadlock detection is
 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
 *
 * Unlike ww_mutex_lock, no deadlock handling is performed. However, if @ww_ctx
 * is specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock.
 */
int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx)
		return mutex_trylock(&ww->base);

	MUTEX_WARN_ON(ww->base.magic != &ww->base);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__mutex_trylock(&ww->base)) {
		ww_mutex_set_context_fastpath(ww, ww_ctx);
		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
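
/*
 * Illustrative sketch (not part of this file): taking two locks of the same
 * lock class in a fixed parent->child order, using a nesting annotation so
 * that lockdep does not flag the second acquisition as a self-deadlock. The
 * structures and names are hypothetical.
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */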

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, _RET_IP_, ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif
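
/*
 * Illustrative sketch (not part of this file): the usual acquire-context
 * pattern for taking two ww_mutexes in arbitrary order, restarting from
 * scratch on -EDEADLK. The ww_class, objects and labels are hypothetical;
 * ww_mutex_lock_slow() can additionally be used to sleep on the contended
 * lock before retrying, which this simple form omits.
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &example_ww_class);
 * retry:
 *	ret = ww_mutex_lock(&a->lock, &ctx);
 *	if (ret)
 *		goto backoff;
 *	ret = ww_mutex_lock(&b->lock, &ctx);
 *	if (ret) {
 *		ww_mutex_unlock(&a->lock);
 *		goto backoff;
 *	}
 *	ww_acquire_done(&ctx);
 *	... both objects locked ...
 *	ww_mutex_unlock(&b->lock);
 *	ww_mutex_unlock(&a->lock);
 *	ww_acquire_fini(&ctx);
 *	return 0;
 *
 * backoff:
 *	if (ret == -EDEADLK)
 *		goto retry;
 *	ww_acquire_fini(&ctx);
 *	return ret;
 */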

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF is set: in that case we must not clear the
	 * owner field, but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}
	}

	raw_spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	raw_spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
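
/*
 * Illustrative sketch (not part of this file): typical error handling for the
 * interruptible variant on a syscall path; "dev" and "ret" are hypothetical.
 * The killable variant is used the same way when only fatal signals should
 * abort the wait.
 *
 *	ret = mutex_lock_interruptible(&dev->lock);
 *	if (ret)
 *		return ret;	// -EINTR: a signal arrived before the lock was taken
 *	...
 *	mutex_unlock(&dev->lock);
 */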

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

	MUTEX_WARN_ON(lock->magic != lock);

	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
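
/*
 * Illustrative sketch (not part of this file): since mutex_trylock() returns
 * 1 on success and 0 on contention (the opposite of down_trylock()), a common
 * pattern is to bail out or defer work when the lock is unavailable. "dev" is
 * hypothetical.
 *
 *	if (!mutex_trylock(&dev->lock))
 *		return -EBUSY;		// contended: give up or defer the work
 *	...
 *	mutex_unlock(&dev->lock);
 */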

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* !CONFIG_PREEMPT_RT */

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic that we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true (1) and hold the lock if the count reaches 0; return false (0)
 * otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
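
/*
 * Illustrative sketch (not part of this file): a put path where the final
 * reference drop must tear the object down under its mutex. The structure
 * and helpers are hypothetical.
 *
 *	static void example_put(struct example_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refcount, &obj->lock))
 *			return;			// not the last reference
 *
 *		// last reference dropped; obj->lock is held here
 *		example_teardown(obj);
 *		mutex_unlock(&obj->lock);
 *		kfree(obj);
 *	}
 */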