/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned at
 * ARCH_MIN_TASKALIGN (which is at least sizeof(void *)), we have low
 * bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02

#define MUTEX_FLAGS		0x03

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}
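/*
 * Illustrative example of the owner-word encoding (address made up for the
 * sake of the example): if the owning task_struct lives at 0xffff880012345600
 * and both flag bits are set, the owner word reads 0xffff880012345603.
 * __owner_task() masks off the low MUTEX_FLAGS bits to recover the pointer,
 * while __owner_flags() keeps only those bits.
 */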
/*
 * Actual trylock that will work on any unlocked state.
 *
 * When setting the owner field, we must preserve the low flag bits.
 *
 * Be careful with @handoff, only set that in a wait-loop (where you set
 * HANDOFF) to avoid recursive lock attempts.
 */
static inline bool __mutex_trylock(struct mutex *lock, const bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);

		if (__owner_task(owner)) {
			if (handoff && unlikely(__owner_task(owner) == current)) {
				/*
				 * Provide ACQUIRE semantics for the lock-handoff.
				 *
				 * We cannot easily use load-acquire here, since
				 * the actual load is a failed cmpxchg, which
				 * doesn't imply any barriers.
				 *
				 * Also, this is a fairly unlikely scenario, and
				 * this contains the cost.
				 */
				smp_mb(); /* ACQUIRE */
				return true;
			}

			return false;
		}

		/*
		 * We set the HANDOFF bit, we must make sure it doesn't live
		 * past the point where we acquire it. This would be possible
		 * if we (accidentally) set the bit on an unlocked mutex.
		 */
		if (handoff)
			flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return true;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
		return true;

	return false;
}

/*
 * Uncontended unlock fast path: release the lock with a single cmpxchg of
 * the owner word from 'current' back to 0. Any set flag bits make the
 * cmpxchg fail and force the unlock slow path.
 */
static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

/* Set or clear flag bits in the owner word without disturbing the owner. */
static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}
/*
 * Give up ownership to a specific task, when @task = NULL, this is equivalent
 * to a regular unlock. Clears HANDOFF, preserves WAITERS. Provides RELEASE
 * semantics like a regular unlock, the __mutex_trylock() provides matching
 * ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
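/*
 * Minimal usage sketch (illustrative only; 'my_lock' and the critical
 * section are made-up examples, not part of this file):
 *
 *	static DEFINE_MUTEX(my_lock);
 *
 *	mutex_lock(&my_lock);
 *	... critical section, may sleep ...
 *	mutex_unlock(&my_lock);
 */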
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring lock with fastpath or when we lost out in contested
 * slowpath, set ctx and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * After acquiring lock in the slowpath set ctx and wake up any
 * waiters so they can recheck.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;

	/*
	 * Give any possible sleeping processes the chance to wake up,
	 * so they can recheck if they have to back off.
	 */
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu, dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		if (!owner->on_cpu || need_resched()) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx,
				  const bool use_ww_ctx, const bool waiter)
{
	struct task_struct *task = current;

	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined, only
			 * by acquiring wait_lock there is a guarantee that
			 * they are not invalid when reading.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (READ_ONCE(ww->ctx))
				goto fail_unlock;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = __mutex_owner(lock);
		if (owner) {
			if (waiter && owner == task) {
				smp_mb(); /* ACQUIRE */
				break;
			}

			if (!mutex_spin_on_owner(lock, owner))
				goto fail_unlock;
		}

		/* Try to acquire the mutex if it is unlocked. */
		if (__mutex_trylock(lock, waiter))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;


fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx,
				  const bool use_ww_ctx, const bool waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context).
 * It is forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
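/*
 * Rough wait/wound usage sketch (illustrative; 'obj', 'my_ww_class' and the
 * surrounding transaction logic are made up):
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *	ret = ww_mutex_lock(&obj->lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		drop every ww_mutex already held in this context, then
 *		sleep-wait on the contended lock and retry the set:
 *		ww_mutex_lock_slow(&obj->lock, &ctx);
 *	}
 *	...
 *	ww_mutex_unlock(&obj->lock);
 *	ww_acquire_fini(&ctx);
 */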
static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	if (use_ww_ctx) {
		ww = container_of(lock, struct ww_mutex, base);
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock, false) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock, false))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (__mutex_waiter_is_first(lock, &waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);

	lock_contended(&lock->dep_map, ip);

	set_task_state(task, state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock, first))
			goto acquired;

		/*
		 * Check for signals and wound conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();

		if (!first && __mutex_waiter_is_first(lock, &waiter)) {
			first = true;
			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_task_state(task, state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if ((first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)) ||
		     __mutex_trylock(lock, first))
			break;

		spin_lock_mutex(&lock->wait_lock, flags);
	}
	spin_lock_mutex(&lock->wait_lock, flags);
acquired:
	__set_task_state(task, TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, task);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx)
		ww_mutex_set_context_slowpath(ww, ww_ctx);

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	__set_task_state(task, TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, task);
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
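/*
 * mutex_lock_nested() example (illustrative; 'parent' and 'child' are made
 * up): taking two mutexes of the same lock class in a known order without
 * lockdep flagging it as a self-deadlock:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */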
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	unsigned long owner, flags;
	DEFINE_WAKE_Q(wake_q);

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock_mutex(&lock->wait_lock, flags);

	wake_up_q(&wake_q);
}
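/*
 * Worked example of the handoff path (task names are made up): T0 owns the
 * lock, T1 is the first waiter. After a sleep/wake cycle T1 sets
 * MUTEX_FLAG_HANDOFF. When T0 unlocks, the cmpxchg loop above sees HANDOFF,
 * leaves the owner field set and instead calls __mutex_handoff() to write T1
 * (plus any WAITERS bit) into the owner word. T1's next
 * __mutex_trylock(lock, true) then finds itself as the owner and takes the
 * smp_mb() ACQUIRE path, completing the transfer without the lock ever
 * becoming free for a third party to steal.
 */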
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
					struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked = __mutex_trylock(lock, false);

	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
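/*
 * atomic_dec_and_mutex_lock() usage sketch (illustrative; 'obj', 'refcnt'
 * and the teardown body are made up): dropping the last reference while the
 * mutex serializes the teardown path:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj->lock)) {
 *		... refcount hit zero, tear the object down ...
 *		mutex_unlock(&obj->lock);
 *	}
 */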