/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
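 *
 * An illustrative encoding (a sketch, assuming sufficiently aligned
 * task_structs): with a non-empty wait-list and a completed handoff to
 * task T, the owner word reads
 * (unsigned long)T | MUTEX_FLAG_WAITERS | MUTEX_FLAG_PICKUP,
 * while a plain 0 means unlocked.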
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * If the HANDOFF bit is set, we must make sure it doesn't
		 * live past the point where we acquire the lock. This would
		 * be possible if we (accidentally) set the bit on an
		 * unlocked mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void __sched
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS.
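 *
 * An illustrative transition (owner T handing off to top-waiter W, with
 * the wait-list still non-empty):
 *
 *	T | WAITERS | HANDOFF  -->  W | WAITERS | PICKUP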
 *
 * Provides RELEASE semantics like a regular unlock; __mutex_trylock()
 * provides the matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
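 *
 * A minimal usage sketch (my_lock is illustrative):
 *
 *	static DEFINE_MUTEX(my_lock);
 *
 *	mutex_lock(&my_lock);
 *	... critical section ...
 *	mutex_unlock(&my_lock);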
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif

/*
 * Wait-Die:
 *   The newer transactions are killed when:
 *     It (the new transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   The newer transactions are wounded when:
 *     An older transaction makes a request for a lock being held by
 *     the newer transaction.
 */

/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}
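
/*
 * An illustrative scenario (lower stamp == older): if transaction A
 * (stamp 1) requests a lock held by B (stamp 2), Wait-Die lets A wait
 * while Wound-Wait wounds B; if instead B requests a lock held by A,
 * Wait-Die kills B while Wound-Wait lets B wait.
 */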

/*
 * Determine if context @a is 'after' context @b. IOW, @a is a younger
 * transaction than @b and depending on algorithm either needs to wait for
 * @b or die.
 */
static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	return (signed long)(a->stamp - b->stamp) > 0;
}

/*
 * Wait-Die; wake a younger waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool __sched
__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	if (waiter->ww_ctx->acquired > 0 &&
	    __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
		debug_mutex_wake_waiter(lock, waiter);
		wake_up_process(waiter->task);
	}

	return true;
}

/*
 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with older transactions than
 * the lock holder's. Even if multiple waiters may wound the lock holder,
 * it's sufficient that only one does.
 */
static bool __ww_mutex_wound(struct mutex *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __mutex_owner(lock);

	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Possible through __ww_mutex_add_waiter() when we race with
	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
	 * through __ww_mutex_check_waiters().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have FLAG_WAITERS set and hold
	 * wait_lock.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state()
		 * inserts sufficient barriers to make sure @owner either sees
		 * it's wounded in __ww_mutex_check_kill() or has a
		 * wakeup pending to re-read the wounded state.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}

/*
 * We just acquired @lock under @ww_ctx; if there are later contexts waiting
 * behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list,
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;

	lockdep_assert_held(&lock->wait_lock);

	list_for_each_entry(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}

/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed.
	 * The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx		[W] MUTEX_FLAG_WAITERS
	 *     MB			    MB
	 * [R] MUTEX_FLAG_WAITERS	[R] ww->ctx
	 *
	 * The memory barrier above pairs with the memory barrier in
	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
	 * and/or !empty list.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	spin_lock(&lock->base.wait_lock);
	__ww_mutex_check_waiters(&lock->base, ctx);
	spin_unlock(&lock->base.wait_lock);
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set the contents are undefined; only by acquiring
	 * wait_lock is there a guarantee that they are not invalid when
	 * reading.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted() to detect lock holder preemption.
		 */
		if (!owner->on_cpu || need_resched() ||
		    vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * Because of lock holder preemption, we skip spinning if the owner
	 * task is not on a CPU, or its CPU is preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released.
	 * Return true such that we'll trylock in the spin path, which is a
	 * faster option than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;


fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
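 *
 * Note (an observation on the code below, not part of the API contract):
 * the fastpath in __mutex_unlock_fast() only succeeds when the owner word
 * is exactly 'current' with no flag bits set; a pending WAITERS or HANDOFF
 * flag makes the cmpxchg fail and routes us through the slowpath.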
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);

static __always_inline int __sched
__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourselves.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int __sched
__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct mutex_waiter *cur;

	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must kill ourselves.
	 */
	cur = waiter;
	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}

/*
 * Add @waiter to the wait-list, keeping the wait-list ordered by stamp,
 * smallest first, such that older contexts are preferred to acquire the
 * lock over younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourselves immediately when possible (there
 * are older contexts already waiting) to avoid unnecessary waiting, and for
 * Wound-Wait ensure we wound the owning context when it is younger.
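 *
 * An illustrative resulting order (numbers are stamps, lower == older,
 * 'X' marks waiters without a context):
 *
 *	wait_list: ctx(1) -> X -> ctx(3) -> X -> X -> ctx(7)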
 */
static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
		      struct mutex *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos;
	bool is_wait_die;

	if (!ww_ctx) {
		__mutex_add_waiter(lock, waiter, &lock->wait_list);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them. Wait-Die waiters may die here. Wound-Wait waiters
	 * never die here, but they are sorted in stamp order and
	 * may wound the lock holder.
	 */
	pos = &lock->wait_list;
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: if we find an older context waiting, there
			 * is no point in queueing behind it, as we'd have to
			 * die the moment it would acquire the lock.
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = &cur->list;

		/* Wait-Die: ensure younger waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx);
	}

	__mutex_add_waiter(lock, waiter, pos);

	/*
	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
	 * wound that such that we might proceed.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * See ww_mutex_set_context_fastpath(). Orders setting
		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
		 * such that either we or the fastpath will wound @ww->ctx.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
	}

	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	might_sleep();

	ww = container_of(lock, struct ww_mutex, base);
	if (use_ww_ctx && ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx && ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (use_ww_ctx && ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);

#ifdef CONFIG_DEBUG_MUTEXES
		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;

		waiter.ww_ctx = ww_ctx;
	}

	waiter.task = current;

	set_current_state(state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us; do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		/*
		 * ww_mutex needs to always recheck its position since its waiter
		 * list is not FIFO ordered.
		 */
		if ((use_ww_ctx && ww_ctx) || !first) {
			first = __mutex_waiter_is_first(lock, &waiter);
			if (first)
				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
			break;

		spin_lock(&lock->wait_lock);
	}
	spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	if (use_ww_ctx && ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	mutex_remove_waiter(lock, &waiter, current);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx && ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, current);
err_early_kill:
	spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
		struct lockdep_map *nest_lock, unsigned long ip,
		struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
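
/*
 * A usage sketch for the *_nested() variants (illustrative; 'parent' and
 * 'child' are hypothetical objects of the same lock class). The subclass
 * tells lockdep that this same-class nesting is intentional:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */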
11131460cb65STejun Heo void __sched
11141460cb65STejun Heo mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
11151460cb65STejun Heo {
11161460cb65STejun Heo         int token;
11171460cb65STejun Heo
11181460cb65STejun Heo         might_sleep();
11191460cb65STejun Heo
11201460cb65STejun Heo         token = io_schedule_prepare();
11211460cb65STejun Heo         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
11221460cb65STejun Heo                             subclass, NULL, _RET_IP_, NULL, 0);
11231460cb65STejun Heo         io_schedule_finish(token);
11241460cb65STejun Heo }
11251460cb65STejun Heo EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
11261460cb65STejun Heo
112701768b42SPeter Zijlstra static inline int
112801768b42SPeter Zijlstra ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
112901768b42SPeter Zijlstra {
113001768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
113101768b42SPeter Zijlstra         unsigned tmp;
113201768b42SPeter Zijlstra
113301768b42SPeter Zijlstra         if (ctx->deadlock_inject_countdown-- == 0) {
113401768b42SPeter Zijlstra                 tmp = ctx->deadlock_inject_interval;
113501768b42SPeter Zijlstra                 if (tmp > UINT_MAX/4)
113601768b42SPeter Zijlstra                         tmp = UINT_MAX;
113701768b42SPeter Zijlstra                 else
113801768b42SPeter Zijlstra                         tmp = tmp*2 + tmp + tmp/2;
113901768b42SPeter Zijlstra
114001768b42SPeter Zijlstra                 ctx->deadlock_inject_interval = tmp;
114101768b42SPeter Zijlstra                 ctx->deadlock_inject_countdown = tmp;
114201768b42SPeter Zijlstra                 ctx->contending_lock = lock;
114301768b42SPeter Zijlstra
114401768b42SPeter Zijlstra                 ww_mutex_unlock(lock);
114501768b42SPeter Zijlstra
114601768b42SPeter Zijlstra                 return -EDEADLK;
114701768b42SPeter Zijlstra         }
114801768b42SPeter Zijlstra #endif
114901768b42SPeter Zijlstra
115001768b42SPeter Zijlstra         return 0;
115101768b42SPeter Zijlstra }
115201768b42SPeter Zijlstra
115301768b42SPeter Zijlstra int __sched
1154c5470b22SNicolai Hähnle ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
115501768b42SPeter Zijlstra {
115601768b42SPeter Zijlstra         int ret;
115701768b42SPeter Zijlstra
115801768b42SPeter Zijlstra         might_sleep();
1159427b1820SPeter Zijlstra         ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
1160ea9e0fb8SNicolai Hähnle                               0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1161427b1820SPeter Zijlstra                               ctx);
1162ea9e0fb8SNicolai Hähnle         if (!ret && ctx && ctx->acquired > 1)
116301768b42SPeter Zijlstra                 return ww_mutex_deadlock_injection(lock, ctx);
116401768b42SPeter Zijlstra
116501768b42SPeter Zijlstra         return ret;
116601768b42SPeter Zijlstra }
1167c5470b22SNicolai Hähnle EXPORT_SYMBOL_GPL(ww_mutex_lock);
116801768b42SPeter Zijlstra
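/*
 * [Editor's illustration -- not part of mutex.c] The intended caller-side
 * pattern for ww_mutex_lock() above: on -EDEADLK, drop every held lock,
 * sleep-wait on the contended one with ww_mutex_lock_slow(), then retry.
 * Under CONFIG_DEBUG_WW_MUTEX_SLOWPATH, the deadlock-injection helper above
 * forces this backoff path artificially, growing the injection interval by
 * exactly 3.5x each round (tmp*2 + tmp + tmp/2). All example_* names are
 * hypothetical.
 */
static DEFINE_WW_CLASS(example_ww_class);

struct example_obj {
        struct ww_mutex lock;
};

static void example_lock_both(struct example_obj *o1, struct example_obj *o2)
{
        struct example_obj *contended = NULL;
        struct ww_acquire_ctx ctx;

        ww_acquire_init(&ctx, &example_ww_class);
retry:
        if (contended)          /* sleep-wait for the lock that wounded us */
                ww_mutex_lock_slow(&contended->lock, &ctx);

        if (contended != o1 && ww_mutex_lock(&o1->lock, &ctx) == -EDEADLK) {
                if (contended)
                        ww_mutex_unlock(&contended->lock);
                contended = o1;
                goto retry;
        }
        if (contended != o2 && ww_mutex_lock(&o2->lock, &ctx) == -EDEADLK) {
                ww_mutex_unlock(&o1->lock);
                contended = o2;
                goto retry;
        }
        ww_acquire_done(&ctx);

        /* ... both objects locked, do the work ... */

        ww_mutex_unlock(&o1->lock);
        ww_mutex_unlock(&o2->lock);
        ww_acquire_fini(&ctx);
}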
116901768b42SPeter Zijlstra int __sched
1170c5470b22SNicolai Hähnle ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
117101768b42SPeter Zijlstra {
117201768b42SPeter Zijlstra         int ret;
117301768b42SPeter Zijlstra
117401768b42SPeter Zijlstra         might_sleep();
1175427b1820SPeter Zijlstra         ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
1176ea9e0fb8SNicolai Hähnle                               0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1177427b1820SPeter Zijlstra                               ctx);
117801768b42SPeter Zijlstra
1179ea9e0fb8SNicolai Hähnle         if (!ret && ctx && ctx->acquired > 1)
118001768b42SPeter Zijlstra                 return ww_mutex_deadlock_injection(lock, ctx);
118101768b42SPeter Zijlstra
118201768b42SPeter Zijlstra         return ret;
118301768b42SPeter Zijlstra }
1184c5470b22SNicolai Hähnle EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
118501768b42SPeter Zijlstra
118601768b42SPeter Zijlstra #endif
118701768b42SPeter Zijlstra
118801768b42SPeter Zijlstra /*
118901768b42SPeter Zijlstra  * Release the lock, slowpath:
119001768b42SPeter Zijlstra  */
11913ca0ff57SPeter Zijlstra static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
119201768b42SPeter Zijlstra {
11939d659ae1SPeter Zijlstra         struct task_struct *next = NULL;
1194194a6b5bSWaiman Long         DEFINE_WAKE_Q(wake_q);
1195b9c16a0eSPeter Zijlstra         unsigned long owner;
119601768b42SPeter Zijlstra
11973ca0ff57SPeter Zijlstra         mutex_release(&lock->dep_map, 1, ip);
11983ca0ff57SPeter Zijlstra
119901768b42SPeter Zijlstra         /*
12009d659ae1SPeter Zijlstra          * Release the lock before (potentially) taking the spinlock such that
12019d659ae1SPeter Zijlstra          * other contenders can get on with things ASAP.
12029d659ae1SPeter Zijlstra          *
12039d659ae1SPeter Zijlstra          * Except when HANDOFF, in that case we must not clear the owner field,
12049d659ae1SPeter Zijlstra          * but instead set it to the top waiter.
120501768b42SPeter Zijlstra          */
12069d659ae1SPeter Zijlstra         owner = atomic_long_read(&lock->owner);
12079d659ae1SPeter Zijlstra         for (;;) {
12089d659ae1SPeter Zijlstra                 unsigned long old;
12099d659ae1SPeter Zijlstra
12109d659ae1SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
12119d659ae1SPeter Zijlstra                 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
1212e274795eSPeter Zijlstra                 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
12139d659ae1SPeter Zijlstra #endif
12149d659ae1SPeter Zijlstra
12159d659ae1SPeter Zijlstra                 if (owner & MUTEX_FLAG_HANDOFF)
12169d659ae1SPeter Zijlstra                         break;
12179d659ae1SPeter Zijlstra
12189d659ae1SPeter Zijlstra                 old = atomic_long_cmpxchg_release(&lock->owner, owner,
12199d659ae1SPeter Zijlstra                                                   __owner_flags(owner));
12209d659ae1SPeter Zijlstra                 if (old == owner) {
12219d659ae1SPeter Zijlstra                         if (owner & MUTEX_FLAG_WAITERS)
12229d659ae1SPeter Zijlstra                                 break;
12239d659ae1SPeter Zijlstra
12243ca0ff57SPeter Zijlstra                         return;
12259d659ae1SPeter Zijlstra                 }
12269d659ae1SPeter Zijlstra
12279d659ae1SPeter Zijlstra                 owner = old;
12289d659ae1SPeter Zijlstra         }
122901768b42SPeter Zijlstra
1230b9c16a0eSPeter Zijlstra         spin_lock(&lock->wait_lock);
12311d8fe7dcSJason Low         debug_mutex_unlock(lock);
123201768b42SPeter Zijlstra         if (!list_empty(&lock->wait_list)) {
123301768b42SPeter Zijlstra                 /* get the first entry from the wait-list: */
123401768b42SPeter Zijlstra                 struct mutex_waiter *waiter =
12359d659ae1SPeter Zijlstra                         list_first_entry(&lock->wait_list,
123601768b42SPeter Zijlstra                                          struct mutex_waiter, list);
123701768b42SPeter Zijlstra
12389d659ae1SPeter Zijlstra                 next = waiter->task;
12399d659ae1SPeter Zijlstra
124001768b42SPeter Zijlstra                 debug_mutex_wake_waiter(lock, waiter);
12419d659ae1SPeter Zijlstra                 wake_q_add(&wake_q, next);
124201768b42SPeter Zijlstra         }
124301768b42SPeter Zijlstra
12449d659ae1SPeter Zijlstra         if (owner & MUTEX_FLAG_HANDOFF)
12459d659ae1SPeter Zijlstra                 __mutex_handoff(lock, next);
12469d659ae1SPeter Zijlstra
1247b9c16a0eSPeter Zijlstra         spin_unlock(&lock->wait_lock);
12489d659ae1SPeter Zijlstra
12491329ce6fSDavidlohr Bueso         wake_up_q(&wake_q);
125001768b42SPeter Zijlstra }
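/*
 * [Editor's illustration -- not part of mutex.c] The slowpath above defers
 * wakeups with a wake_q: waiters are queued while wait_lock is held, but the
 * actual wake_up_q() runs only after the spinlock is dropped, so the woken
 * task does not immediately collide with a still-held lock. A minimal sketch
 * with hypothetical example_* types:
 */
struct example_waiter {
        struct list_head list;
        struct task_struct *task;
};

struct example_gate {
        spinlock_t lock;
        struct list_head waiters;
};

static void example_release(struct example_gate *gate)
{
        DEFINE_WAKE_Q(wake_q);
        struct example_waiter *w;

        spin_lock(&gate->lock);
        if (!list_empty(&gate->waiters)) {
                w = list_first_entry(&gate->waiters, struct example_waiter, list);
                wake_q_add(&wake_q, w->task);   /* just queue it for now */
        }
        spin_unlock(&gate->lock);

        wake_up_q(&wake_q);                     /* wake with no spinlock held */
}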
125101768b42SPeter Zijlstra
125201768b42SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
125301768b42SPeter Zijlstra /*
125401768b42SPeter Zijlstra  * Here come the less common (and hence less performance-critical) APIs:
125501768b42SPeter Zijlstra  * mutex_lock_interruptible() and mutex_trylock().
125601768b42SPeter Zijlstra  */
125701768b42SPeter Zijlstra static noinline int __sched
125801768b42SPeter Zijlstra __mutex_lock_killable_slowpath(struct mutex *lock);
125901768b42SPeter Zijlstra
126001768b42SPeter Zijlstra static noinline int __sched
126101768b42SPeter Zijlstra __mutex_lock_interruptible_slowpath(struct mutex *lock);
126201768b42SPeter Zijlstra
126301768b42SPeter Zijlstra /**
126445dbac0eSMatthew Wilcox  * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
126545dbac0eSMatthew Wilcox  * @lock: The mutex to be acquired.
126601768b42SPeter Zijlstra  *
126745dbac0eSMatthew Wilcox  * Lock the mutex like mutex_lock(). If a signal is delivered while the
126845dbac0eSMatthew Wilcox  * process is sleeping, this function will return without acquiring the
126945dbac0eSMatthew Wilcox  * mutex.
127001768b42SPeter Zijlstra  *
127145dbac0eSMatthew Wilcox  * Context: Process context.
127245dbac0eSMatthew Wilcox  * Return: 0 if the lock was successfully acquired or %-EINTR if a
127345dbac0eSMatthew Wilcox  * signal arrived.
127401768b42SPeter Zijlstra  */
127501768b42SPeter Zijlstra int __sched mutex_lock_interruptible(struct mutex *lock)
127601768b42SPeter Zijlstra {
127701768b42SPeter Zijlstra         might_sleep();
12783ca0ff57SPeter Zijlstra
12793ca0ff57SPeter Zijlstra         if (__mutex_trylock_fast(lock))
128001768b42SPeter Zijlstra                 return 0;
12813ca0ff57SPeter Zijlstra
128201768b42SPeter Zijlstra         return __mutex_lock_interruptible_slowpath(lock);
128301768b42SPeter Zijlstra }
128401768b42SPeter Zijlstra
128501768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_lock_interruptible);
128601768b42SPeter Zijlstra
128745dbac0eSMatthew Wilcox /**
128845dbac0eSMatthew Wilcox  * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
128945dbac0eSMatthew Wilcox  * @lock: The mutex to be acquired.
129045dbac0eSMatthew Wilcox  *
129145dbac0eSMatthew Wilcox  * Lock the mutex like mutex_lock(). If a signal which will be fatal to
129245dbac0eSMatthew Wilcox  * the current process is delivered while the process is sleeping, this
129345dbac0eSMatthew Wilcox  * function will return without acquiring the mutex.
129445dbac0eSMatthew Wilcox  *
129545dbac0eSMatthew Wilcox  * Context: Process context.
129645dbac0eSMatthew Wilcox  * Return: 0 if the lock was successfully acquired or %-EINTR if a
129745dbac0eSMatthew Wilcox  * fatal signal arrived.
129845dbac0eSMatthew Wilcox  */
129901768b42SPeter Zijlstra int __sched mutex_lock_killable(struct mutex *lock)
130001768b42SPeter Zijlstra {
130101768b42SPeter Zijlstra         might_sleep();
13023ca0ff57SPeter Zijlstra
13033ca0ff57SPeter Zijlstra         if (__mutex_trylock_fast(lock))
130401768b42SPeter Zijlstra                 return 0;
13053ca0ff57SPeter Zijlstra
130601768b42SPeter Zijlstra         return __mutex_lock_killable_slowpath(lock);
130701768b42SPeter Zijlstra }
130801768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_lock_killable);
130901768b42SPeter Zijlstra
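/*
 * [Editor's illustration -- not part of mutex.c] Callers of the two variants
 * above must check the return value and unwind; a common driver idiom is to
 * translate the failure into -ERESTARTSYS so the interrupted syscall can be
 * transparently restarted. struct example_dev is hypothetical.
 */
struct example_dev {
        struct mutex lock;
        int value;
};

static int example_read_value(struct example_dev *dev, int *out)
{
        if (mutex_lock_interruptible(&dev->lock))
                return -ERESTARTSYS;    /* a signal arrived; never touch *out */

        *out = dev->value;
        mutex_unlock(&dev->lock);
        return 0;
}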
131045dbac0eSMatthew Wilcox /**
131145dbac0eSMatthew Wilcox  * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
131245dbac0eSMatthew Wilcox  * @lock: The mutex to be acquired.
131345dbac0eSMatthew Wilcox  *
131445dbac0eSMatthew Wilcox  * Lock the mutex like mutex_lock(). While the task is waiting for this
131545dbac0eSMatthew Wilcox  * mutex, it will be accounted as being in the IO wait state by the
131645dbac0eSMatthew Wilcox  * scheduler.
131745dbac0eSMatthew Wilcox  *
131845dbac0eSMatthew Wilcox  * Context: Process context.
131945dbac0eSMatthew Wilcox  */
13201460cb65STejun Heo void __sched mutex_lock_io(struct mutex *lock)
13211460cb65STejun Heo {
13221460cb65STejun Heo         int token;
13231460cb65STejun Heo
13241460cb65STejun Heo         token = io_schedule_prepare();
13251460cb65STejun Heo         mutex_lock(lock);
13261460cb65STejun Heo         io_schedule_finish(token);
13271460cb65STejun Heo }
13281460cb65STejun Heo EXPORT_SYMBOL_GPL(mutex_lock_io);
13291460cb65STejun Heo
13303ca0ff57SPeter Zijlstra static noinline void __sched
13313ca0ff57SPeter Zijlstra __mutex_lock_slowpath(struct mutex *lock)
133201768b42SPeter Zijlstra {
1333427b1820SPeter Zijlstra         __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
133401768b42SPeter Zijlstra }
133501768b42SPeter Zijlstra
133601768b42SPeter Zijlstra static noinline int __sched
133701768b42SPeter Zijlstra __mutex_lock_killable_slowpath(struct mutex *lock)
133801768b42SPeter Zijlstra {
1339427b1820SPeter Zijlstra         return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
134001768b42SPeter Zijlstra }
134101768b42SPeter Zijlstra
134201768b42SPeter Zijlstra static noinline int __sched
134301768b42SPeter Zijlstra __mutex_lock_interruptible_slowpath(struct mutex *lock)
134401768b42SPeter Zijlstra {
1345427b1820SPeter Zijlstra         return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
134601768b42SPeter Zijlstra }
134701768b42SPeter Zijlstra
134801768b42SPeter Zijlstra static noinline int __sched
134901768b42SPeter Zijlstra __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
135001768b42SPeter Zijlstra {
1351427b1820SPeter Zijlstra         return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
1352427b1820SPeter Zijlstra                                _RET_IP_, ctx);
135301768b42SPeter Zijlstra }
135401768b42SPeter Zijlstra
135501768b42SPeter Zijlstra static noinline int __sched
135601768b42SPeter Zijlstra __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
135701768b42SPeter Zijlstra                                        struct ww_acquire_ctx *ctx)
135801768b42SPeter Zijlstra {
1359427b1820SPeter Zijlstra         return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
1360427b1820SPeter Zijlstra                                _RET_IP_, ctx);
136101768b42SPeter Zijlstra }
136201768b42SPeter Zijlstra
136301768b42SPeter Zijlstra #endif
136401768b42SPeter Zijlstra
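/*
 * [Editor's illustration -- not part of mutex.c] mutex_lock_io() above wraps
 * the sleep in io_schedule_prepare()/io_schedule_finish(), so blocked time is
 * charged to iowait. Use it where the mutex is, in effect, a proxy for device
 * I/O. Hypothetical names:
 */
static DEFINE_MUTEX(example_io_lock);

static void example_wait_for_io_slot(void)
{
        mutex_lock_io(&example_io_lock);        /* blocked time shows up as iowait */
        /* ... submit or complete the I/O ... */
        mutex_unlock(&example_io_lock);
}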
136501768b42SPeter Zijlstra /**
136601768b42SPeter Zijlstra  * mutex_trylock - try to acquire the mutex, without waiting
136701768b42SPeter Zijlstra  * @lock: the mutex to be acquired
136801768b42SPeter Zijlstra  *
136901768b42SPeter Zijlstra  * Try to acquire the mutex atomically. Returns 1 if the mutex
137001768b42SPeter Zijlstra  * has been acquired successfully, and 0 on contention.
137101768b42SPeter Zijlstra  *
137201768b42SPeter Zijlstra  * NOTE: this function follows the spin_trylock() convention, so
137301768b42SPeter Zijlstra  * it is negated from the down_trylock() return values! Be careful
137401768b42SPeter Zijlstra  * about this when converting semaphore users to mutexes.
137501768b42SPeter Zijlstra  *
137601768b42SPeter Zijlstra  * This function must not be used in interrupt context. The
137701768b42SPeter Zijlstra  * mutex must be released by the same task that acquired it.
137801768b42SPeter Zijlstra  */
137901768b42SPeter Zijlstra int __sched mutex_trylock(struct mutex *lock)
138001768b42SPeter Zijlstra {
1381e274795eSPeter Zijlstra         bool locked = __mutex_trylock(lock);
138201768b42SPeter Zijlstra
13833ca0ff57SPeter Zijlstra         if (locked)
13843ca0ff57SPeter Zijlstra                 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
138501768b42SPeter Zijlstra
13863ca0ff57SPeter Zijlstra         return locked;
138701768b42SPeter Zijlstra }
138801768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_trylock);
138901768b42SPeter Zijlstra
139001768b42SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
139101768b42SPeter Zijlstra int __sched
1392c5470b22SNicolai Hähnle ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
139301768b42SPeter Zijlstra {
139401768b42SPeter Zijlstra         might_sleep();
139501768b42SPeter Zijlstra
13963ca0ff57SPeter Zijlstra         if (__mutex_trylock_fast(&lock->base)) {
1397ea9e0fb8SNicolai Hähnle                 if (ctx)
139801768b42SPeter Zijlstra                         ww_mutex_set_context_fastpath(lock, ctx);
13993ca0ff57SPeter Zijlstra                 return 0;
14003ca0ff57SPeter Zijlstra         }
14013ca0ff57SPeter Zijlstra
14023ca0ff57SPeter Zijlstra         return __ww_mutex_lock_slowpath(lock, ctx);
140301768b42SPeter Zijlstra }
1404c5470b22SNicolai Hähnle EXPORT_SYMBOL(ww_mutex_lock);
140501768b42SPeter Zijlstra
140601768b42SPeter Zijlstra int __sched
1407c5470b22SNicolai Hähnle ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
140801768b42SPeter Zijlstra {
140901768b42SPeter Zijlstra         might_sleep();
141001768b42SPeter Zijlstra
14113ca0ff57SPeter Zijlstra         if (__mutex_trylock_fast(&lock->base)) {
1412ea9e0fb8SNicolai Hähnle                 if (ctx)
141301768b42SPeter Zijlstra                         ww_mutex_set_context_fastpath(lock, ctx);
14143ca0ff57SPeter Zijlstra                 return 0;
14153ca0ff57SPeter Zijlstra         }
14163ca0ff57SPeter Zijlstra
14173ca0ff57SPeter Zijlstra         return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
141801768b42SPeter Zijlstra }
1419c5470b22SNicolai Hähnle EXPORT_SYMBOL(ww_mutex_lock_interruptible);
142001768b42SPeter Zijlstra
142101768b42SPeter Zijlstra #endif
142201768b42SPeter Zijlstra
142301768b42SPeter Zijlstra /**
142401768b42SPeter Zijlstra  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
142501768b42SPeter Zijlstra  * @cnt: the atomic which we are to dec
142601768b42SPeter Zijlstra  * @lock: the mutex to return holding if we dec to 0
142701768b42SPeter Zijlstra  *
142801768b42SPeter Zijlstra  * return true and hold lock if we dec to 0, return false otherwise
142901768b42SPeter Zijlstra  */
143001768b42SPeter Zijlstra int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
143101768b42SPeter Zijlstra {
143201768b42SPeter Zijlstra         /* dec if we can't possibly hit 0 */
143301768b42SPeter Zijlstra         if (atomic_add_unless(cnt, -1, 1))
143401768b42SPeter Zijlstra                 return 0;
143501768b42SPeter Zijlstra         /* we might hit 0, so take the lock */
143601768b42SPeter Zijlstra         mutex_lock(lock);
143701768b42SPeter Zijlstra         if (!atomic_dec_and_test(cnt)) {
143801768b42SPeter Zijlstra                 /* when we actually did the dec, we didn't hit 0 */
143901768b42SPeter Zijlstra                 mutex_unlock(lock);
144001768b42SPeter Zijlstra                 return 0;
144101768b42SPeter Zijlstra         }
144201768b42SPeter Zijlstra         /* we hit 0, and we hold the lock */
144301768b42SPeter Zijlstra         return 1;
144401768b42SPeter Zijlstra }
144501768b42SPeter Zijlstra EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
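/*
 * [Editor's illustration -- not part of mutex.c] The usual shape of an
 * atomic_dec_and_mutex_lock() caller: the mutex is taken only on the final
 * reference drop, so teardown is serialized without paying for the lock on
 * every put. All example_* names are hypothetical.
 */
static atomic_t example_users = ATOMIC_INIT(1);
static DEFINE_MUTEX(example_teardown_lock);

static void example_put(void)
{
        /* returns 1, with the mutex held, only for the final decrement */
        if (!atomic_dec_and_mutex_lock(&example_users, &example_teardown_lock))
                return;

        /* ... last user gone: safe to tear down the shared state ... */
        mutex_unlock(&example_teardown_lock);
}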