// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned at
 * at least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07
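
/*
 * Illustrative sketch (not part of this file's logic): given the encoding
 * above, an owner word is decoded by simple masking:
 *
 *	unsigned long owner = atomic_long_read(&lock->owner);
 *	struct task_struct *task = (struct task_struct *)(owner & ~MUTEX_FLAGS);
 *	bool has_waiters = owner & MUTEX_FLAG_WAITERS;
 *	bool handoff = owner & MUTEX_FLAG_HANDOFF;
 *
 * This is exactly what __owner_task() and __owner_flags() below implement.
 */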

/*
 * Internal helper function; C doesn't allow us to hide it :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * We set the HANDOFF bit, we must make sure it doesn't live
		 * past the point where we acquire it. This would be possible
		 * if we (accidentally) set the bit on an unlocked mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif
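
/*
 * A minimal userspace analogue of the two fastpaths above, using C11
 * atomics (an illustrative sketch only; the kernel versions additionally
 * encode flags in the low owner bits and fall back to a slowpath):
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *
 *	static _Atomic unsigned long owner;
 *
 *	static bool try_lock_fast(unsigned long me)
 *	{
 *		unsigned long zero = 0;
 *		return atomic_compare_exchange_strong_explicit(&owner, &zero, me,
 *				memory_order_acquire, memory_order_relaxed);
 *	}
 *
 *	static bool unlock_fast(unsigned long me)
 *	{
 *		unsigned long cur = me;
 *		return atomic_compare_exchange_strong_explicit(&owner, &cur, 0,
 *				memory_order_release, memory_order_relaxed);
 *	}
 *
 * The acquire on lock pairs with the release on unlock, ordering the
 * critical section between them.
 */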

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void __sched
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

/*
 * Give up ownership to a specific task, when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock, the
 * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
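
/*
 * Typical usage (illustrative caller code, not part of this file; the
 * 'struct buf' object and its fields are assumed for the example):
 *
 *	static DEFINE_MUTEX(buf_lock);
 *
 *	void buf_append(struct buf *b, int v)
 *	{
 *		mutex_lock(&buf_lock);
 *		b->data[b->len++] = v;	// exclusive access to b
 *		mutex_unlock(&buf_lock);
 *	}
 *
 * The same task that locked the mutex must unlock it, and it must not
 * exit while holding it.
 */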

/*
 * Wait-Die:
 *   The newer transactions are killed when:
 *     It (the new transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   The newer transactions are wounded when:
 *     An older transaction makes a request for a lock being held by
 *     the newer transaction.
 */

/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}

/*
 * Determine if context @a is 'after' context @b. IOW, @a is a younger
 * transaction than @b and depending on algorithm either needs to wait for
 * @b or die.
 */
static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	return (signed long)(a->stamp - b->stamp) > 0;
}
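
/*
 * Worked example for the comparison above (illustrative numbers): stamps
 * come from a monotonic counter and may wrap, so the comparison is done
 * in signed arithmetic. With a->stamp == 2 and b->stamp == ULONG_MAX
 * (i.e. @b was started just before the counter wrapped):
 *
 *	(signed long)(2 - ULONG_MAX) == 3 > 0
 *
 * so @a is correctly seen as younger than @b despite the wraparound.
 */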

/*
 * Wait-Die; wake a younger waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool __sched
__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	if (waiter->ww_ctx->acquired > 0 &&
	    __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
		debug_mutex_wake_waiter(lock, waiter);
		wake_up_process(waiter->task);
	}

	return true;
}

/*
 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with older transactions than
 * the lock holder's. Even if multiple waiters may wound the lock holder,
 * it's sufficient that only one does.
 */
static bool __ww_mutex_wound(struct mutex *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __mutex_owner(lock);

	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Possible through __ww_mutex_add_waiter() when we race with
	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
	 * through __ww_mutex_check_waiters().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have FLAG_WAITERS set and hold
	 * wait_lock.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state()
		 * inserts sufficient barriers to make sure @owner either sees
		 * it's wounded in __ww_mutex_check_kill() or has a
		 * wakeup pending to re-read the wounded state.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}
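
/*
 * Worked scenario (hypothetical transactions, for illustration): let
 * transaction A have an older stamp than B, let B hold the lock, and let
 * A block on it with A->acquired > 0:
 *
 *  - Wound-Wait: A wounds B via the function above. B keeps running until
 *    its next lock attempt, where __ww_mutex_check_kill() notices
 *    ctx->wounded and backs B out with -EDEADLK so A can make progress.
 *  - Wait-Die: B is never wounded. Instead, if the *younger* side is the
 *    one that blocks, __ww_mutex_die() wakes it so it can die with
 *    -EDEADLK and release its locks.
 */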

/*
 * We just acquired @lock under @ww_ctx, if there are later contexts waiting
 * behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list;
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;

	lockdep_assert_held(&lock->wait_lock);

	list_for_each_entry(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}

/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
	 *     MB		        MB
	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
	 *
	 * The memory barrier above pairs with the memory barrier in
	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
	 * and/or !empty list.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	spin_lock(&lock->base.wait_lock);
	__ww_mutex_check_waiters(&lock->base, ctx);
	spin_unlock(&lock->base.wait_lock);
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set the contents are undefined, only
	 * by acquiring wait_lock there is a guarantee that
	 * they are not invalid when reading.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu, dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issue.
		 */
		if (!owner->on_cpu || need_resched() ||
		    vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * Because of lock holder preemption, we skip spinning if the owner
	 * task is not running on a CPU, or if its CPU has itself been
	 * preempted (the vCPU case).
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
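
/*
 * Usage note (illustrative caller code; see the acquire pattern sketched
 * near ww_mutex_lock() below): all ww_mutexes must be released before the
 * acquire context is finished:
 *
 *	ww_mutex_unlock(&obj->lock);	// for each lock still held
 *	ww_acquire_fini(&ctx);		// only after all unlocks
 */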

static __always_inline int __sched
__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourselves.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int __sched
__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct mutex_waiter *cur;

	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must kill ourselves.
	 */
	cur = waiter;
	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}

/*
 * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
 * first, so that older contexts are preferred to acquire the lock over
 * younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourselves immediately when possible (there are
 * older contexts already waiting) to avoid unnecessary waiting and for
 * Wound-Wait ensure we wound the owning context when it is younger.
 */
static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
		      struct mutex *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos;
	bool is_wait_die;

	if (!ww_ctx) {
		__mutex_add_waiter(lock, waiter, &lock->wait_list);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them. Wait-Die waiters may die here. Wound-Wait waiters
	 * never die here, but they are sorted in stamp order and
	 * may wound the lock holder.
	 */
	pos = &lock->wait_list;
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: if we find an older context waiting, there
			 * is no point in queueing behind it, as we'd have to
			 * die the moment it would acquire the lock.
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = &cur->list;

		/* Wait-Die: ensure younger waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx);
	}

	__mutex_add_waiter(lock, waiter, pos);

	/*
	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
	 * wound that such that we might proceed.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * See ww_mutex_set_context_fastpath(). Orders setting
		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
		 * such that either we or the fastpath will wound @ww->ctx.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
	}

	return 0;
}
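
/*
 * Resulting list shape (illustrative): with context stamps 1 < 5 < 9 and
 * two context-less waiters w1, w2 that arrived between them, the wait-list
 * is ordered oldest stamp first, with context-less waiters left in FIFO
 * order where they were queued:
 *
 *	head -> ctx(1) -> w1 -> ctx(5) -> w2 -> ctx(9)
 *
 * A new waiter with stamp 7 would be inserted before ctx(9).
 */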

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
#endif

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);

#ifdef CONFIG_DEBUG_MUTEXES
		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;

		waiter.ww_ctx = ww_ctx;
	}

	waiter.task = current;

	set_current_state(state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		/*
		 * ww_mutex needs to always recheck its position since its waiter
		 * list is not FIFO ordered.
		 */
		if (ww_ctx || !first) {
			first = __mutex_waiter_is_first(lock, &waiter);
			if (first)
				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
			break;

		spin_lock(&lock->wait_lock);
	}
	spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	if (ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	mutex_remove_waiter(lock, &waiter, current);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, current);
err_early_kill:
	spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
		struct lockdep_map *nest_lock, unsigned long ip,
		struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
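
/*
 * Illustrative use of the _nested() API (assumed caller code, not part of
 * this file): when two mutexes of the same lock class must be held at
 * once, the second acquisition needs a distinct lockdep subclass to avoid
 * a false-positive deadlock report:
 *
 *	mutex_lock(&a->lock);
 *	mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&b->lock);
 *	mutex_unlock(&a->lock);
 *
 * The caller remains responsible for a consistent ordering between @a and
 * @b (e.g. by address or some stable rank).
 */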
11371460cb65STejun Heo void __sched
11381460cb65STejun Heo mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
11391460cb65STejun Heo {
11401460cb65STejun Heo 	int token;
11411460cb65STejun Heo 
11421460cb65STejun Heo 	might_sleep();
11431460cb65STejun Heo 
11441460cb65STejun Heo 	token = io_schedule_prepare();
11451460cb65STejun Heo 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
11461460cb65STejun Heo 			    subclass, NULL, _RET_IP_, NULL, 0);
11471460cb65STejun Heo 	io_schedule_finish(token);
11481460cb65STejun Heo }
11491460cb65STejun Heo EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
11501460cb65STejun Heo 
/*
 * With CONFIG_DEBUG_WW_MUTEX_SLOWPATH, periodically inject a spurious
 * -EDEADLK so that callers' backoff/retry paths actually get exercised;
 * the injection interval grows each time so forward progress is still
 * made.
 */
115101768b42SPeter Zijlstra static inline int
115201768b42SPeter Zijlstra ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
115301768b42SPeter Zijlstra {
115401768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
115501768b42SPeter Zijlstra 	unsigned tmp;
115601768b42SPeter Zijlstra 
115701768b42SPeter Zijlstra 	if (ctx->deadlock_inject_countdown-- == 0) {
115801768b42SPeter Zijlstra 		tmp = ctx->deadlock_inject_interval;
115901768b42SPeter Zijlstra 		if (tmp > UINT_MAX/4)
116001768b42SPeter Zijlstra 			tmp = UINT_MAX;
116101768b42SPeter Zijlstra 		else
116201768b42SPeter Zijlstra 			tmp = tmp*2 + tmp + tmp/2;	/* grow interval ~3.5x */
116301768b42SPeter Zijlstra 
116401768b42SPeter Zijlstra 		ctx->deadlock_inject_interval = tmp;
116501768b42SPeter Zijlstra 		ctx->deadlock_inject_countdown = tmp;
116601768b42SPeter Zijlstra 		ctx->contending_lock = lock;
116701768b42SPeter Zijlstra 
116801768b42SPeter Zijlstra 		ww_mutex_unlock(lock);
116901768b42SPeter Zijlstra 
117001768b42SPeter Zijlstra 		return -EDEADLK;
117101768b42SPeter Zijlstra 	}
117201768b42SPeter Zijlstra #endif
117301768b42SPeter Zijlstra 
117401768b42SPeter Zijlstra 	return 0;
117501768b42SPeter Zijlstra }
117601768b42SPeter Zijlstra 
117701768b42SPeter Zijlstra int __sched
1178c5470b22SNicolai Hähnle ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
117901768b42SPeter Zijlstra {
118001768b42SPeter Zijlstra 	int ret;
118101768b42SPeter Zijlstra 
118201768b42SPeter Zijlstra 	might_sleep();
1183427b1820SPeter Zijlstra 	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
1184ea9e0fb8SNicolai Hähnle 			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1185427b1820SPeter Zijlstra 			      ctx);
1186ea9e0fb8SNicolai Hähnle 	if (!ret && ctx && ctx->acquired > 1)
118701768b42SPeter Zijlstra 		return ww_mutex_deadlock_injection(lock, ctx);
118801768b42SPeter Zijlstra 
118901768b42SPeter Zijlstra 	return ret;
119001768b42SPeter Zijlstra }
1191c5470b22SNicolai Hähnle EXPORT_SYMBOL_GPL(ww_mutex_lock);
119201768b42SPeter Zijlstra 
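/*
 * Illustrative sketch, not part of this file (kept under #if 0): the
 * caller-side acquire/backoff pattern ww_mutex_lock() is designed for,
 * cf. Documentation/locking/ww-mutex-design.rst. On -EDEADLK we drop
 * what we hold, wait for the contended lock with ww_mutex_lock_slow(),
 * and retry in the new order. 'example_ww_class' and the helper name
 * are assumed for the example.
 */
#if 0
static DEFINE_WW_CLASS(example_ww_class);

static void example_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &example_ww_class);

	ret = ww_mutex_lock(a, &ctx);	/* nothing held yet: cannot deadlock */
	WARN_ON(ret);

	while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
		/* we are the younger context: back off and reorder */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);	/* sleep until 'b' is ours */
		swap(a, b);			/* retry in the new order */
	}
	ww_acquire_done(&ctx);

	/* ... both locks held ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
	ww_acquire_fini(&ctx);
}
#endif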
119301768b42SPeter Zijlstra int __sched
1194c5470b22SNicolai Hähnle ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
119501768b42SPeter Zijlstra {
119601768b42SPeter Zijlstra 	int ret;
119701768b42SPeter Zijlstra 
119801768b42SPeter Zijlstra 	might_sleep();
1199427b1820SPeter Zijlstra 	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
1200ea9e0fb8SNicolai Hähnle 			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1201427b1820SPeter Zijlstra 			      ctx);
120201768b42SPeter Zijlstra 
1203ea9e0fb8SNicolai Hähnle 	if (!ret && ctx && ctx->acquired > 1)
120401768b42SPeter Zijlstra 		return ww_mutex_deadlock_injection(lock, ctx);
120501768b42SPeter Zijlstra 
120601768b42SPeter Zijlstra 	return ret;
120701768b42SPeter Zijlstra }
1208c5470b22SNicolai Hähnle EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
120901768b42SPeter Zijlstra 
121001768b42SPeter Zijlstra #endif
121101768b42SPeter Zijlstra 
121201768b42SPeter Zijlstra /*
121301768b42SPeter Zijlstra  * Release the lock, slowpath:
121401768b42SPeter Zijlstra  */
12153ca0ff57SPeter Zijlstra static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
121601768b42SPeter Zijlstra {
12179d659ae1SPeter Zijlstra 	struct task_struct *next = NULL;
1218194a6b5bSWaiman Long 	DEFINE_WAKE_Q(wake_q);
1219b9c16a0eSPeter Zijlstra 	unsigned long owner;
122001768b42SPeter Zijlstra 
12215facae4fSQian Cai 	mutex_release(&lock->dep_map, ip);
12223ca0ff57SPeter Zijlstra 
122301768b42SPeter Zijlstra 	/*
12249d659ae1SPeter Zijlstra 	 * Release the lock before (potentially) taking the spinlock such that
12259d659ae1SPeter Zijlstra 	 * other contenders can get on with things ASAP.
12269d659ae1SPeter Zijlstra 	 *
12279d659ae1SPeter Zijlstra 	 * Except when HANDOFF, in that case we must not clear the owner field,
12289d659ae1SPeter Zijlstra 	 * but instead set it to the top waiter.
122901768b42SPeter Zijlstra 	 */
12309d659ae1SPeter Zijlstra 	owner = atomic_long_read(&lock->owner);
12319d659ae1SPeter Zijlstra 	for (;;) {
12329d659ae1SPeter Zijlstra 		unsigned long old;
12339d659ae1SPeter Zijlstra 
12349d659ae1SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
12359d659ae1SPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
1236e274795eSPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
12379d659ae1SPeter Zijlstra #endif
12389d659ae1SPeter Zijlstra 
12399d659ae1SPeter Zijlstra 		if (owner & MUTEX_FLAG_HANDOFF)
12409d659ae1SPeter Zijlstra 			break;
12419d659ae1SPeter Zijlstra 
12429d659ae1SPeter Zijlstra 		old = atomic_long_cmpxchg_release(&lock->owner, owner,
12439d659ae1SPeter Zijlstra 						  __owner_flags(owner));
12449d659ae1SPeter Zijlstra 		if (old == owner) {
12459d659ae1SPeter Zijlstra 			if (owner & MUTEX_FLAG_WAITERS)
12469d659ae1SPeter Zijlstra 				break;
12479d659ae1SPeter Zijlstra 
12483ca0ff57SPeter Zijlstra 			return;
12499d659ae1SPeter Zijlstra 		}
12509d659ae1SPeter Zijlstra 
12519d659ae1SPeter Zijlstra 		owner = old;
12529d659ae1SPeter Zijlstra 	}
125301768b42SPeter Zijlstra 
1254b9c16a0eSPeter Zijlstra 	spin_lock(&lock->wait_lock);
12551d8fe7dcSJason Low 	debug_mutex_unlock(lock);
125601768b42SPeter Zijlstra 	if (!list_empty(&lock->wait_list)) {
125701768b42SPeter Zijlstra 		/* get the first entry from the wait-list: */
125801768b42SPeter Zijlstra 		struct mutex_waiter *waiter =
12599d659ae1SPeter Zijlstra 			list_first_entry(&lock->wait_list,
126001768b42SPeter Zijlstra 					 struct mutex_waiter, list);
126101768b42SPeter Zijlstra 
12629d659ae1SPeter Zijlstra 		next = waiter->task;
12639d659ae1SPeter Zijlstra 
126401768b42SPeter Zijlstra 		debug_mutex_wake_waiter(lock, waiter);
12659d659ae1SPeter Zijlstra 		wake_q_add(&wake_q, next);
126601768b42SPeter Zijlstra 	}
126701768b42SPeter Zijlstra 
12689d659ae1SPeter Zijlstra 	if (owner & MUTEX_FLAG_HANDOFF)
12699d659ae1SPeter Zijlstra 		__mutex_handoff(lock, next);
12709d659ae1SPeter Zijlstra 
1271b9c16a0eSPeter Zijlstra 	spin_unlock(&lock->wait_lock);
12729d659ae1SPeter Zijlstra 
12731329ce6fSDavidlohr Bueso 	wake_up_q(&wake_q);
127401768b42SPeter Zijlstra }
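/*
 * Illustrative summary of the owner-word transitions a contended unlock
 * by task T performs above, with W the top waiter:
 *
 *   T|WAITERS          -> cmpxchg_release() clears T, keeps the flags;
 *                         W is woken and must still (try)acquire.
 *   T|WAITERS|HANDOFF  -> the owner field is left intact and
 *                         __mutex_handoff() installs W|PICKUP, so only
 *                         W can take the lock from here.
 */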
127501768b42SPeter Zijlstra 
127601768b42SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
127701768b42SPeter Zijlstra /*
127801768b42SPeter Zijlstra  * Here come the less common (and hence less performance-critical) APIs:
127901768b42SPeter Zijlstra  * mutex_lock_interruptible() and mutex_trylock().
128001768b42SPeter Zijlstra  */
128101768b42SPeter Zijlstra static noinline int __sched
128201768b42SPeter Zijlstra __mutex_lock_killable_slowpath(struct mutex *lock);
128301768b42SPeter Zijlstra 
128401768b42SPeter Zijlstra static noinline int __sched
128501768b42SPeter Zijlstra __mutex_lock_interruptible_slowpath(struct mutex *lock);
128601768b42SPeter Zijlstra 
128701768b42SPeter Zijlstra /**
128845dbac0eSMatthew Wilcox  * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
128945dbac0eSMatthew Wilcox  * @lock: The mutex to be acquired.
129001768b42SPeter Zijlstra  *
129145dbac0eSMatthew Wilcox  * Lock the mutex like mutex_lock(). If a signal is delivered while the
129245dbac0eSMatthew Wilcox  * process is sleeping, this function will return without acquiring the
129345dbac0eSMatthew Wilcox  * mutex.
129401768b42SPeter Zijlstra  *
129545dbac0eSMatthew Wilcox  * Context: Process context.
129645dbac0eSMatthew Wilcox  * Return: 0 if the lock was successfully acquired or %-EINTR if a
129745dbac0eSMatthew Wilcox  * signal arrived.
129801768b42SPeter Zijlstra  */
129901768b42SPeter Zijlstra int __sched mutex_lock_interruptible(struct mutex *lock)
130001768b42SPeter Zijlstra {
130101768b42SPeter Zijlstra 	might_sleep();
13023ca0ff57SPeter Zijlstra 
13033ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(lock))
130401768b42SPeter Zijlstra 		return 0;
13053ca0ff57SPeter Zijlstra 
130601768b42SPeter Zijlstra 	return __mutex_lock_interruptible_slowpath(lock);
130701768b42SPeter Zijlstra }
130801768b42SPeter Zijlstra 
130901768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_lock_interruptible);
131001768b42SPeter Zijlstra 
131145dbac0eSMatthew Wilcox /**
131245dbac0eSMatthew Wilcox  * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
131345dbac0eSMatthew Wilcox  * @lock: The mutex to be acquired.
131445dbac0eSMatthew Wilcox  *
131545dbac0eSMatthew Wilcox  * Lock the mutex like mutex_lock(). If a signal which will be fatal to
131645dbac0eSMatthew Wilcox  * the current process is delivered while the process is sleeping, this
131745dbac0eSMatthew Wilcox  * function will return without acquiring the mutex.
131845dbac0eSMatthew Wilcox  *
131945dbac0eSMatthew Wilcox  * Context: Process context.
132045dbac0eSMatthew Wilcox  * Return: 0 if the lock was successfully acquired or %-EINTR if a
132145dbac0eSMatthew Wilcox  * fatal signal arrived.
132245dbac0eSMatthew Wilcox  */
132301768b42SPeter Zijlstra int __sched mutex_lock_killable(struct mutex *lock)
132401768b42SPeter Zijlstra {
132501768b42SPeter Zijlstra 	might_sleep();
13263ca0ff57SPeter Zijlstra 
13273ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(lock))
132801768b42SPeter Zijlstra 		return 0;
13293ca0ff57SPeter Zijlstra 
133001768b42SPeter Zijlstra 	return __mutex_lock_killable_slowpath(lock);
133101768b42SPeter Zijlstra }
133201768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_lock_killable);
133301768b42SPeter Zijlstra 
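/*
 * Illustrative sketch, not part of this file (kept under #if 0): a
 * typical syscall-path caller of mutex_lock_interruptible(),
 * propagating the -EINTR instead of sleeping uninterruptibly.
 * 'example_dev' is an assumed type, not an API defined here;
 * mutex_lock_killable() is used the same way.
 */
#if 0
struct example_dev {
	struct mutex lock;
	unsigned long state;
};

static int example_dev_update(struct example_dev *dev, unsigned long state)
{
	int ret;

	ret = mutex_lock_interruptible(&dev->lock);
	if (ret)
		return ret;	/* -EINTR: a signal arrived while sleeping */

	dev->state = state;	/* critical section */
	mutex_unlock(&dev->lock);
	return 0;
}
#endif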
133445dbac0eSMatthew Wilcox /**
133545dbac0eSMatthew Wilcox  * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
133645dbac0eSMatthew Wilcox  * @lock: The mutex to be acquired.
133745dbac0eSMatthew Wilcox  *
133845dbac0eSMatthew Wilcox  * Lock the mutex like mutex_lock(). While the task is waiting for this
133945dbac0eSMatthew Wilcox  * mutex, it will be accounted as being in the IO wait state by the
134045dbac0eSMatthew Wilcox  * scheduler.
134145dbac0eSMatthew Wilcox  *
134245dbac0eSMatthew Wilcox  * Context: Process context.
134345dbac0eSMatthew Wilcox  */
13441460cb65STejun Heo void __sched mutex_lock_io(struct mutex *lock)
13451460cb65STejun Heo {
13461460cb65STejun Heo 	int token;
13471460cb65STejun Heo 
13481460cb65STejun Heo 	token = io_schedule_prepare();
13491460cb65STejun Heo 	mutex_lock(lock);
13501460cb65STejun Heo 	io_schedule_finish(token);
13511460cb65STejun Heo }
13521460cb65STejun Heo EXPORT_SYMBOL_GPL(mutex_lock_io);
13531460cb65STejun Heo 
13543ca0ff57SPeter Zijlstra static noinline void __sched
13553ca0ff57SPeter Zijlstra __mutex_lock_slowpath(struct mutex *lock)
135601768b42SPeter Zijlstra {
1357427b1820SPeter Zijlstra 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
135801768b42SPeter Zijlstra }
135901768b42SPeter Zijlstra 
136001768b42SPeter Zijlstra static noinline int __sched
136101768b42SPeter Zijlstra __mutex_lock_killable_slowpath(struct mutex *lock)
136201768b42SPeter Zijlstra {
1363427b1820SPeter Zijlstra 	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
136401768b42SPeter Zijlstra }
136501768b42SPeter Zijlstra 
136601768b42SPeter Zijlstra static noinline int __sched
136701768b42SPeter Zijlstra __mutex_lock_interruptible_slowpath(struct mutex *lock)
136801768b42SPeter Zijlstra {
1369427b1820SPeter Zijlstra 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
137001768b42SPeter Zijlstra }
137101768b42SPeter Zijlstra 
137201768b42SPeter Zijlstra static noinline int __sched
137301768b42SPeter Zijlstra __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
137401768b42SPeter Zijlstra {
1375427b1820SPeter Zijlstra 	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
1376427b1820SPeter Zijlstra 			       _RET_IP_, ctx);
137701768b42SPeter Zijlstra }
137801768b42SPeter Zijlstra 
137901768b42SPeter Zijlstra static noinline int __sched
138001768b42SPeter Zijlstra __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
138101768b42SPeter Zijlstra 				       struct ww_acquire_ctx *ctx)
138201768b42SPeter Zijlstra {
1383427b1820SPeter Zijlstra 	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
1384427b1820SPeter Zijlstra 			       _RET_IP_, ctx);
138501768b42SPeter Zijlstra }
138601768b42SPeter Zijlstra 
138701768b42SPeter Zijlstra #endif
138801768b42SPeter Zijlstra 
138901768b42SPeter Zijlstra /**
139001768b42SPeter Zijlstra  * mutex_trylock - try to acquire the mutex, without waiting
139101768b42SPeter Zijlstra  * @lock: the mutex to be acquired
139201768b42SPeter Zijlstra  *
139301768b42SPeter Zijlstra  * Try to acquire the mutex atomically. Returns 1 if the mutex
139401768b42SPeter Zijlstra  * has been acquired successfully, and 0 on contention.
139501768b42SPeter Zijlstra  *
139601768b42SPeter Zijlstra  * NOTE: this function follows the spin_trylock() convention, so
139701768b42SPeter Zijlstra  * it is negated from the down_trylock() return values! Be careful
139801768b42SPeter Zijlstra  * about this when converting semaphore users to mutexes.
139901768b42SPeter Zijlstra  *
140001768b42SPeter Zijlstra  * This function must not be used in interrupt context. The
140101768b42SPeter Zijlstra  * mutex must be released by the same task that acquired it.
140201768b42SPeter Zijlstra  */
140301768b42SPeter Zijlstra int __sched mutex_trylock(struct mutex *lock)
140401768b42SPeter Zijlstra {
14056c11c6e3SSebastian Andrzej Siewior 	bool locked;
140601768b42SPeter Zijlstra 
14076c11c6e3SSebastian Andrzej Siewior #ifdef CONFIG_DEBUG_MUTEXES
14086c11c6e3SSebastian Andrzej Siewior 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
14096c11c6e3SSebastian Andrzej Siewior #endif
14106c11c6e3SSebastian Andrzej Siewior 
14116c11c6e3SSebastian Andrzej Siewior 	locked = __mutex_trylock(lock);
14123ca0ff57SPeter Zijlstra 	if (locked)
14133ca0ff57SPeter Zijlstra 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
141401768b42SPeter Zijlstra 
14153ca0ff57SPeter Zijlstra 	return locked;
141601768b42SPeter Zijlstra }
141701768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_trylock);
141801768b42SPeter Zijlstra 
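/*
 * Illustrative sketch, not part of this file (kept under #if 0):
 * opportunistic locking with mutex_trylock(); note the
 * spin_trylock()-style return value. 'example_stats' is an assumed
 * type for the example.
 */
#if 0
struct example_stats {
	struct mutex lock;
	unsigned long updates;
};

static void example_update_stats(struct example_stats *stats)
{
	if (!mutex_trylock(&stats->lock))
		return;		/* contended: skip this optional update */

	stats->updates++;
	mutex_unlock(&stats->lock);
}
#endif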
141901768b42SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
142001768b42SPeter Zijlstra int __sched
1421c5470b22SNicolai Hähnle ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
142201768b42SPeter Zijlstra {
142301768b42SPeter Zijlstra 	might_sleep();
142401768b42SPeter Zijlstra 
14253ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(&lock->base)) {
1426ea9e0fb8SNicolai Hähnle 		if (ctx)
142701768b42SPeter Zijlstra 			ww_mutex_set_context_fastpath(lock, ctx);
14283ca0ff57SPeter Zijlstra 		return 0;
14293ca0ff57SPeter Zijlstra 	}
14303ca0ff57SPeter Zijlstra 
14313ca0ff57SPeter Zijlstra 	return __ww_mutex_lock_slowpath(lock, ctx);
143201768b42SPeter Zijlstra }
1433c5470b22SNicolai Hähnle EXPORT_SYMBOL(ww_mutex_lock);
143401768b42SPeter Zijlstra 
143501768b42SPeter Zijlstra int __sched
1436c5470b22SNicolai Hähnle ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
143701768b42SPeter Zijlstra {
143801768b42SPeter Zijlstra 	might_sleep();
143901768b42SPeter Zijlstra 
14403ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(&lock->base)) {
1441ea9e0fb8SNicolai Hähnle 		if (ctx)
144201768b42SPeter Zijlstra 			ww_mutex_set_context_fastpath(lock, ctx);
14433ca0ff57SPeter Zijlstra 		return 0;
14443ca0ff57SPeter Zijlstra 	}
14453ca0ff57SPeter Zijlstra 
14463ca0ff57SPeter Zijlstra 	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
144701768b42SPeter Zijlstra }
1448c5470b22SNicolai Hähnle EXPORT_SYMBOL(ww_mutex_lock_interruptible);
144901768b42SPeter Zijlstra 
145001768b42SPeter Zijlstra #endif
145101768b42SPeter Zijlstra 
145201768b42SPeter Zijlstra /**
145301768b42SPeter Zijlstra  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
145401768b42SPeter Zijlstra  * @cnt: the atomic which we are to dec
145501768b42SPeter Zijlstra  * @lock: the mutex to return holding if we dec to 0
145601768b42SPeter Zijlstra  *
145701768b42SPeter Zijlstra  * return true and hold lock if we dec to 0, return false otherwise
145801768b42SPeter Zijlstra  */
145901768b42SPeter Zijlstra int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
146001768b42SPeter Zijlstra {
146101768b42SPeter Zijlstra 	/* dec if we can't possibly hit 0 */
146201768b42SPeter Zijlstra 	if (atomic_add_unless(cnt, -1, 1))
146301768b42SPeter Zijlstra 		return 0;
146401768b42SPeter Zijlstra 	/* we might hit 0, so take the lock */
146501768b42SPeter Zijlstra 	mutex_lock(lock);
146601768b42SPeter Zijlstra 	if (!atomic_dec_and_test(cnt)) {
146701768b42SPeter Zijlstra 		/* when we actually did the dec, we didn't hit 0 */
146801768b42SPeter Zijlstra 		mutex_unlock(lock);
146901768b42SPeter Zijlstra 		return 0;
147001768b42SPeter Zijlstra 	}
147101768b42SPeter Zijlstra 	/* we hit 0, and we hold the lock */
147201768b42SPeter Zijlstra 	return 1;
147301768b42SPeter Zijlstra }
147401768b42SPeter Zijlstra EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
1475
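/*
 * Illustrative sketch, not part of this file (kept under #if 0): the
 * classic refcounting use of atomic_dec_and_mutex_lock(), where only
 * the final put takes the mutex and tears the object down exactly
 * once. All names here are assumed for the example.
 */
#if 0
struct example_cache {
	struct mutex lock;
	struct list_head objs;
};

struct example_obj {
	struct example_cache *cache;
	struct list_head node;
	atomic_t refcnt;
};

static void example_obj_put(struct example_obj *obj)
{
	if (!atomic_dec_and_mutex_lock(&obj->refcnt, &obj->cache->lock))
		return;				/* not the last reference */

	list_del(&obj->node);			/* unpublish under the mutex */
	mutex_unlock(&obj->cache->lock);
	kfree(obj);
}
#endif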