1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 201768b42SPeter Zijlstra /* 367a6de49SPeter Zijlstra * kernel/locking/mutex.c 401768b42SPeter Zijlstra * 501768b42SPeter Zijlstra * Mutexes: blocking mutual exclusion locks 601768b42SPeter Zijlstra * 701768b42SPeter Zijlstra * Started by Ingo Molnar: 801768b42SPeter Zijlstra * 901768b42SPeter Zijlstra * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 1001768b42SPeter Zijlstra * 1101768b42SPeter Zijlstra * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and 1201768b42SPeter Zijlstra * David Howells for suggestions and improvements. 1301768b42SPeter Zijlstra * 1401768b42SPeter Zijlstra * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline 1501768b42SPeter Zijlstra * from the -rt tree, where it was originally implemented for rtmutexes 1601768b42SPeter Zijlstra * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale 1701768b42SPeter Zijlstra * and Sven Dietrich. 1801768b42SPeter Zijlstra * 19387b1468SMauro Carvalho Chehab * Also see Documentation/locking/mutex-design.rst. 2001768b42SPeter Zijlstra */ 2101768b42SPeter Zijlstra #include <linux/mutex.h> 2201768b42SPeter Zijlstra #include <linux/ww_mutex.h> 23174cd4b1SIngo Molnar #include <linux/sched/signal.h> 2401768b42SPeter Zijlstra #include <linux/sched/rt.h> 2584f001e1SIngo Molnar #include <linux/sched/wake_q.h> 26b17b0153SIngo Molnar #include <linux/sched/debug.h> 2701768b42SPeter Zijlstra #include <linux/export.h> 2801768b42SPeter Zijlstra #include <linux/spinlock.h> 2901768b42SPeter Zijlstra #include <linux/interrupt.h> 3001768b42SPeter Zijlstra #include <linux/debug_locks.h> 317a215f89SDavidlohr Bueso #include <linux/osq_lock.h> 3201768b42SPeter Zijlstra 3301768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES 3401768b42SPeter Zijlstra # include "mutex-debug.h" 3501768b42SPeter Zijlstra #else 3601768b42SPeter Zijlstra # include "mutex.h" 3701768b42SPeter Zijlstra #endif 3801768b42SPeter Zijlstra 395f35d5a6SMukesh Ojha /* 405f35d5a6SMukesh Ojha * This is the control structure for tasks blocked on mutex, 415f35d5a6SMukesh Ojha * which resides on the blocked task's kernel stack: 425f35d5a6SMukesh Ojha */ 435f35d5a6SMukesh Ojha struct mutex_waiter { 445f35d5a6SMukesh Ojha struct list_head list; 455f35d5a6SMukesh Ojha struct task_struct *task; 465f35d5a6SMukesh Ojha struct ww_acquire_ctx *ww_ctx; 475f35d5a6SMukesh Ojha #ifdef CONFIG_DEBUG_MUTEXES 485f35d5a6SMukesh Ojha void *magic; 495f35d5a6SMukesh Ojha #endif 505f35d5a6SMukesh Ojha }; 515f35d5a6SMukesh Ojha 5201768b42SPeter Zijlstra void 5301768b42SPeter Zijlstra __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) 5401768b42SPeter Zijlstra { 553ca0ff57SPeter Zijlstra atomic_long_set(&lock->owner, 0); 5601768b42SPeter Zijlstra spin_lock_init(&lock->wait_lock); 5701768b42SPeter Zijlstra INIT_LIST_HEAD(&lock->wait_list); 5801768b42SPeter Zijlstra #ifdef CONFIG_MUTEX_SPIN_ON_OWNER 594d9d951eSJason Low osq_lock_init(&lock->osq); 6001768b42SPeter Zijlstra #endif 6101768b42SPeter Zijlstra 6201768b42SPeter Zijlstra debug_mutex_init(lock, name, key); 6301768b42SPeter Zijlstra } 6401768b42SPeter Zijlstra EXPORT_SYMBOL(__mutex_init); 6501768b42SPeter Zijlstra 663ca0ff57SPeter Zijlstra /* 673ca0ff57SPeter Zijlstra * @owner: contains: 'struct task_struct *' to the current lock owner, 683ca0ff57SPeter Zijlstra * NULL means not owned. 
Since task_struct pointers are aligned to
69e274795eSPeter Zijlstra * at least L1_CACHE_BYTES, we have low bits to store extra state.
703ca0ff57SPeter Zijlstra *
713ca0ff57SPeter Zijlstra * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
729d659ae1SPeter Zijlstra * Bit1 indicates unlock needs to hand the lock to the top-waiter
73e274795eSPeter Zijlstra * Bit2 indicates handoff has been done and we're waiting for pickup.
743ca0ff57SPeter Zijlstra */
753ca0ff57SPeter Zijlstra #define MUTEX_FLAG_WAITERS 0x01
769d659ae1SPeter Zijlstra #define MUTEX_FLAG_HANDOFF 0x02
77e274795eSPeter Zijlstra #define MUTEX_FLAG_PICKUP 0x04
783ca0ff57SPeter Zijlstra
79e274795eSPeter Zijlstra #define MUTEX_FLAGS 0x07
803ca0ff57SPeter Zijlstra
815f35d5a6SMukesh Ojha /*
825f35d5a6SMukesh Ojha * Internal helper function; C doesn't allow us to hide it :/
835f35d5a6SMukesh Ojha *
845f35d5a6SMukesh Ojha * DO NOT USE (outside of mutex code).
855f35d5a6SMukesh Ojha */
865f35d5a6SMukesh Ojha static inline struct task_struct *__mutex_owner(struct mutex *lock)
875f35d5a6SMukesh Ojha {
88*a037d269SMukesh Ojha return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
895f35d5a6SMukesh Ojha }
905f35d5a6SMukesh Ojha
913ca0ff57SPeter Zijlstra static inline struct task_struct *__owner_task(unsigned long owner)
923ca0ff57SPeter Zijlstra {
933ca0ff57SPeter Zijlstra return (struct task_struct *)(owner & ~MUTEX_FLAGS);
943ca0ff57SPeter Zijlstra }
953ca0ff57SPeter Zijlstra
965f35d5a6SMukesh Ojha bool mutex_is_locked(struct mutex *lock)
975f35d5a6SMukesh Ojha {
985f35d5a6SMukesh Ojha return __mutex_owner(lock) != NULL;
995f35d5a6SMukesh Ojha }
1005f35d5a6SMukesh Ojha EXPORT_SYMBOL(mutex_is_locked);
1015f35d5a6SMukesh Ojha
1025f35d5a6SMukesh Ojha __must_check enum mutex_trylock_recursive_enum
1035f35d5a6SMukesh Ojha mutex_trylock_recursive(struct mutex *lock)
1045f35d5a6SMukesh Ojha {
1055f35d5a6SMukesh Ojha if (unlikely(__mutex_owner(lock) == current))
1065f35d5a6SMukesh Ojha return MUTEX_TRYLOCK_RECURSIVE;
1075f35d5a6SMukesh Ojha
1085f35d5a6SMukesh Ojha return mutex_trylock(lock);
1095f35d5a6SMukesh Ojha }
1105f35d5a6SMukesh Ojha EXPORT_SYMBOL(mutex_trylock_recursive);
1115f35d5a6SMukesh Ojha
1123ca0ff57SPeter Zijlstra static inline unsigned long __owner_flags(unsigned long owner)
1133ca0ff57SPeter Zijlstra {
1143ca0ff57SPeter Zijlstra return owner & MUTEX_FLAGS;
1153ca0ff57SPeter Zijlstra }
1163ca0ff57SPeter Zijlstra
1173ca0ff57SPeter Zijlstra /*
118e274795eSPeter Zijlstra * Trylock variant that returns the owning task on failure.
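 *
 * Returns NULL when the trylock succeeded and the caller is now the owner,
 * otherwise the task currently holding the lock. A minimal sketch of the
 * calling convention, mirroring how the optimistic-spin loop below uses it:
 *
 *	owner = __mutex_trylock_or_owner(lock);
 *	if (!owner)
 *		break;	/* we got the lock */
 *	/* otherwise spin/wait on @owner */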
1193ca0ff57SPeter Zijlstra */ 120e274795eSPeter Zijlstra static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock) 1213ca0ff57SPeter Zijlstra { 1223ca0ff57SPeter Zijlstra unsigned long owner, curr = (unsigned long)current; 1233ca0ff57SPeter Zijlstra 1243ca0ff57SPeter Zijlstra owner = atomic_long_read(&lock->owner); 1253ca0ff57SPeter Zijlstra for (;;) { /* must loop, can race against a flag */ 1269d659ae1SPeter Zijlstra unsigned long old, flags = __owner_flags(owner); 127e274795eSPeter Zijlstra unsigned long task = owner & ~MUTEX_FLAGS; 1283ca0ff57SPeter Zijlstra 129e274795eSPeter Zijlstra if (task) { 130e274795eSPeter Zijlstra if (likely(task != curr)) 131e274795eSPeter Zijlstra break; 1329d659ae1SPeter Zijlstra 133e274795eSPeter Zijlstra if (likely(!(flags & MUTEX_FLAG_PICKUP))) 134e274795eSPeter Zijlstra break; 135e274795eSPeter Zijlstra 136e274795eSPeter Zijlstra flags &= ~MUTEX_FLAG_PICKUP; 137e274795eSPeter Zijlstra } else { 138e274795eSPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES 139e274795eSPeter Zijlstra DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP); 140e274795eSPeter Zijlstra #endif 1419d659ae1SPeter Zijlstra } 1423ca0ff57SPeter Zijlstra 1439d659ae1SPeter Zijlstra /* 1449d659ae1SPeter Zijlstra * We set the HANDOFF bit, we must make sure it doesn't live 1459d659ae1SPeter Zijlstra * past the point where we acquire it. This would be possible 1469d659ae1SPeter Zijlstra * if we (accidentally) set the bit on an unlocked mutex. 1479d659ae1SPeter Zijlstra */ 1489d659ae1SPeter Zijlstra flags &= ~MUTEX_FLAG_HANDOFF; 1499d659ae1SPeter Zijlstra 1509d659ae1SPeter Zijlstra old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags); 1513ca0ff57SPeter Zijlstra if (old == owner) 152e274795eSPeter Zijlstra return NULL; 1533ca0ff57SPeter Zijlstra 1543ca0ff57SPeter Zijlstra owner = old; 1553ca0ff57SPeter Zijlstra } 156e274795eSPeter Zijlstra 157e274795eSPeter Zijlstra return __owner_task(owner); 158e274795eSPeter Zijlstra } 159e274795eSPeter Zijlstra 160e274795eSPeter Zijlstra /* 161e274795eSPeter Zijlstra * Actual trylock that will work on any unlocked state. 162e274795eSPeter Zijlstra */ 163e274795eSPeter Zijlstra static inline bool __mutex_trylock(struct mutex *lock) 164e274795eSPeter Zijlstra { 165e274795eSPeter Zijlstra return !__mutex_trylock_or_owner(lock); 1663ca0ff57SPeter Zijlstra } 1673ca0ff57SPeter Zijlstra 1683ca0ff57SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC 1693ca0ff57SPeter Zijlstra /* 1703ca0ff57SPeter Zijlstra * Lockdep annotations are contained to the slow paths for simplicity. 1713ca0ff57SPeter Zijlstra * There is nothing that would stop spreading the lockdep annotations outwards 1723ca0ff57SPeter Zijlstra * except more code. 1733ca0ff57SPeter Zijlstra */ 1743ca0ff57SPeter Zijlstra 1753ca0ff57SPeter Zijlstra /* 1763ca0ff57SPeter Zijlstra * Optimistic trylock that only works in the uncontended case. Make sure to 1773ca0ff57SPeter Zijlstra * follow with a __mutex_trylock() before failing. 
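 *
 * (This is a single 0 -> current cmpxchg, so it fails not only when the
 *  mutex is owned but also when any MUTEX_FLAG_* bit is still set on an
 *  otherwise unlocked mutex; hence the full __mutex_trylock() retry on the
 *  slow path.)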
1783ca0ff57SPeter Zijlstra */ 1793ca0ff57SPeter Zijlstra static __always_inline bool __mutex_trylock_fast(struct mutex *lock) 1803ca0ff57SPeter Zijlstra { 1813ca0ff57SPeter Zijlstra unsigned long curr = (unsigned long)current; 182c427f695SPeter Zijlstra unsigned long zero = 0UL; 1833ca0ff57SPeter Zijlstra 184c427f695SPeter Zijlstra if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) 1853ca0ff57SPeter Zijlstra return true; 1863ca0ff57SPeter Zijlstra 1873ca0ff57SPeter Zijlstra return false; 1883ca0ff57SPeter Zijlstra } 1893ca0ff57SPeter Zijlstra 1903ca0ff57SPeter Zijlstra static __always_inline bool __mutex_unlock_fast(struct mutex *lock) 1913ca0ff57SPeter Zijlstra { 1923ca0ff57SPeter Zijlstra unsigned long curr = (unsigned long)current; 1933ca0ff57SPeter Zijlstra 1943ca0ff57SPeter Zijlstra if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr) 1953ca0ff57SPeter Zijlstra return true; 1963ca0ff57SPeter Zijlstra 1973ca0ff57SPeter Zijlstra return false; 1983ca0ff57SPeter Zijlstra } 1993ca0ff57SPeter Zijlstra #endif 2003ca0ff57SPeter Zijlstra 2013ca0ff57SPeter Zijlstra static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag) 2023ca0ff57SPeter Zijlstra { 2033ca0ff57SPeter Zijlstra atomic_long_or(flag, &lock->owner); 2043ca0ff57SPeter Zijlstra } 2053ca0ff57SPeter Zijlstra 2063ca0ff57SPeter Zijlstra static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag) 2073ca0ff57SPeter Zijlstra { 2083ca0ff57SPeter Zijlstra atomic_long_andnot(flag, &lock->owner); 2093ca0ff57SPeter Zijlstra } 2103ca0ff57SPeter Zijlstra 2119d659ae1SPeter Zijlstra static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter) 2129d659ae1SPeter Zijlstra { 2139d659ae1SPeter Zijlstra return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter; 2149d659ae1SPeter Zijlstra } 2159d659ae1SPeter Zijlstra 2169d659ae1SPeter Zijlstra /* 21708295b3bSThomas Hellstrom * Add @waiter to a given location in the lock wait_list and set the 21808295b3bSThomas Hellstrom * FLAG_WAITERS flag if it's the first waiter. 21908295b3bSThomas Hellstrom */ 22008295b3bSThomas Hellstrom static void __sched 22108295b3bSThomas Hellstrom __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, 22208295b3bSThomas Hellstrom struct list_head *list) 22308295b3bSThomas Hellstrom { 22408295b3bSThomas Hellstrom debug_mutex_add_waiter(lock, waiter, current); 22508295b3bSThomas Hellstrom 22608295b3bSThomas Hellstrom list_add_tail(&waiter->list, list); 22708295b3bSThomas Hellstrom if (__mutex_waiter_is_first(lock, waiter)) 22808295b3bSThomas Hellstrom __mutex_set_flag(lock, MUTEX_FLAG_WAITERS); 22908295b3bSThomas Hellstrom } 23008295b3bSThomas Hellstrom 23108295b3bSThomas Hellstrom /* 2329d659ae1SPeter Zijlstra * Give up ownership to a specific task, when @task = NULL, this is equivalent 233e274795eSPeter Zijlstra * to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves 234e274795eSPeter Zijlstra * WAITERS. Provides RELEASE semantics like a regular unlock, the 235e274795eSPeter Zijlstra * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff. 
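 *
 * One possible owner-word transition on handoff, shown symbolically for
 * illustration only:
 *
 *	old: <current task> | MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF
 *	new: <top waiter>   | MUTEX_FLAG_WAITERS | MUTEX_FLAG_PICKUP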
2369d659ae1SPeter Zijlstra */ 2379d659ae1SPeter Zijlstra static void __mutex_handoff(struct mutex *lock, struct task_struct *task) 2389d659ae1SPeter Zijlstra { 2399d659ae1SPeter Zijlstra unsigned long owner = atomic_long_read(&lock->owner); 2409d659ae1SPeter Zijlstra 2419d659ae1SPeter Zijlstra for (;;) { 2429d659ae1SPeter Zijlstra unsigned long old, new; 2439d659ae1SPeter Zijlstra 2449d659ae1SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES 2459d659ae1SPeter Zijlstra DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current); 246e274795eSPeter Zijlstra DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP); 2479d659ae1SPeter Zijlstra #endif 2489d659ae1SPeter Zijlstra 2499d659ae1SPeter Zijlstra new = (owner & MUTEX_FLAG_WAITERS); 2509d659ae1SPeter Zijlstra new |= (unsigned long)task; 251e274795eSPeter Zijlstra if (task) 252e274795eSPeter Zijlstra new |= MUTEX_FLAG_PICKUP; 2539d659ae1SPeter Zijlstra 2549d659ae1SPeter Zijlstra old = atomic_long_cmpxchg_release(&lock->owner, owner, new); 2559d659ae1SPeter Zijlstra if (old == owner) 2569d659ae1SPeter Zijlstra break; 2579d659ae1SPeter Zijlstra 2589d659ae1SPeter Zijlstra owner = old; 2599d659ae1SPeter Zijlstra } 2609d659ae1SPeter Zijlstra } 2619d659ae1SPeter Zijlstra 26201768b42SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC 26301768b42SPeter Zijlstra /* 26401768b42SPeter Zijlstra * We split the mutex lock/unlock logic into separate fastpath and 26501768b42SPeter Zijlstra * slowpath functions, to reduce the register pressure on the fastpath. 26601768b42SPeter Zijlstra * We also put the fastpath first in the kernel image, to make sure the 26701768b42SPeter Zijlstra * branch is predicted by the CPU as default-untaken. 26801768b42SPeter Zijlstra */ 2693ca0ff57SPeter Zijlstra static void __sched __mutex_lock_slowpath(struct mutex *lock); 27001768b42SPeter Zijlstra 27101768b42SPeter Zijlstra /** 27201768b42SPeter Zijlstra * mutex_lock - acquire the mutex 27301768b42SPeter Zijlstra * @lock: the mutex to be acquired 27401768b42SPeter Zijlstra * 27501768b42SPeter Zijlstra * Lock the mutex exclusively for this task. If the mutex is not 27601768b42SPeter Zijlstra * available right now, it will sleep until it can get it. 27701768b42SPeter Zijlstra * 27801768b42SPeter Zijlstra * The mutex must later on be released by the same task that 27901768b42SPeter Zijlstra * acquired it. Recursive locking is not allowed. The task 28001768b42SPeter Zijlstra * may not exit without first unlocking the mutex. Also, kernel 281139b6fd2SSharon Dvir * memory where the mutex resides must not be freed with 28201768b42SPeter Zijlstra * the mutex still locked. The mutex must first be initialized 28301768b42SPeter Zijlstra * (or statically defined) before it can be locked. memset()-ing 28401768b42SPeter Zijlstra * the mutex to 0 is not allowed. 28501768b42SPeter Zijlstra * 28601768b42SPeter Zijlstra * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging 28701768b42SPeter Zijlstra * checks that will enforce the restrictions and will also do 2887b4ff1adSMauro Carvalho Chehab * deadlock debugging) 28901768b42SPeter Zijlstra * 29001768b42SPeter Zijlstra * This function is similar to (but not equivalent to) down(). 
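 *
 * Minimal usage sketch (the lock and data names are illustrative only):
 *
 *	static DEFINE_MUTEX(example_lock);
 *
 *	mutex_lock(&example_lock);
 *	/* ... access data protected by example_lock ... */
 *	mutex_unlock(&example_lock);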
29101768b42SPeter Zijlstra */ 29201768b42SPeter Zijlstra void __sched mutex_lock(struct mutex *lock) 29301768b42SPeter Zijlstra { 29401768b42SPeter Zijlstra might_sleep(); 29501768b42SPeter Zijlstra 2963ca0ff57SPeter Zijlstra if (!__mutex_trylock_fast(lock)) 2973ca0ff57SPeter Zijlstra __mutex_lock_slowpath(lock); 2983ca0ff57SPeter Zijlstra } 29901768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_lock); 30001768b42SPeter Zijlstra #endif 30101768b42SPeter Zijlstra 30255f036caSPeter Ziljstra /* 30355f036caSPeter Ziljstra * Wait-Die: 30455f036caSPeter Ziljstra * The newer transactions are killed when: 30555f036caSPeter Ziljstra * It (the new transaction) makes a request for a lock being held 30655f036caSPeter Ziljstra * by an older transaction. 30708295b3bSThomas Hellstrom * 30808295b3bSThomas Hellstrom * Wound-Wait: 30908295b3bSThomas Hellstrom * The newer transactions are wounded when: 31008295b3bSThomas Hellstrom * An older transaction makes a request for a lock being held by 31108295b3bSThomas Hellstrom * the newer transaction. 31255f036caSPeter Ziljstra */ 31355f036caSPeter Ziljstra 31455f036caSPeter Ziljstra /* 31555f036caSPeter Ziljstra * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired 31655f036caSPeter Ziljstra * it. 31755f036caSPeter Ziljstra */ 318427b1820SPeter Zijlstra static __always_inline void 319427b1820SPeter Zijlstra ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx) 32076916515SDavidlohr Bueso { 32176916515SDavidlohr Bueso #ifdef CONFIG_DEBUG_MUTEXES 32276916515SDavidlohr Bueso /* 32376916515SDavidlohr Bueso * If this WARN_ON triggers, you used ww_mutex_lock to acquire, 32476916515SDavidlohr Bueso * but released with a normal mutex_unlock in this call. 32576916515SDavidlohr Bueso * 32676916515SDavidlohr Bueso * This should never happen, always use ww_mutex_unlock. 32776916515SDavidlohr Bueso */ 32876916515SDavidlohr Bueso DEBUG_LOCKS_WARN_ON(ww->ctx); 32976916515SDavidlohr Bueso 33076916515SDavidlohr Bueso /* 33176916515SDavidlohr Bueso * Not quite done after calling ww_acquire_done() ? 33276916515SDavidlohr Bueso */ 33376916515SDavidlohr Bueso DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire); 33476916515SDavidlohr Bueso 33576916515SDavidlohr Bueso if (ww_ctx->contending_lock) { 33676916515SDavidlohr Bueso /* 33776916515SDavidlohr Bueso * After -EDEADLK you tried to 33876916515SDavidlohr Bueso * acquire a different ww_mutex? Bad! 33976916515SDavidlohr Bueso */ 34076916515SDavidlohr Bueso DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww); 34176916515SDavidlohr Bueso 34276916515SDavidlohr Bueso /* 34376916515SDavidlohr Bueso * You called ww_mutex_lock after receiving -EDEADLK, 34476916515SDavidlohr Bueso * but 'forgot' to unlock everything else first? 34576916515SDavidlohr Bueso */ 34676916515SDavidlohr Bueso DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0); 34776916515SDavidlohr Bueso ww_ctx->contending_lock = NULL; 34876916515SDavidlohr Bueso } 34976916515SDavidlohr Bueso 35076916515SDavidlohr Bueso /* 35176916515SDavidlohr Bueso * Naughty, using a different class will lead to undefined behavior! 35276916515SDavidlohr Bueso */ 35376916515SDavidlohr Bueso DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class); 35476916515SDavidlohr Bueso #endif 35576916515SDavidlohr Bueso ww_ctx->acquired++; 35655f036caSPeter Ziljstra ww->ctx = ww_ctx; 3573822da3eSNicolai Hähnle } 3583822da3eSNicolai Hähnle 35976916515SDavidlohr Bueso /* 36055f036caSPeter Ziljstra * Determine if context @a is 'after' context @b. 
IOW, @a is a younger 36155f036caSPeter Ziljstra * transaction than @b and depending on algorithm either needs to wait for 36255f036caSPeter Ziljstra * @b or die. 36355f036caSPeter Ziljstra */ 36455f036caSPeter Ziljstra static inline bool __sched 36555f036caSPeter Ziljstra __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b) 36655f036caSPeter Ziljstra { 36755f036caSPeter Ziljstra 36855f036caSPeter Ziljstra return (signed long)(a->stamp - b->stamp) > 0; 36955f036caSPeter Ziljstra } 37055f036caSPeter Ziljstra 37155f036caSPeter Ziljstra /* 37255f036caSPeter Ziljstra * Wait-Die; wake a younger waiter context (when locks held) such that it can 37355f036caSPeter Ziljstra * die. 374659cf9f5SNicolai Hähnle * 37555f036caSPeter Ziljstra * Among waiters with context, only the first one can have other locks acquired 37655f036caSPeter Ziljstra * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and 37755f036caSPeter Ziljstra * __ww_mutex_check_kill() wake any but the earliest context. 37855f036caSPeter Ziljstra */ 37955f036caSPeter Ziljstra static bool __sched 38055f036caSPeter Ziljstra __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter, 38155f036caSPeter Ziljstra struct ww_acquire_ctx *ww_ctx) 38255f036caSPeter Ziljstra { 38308295b3bSThomas Hellstrom if (!ww_ctx->is_wait_die) 38408295b3bSThomas Hellstrom return false; 38508295b3bSThomas Hellstrom 38655f036caSPeter Ziljstra if (waiter->ww_ctx->acquired > 0 && 38755f036caSPeter Ziljstra __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) { 38855f036caSPeter Ziljstra debug_mutex_wake_waiter(lock, waiter); 38955f036caSPeter Ziljstra wake_up_process(waiter->task); 39055f036caSPeter Ziljstra } 39155f036caSPeter Ziljstra 39255f036caSPeter Ziljstra return true; 39355f036caSPeter Ziljstra } 39455f036caSPeter Ziljstra 39555f036caSPeter Ziljstra /* 39608295b3bSThomas Hellstrom * Wound-Wait; wound a younger @hold_ctx if it holds the lock. 39708295b3bSThomas Hellstrom * 39808295b3bSThomas Hellstrom * Wound the lock holder if there are waiters with older transactions than 39908295b3bSThomas Hellstrom * the lock holders. Even if multiple waiters may wound the lock holder, 40008295b3bSThomas Hellstrom * it's sufficient that only one does. 40108295b3bSThomas Hellstrom */ 40208295b3bSThomas Hellstrom static bool __ww_mutex_wound(struct mutex *lock, 40308295b3bSThomas Hellstrom struct ww_acquire_ctx *ww_ctx, 40408295b3bSThomas Hellstrom struct ww_acquire_ctx *hold_ctx) 40508295b3bSThomas Hellstrom { 40608295b3bSThomas Hellstrom struct task_struct *owner = __mutex_owner(lock); 40708295b3bSThomas Hellstrom 40808295b3bSThomas Hellstrom lockdep_assert_held(&lock->wait_lock); 40908295b3bSThomas Hellstrom 41008295b3bSThomas Hellstrom /* 41108295b3bSThomas Hellstrom * Possible through __ww_mutex_add_waiter() when we race with 41208295b3bSThomas Hellstrom * ww_mutex_set_context_fastpath(). In that case we'll get here again 41308295b3bSThomas Hellstrom * through __ww_mutex_check_waiters(). 41408295b3bSThomas Hellstrom */ 41508295b3bSThomas Hellstrom if (!hold_ctx) 41608295b3bSThomas Hellstrom return false; 41708295b3bSThomas Hellstrom 41808295b3bSThomas Hellstrom /* 41908295b3bSThomas Hellstrom * Can have !owner because of __mutex_unlock_slowpath(), but if owner, 42008295b3bSThomas Hellstrom * it cannot go away because we'll have FLAG_WAITERS set and hold 42108295b3bSThomas Hellstrom * wait_lock. 
42208295b3bSThomas Hellstrom */ 42308295b3bSThomas Hellstrom if (!owner) 42408295b3bSThomas Hellstrom return false; 42508295b3bSThomas Hellstrom 42608295b3bSThomas Hellstrom if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) { 42708295b3bSThomas Hellstrom hold_ctx->wounded = 1; 42808295b3bSThomas Hellstrom 42908295b3bSThomas Hellstrom /* 43008295b3bSThomas Hellstrom * wake_up_process() paired with set_current_state() 43108295b3bSThomas Hellstrom * inserts sufficient barriers to make sure @owner either sees 432e13e2366SThomas Hellstrom * it's wounded in __ww_mutex_check_kill() or has a 43308295b3bSThomas Hellstrom * wakeup pending to re-read the wounded state. 43408295b3bSThomas Hellstrom */ 43508295b3bSThomas Hellstrom if (owner != current) 43608295b3bSThomas Hellstrom wake_up_process(owner); 43708295b3bSThomas Hellstrom 43808295b3bSThomas Hellstrom return true; 43908295b3bSThomas Hellstrom } 44008295b3bSThomas Hellstrom 44108295b3bSThomas Hellstrom return false; 44208295b3bSThomas Hellstrom } 44308295b3bSThomas Hellstrom 44408295b3bSThomas Hellstrom /* 44555f036caSPeter Ziljstra * We just acquired @lock under @ww_ctx, if there are later contexts waiting 44608295b3bSThomas Hellstrom * behind us on the wait-list, check if they need to die, or wound us. 44755f036caSPeter Ziljstra * 44855f036caSPeter Ziljstra * See __ww_mutex_add_waiter() for the list-order construction; basically the 44955f036caSPeter Ziljstra * list is ordered by stamp, smallest (oldest) first. 450659cf9f5SNicolai Hähnle * 45108295b3bSThomas Hellstrom * This relies on never mixing wait-die/wound-wait on the same wait-list; 45208295b3bSThomas Hellstrom * which is currently ensured by that being a ww_class property. 45308295b3bSThomas Hellstrom * 454659cf9f5SNicolai Hähnle * The current task must not be on the wait list. 455659cf9f5SNicolai Hähnle */ 456659cf9f5SNicolai Hähnle static void __sched 45755f036caSPeter Ziljstra __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx) 458659cf9f5SNicolai Hähnle { 459659cf9f5SNicolai Hähnle struct mutex_waiter *cur; 460659cf9f5SNicolai Hähnle 461659cf9f5SNicolai Hähnle lockdep_assert_held(&lock->wait_lock); 462659cf9f5SNicolai Hähnle 463659cf9f5SNicolai Hähnle list_for_each_entry(cur, &lock->wait_list, list) { 464659cf9f5SNicolai Hähnle if (!cur->ww_ctx) 465659cf9f5SNicolai Hähnle continue; 466659cf9f5SNicolai Hähnle 46708295b3bSThomas Hellstrom if (__ww_mutex_die(lock, cur, ww_ctx) || 46808295b3bSThomas Hellstrom __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx)) 469659cf9f5SNicolai Hähnle break; 470659cf9f5SNicolai Hähnle } 471659cf9f5SNicolai Hähnle } 472659cf9f5SNicolai Hähnle 47376916515SDavidlohr Bueso /* 47455f036caSPeter Ziljstra * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx 47555f036caSPeter Ziljstra * and wake up any waiters so they can recheck. 47676916515SDavidlohr Bueso */ 47776916515SDavidlohr Bueso static __always_inline void 478427b1820SPeter Zijlstra ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 47976916515SDavidlohr Bueso { 48076916515SDavidlohr Bueso ww_mutex_lock_acquired(lock, ctx); 48176916515SDavidlohr Bueso 48276916515SDavidlohr Bueso /* 48376916515SDavidlohr Bueso * The lock->ctx update should be visible on all cores before 48455f036caSPeter Ziljstra * the WAITERS check is done, otherwise contended waiters might be 48576916515SDavidlohr Bueso * missed. 
The contended waiters will either see ww_ctx == NULL 48676916515SDavidlohr Bueso * and keep spinning, or it will acquire wait_lock, add itself 48776916515SDavidlohr Bueso * to waiter list and sleep. 48876916515SDavidlohr Bueso */ 48908295b3bSThomas Hellstrom smp_mb(); /* See comments above and below. */ 49076916515SDavidlohr Bueso 49176916515SDavidlohr Bueso /* 49208295b3bSThomas Hellstrom * [W] ww->ctx = ctx [W] MUTEX_FLAG_WAITERS 49308295b3bSThomas Hellstrom * MB MB 49408295b3bSThomas Hellstrom * [R] MUTEX_FLAG_WAITERS [R] ww->ctx 49508295b3bSThomas Hellstrom * 49608295b3bSThomas Hellstrom * The memory barrier above pairs with the memory barrier in 49708295b3bSThomas Hellstrom * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx 49808295b3bSThomas Hellstrom * and/or !empty list. 49976916515SDavidlohr Bueso */ 5003ca0ff57SPeter Zijlstra if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS))) 50176916515SDavidlohr Bueso return; 50276916515SDavidlohr Bueso 50376916515SDavidlohr Bueso /* 50455f036caSPeter Ziljstra * Uh oh, we raced in fastpath, check if any of the waiters need to 50508295b3bSThomas Hellstrom * die or wound us. 50676916515SDavidlohr Bueso */ 507b9c16a0eSPeter Zijlstra spin_lock(&lock->base.wait_lock); 50855f036caSPeter Ziljstra __ww_mutex_check_waiters(&lock->base, ctx); 509b9c16a0eSPeter Zijlstra spin_unlock(&lock->base.wait_lock); 51076916515SDavidlohr Bueso } 51176916515SDavidlohr Bueso 51201768b42SPeter Zijlstra #ifdef CONFIG_MUTEX_SPIN_ON_OWNER 513c516df97SNicolai Hähnle 514c516df97SNicolai Hähnle static inline 515c516df97SNicolai Hähnle bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, 516c516df97SNicolai Hähnle struct mutex_waiter *waiter) 517c516df97SNicolai Hähnle { 518c516df97SNicolai Hähnle struct ww_mutex *ww; 519c516df97SNicolai Hähnle 520c516df97SNicolai Hähnle ww = container_of(lock, struct ww_mutex, base); 521c516df97SNicolai Hähnle 52201768b42SPeter Zijlstra /* 523c516df97SNicolai Hähnle * If ww->ctx is set the contents are undefined, only 524c516df97SNicolai Hähnle * by acquiring wait_lock there is a guarantee that 525c516df97SNicolai Hähnle * they are not invalid when reading. 526c516df97SNicolai Hähnle * 527c516df97SNicolai Hähnle * As such, when deadlock detection needs to be 528c516df97SNicolai Hähnle * performed the optimistic spinning cannot be done. 529c516df97SNicolai Hähnle * 530c516df97SNicolai Hähnle * Check this in every inner iteration because we may 531c516df97SNicolai Hähnle * be racing against another thread's ww_mutex_lock. 532c516df97SNicolai Hähnle */ 533c516df97SNicolai Hähnle if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx)) 534c516df97SNicolai Hähnle return false; 535c516df97SNicolai Hähnle 536c516df97SNicolai Hähnle /* 537c516df97SNicolai Hähnle * If we aren't on the wait list yet, cancel the spin 538c516df97SNicolai Hähnle * if there are waiters. We want to avoid stealing the 539c516df97SNicolai Hähnle * lock from a waiter with an earlier stamp, since the 540c516df97SNicolai Hähnle * other thread may already own a lock that we also 541c516df97SNicolai Hähnle * need. 542c516df97SNicolai Hähnle */ 543c516df97SNicolai Hähnle if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS)) 544c516df97SNicolai Hähnle return false; 545c516df97SNicolai Hähnle 546c516df97SNicolai Hähnle /* 547c516df97SNicolai Hähnle * Similarly, stop spinning if we are no longer the 548c516df97SNicolai Hähnle * first waiter. 
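 *
 * (A waiter with an earlier stamp may have been queued ahead of us
 *  while we spun; it should get the lock before we do.)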
549c516df97SNicolai Hähnle */ 550c516df97SNicolai Hähnle if (waiter && !__mutex_waiter_is_first(lock, waiter)) 551c516df97SNicolai Hähnle return false; 552c516df97SNicolai Hähnle 553c516df97SNicolai Hähnle return true; 554c516df97SNicolai Hähnle } 555c516df97SNicolai Hähnle 55601768b42SPeter Zijlstra /* 55725f13b40SNicolai Hähnle * Look out! "owner" is an entirely speculative pointer access and not 55825f13b40SNicolai Hähnle * reliable. 55925f13b40SNicolai Hähnle * 56025f13b40SNicolai Hähnle * "noinline" so that this function shows up on perf profiles. 56101768b42SPeter Zijlstra */ 56201768b42SPeter Zijlstra static noinline 56325f13b40SNicolai Hähnle bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner, 564c516df97SNicolai Hähnle struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter) 56501768b42SPeter Zijlstra { 56601ac33c1SJason Low bool ret = true; 567be1f7bf2SJason Low 56801768b42SPeter Zijlstra rcu_read_lock(); 5693ca0ff57SPeter Zijlstra while (__mutex_owner(lock) == owner) { 570be1f7bf2SJason Low /* 571be1f7bf2SJason Low * Ensure we emit the owner->on_cpu, dereference _after_ 57201ac33c1SJason Low * checking lock->owner still matches owner. If that fails, 57301ac33c1SJason Low * owner might point to freed memory. If it still matches, 574be1f7bf2SJason Low * the rcu_read_lock() ensures the memory stays valid. 575be1f7bf2SJason Low */ 576be1f7bf2SJason Low barrier(); 577be1f7bf2SJason Low 57805ffc951SPan Xinhui /* 57905ffc951SPan Xinhui * Use vcpu_is_preempted to detect lock holder preemption issue. 58005ffc951SPan Xinhui */ 58105ffc951SPan Xinhui if (!owner->on_cpu || need_resched() || 58205ffc951SPan Xinhui vcpu_is_preempted(task_cpu(owner))) { 583be1f7bf2SJason Low ret = false; 584be1f7bf2SJason Low break; 585be1f7bf2SJason Low } 58601768b42SPeter Zijlstra 587c516df97SNicolai Hähnle if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) { 58825f13b40SNicolai Hähnle ret = false; 58925f13b40SNicolai Hähnle break; 59025f13b40SNicolai Hähnle } 59125f13b40SNicolai Hähnle 592f2f09a4cSChristian Borntraeger cpu_relax(); 59301768b42SPeter Zijlstra } 59401768b42SPeter Zijlstra rcu_read_unlock(); 59501768b42SPeter Zijlstra 596be1f7bf2SJason Low return ret; 59701768b42SPeter Zijlstra } 59801768b42SPeter Zijlstra 59901768b42SPeter Zijlstra /* 60001768b42SPeter Zijlstra * Initial check for entering the mutex spinning loop 60101768b42SPeter Zijlstra */ 60201768b42SPeter Zijlstra static inline int mutex_can_spin_on_owner(struct mutex *lock) 60301768b42SPeter Zijlstra { 60401768b42SPeter Zijlstra struct task_struct *owner; 60501768b42SPeter Zijlstra int retval = 1; 60601768b42SPeter Zijlstra 60746af29e4SJason Low if (need_resched()) 60846af29e4SJason Low return 0; 60946af29e4SJason Low 61001768b42SPeter Zijlstra rcu_read_lock(); 6113ca0ff57SPeter Zijlstra owner = __mutex_owner(lock); 61205ffc951SPan Xinhui 61305ffc951SPan Xinhui /* 61405ffc951SPan Xinhui * As lock holder preemption issue, we both skip spinning if task is not 61505ffc951SPan Xinhui * on cpu or its cpu is preempted 61605ffc951SPan Xinhui */ 61701768b42SPeter Zijlstra if (owner) 61805ffc951SPan Xinhui retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner)); 61901768b42SPeter Zijlstra rcu_read_unlock(); 62076916515SDavidlohr Bueso 62176916515SDavidlohr Bueso /* 6223ca0ff57SPeter Zijlstra * If lock->owner is not set, the mutex has been released. 
Return true 6233ca0ff57SPeter Zijlstra * such that we'll trylock in the spin path, which is a faster option 6243ca0ff57SPeter Zijlstra * than the blocking slow path. 62576916515SDavidlohr Bueso */ 6263ca0ff57SPeter Zijlstra return retval; 62776916515SDavidlohr Bueso } 62876916515SDavidlohr Bueso 62976916515SDavidlohr Bueso /* 63076916515SDavidlohr Bueso * Optimistic spinning. 63176916515SDavidlohr Bueso * 63276916515SDavidlohr Bueso * We try to spin for acquisition when we find that the lock owner 63376916515SDavidlohr Bueso * is currently running on a (different) CPU and while we don't 63476916515SDavidlohr Bueso * need to reschedule. The rationale is that if the lock owner is 63576916515SDavidlohr Bueso * running, it is likely to release the lock soon. 63676916515SDavidlohr Bueso * 63776916515SDavidlohr Bueso * The mutex spinners are queued up using MCS lock so that only one 63876916515SDavidlohr Bueso * spinner can compete for the mutex. However, if mutex spinning isn't 63976916515SDavidlohr Bueso * going to happen, there is no point in going through the lock/unlock 64076916515SDavidlohr Bueso * overhead. 64176916515SDavidlohr Bueso * 64276916515SDavidlohr Bueso * Returns true when the lock was taken, otherwise false, indicating 64376916515SDavidlohr Bueso * that we need to jump to the slowpath and sleep. 644b341afb3SWaiman Long * 645b341afb3SWaiman Long * The waiter flag is set to true if the spinner is a waiter in the wait 646b341afb3SWaiman Long * queue. The waiter-spinner will spin on the lock directly and concurrently 647b341afb3SWaiman Long * with the spinner at the head of the OSQ, if present, until the owner is 648b341afb3SWaiman Long * changed to itself. 64976916515SDavidlohr Bueso */ 650427b1820SPeter Zijlstra static __always_inline bool 651427b1820SPeter Zijlstra mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, 652c516df97SNicolai Hähnle const bool use_ww_ctx, struct mutex_waiter *waiter) 65376916515SDavidlohr Bueso { 654b341afb3SWaiman Long if (!waiter) { 655b341afb3SWaiman Long /* 656b341afb3SWaiman Long * The purpose of the mutex_can_spin_on_owner() function is 657b341afb3SWaiman Long * to eliminate the overhead of osq_lock() and osq_unlock() 658b341afb3SWaiman Long * in case spinning isn't possible. As a waiter-spinner 659b341afb3SWaiman Long * is not going to take OSQ lock anyway, there is no need 660b341afb3SWaiman Long * to call mutex_can_spin_on_owner(). 661b341afb3SWaiman Long */ 66276916515SDavidlohr Bueso if (!mutex_can_spin_on_owner(lock)) 663b341afb3SWaiman Long goto fail; 66476916515SDavidlohr Bueso 665e42f678aSDavidlohr Bueso /* 666e42f678aSDavidlohr Bueso * In order to avoid a stampede of mutex spinners trying to 667e42f678aSDavidlohr Bueso * acquire the mutex all at once, the spinners need to take a 668e42f678aSDavidlohr Bueso * MCS (queued) lock first before spinning on the owner field. 669e42f678aSDavidlohr Bueso */ 67076916515SDavidlohr Bueso if (!osq_lock(&lock->osq)) 671b341afb3SWaiman Long goto fail; 672b341afb3SWaiman Long } 67376916515SDavidlohr Bueso 674b341afb3SWaiman Long for (;;) { 67576916515SDavidlohr Bueso struct task_struct *owner; 67676916515SDavidlohr Bueso 677e274795eSPeter Zijlstra /* Try to acquire the mutex... 
*/ 678e274795eSPeter Zijlstra owner = __mutex_trylock_or_owner(lock); 679e274795eSPeter Zijlstra if (!owner) 680e274795eSPeter Zijlstra break; 68176916515SDavidlohr Bueso 68276916515SDavidlohr Bueso /* 683e274795eSPeter Zijlstra * There's an owner, wait for it to either 68476916515SDavidlohr Bueso * release the lock or go to sleep. 68576916515SDavidlohr Bueso */ 686c516df97SNicolai Hähnle if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter)) 687b341afb3SWaiman Long goto fail_unlock; 68876916515SDavidlohr Bueso 68976916515SDavidlohr Bueso /* 69076916515SDavidlohr Bueso * The cpu_relax() call is a compiler barrier which forces 69176916515SDavidlohr Bueso * everything in this loop to be re-loaded. We don't need 69276916515SDavidlohr Bueso * memory barriers as we'll eventually observe the right 69376916515SDavidlohr Bueso * values at the cost of a few extra spins. 69476916515SDavidlohr Bueso */ 695f2f09a4cSChristian Borntraeger cpu_relax(); 69676916515SDavidlohr Bueso } 69776916515SDavidlohr Bueso 698b341afb3SWaiman Long if (!waiter) 69976916515SDavidlohr Bueso osq_unlock(&lock->osq); 700b341afb3SWaiman Long 701b341afb3SWaiman Long return true; 702b341afb3SWaiman Long 703b341afb3SWaiman Long 704b341afb3SWaiman Long fail_unlock: 705b341afb3SWaiman Long if (!waiter) 706b341afb3SWaiman Long osq_unlock(&lock->osq); 707b341afb3SWaiman Long 708b341afb3SWaiman Long fail: 70976916515SDavidlohr Bueso /* 71076916515SDavidlohr Bueso * If we fell out of the spin path because of need_resched(), 71176916515SDavidlohr Bueso * reschedule now, before we try-lock the mutex. This avoids getting 71276916515SDavidlohr Bueso * scheduled out right after we obtained the mutex. 71376916515SDavidlohr Bueso */ 7146f942a1fSPeter Zijlstra if (need_resched()) { 7156f942a1fSPeter Zijlstra /* 7166f942a1fSPeter Zijlstra * We _should_ have TASK_RUNNING here, but just in case 7176f942a1fSPeter Zijlstra * we do not, make it so, otherwise we might get stuck. 7186f942a1fSPeter Zijlstra */ 7196f942a1fSPeter Zijlstra __set_current_state(TASK_RUNNING); 72076916515SDavidlohr Bueso schedule_preempt_disabled(); 7216f942a1fSPeter Zijlstra } 72276916515SDavidlohr Bueso 72376916515SDavidlohr Bueso return false; 72476916515SDavidlohr Bueso } 72576916515SDavidlohr Bueso #else 726427b1820SPeter Zijlstra static __always_inline bool 727427b1820SPeter Zijlstra mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, 728c516df97SNicolai Hähnle const bool use_ww_ctx, struct mutex_waiter *waiter) 72976916515SDavidlohr Bueso { 73076916515SDavidlohr Bueso return false; 73176916515SDavidlohr Bueso } 73201768b42SPeter Zijlstra #endif 73301768b42SPeter Zijlstra 7343ca0ff57SPeter Zijlstra static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip); 73501768b42SPeter Zijlstra 73601768b42SPeter Zijlstra /** 73701768b42SPeter Zijlstra * mutex_unlock - release the mutex 73801768b42SPeter Zijlstra * @lock: the mutex to be released 73901768b42SPeter Zijlstra * 74001768b42SPeter Zijlstra * Unlock a mutex that has been locked by this task previously. 74101768b42SPeter Zijlstra * 74201768b42SPeter Zijlstra * This function must not be used in interrupt context. Unlocking 74301768b42SPeter Zijlstra * of a not locked mutex is not allowed. 74401768b42SPeter Zijlstra * 74501768b42SPeter Zijlstra * This function is similar to (but not equivalent to) up(). 
74601768b42SPeter Zijlstra */ 74701768b42SPeter Zijlstra void __sched mutex_unlock(struct mutex *lock) 74801768b42SPeter Zijlstra { 7493ca0ff57SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC 7503ca0ff57SPeter Zijlstra if (__mutex_unlock_fast(lock)) 7513ca0ff57SPeter Zijlstra return; 75201768b42SPeter Zijlstra #endif 7533ca0ff57SPeter Zijlstra __mutex_unlock_slowpath(lock, _RET_IP_); 75401768b42SPeter Zijlstra } 75501768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_unlock); 75601768b42SPeter Zijlstra 75701768b42SPeter Zijlstra /** 75801768b42SPeter Zijlstra * ww_mutex_unlock - release the w/w mutex 75901768b42SPeter Zijlstra * @lock: the mutex to be released 76001768b42SPeter Zijlstra * 76101768b42SPeter Zijlstra * Unlock a mutex that has been locked by this task previously with any of the 76201768b42SPeter Zijlstra * ww_mutex_lock* functions (with or without an acquire context). It is 76301768b42SPeter Zijlstra * forbidden to release the locks after releasing the acquire context. 76401768b42SPeter Zijlstra * 76501768b42SPeter Zijlstra * This function must not be used in interrupt context. Unlocking 76601768b42SPeter Zijlstra * of a unlocked mutex is not allowed. 76701768b42SPeter Zijlstra */ 76801768b42SPeter Zijlstra void __sched ww_mutex_unlock(struct ww_mutex *lock) 76901768b42SPeter Zijlstra { 77001768b42SPeter Zijlstra /* 77101768b42SPeter Zijlstra * The unlocking fastpath is the 0->1 transition from 'locked' 77201768b42SPeter Zijlstra * into 'unlocked' state: 77301768b42SPeter Zijlstra */ 77401768b42SPeter Zijlstra if (lock->ctx) { 77501768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES 77601768b42SPeter Zijlstra DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired); 77701768b42SPeter Zijlstra #endif 77801768b42SPeter Zijlstra if (lock->ctx->acquired > 0) 77901768b42SPeter Zijlstra lock->ctx->acquired--; 78001768b42SPeter Zijlstra lock->ctx = NULL; 78101768b42SPeter Zijlstra } 78201768b42SPeter Zijlstra 7833ca0ff57SPeter Zijlstra mutex_unlock(&lock->base); 78401768b42SPeter Zijlstra } 78501768b42SPeter Zijlstra EXPORT_SYMBOL(ww_mutex_unlock); 78601768b42SPeter Zijlstra 78755f036caSPeter Ziljstra 78855f036caSPeter Ziljstra static __always_inline int __sched 78955f036caSPeter Ziljstra __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx) 79055f036caSPeter Ziljstra { 79155f036caSPeter Ziljstra if (ww_ctx->acquired > 0) { 79255f036caSPeter Ziljstra #ifdef CONFIG_DEBUG_MUTEXES 79355f036caSPeter Ziljstra struct ww_mutex *ww; 79455f036caSPeter Ziljstra 79555f036caSPeter Ziljstra ww = container_of(lock, struct ww_mutex, base); 79655f036caSPeter Ziljstra DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock); 79755f036caSPeter Ziljstra ww_ctx->contending_lock = ww; 79855f036caSPeter Ziljstra #endif 79955f036caSPeter Ziljstra return -EDEADLK; 80055f036caSPeter Ziljstra } 80155f036caSPeter Ziljstra 80255f036caSPeter Ziljstra return 0; 80355f036caSPeter Ziljstra } 80455f036caSPeter Ziljstra 80555f036caSPeter Ziljstra 80655f036caSPeter Ziljstra /* 80708295b3bSThomas Hellstrom * Check the wound condition for the current lock acquire. 80808295b3bSThomas Hellstrom * 80908295b3bSThomas Hellstrom * Wound-Wait: If we're wounded, kill ourself. 81055f036caSPeter Ziljstra * 81155f036caSPeter Ziljstra * Wait-Die: If we're trying to acquire a lock already held by an older 81255f036caSPeter Ziljstra * context, kill ourselves. 
81355f036caSPeter Ziljstra * 81455f036caSPeter Ziljstra * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to 81555f036caSPeter Ziljstra * look at waiters before us in the wait-list. 81655f036caSPeter Ziljstra */ 81701768b42SPeter Zijlstra static inline int __sched 81855f036caSPeter Ziljstra __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter, 819200b1874SNicolai Hähnle struct ww_acquire_ctx *ctx) 82001768b42SPeter Zijlstra { 82101768b42SPeter Zijlstra struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); 8224d3199e4SDavidlohr Bueso struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx); 823200b1874SNicolai Hähnle struct mutex_waiter *cur; 82401768b42SPeter Zijlstra 82555f036caSPeter Ziljstra if (ctx->acquired == 0) 82655f036caSPeter Ziljstra return 0; 82755f036caSPeter Ziljstra 82808295b3bSThomas Hellstrom if (!ctx->is_wait_die) { 82908295b3bSThomas Hellstrom if (ctx->wounded) 83008295b3bSThomas Hellstrom return __ww_mutex_kill(lock, ctx); 83108295b3bSThomas Hellstrom 83208295b3bSThomas Hellstrom return 0; 83308295b3bSThomas Hellstrom } 83408295b3bSThomas Hellstrom 835200b1874SNicolai Hähnle if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx)) 83655f036caSPeter Ziljstra return __ww_mutex_kill(lock, ctx); 837200b1874SNicolai Hähnle 838200b1874SNicolai Hähnle /* 839200b1874SNicolai Hähnle * If there is a waiter in front of us that has a context, then its 84055f036caSPeter Ziljstra * stamp is earlier than ours and we must kill ourself. 841200b1874SNicolai Hähnle */ 842200b1874SNicolai Hähnle cur = waiter; 843200b1874SNicolai Hähnle list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) { 84455f036caSPeter Ziljstra if (!cur->ww_ctx) 84555f036caSPeter Ziljstra continue; 84655f036caSPeter Ziljstra 84755f036caSPeter Ziljstra return __ww_mutex_kill(lock, ctx); 848200b1874SNicolai Hähnle } 849200b1874SNicolai Hähnle 85001768b42SPeter Zijlstra return 0; 85101768b42SPeter Zijlstra } 85201768b42SPeter Zijlstra 85355f036caSPeter Ziljstra /* 85455f036caSPeter Ziljstra * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest 85555f036caSPeter Ziljstra * first. Such that older contexts are preferred to acquire the lock over 85655f036caSPeter Ziljstra * younger contexts. 85755f036caSPeter Ziljstra * 85855f036caSPeter Ziljstra * Waiters without context are interspersed in FIFO order. 85955f036caSPeter Ziljstra * 86055f036caSPeter Ziljstra * Furthermore, for Wait-Die kill ourself immediately when possible (there are 86108295b3bSThomas Hellstrom * older contexts already waiting) to avoid unnecessary waiting and for 86208295b3bSThomas Hellstrom * Wound-Wait ensure we wound the owning context when it is younger. 
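 *
 * A possible resulting wait-list, oldest stamp first, with context-less
 * waiters left in their FIFO positions (purely illustrative):
 *
 *	ctx(stamp=1) -> <no ctx> -> ctx(stamp=3) -> ctx(stamp=7) -> <no ctx>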
86355f036caSPeter Ziljstra */ 8646baa5c60SNicolai Hähnle static inline int __sched 8656baa5c60SNicolai Hähnle __ww_mutex_add_waiter(struct mutex_waiter *waiter, 8666baa5c60SNicolai Hähnle struct mutex *lock, 8676baa5c60SNicolai Hähnle struct ww_acquire_ctx *ww_ctx) 8686baa5c60SNicolai Hähnle { 8696baa5c60SNicolai Hähnle struct mutex_waiter *cur; 8706baa5c60SNicolai Hähnle struct list_head *pos; 87108295b3bSThomas Hellstrom bool is_wait_die; 8726baa5c60SNicolai Hähnle 8736baa5c60SNicolai Hähnle if (!ww_ctx) { 87408295b3bSThomas Hellstrom __mutex_add_waiter(lock, waiter, &lock->wait_list); 8756baa5c60SNicolai Hähnle return 0; 8766baa5c60SNicolai Hähnle } 8776baa5c60SNicolai Hähnle 87808295b3bSThomas Hellstrom is_wait_die = ww_ctx->is_wait_die; 87908295b3bSThomas Hellstrom 8806baa5c60SNicolai Hähnle /* 8816baa5c60SNicolai Hähnle * Add the waiter before the first waiter with a higher stamp. 8826baa5c60SNicolai Hähnle * Waiters without a context are skipped to avoid starving 88308295b3bSThomas Hellstrom * them. Wait-Die waiters may die here. Wound-Wait waiters 88408295b3bSThomas Hellstrom * never die here, but they are sorted in stamp order and 88508295b3bSThomas Hellstrom * may wound the lock holder. 8866baa5c60SNicolai Hähnle */ 8876baa5c60SNicolai Hähnle pos = &lock->wait_list; 8886baa5c60SNicolai Hähnle list_for_each_entry_reverse(cur, &lock->wait_list, list) { 8896baa5c60SNicolai Hähnle if (!cur->ww_ctx) 8906baa5c60SNicolai Hähnle continue; 8916baa5c60SNicolai Hähnle 8926baa5c60SNicolai Hähnle if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) { 89355f036caSPeter Ziljstra /* 89455f036caSPeter Ziljstra * Wait-Die: if we find an older context waiting, there 89555f036caSPeter Ziljstra * is no point in queueing behind it, as we'd have to 89655f036caSPeter Ziljstra * die the moment it would acquire the lock. 89755f036caSPeter Ziljstra */ 89808295b3bSThomas Hellstrom if (is_wait_die) { 89955f036caSPeter Ziljstra int ret = __ww_mutex_kill(lock, ww_ctx); 9006baa5c60SNicolai Hähnle 90155f036caSPeter Ziljstra if (ret) 90255f036caSPeter Ziljstra return ret; 90308295b3bSThomas Hellstrom } 9046baa5c60SNicolai Hähnle 9056baa5c60SNicolai Hähnle break; 9066baa5c60SNicolai Hähnle } 9076baa5c60SNicolai Hähnle 9086baa5c60SNicolai Hähnle pos = &cur->list; 909200b1874SNicolai Hähnle 91055f036caSPeter Ziljstra /* Wait-Die: ensure younger waiters die. */ 91155f036caSPeter Ziljstra __ww_mutex_die(lock, cur, ww_ctx); 9126baa5c60SNicolai Hähnle } 9136baa5c60SNicolai Hähnle 91408295b3bSThomas Hellstrom __mutex_add_waiter(lock, waiter, pos); 91508295b3bSThomas Hellstrom 91608295b3bSThomas Hellstrom /* 91708295b3bSThomas Hellstrom * Wound-Wait: if we're blocking on a mutex owned by a younger context, 91808295b3bSThomas Hellstrom * wound that such that we might proceed. 91908295b3bSThomas Hellstrom */ 92008295b3bSThomas Hellstrom if (!is_wait_die) { 92108295b3bSThomas Hellstrom struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); 92208295b3bSThomas Hellstrom 92308295b3bSThomas Hellstrom /* 92408295b3bSThomas Hellstrom * See ww_mutex_set_context_fastpath(). Orders setting 92508295b3bSThomas Hellstrom * MUTEX_FLAG_WAITERS vs the ww->ctx load, 92608295b3bSThomas Hellstrom * such that either we or the fastpath will wound @ww->ctx. 
92708295b3bSThomas Hellstrom */ 92808295b3bSThomas Hellstrom smp_mb(); 92908295b3bSThomas Hellstrom __ww_mutex_wound(lock, ww_ctx, ww->ctx); 93008295b3bSThomas Hellstrom } 93155f036caSPeter Ziljstra 93201768b42SPeter Zijlstra return 0; 93301768b42SPeter Zijlstra } 93401768b42SPeter Zijlstra 93501768b42SPeter Zijlstra /* 93601768b42SPeter Zijlstra * Lock a mutex (possibly interruptible), slowpath: 93701768b42SPeter Zijlstra */ 93801768b42SPeter Zijlstra static __always_inline int __sched 93901768b42SPeter Zijlstra __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, 94001768b42SPeter Zijlstra struct lockdep_map *nest_lock, unsigned long ip, 94101768b42SPeter Zijlstra struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx) 94201768b42SPeter Zijlstra { 94301768b42SPeter Zijlstra struct mutex_waiter waiter; 9449d659ae1SPeter Zijlstra bool first = false; 945a40ca565SWaiman Long struct ww_mutex *ww; 94601768b42SPeter Zijlstra int ret; 94701768b42SPeter Zijlstra 948427b1820SPeter Zijlstra might_sleep(); 949ea9e0fb8SNicolai Hähnle 9506c11c6e3SSebastian Andrzej Siewior #ifdef CONFIG_DEBUG_MUTEXES 9516c11c6e3SSebastian Andrzej Siewior DEBUG_LOCKS_WARN_ON(lock->magic != lock); 9526c11c6e3SSebastian Andrzej Siewior #endif 9536c11c6e3SSebastian Andrzej Siewior 954a40ca565SWaiman Long ww = container_of(lock, struct ww_mutex, base); 955ea9e0fb8SNicolai Hähnle if (use_ww_ctx && ww_ctx) { 9560422e83dSChris Wilson if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) 9570422e83dSChris Wilson return -EALREADY; 95808295b3bSThomas Hellstrom 95908295b3bSThomas Hellstrom /* 96008295b3bSThomas Hellstrom * Reset the wounded flag after a kill. No other process can 96108295b3bSThomas Hellstrom * race and wound us here since they can't have a valid owner 96208295b3bSThomas Hellstrom * pointer if we don't have any locks held. 96308295b3bSThomas Hellstrom */ 96408295b3bSThomas Hellstrom if (ww_ctx->acquired == 0) 96508295b3bSThomas Hellstrom ww_ctx->wounded = 0; 9660422e83dSChris Wilson } 9670422e83dSChris Wilson 96801768b42SPeter Zijlstra preempt_disable(); 96901768b42SPeter Zijlstra mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); 97001768b42SPeter Zijlstra 971e274795eSPeter Zijlstra if (__mutex_trylock(lock) || 972c516df97SNicolai Hähnle mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) { 97376916515SDavidlohr Bueso /* got the lock, yay! */ 9743ca0ff57SPeter Zijlstra lock_acquired(&lock->dep_map, ip); 975ea9e0fb8SNicolai Hähnle if (use_ww_ctx && ww_ctx) 9763ca0ff57SPeter Zijlstra ww_mutex_set_context_fastpath(ww, ww_ctx); 97701768b42SPeter Zijlstra preempt_enable(); 97801768b42SPeter Zijlstra return 0; 97901768b42SPeter Zijlstra } 98001768b42SPeter Zijlstra 981b9c16a0eSPeter Zijlstra spin_lock(&lock->wait_lock); 9821e820c96SJason Low /* 9833ca0ff57SPeter Zijlstra * After waiting to acquire the wait_lock, try again. 
9841e820c96SJason Low */ 985659cf9f5SNicolai Hähnle if (__mutex_trylock(lock)) { 986659cf9f5SNicolai Hähnle if (use_ww_ctx && ww_ctx) 98755f036caSPeter Ziljstra __ww_mutex_check_waiters(lock, ww_ctx); 988659cf9f5SNicolai Hähnle 98901768b42SPeter Zijlstra goto skip_wait; 990659cf9f5SNicolai Hähnle } 99101768b42SPeter Zijlstra 99201768b42SPeter Zijlstra debug_mutex_lock_common(lock, &waiter); 99301768b42SPeter Zijlstra 9946baa5c60SNicolai Hähnle lock_contended(&lock->dep_map, ip); 9956baa5c60SNicolai Hähnle 9966baa5c60SNicolai Hähnle if (!use_ww_ctx) { 99701768b42SPeter Zijlstra /* add waiting tasks to the end of the waitqueue (FIFO): */ 99808295b3bSThomas Hellstrom __mutex_add_waiter(lock, &waiter, &lock->wait_list); 99908295b3bSThomas Hellstrom 1000977625a6SNicolai Hähnle 1001977625a6SNicolai Hähnle #ifdef CONFIG_DEBUG_MUTEXES 1002977625a6SNicolai Hähnle waiter.ww_ctx = MUTEX_POISON_WW_CTX; 1003977625a6SNicolai Hähnle #endif 10046baa5c60SNicolai Hähnle } else { 100555f036caSPeter Ziljstra /* 100655f036caSPeter Ziljstra * Add in stamp order, waking up waiters that must kill 100755f036caSPeter Ziljstra * themselves. 100855f036caSPeter Ziljstra */ 10096baa5c60SNicolai Hähnle ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx); 10106baa5c60SNicolai Hähnle if (ret) 101155f036caSPeter Ziljstra goto err_early_kill; 10126baa5c60SNicolai Hähnle 10136baa5c60SNicolai Hähnle waiter.ww_ctx = ww_ctx; 10146baa5c60SNicolai Hähnle } 10156baa5c60SNicolai Hähnle 1016d269a8b8SDavidlohr Bueso waiter.task = current; 101701768b42SPeter Zijlstra 1018642fa448SDavidlohr Bueso set_current_state(state); 101901768b42SPeter Zijlstra for (;;) { 10205bbd7e64SPeter Zijlstra /* 10215bbd7e64SPeter Zijlstra * Once we hold wait_lock, we're serialized against 10225bbd7e64SPeter Zijlstra * mutex_unlock() handing the lock off to us, do a trylock 10235bbd7e64SPeter Zijlstra * before testing the error conditions to make sure we pick up 10245bbd7e64SPeter Zijlstra * the handoff. 10255bbd7e64SPeter Zijlstra */ 1026e274795eSPeter Zijlstra if (__mutex_trylock(lock)) 10275bbd7e64SPeter Zijlstra goto acquired; 102801768b42SPeter Zijlstra 102901768b42SPeter Zijlstra /* 103055f036caSPeter Ziljstra * Check for signals and kill conditions while holding 10315bbd7e64SPeter Zijlstra * wait_lock. This ensures the lock cancellation is ordered 10325bbd7e64SPeter Zijlstra * against mutex_unlock() and wake-ups do not go missing. 103301768b42SPeter Zijlstra */ 10343bb5f4acSDavidlohr Bueso if (signal_pending_state(state, current)) { 103501768b42SPeter Zijlstra ret = -EINTR; 103601768b42SPeter Zijlstra goto err; 103701768b42SPeter Zijlstra } 103801768b42SPeter Zijlstra 103955f036caSPeter Ziljstra if (use_ww_ctx && ww_ctx) { 104055f036caSPeter Ziljstra ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx); 104101768b42SPeter Zijlstra if (ret) 104201768b42SPeter Zijlstra goto err; 104301768b42SPeter Zijlstra } 104401768b42SPeter Zijlstra 1045b9c16a0eSPeter Zijlstra spin_unlock(&lock->wait_lock); 104601768b42SPeter Zijlstra schedule_preempt_disabled(); 10479d659ae1SPeter Zijlstra 10486baa5c60SNicolai Hähnle /* 10496baa5c60SNicolai Hähnle * ww_mutex needs to always recheck its position since its waiter 10506baa5c60SNicolai Hähnle * list is not FIFO ordered. 
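 *
 * (A waiter with an older context may have been added in front of us
 *  while we slept, so we can stop being "first" without ever moving.)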
10516baa5c60SNicolai Hähnle */ 10526baa5c60SNicolai Hähnle if ((use_ww_ctx && ww_ctx) || !first) { 10536baa5c60SNicolai Hähnle first = __mutex_waiter_is_first(lock, &waiter); 10546baa5c60SNicolai Hähnle if (first) 10559d659ae1SPeter Zijlstra __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF); 10569d659ae1SPeter Zijlstra } 10575bbd7e64SPeter Zijlstra 1058642fa448SDavidlohr Bueso set_current_state(state); 10595bbd7e64SPeter Zijlstra /* 10605bbd7e64SPeter Zijlstra * Here we order against unlock; we must either see it change 10615bbd7e64SPeter Zijlstra * state back to RUNNING and fall through the next schedule(), 10625bbd7e64SPeter Zijlstra * or we must see its unlock and acquire. 10635bbd7e64SPeter Zijlstra */ 1064e274795eSPeter Zijlstra if (__mutex_trylock(lock) || 1065c516df97SNicolai Hähnle (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter))) 10665bbd7e64SPeter Zijlstra break; 10675bbd7e64SPeter Zijlstra 1068b9c16a0eSPeter Zijlstra spin_lock(&lock->wait_lock); 106901768b42SPeter Zijlstra } 1070b9c16a0eSPeter Zijlstra spin_lock(&lock->wait_lock); 10715bbd7e64SPeter Zijlstra acquired: 1072642fa448SDavidlohr Bueso __set_current_state(TASK_RUNNING); 107351587bcfSDavidlohr Bueso 107408295b3bSThomas Hellstrom if (use_ww_ctx && ww_ctx) { 107508295b3bSThomas Hellstrom /* 107608295b3bSThomas Hellstrom * Wound-Wait; we stole the lock (!first_waiter), check the 107708295b3bSThomas Hellstrom * waiters as anyone might want to wound us. 107808295b3bSThomas Hellstrom */ 107908295b3bSThomas Hellstrom if (!ww_ctx->is_wait_die && 108008295b3bSThomas Hellstrom !__mutex_waiter_is_first(lock, &waiter)) 108108295b3bSThomas Hellstrom __ww_mutex_check_waiters(lock, ww_ctx); 108208295b3bSThomas Hellstrom } 108308295b3bSThomas Hellstrom 1084d269a8b8SDavidlohr Bueso mutex_remove_waiter(lock, &waiter, current); 108501768b42SPeter Zijlstra if (likely(list_empty(&lock->wait_list))) 10869d659ae1SPeter Zijlstra __mutex_clear_flag(lock, MUTEX_FLAGS); 10873ca0ff57SPeter Zijlstra 108801768b42SPeter Zijlstra debug_mutex_free_waiter(&waiter); 108901768b42SPeter Zijlstra 109001768b42SPeter Zijlstra skip_wait: 109101768b42SPeter Zijlstra /* got the lock - cleanup and rejoice! 
*/ 109201768b42SPeter Zijlstra lock_acquired(&lock->dep_map, ip); 109301768b42SPeter Zijlstra 1094ea9e0fb8SNicolai Hähnle if (use_ww_ctx && ww_ctx) 109555f036caSPeter Ziljstra ww_mutex_lock_acquired(ww, ww_ctx); 109601768b42SPeter Zijlstra 1097b9c16a0eSPeter Zijlstra spin_unlock(&lock->wait_lock); 109801768b42SPeter Zijlstra preempt_enable(); 109901768b42SPeter Zijlstra return 0; 110001768b42SPeter Zijlstra 110101768b42SPeter Zijlstra err: 1102642fa448SDavidlohr Bueso __set_current_state(TASK_RUNNING); 1103d269a8b8SDavidlohr Bueso mutex_remove_waiter(lock, &waiter, current); 110455f036caSPeter Ziljstra err_early_kill: 1105b9c16a0eSPeter Zijlstra spin_unlock(&lock->wait_lock); 110601768b42SPeter Zijlstra debug_mutex_free_waiter(&waiter); 110701768b42SPeter Zijlstra mutex_release(&lock->dep_map, 1, ip); 110801768b42SPeter Zijlstra preempt_enable(); 110901768b42SPeter Zijlstra return ret; 111001768b42SPeter Zijlstra } 111101768b42SPeter Zijlstra 1112427b1820SPeter Zijlstra static int __sched 1113427b1820SPeter Zijlstra __mutex_lock(struct mutex *lock, long state, unsigned int subclass, 1114427b1820SPeter Zijlstra struct lockdep_map *nest_lock, unsigned long ip) 1115427b1820SPeter Zijlstra { 1116427b1820SPeter Zijlstra return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false); 1117427b1820SPeter Zijlstra } 1118427b1820SPeter Zijlstra 1119427b1820SPeter Zijlstra static int __sched 1120427b1820SPeter Zijlstra __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass, 1121427b1820SPeter Zijlstra struct lockdep_map *nest_lock, unsigned long ip, 1122427b1820SPeter Zijlstra struct ww_acquire_ctx *ww_ctx) 1123427b1820SPeter Zijlstra { 1124427b1820SPeter Zijlstra return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true); 1125427b1820SPeter Zijlstra } 1126427b1820SPeter Zijlstra 112701768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_LOCK_ALLOC 112801768b42SPeter Zijlstra void __sched 112901768b42SPeter Zijlstra mutex_lock_nested(struct mutex *lock, unsigned int subclass) 113001768b42SPeter Zijlstra { 1131427b1820SPeter Zijlstra __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_); 113201768b42SPeter Zijlstra } 113301768b42SPeter Zijlstra 113401768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(mutex_lock_nested); 113501768b42SPeter Zijlstra 113601768b42SPeter Zijlstra void __sched 113701768b42SPeter Zijlstra _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) 113801768b42SPeter Zijlstra { 1139427b1820SPeter Zijlstra __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_); 114001768b42SPeter Zijlstra } 114101768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock); 114201768b42SPeter Zijlstra 114301768b42SPeter Zijlstra int __sched 114401768b42SPeter Zijlstra mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass) 114501768b42SPeter Zijlstra { 1146427b1820SPeter Zijlstra return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_); 114701768b42SPeter Zijlstra } 114801768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(mutex_lock_killable_nested); 114901768b42SPeter Zijlstra 115001768b42SPeter Zijlstra int __sched 115101768b42SPeter Zijlstra mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass) 115201768b42SPeter Zijlstra { 1153427b1820SPeter Zijlstra return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_); 115401768b42SPeter Zijlstra } 115501768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested); 115601768b42SPeter Zijlstra 11571460cb65STejun Heo void __sched 
11571460cb65STejun Heo void __sched
11581460cb65STejun Heo mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
11591460cb65STejun Heo {
11601460cb65STejun Heo         int token;
11611460cb65STejun Heo 
11621460cb65STejun Heo         might_sleep();
11631460cb65STejun Heo 
11641460cb65STejun Heo         token = io_schedule_prepare();
11651460cb65STejun Heo         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
11661460cb65STejun Heo                             subclass, NULL, _RET_IP_, NULL, 0);
11671460cb65STejun Heo         io_schedule_finish(token);
11681460cb65STejun Heo }
11691460cb65STejun Heo EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
11701460cb65STejun Heo 
117101768b42SPeter Zijlstra static inline int
117201768b42SPeter Zijlstra ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
117301768b42SPeter Zijlstra {
117401768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
117501768b42SPeter Zijlstra         unsigned tmp;
117601768b42SPeter Zijlstra 
117701768b42SPeter Zijlstra         if (ctx->deadlock_inject_countdown-- == 0) {
117801768b42SPeter Zijlstra                 tmp = ctx->deadlock_inject_interval;
117901768b42SPeter Zijlstra                 if (tmp > UINT_MAX/4)
118001768b42SPeter Zijlstra                         tmp = UINT_MAX;
118101768b42SPeter Zijlstra                 else
118201768b42SPeter Zijlstra                         tmp = tmp*2 + tmp + tmp/2;
118301768b42SPeter Zijlstra 
118401768b42SPeter Zijlstra                 ctx->deadlock_inject_interval = tmp;
118501768b42SPeter Zijlstra                 ctx->deadlock_inject_countdown = tmp;
118601768b42SPeter Zijlstra                 ctx->contending_lock = lock;
118701768b42SPeter Zijlstra 
118801768b42SPeter Zijlstra                 ww_mutex_unlock(lock);
118901768b42SPeter Zijlstra 
119001768b42SPeter Zijlstra                 return -EDEADLK;
119101768b42SPeter Zijlstra         }
119201768b42SPeter Zijlstra #endif
119301768b42SPeter Zijlstra 
119401768b42SPeter Zijlstra         return 0;
119501768b42SPeter Zijlstra }
119601768b42SPeter Zijlstra 
119701768b42SPeter Zijlstra int __sched
1198c5470b22SNicolai Hähnle ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
119901768b42SPeter Zijlstra {
120001768b42SPeter Zijlstra         int ret;
120101768b42SPeter Zijlstra 
120201768b42SPeter Zijlstra         might_sleep();
1203427b1820SPeter Zijlstra         ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
1204ea9e0fb8SNicolai Hähnle                               0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1205427b1820SPeter Zijlstra                               ctx);
1206ea9e0fb8SNicolai Hähnle         if (!ret && ctx && ctx->acquired > 1)
120701768b42SPeter Zijlstra                 return ww_mutex_deadlock_injection(lock, ctx);
120801768b42SPeter Zijlstra 
120901768b42SPeter Zijlstra         return ret;
121001768b42SPeter Zijlstra }
1211c5470b22SNicolai Hähnle EXPORT_SYMBOL_GPL(ww_mutex_lock);
121201768b42SPeter Zijlstra 
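/*
 * Editor's note -- illustrative sketch, not part of mutex.c: the -EDEADLK
 * return from ww_mutex_lock() (whether from real contention or injected by
 * ww_mutex_deadlock_injection() under CONFIG_DEBUG_WW_MUTEX_SLOWPATH) is
 * meant to be handled with the back-off pattern from
 * Documentation/locking/ww-mutex-design.rst.  The objects a and b and
 * my_ww_class are made-up names.
 *
 *	static DEFINE_WW_CLASS(my_ww_class);
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *
 *	ww_mutex_lock(&a->lock, &ctx);		-- nothing held yet, cannot deadlock
 *	if (ww_mutex_lock(&b->lock, &ctx) == -EDEADLK) {
 *		-- we lost: drop what we hold, then sleep for the contended lock
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_lock_slow(&b->lock, &ctx);
 *		ww_mutex_lock(&a->lock, &ctx);	-- a real caller loops on -EDEADLK
 *	}
 *	ww_acquire_done(&ctx);
 *
 *	... use a and b ...
 *
 *	ww_mutex_unlock(&a->lock);
 *	ww_mutex_unlock(&b->lock);
 *	ww_acquire_fini(&ctx);
 */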
121301768b42SPeter Zijlstra int __sched
1214c5470b22SNicolai Hähnle ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
121501768b42SPeter Zijlstra {
121601768b42SPeter Zijlstra         int ret;
121701768b42SPeter Zijlstra 
121801768b42SPeter Zijlstra         might_sleep();
1219427b1820SPeter Zijlstra         ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
1220ea9e0fb8SNicolai Hähnle                               0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1221427b1820SPeter Zijlstra                               ctx);
122201768b42SPeter Zijlstra 
1223ea9e0fb8SNicolai Hähnle         if (!ret && ctx && ctx->acquired > 1)
122401768b42SPeter Zijlstra                 return ww_mutex_deadlock_injection(lock, ctx);
122501768b42SPeter Zijlstra 
122601768b42SPeter Zijlstra         return ret;
122701768b42SPeter Zijlstra }
1228c5470b22SNicolai Hähnle EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
122901768b42SPeter Zijlstra 
123001768b42SPeter Zijlstra #endif
123101768b42SPeter Zijlstra 
123201768b42SPeter Zijlstra /*
123301768b42SPeter Zijlstra  * Release the lock, slowpath:
123401768b42SPeter Zijlstra  */
12353ca0ff57SPeter Zijlstra static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
123601768b42SPeter Zijlstra {
12379d659ae1SPeter Zijlstra         struct task_struct *next = NULL;
1238194a6b5bSWaiman Long         DEFINE_WAKE_Q(wake_q);
1239b9c16a0eSPeter Zijlstra         unsigned long owner;
124001768b42SPeter Zijlstra 
12413ca0ff57SPeter Zijlstra         mutex_release(&lock->dep_map, 1, ip);
12423ca0ff57SPeter Zijlstra 
124301768b42SPeter Zijlstra         /*
12449d659ae1SPeter Zijlstra          * Release the lock before (potentially) taking the spinlock such that
12459d659ae1SPeter Zijlstra          * other contenders can get on with things ASAP.
12469d659ae1SPeter Zijlstra          *
12479d659ae1SPeter Zijlstra          * Except when HANDOFF, in that case we must not clear the owner field,
12489d659ae1SPeter Zijlstra          * but instead set it to the top waiter.
124901768b42SPeter Zijlstra          */
12509d659ae1SPeter Zijlstra         owner = atomic_long_read(&lock->owner);
12519d659ae1SPeter Zijlstra         for (;;) {
12529d659ae1SPeter Zijlstra                 unsigned long old;
12539d659ae1SPeter Zijlstra 
12549d659ae1SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
12559d659ae1SPeter Zijlstra                 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
1256e274795eSPeter Zijlstra                 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
12579d659ae1SPeter Zijlstra #endif
12589d659ae1SPeter Zijlstra 
12599d659ae1SPeter Zijlstra                 if (owner & MUTEX_FLAG_HANDOFF)
12609d659ae1SPeter Zijlstra                         break;
12619d659ae1SPeter Zijlstra 
12629d659ae1SPeter Zijlstra                 old = atomic_long_cmpxchg_release(&lock->owner, owner,
12639d659ae1SPeter Zijlstra                                                   __owner_flags(owner));
12649d659ae1SPeter Zijlstra                 if (old == owner) {
12659d659ae1SPeter Zijlstra                         if (owner & MUTEX_FLAG_WAITERS)
12669d659ae1SPeter Zijlstra                                 break;
12679d659ae1SPeter Zijlstra 
12683ca0ff57SPeter Zijlstra                         return;
12699d659ae1SPeter Zijlstra                 }
12709d659ae1SPeter Zijlstra 
12719d659ae1SPeter Zijlstra                 owner = old;
12729d659ae1SPeter Zijlstra         }
127301768b42SPeter Zijlstra 
1274b9c16a0eSPeter Zijlstra         spin_lock(&lock->wait_lock);
12751d8fe7dcSJason Low         debug_mutex_unlock(lock);
127601768b42SPeter Zijlstra         if (!list_empty(&lock->wait_list)) {
127701768b42SPeter Zijlstra                 /* get the first entry from the wait-list: */
127801768b42SPeter Zijlstra                 struct mutex_waiter *waiter =
12799d659ae1SPeter Zijlstra                         list_first_entry(&lock->wait_list,
128001768b42SPeter Zijlstra                                          struct mutex_waiter, list);
128101768b42SPeter Zijlstra 
12829d659ae1SPeter Zijlstra                 next = waiter->task;
12839d659ae1SPeter Zijlstra 
128401768b42SPeter Zijlstra                 debug_mutex_wake_waiter(lock, waiter);
12859d659ae1SPeter Zijlstra                 wake_q_add(&wake_q, next);
128601768b42SPeter Zijlstra         }
128701768b42SPeter Zijlstra 
12889d659ae1SPeter Zijlstra         if (owner & MUTEX_FLAG_HANDOFF)
12899d659ae1SPeter Zijlstra                 __mutex_handoff(lock, next);
12909d659ae1SPeter Zijlstra 
1291b9c16a0eSPeter Zijlstra         spin_unlock(&lock->wait_lock);
12929d659ae1SPeter Zijlstra 
12931329ce6fSDavidlohr Bueso         wake_up_q(&wake_q);
129401768b42SPeter Zijlstra }
129501768b42SPeter Zijlstra 
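/*
 * Editor's note -- illustrative sketch, not part of mutex.c: the wake_q use
 * above is the general "collect under the lock, wake after dropping it"
 * pattern.  Waking a task while still holding wait_lock would risk it
 * running immediately and contending on the very spinlock we hold.  The
 * shape of the pattern, with hypothetical names:
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	spin_lock(&thing->lock);
 *	list_for_each_entry(w, &thing->waiters, list)
 *		wake_q_add(&wake_q, w->task);
 *	spin_unlock(&thing->lock);
 *
 *	wake_up_q(&wake_q);	-- wakeups happen here, with no lock held
 */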
129601768b42SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
129701768b42SPeter Zijlstra /*
129801768b42SPeter Zijlstra  * Here come the less common (and hence less performance-critical) APIs:
129901768b42SPeter Zijlstra  * mutex_lock_interruptible() and mutex_trylock().
130001768b42SPeter Zijlstra  */
130101768b42SPeter Zijlstra static noinline int __sched
130201768b42SPeter Zijlstra __mutex_lock_killable_slowpath(struct mutex *lock);
130301768b42SPeter Zijlstra 
130401768b42SPeter Zijlstra static noinline int __sched
130501768b42SPeter Zijlstra __mutex_lock_interruptible_slowpath(struct mutex *lock);
130601768b42SPeter Zijlstra 
130701768b42SPeter Zijlstra /**
130845dbac0eSMatthew Wilcox  * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
130945dbac0eSMatthew Wilcox  * @lock: The mutex to be acquired.
131001768b42SPeter Zijlstra  *
131145dbac0eSMatthew Wilcox  * Lock the mutex like mutex_lock(). If a signal is delivered while the
131245dbac0eSMatthew Wilcox  * process is sleeping, this function will return without acquiring the
131345dbac0eSMatthew Wilcox  * mutex.
131401768b42SPeter Zijlstra  *
131545dbac0eSMatthew Wilcox  * Context: Process context.
131645dbac0eSMatthew Wilcox  * Return: 0 if the lock was successfully acquired or %-EINTR if a
131745dbac0eSMatthew Wilcox  * signal arrived.
131801768b42SPeter Zijlstra  */
131901768b42SPeter Zijlstra int __sched mutex_lock_interruptible(struct mutex *lock)
132001768b42SPeter Zijlstra {
132101768b42SPeter Zijlstra         might_sleep();
13223ca0ff57SPeter Zijlstra 
13233ca0ff57SPeter Zijlstra         if (__mutex_trylock_fast(lock))
132401768b42SPeter Zijlstra                 return 0;
13253ca0ff57SPeter Zijlstra 
132601768b42SPeter Zijlstra         return __mutex_lock_interruptible_slowpath(lock);
132701768b42SPeter Zijlstra }
132801768b42SPeter Zijlstra 
132901768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_lock_interruptible);
133001768b42SPeter Zijlstra 
133145dbac0eSMatthew Wilcox /**
133245dbac0eSMatthew Wilcox  * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
133345dbac0eSMatthew Wilcox  * @lock: The mutex to be acquired.
133445dbac0eSMatthew Wilcox  *
133545dbac0eSMatthew Wilcox  * Lock the mutex like mutex_lock(). If a signal which will be fatal to
133645dbac0eSMatthew Wilcox  * the current process is delivered while the process is sleeping, this
133745dbac0eSMatthew Wilcox  * function will return without acquiring the mutex.
133845dbac0eSMatthew Wilcox  *
133945dbac0eSMatthew Wilcox  * Context: Process context.
134045dbac0eSMatthew Wilcox  * Return: 0 if the lock was successfully acquired or %-EINTR if a
134145dbac0eSMatthew Wilcox  * fatal signal arrived.
134245dbac0eSMatthew Wilcox  */
134301768b42SPeter Zijlstra int __sched mutex_lock_killable(struct mutex *lock)
134401768b42SPeter Zijlstra {
134501768b42SPeter Zijlstra         might_sleep();
13463ca0ff57SPeter Zijlstra 
13473ca0ff57SPeter Zijlstra         if (__mutex_trylock_fast(lock))
134801768b42SPeter Zijlstra                 return 0;
13493ca0ff57SPeter Zijlstra 
135001768b42SPeter Zijlstra         return __mutex_lock_killable_slowpath(lock);
135101768b42SPeter Zijlstra }
135201768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_lock_killable);
135301768b42SPeter Zijlstra 
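/*
 * Editor's note -- illustrative sketch, not part of mutex.c: the usual way
 * a driver consumes the -EINTR return, typically translating it into
 * -ERESTARTSYS so the syscall can be restarted transparently.  The my_dev
 * structure, its lock and dev_write() are made-up names.
 *
 *	static ssize_t dev_write(struct file *file, const char __user *buf,
 *				 size_t len, loff_t *ppos)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;
 *
 *		... touch dev state ...
 *
 *		mutex_unlock(&dev->lock);
 *		return len;
 *	}
 *
 * mutex_lock_killable() is the variant to prefer when only a fatal signal
 * should abort the wait, so that ordinary signals cannot force an error
 * path on the caller.
 */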
135445dbac0eSMatthew Wilcox /**
135545dbac0eSMatthew Wilcox  * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
135645dbac0eSMatthew Wilcox  * @lock: The mutex to be acquired.
135745dbac0eSMatthew Wilcox  *
135845dbac0eSMatthew Wilcox  * Lock the mutex like mutex_lock(). While the task is waiting for this
135945dbac0eSMatthew Wilcox  * mutex, it will be accounted as being in the IO wait state by the
136045dbac0eSMatthew Wilcox  * scheduler.
136145dbac0eSMatthew Wilcox  *
136245dbac0eSMatthew Wilcox  * Context: Process context.
136345dbac0eSMatthew Wilcox  */
13641460cb65STejun Heo void __sched mutex_lock_io(struct mutex *lock)
13651460cb65STejun Heo {
13661460cb65STejun Heo         int token;
13671460cb65STejun Heo 
13681460cb65STejun Heo         token = io_schedule_prepare();
13691460cb65STejun Heo         mutex_lock(lock);
13701460cb65STejun Heo         io_schedule_finish(token);
13711460cb65STejun Heo }
13721460cb65STejun Heo EXPORT_SYMBOL_GPL(mutex_lock_io);
13731460cb65STejun Heo 
13743ca0ff57SPeter Zijlstra static noinline void __sched
13753ca0ff57SPeter Zijlstra __mutex_lock_slowpath(struct mutex *lock)
137601768b42SPeter Zijlstra {
1377427b1820SPeter Zijlstra         __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
137801768b42SPeter Zijlstra }
137901768b42SPeter Zijlstra 
138001768b42SPeter Zijlstra static noinline int __sched
138101768b42SPeter Zijlstra __mutex_lock_killable_slowpath(struct mutex *lock)
138201768b42SPeter Zijlstra {
1383427b1820SPeter Zijlstra         return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
138401768b42SPeter Zijlstra }
138501768b42SPeter Zijlstra 
138601768b42SPeter Zijlstra static noinline int __sched
138701768b42SPeter Zijlstra __mutex_lock_interruptible_slowpath(struct mutex *lock)
138801768b42SPeter Zijlstra {
1389427b1820SPeter Zijlstra         return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
139001768b42SPeter Zijlstra }
139101768b42SPeter Zijlstra 
139201768b42SPeter Zijlstra static noinline int __sched
139301768b42SPeter Zijlstra __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
139401768b42SPeter Zijlstra {
1395427b1820SPeter Zijlstra         return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
1396427b1820SPeter Zijlstra                                _RET_IP_, ctx);
139701768b42SPeter Zijlstra }
139801768b42SPeter Zijlstra 
139901768b42SPeter Zijlstra static noinline int __sched
140001768b42SPeter Zijlstra __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
140101768b42SPeter Zijlstra                                        struct ww_acquire_ctx *ctx)
140201768b42SPeter Zijlstra {
1403427b1820SPeter Zijlstra         return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
1404427b1820SPeter Zijlstra                                _RET_IP_, ctx);
140501768b42SPeter Zijlstra }
140601768b42SPeter Zijlstra 
140701768b42SPeter Zijlstra #endif
140801768b42SPeter Zijlstra 
140901768b42SPeter Zijlstra /**
141001768b42SPeter Zijlstra  * mutex_trylock - try to acquire the mutex, without waiting
141101768b42SPeter Zijlstra  * @lock: the mutex to be acquired
141201768b42SPeter Zijlstra  *
141301768b42SPeter Zijlstra  * Try to acquire the mutex atomically. Returns 1 if the mutex
141401768b42SPeter Zijlstra  * has been acquired successfully, and 0 on contention.
141501768b42SPeter Zijlstra  *
141601768b42SPeter Zijlstra  * NOTE: this function follows the spin_trylock() convention, so
141701768b42SPeter Zijlstra  * it is negated from the down_trylock() return values! Be careful
141801768b42SPeter Zijlstra  * about this when converting semaphore users to mutexes.
141901768b42SPeter Zijlstra  *
142001768b42SPeter Zijlstra  * This function must not be used in interrupt context. The
142101768b42SPeter Zijlstra  * mutex must be released by the same task that acquired it.
142201768b42SPeter Zijlstra  */
142301768b42SPeter Zijlstra int __sched mutex_trylock(struct mutex *lock)
142401768b42SPeter Zijlstra {
14256c11c6e3SSebastian Andrzej Siewior         bool locked;
142601768b42SPeter Zijlstra 
14276c11c6e3SSebastian Andrzej Siewior #ifdef CONFIG_DEBUG_MUTEXES
14286c11c6e3SSebastian Andrzej Siewior         DEBUG_LOCKS_WARN_ON(lock->magic != lock);
14296c11c6e3SSebastian Andrzej Siewior #endif
14306c11c6e3SSebastian Andrzej Siewior 
14316c11c6e3SSebastian Andrzej Siewior         locked = __mutex_trylock(lock);
14323ca0ff57SPeter Zijlstra         if (locked)
14333ca0ff57SPeter Zijlstra                 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
143401768b42SPeter Zijlstra 
14353ca0ff57SPeter Zijlstra         return locked;
143601768b42SPeter Zijlstra }
143701768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_trylock);
143801768b42SPeter Zijlstra 
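/*
 * Editor's note -- illustrative sketch, not part of mutex.c: because
 * mutex_trylock() follows the spin_trylock() convention, a nonzero return
 * means the lock is now held.  Opportunistic work, such as a hypothetical
 * cache shrinker, is the typical use; all names below are made up.
 *
 *	if (mutex_trylock(&cache->lock)) {
 *		shrink_one_cache(cache);
 *		mutex_unlock(&cache->lock);
 *	} else {
 *		-- contended: skip it this time rather than sleep
 *	}
 *
 * Contrast with semaphores, where down_trylock() returns 0 on success;
 * that inversion is exactly what the NOTE above warns about.
 */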
143901768b42SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
144001768b42SPeter Zijlstra int __sched
1441c5470b22SNicolai Hähnle ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
144201768b42SPeter Zijlstra {
144301768b42SPeter Zijlstra         might_sleep();
144401768b42SPeter Zijlstra 
14453ca0ff57SPeter Zijlstra         if (__mutex_trylock_fast(&lock->base)) {
1446ea9e0fb8SNicolai Hähnle                 if (ctx)
144701768b42SPeter Zijlstra                         ww_mutex_set_context_fastpath(lock, ctx);
14483ca0ff57SPeter Zijlstra                 return 0;
14493ca0ff57SPeter Zijlstra         }
14503ca0ff57SPeter Zijlstra 
14513ca0ff57SPeter Zijlstra         return __ww_mutex_lock_slowpath(lock, ctx);
145201768b42SPeter Zijlstra }
1453c5470b22SNicolai Hähnle EXPORT_SYMBOL(ww_mutex_lock);
145401768b42SPeter Zijlstra 
145501768b42SPeter Zijlstra int __sched
1456c5470b22SNicolai Hähnle ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
145701768b42SPeter Zijlstra {
145801768b42SPeter Zijlstra         might_sleep();
145901768b42SPeter Zijlstra 
14603ca0ff57SPeter Zijlstra         if (__mutex_trylock_fast(&lock->base)) {
1461ea9e0fb8SNicolai Hähnle                 if (ctx)
146201768b42SPeter Zijlstra                         ww_mutex_set_context_fastpath(lock, ctx);
14633ca0ff57SPeter Zijlstra                 return 0;
14643ca0ff57SPeter Zijlstra         }
14653ca0ff57SPeter Zijlstra 
14663ca0ff57SPeter Zijlstra         return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
146701768b42SPeter Zijlstra }
1468c5470b22SNicolai Hähnle EXPORT_SYMBOL(ww_mutex_lock_interruptible);
146901768b42SPeter Zijlstra 
147001768b42SPeter Zijlstra #endif
147101768b42SPeter Zijlstra 
147201768b42SPeter Zijlstra /**
147301768b42SPeter Zijlstra  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
147401768b42SPeter Zijlstra  * @cnt: the atomic which we are to dec
147501768b42SPeter Zijlstra  * @lock: the mutex to return holding if we dec to 0
147601768b42SPeter Zijlstra  *
147701768b42SPeter Zijlstra  * return true and hold lock if we dec to 0, return false otherwise
147801768b42SPeter Zijlstra  */
147901768b42SPeter Zijlstra int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
148001768b42SPeter Zijlstra {
148101768b42SPeter Zijlstra         /* dec if we can't possibly hit 0 */
148201768b42SPeter Zijlstra         if (atomic_add_unless(cnt, -1, 1))
148301768b42SPeter Zijlstra                 return 0;
148401768b42SPeter Zijlstra         /* we might hit 0, so take the lock */
148501768b42SPeter Zijlstra         mutex_lock(lock);
148601768b42SPeter Zijlstra         if (!atomic_dec_and_test(cnt)) {
148701768b42SPeter Zijlstra                 /* when we actually did the dec, we didn't hit 0 */
148801768b42SPeter Zijlstra                 mutex_unlock(lock);
148901768b42SPeter Zijlstra                 return 0;
149001768b42SPeter Zijlstra         }
149101768b42SPeter Zijlstra         /* we hit 0, and we hold the lock */
149201768b42SPeter Zijlstra         return 1;
149301768b42SPeter Zijlstra }
149401768b42SPeter Zijlstra EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
1495
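/*
 * Editor's note -- illustrative sketch, not part of mutex.c: the classic
 * use of atomic_dec_and_mutex_lock() is a refcounted object whose final
 * put must remove it from a list protected by a mutex.  The obj type,
 * obj_list_lock and kfree_obj() are made-up names.
 *
 *	void obj_put(struct obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refcnt, &obj_list_lock))
 *			return;		-- refcount is still nonzero
 *
 *		-- refcount hit zero and obj_list_lock is now held
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		kfree_obj(obj);
 *	}
 *
 * Despite the "return true" wording above, the return type is int: nonzero
 * means the count reached zero and the mutex is held, mirroring
 * atomic_dec_and_lock() for spinlocks.
 */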