/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
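/*
 * Example (illustrative sketch, not part of this file; 'my_mutex',
 * 'my_dev' and my_dev_setup() are made-up names): a mutex is either
 * defined statically or initialized at runtime, never by memset() or
 * copying:
 *
 *	static DEFINE_MUTEX(my_mutex);		// static definition
 *
 *	struct my_dev {
 *		struct mutex lock;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		mutex_init(&dev->lock);		// runtime initialization
 *	}
 */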
/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned to at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * We set the HANDOFF bit, we must make sure it doesn't live
		 * past the point where we acquire it. This would be possible
		 * if we (accidentally) set the bit on an unlocked mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}
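/*
 * Worked example (illustrative, addresses made up): with a task_struct
 * at 0xffff888012345600, a contended lock with a non-empty wait list
 * and a pending handoff stores:
 *
 *	owner = 0xffff888012345600 | MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF
 *	      = 0xffff888012345603
 *
 * so __owner_task(owner) yields the task_struct pointer back and
 * __owner_flags(owner) yields 0x03.
 */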
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}
/*
 * Give up ownership to a specific task; when @task = NULL, this is
 * equivalent to a regular unlock. Sets PICKUP on a handoff, clears
 * HANDOFF, preserves WAITERS. Provides RELEASE semantics like a regular
 * unlock; __mutex_trylock() provides the matching ACQUIRE semantics for
 * the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging.)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
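/*
 * Example (illustrative sketch; 'dev' and its fields are made-up
 * names): the typical pattern brackets a critical section touching
 * shared state, and may sleep while holding the lock:
 *
 *	mutex_lock(&dev->lock);
 *	dev->open_count++;		// protected by dev->lock
 *	mutex_unlock(&dev->lock);
 */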
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	return a->stamp - b->stamp <= LONG_MAX &&
	       (a->stamp != b->stamp || a > b);
}
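/*
 * Worked example (illustrative): __ww_ctx_stamp_after(a, b) is true when
 * @a is the younger context, i.e. the one that must back off in favour
 * of @b. The unsigned subtraction keeps the comparison correct across
 * stamp wraparound, and the pointer comparison breaks (rare) ties:
 *
 *	a->stamp = 105, b->stamp = 100: 105 - 100 = 5 <= LONG_MAX -> true
 *	a->stamp = 100, b->stamp = 105: 100 - 105 wraps to a value
 *	                                > LONG_MAX -> false
 */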
3654bd19084SDavidlohr Bueso */ 3664bd19084SDavidlohr Bueso static __always_inline void 367427b1820SPeter Zijlstra ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 3684bd19084SDavidlohr Bueso { 3694bd19084SDavidlohr Bueso ww_mutex_lock_acquired(lock, ctx); 3704bd19084SDavidlohr Bueso lock->ctx = ctx; 3714bd19084SDavidlohr Bueso } 37276916515SDavidlohr Bueso 37301768b42SPeter Zijlstra #ifdef CONFIG_MUTEX_SPIN_ON_OWNER 374c516df97SNicolai Hähnle 375c516df97SNicolai Hähnle static inline 376c516df97SNicolai Hähnle bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, 377c516df97SNicolai Hähnle struct mutex_waiter *waiter) 378c516df97SNicolai Hähnle { 379c516df97SNicolai Hähnle struct ww_mutex *ww; 380c516df97SNicolai Hähnle 381c516df97SNicolai Hähnle ww = container_of(lock, struct ww_mutex, base); 382c516df97SNicolai Hähnle 38301768b42SPeter Zijlstra /* 384c516df97SNicolai Hähnle * If ww->ctx is set the contents are undefined, only 385c516df97SNicolai Hähnle * by acquiring wait_lock there is a guarantee that 386c516df97SNicolai Hähnle * they are not invalid when reading. 387c516df97SNicolai Hähnle * 388c516df97SNicolai Hähnle * As such, when deadlock detection needs to be 389c516df97SNicolai Hähnle * performed the optimistic spinning cannot be done. 390c516df97SNicolai Hähnle * 391c516df97SNicolai Hähnle * Check this in every inner iteration because we may 392c516df97SNicolai Hähnle * be racing against another thread's ww_mutex_lock. 393c516df97SNicolai Hähnle */ 394c516df97SNicolai Hähnle if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx)) 395c516df97SNicolai Hähnle return false; 396c516df97SNicolai Hähnle 397c516df97SNicolai Hähnle /* 398c516df97SNicolai Hähnle * If we aren't on the wait list yet, cancel the spin 399c516df97SNicolai Hähnle * if there are waiters. We want to avoid stealing the 400c516df97SNicolai Hähnle * lock from a waiter with an earlier stamp, since the 401c516df97SNicolai Hähnle * other thread may already own a lock that we also 402c516df97SNicolai Hähnle * need. 403c516df97SNicolai Hähnle */ 404c516df97SNicolai Hähnle if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS)) 405c516df97SNicolai Hähnle return false; 406c516df97SNicolai Hähnle 407c516df97SNicolai Hähnle /* 408c516df97SNicolai Hähnle * Similarly, stop spinning if we are no longer the 409c516df97SNicolai Hähnle * first waiter. 410c516df97SNicolai Hähnle */ 411c516df97SNicolai Hähnle if (waiter && !__mutex_waiter_is_first(lock, waiter)) 412c516df97SNicolai Hähnle return false; 413c516df97SNicolai Hähnle 414c516df97SNicolai Hähnle return true; 415c516df97SNicolai Hähnle } 416c516df97SNicolai Hähnle 41701768b42SPeter Zijlstra /* 41825f13b40SNicolai Hähnle * Look out! "owner" is an entirely speculative pointer access and not 41925f13b40SNicolai Hähnle * reliable. 42025f13b40SNicolai Hähnle * 42125f13b40SNicolai Hähnle * "noinline" so that this function shows up on perf profiles. 
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu, dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted() to detect lock holder preemption.
		 */
		if (!owner->on_cpu || need_resched() ||
				vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * To cope with lock holder preemption, skip spinning both when the
	 * owner is not running on a CPU and when its (v)CPU is preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;


fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);
/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
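/*
 * Example (illustrative sketch; 'my_class', 'a' and 'b' are made-up
 * names, following the pattern in Documentation/locking/ww-mutex-design.txt):
 * taking two ww_mutexes with deadlock backoff looks roughly like:
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &my_class);
 *
 *	ret = ww_mutex_lock(&a->lock, &ctx);	// first lock cannot deadlock
 *	ret = ww_mutex_lock(&b->lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		// back off: drop everything we hold, then sleep on the
 *		// contended lock before retrying the rest of the sequence
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_lock_slow(&b->lock, &ctx);
 *		ret = ww_mutex_lock(&a->lock, &ctx);
 *	}
 *	ww_acquire_done(&ctx);
 *
 *	// ... use a and b ...
 *
 *	ww_mutex_unlock(&b->lock);
 *	ww_mutex_unlock(&a->lock);
 *	ww_acquire_fini(&ctx);
 */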
static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
			    struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct mutex_waiter *cur;

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		goto deadlock;

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must back off.
	 */
	cur = waiter;
	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
		if (cur->ww_ctx)
			goto deadlock;
	}

	return 0;

deadlock:
#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
	ctx->contending_lock = ww;
#endif
	return -EDEADLK;
}

static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
		      struct mutex *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos;

	if (!ww_ctx) {
		list_add_tail(&waiter->list, &lock->wait_list);
		return 0;
	}

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them.
	 */
	pos = &lock->wait_list;
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/* Back off immediately if necessary. */
			if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
				struct ww_mutex *ww;

				ww = container_of(lock, struct ww_mutex, base);
				DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
				ww_ctx->contending_lock = ww;
#endif
				return -EDEADLK;
			}

			break;
		}

		pos = &cur->list;

		/*
		 * Wake up the waiter so that it gets a chance to back
		 * off.
		 */
		if (cur->ww_ctx->acquired > 0) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	list_add_tail(&waiter->list, pos);
	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	might_sleep();

	ww = container_of(lock, struct ww_mutex, base);
	if (use_ww_ctx && ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx && ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (use_ww_ctx && ww_ctx)
			__ww_mutex_wakeup_for_backoff(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, current);

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		list_add_tail(&waiter.list, &lock->wait_list);

#ifdef CONFIG_DEBUG_MUTEXES
		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
	} else {
		/* Add in stamp order, waking up waiters that must back off. */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_backoff;

		waiter.ww_ctx = ww_ctx;
	}

	waiter.task = current;

	if (__mutex_waiter_is_first(lock, &waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);

	set_current_state(state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us; do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and wound conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (unlikely(signal_pending_state(state, current))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		/*
		 * ww_mutex needs to always recheck its position since its waiter
		 * list is not FIFO ordered.
		 */
		if ((use_ww_ctx && ww_ctx) || !first) {
			first = __mutex_waiter_is_first(lock, &waiter);
			if (first)
				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
			break;

		spin_lock(&lock->wait_lock);
	}
	spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, current);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx && ww_ctx)
		ww_mutex_set_context_slowpath(ww, ww_ctx);

	spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, current);
err_early_backoff:
	spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
		struct lockdep_map *nest_lock, unsigned long ip,
		struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);
int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
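/*
 * Example (illustrative sketch; 'dev' is a made-up name): callers of
 * mutex_lock_interruptible() must handle the signal case, commonly by
 * backing out with -ERESTARTSYS so the syscall can be restarted:
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;
 *	// ... critical section ...
 *	mutex_unlock(&dev->lock);
 */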
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked = __mutex_trylock(lock);

	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true and hold the lock if we decremented @cnt to 0; return
 * false otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
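/*
 * Example (illustrative sketch; 'obj', 'refcount', 'obj_list_lock' and
 * the fields are made-up names): the classic use is dropping the last
 * reference to an object that is still reachable through a
 * mutex-protected list:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcount, &obj_list_lock)) {
 *		// refcount hit 0 with obj_list_lock held: safe to unlink
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */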