xref: /openbmc/linux/kernel/locking/mutex.c (revision 048661a1f963e9517630f080687d48af79ed784c)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
201768b42SPeter Zijlstra /*
367a6de49SPeter Zijlstra  * kernel/locking/mutex.c
401768b42SPeter Zijlstra  *
501768b42SPeter Zijlstra  * Mutexes: blocking mutual exclusion locks
601768b42SPeter Zijlstra  *
701768b42SPeter Zijlstra  * Started by Ingo Molnar:
801768b42SPeter Zijlstra  *
901768b42SPeter Zijlstra  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
1001768b42SPeter Zijlstra  *
1101768b42SPeter Zijlstra  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
1201768b42SPeter Zijlstra  * David Howells for suggestions and improvements.
1301768b42SPeter Zijlstra  *
1401768b42SPeter Zijlstra  *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
1501768b42SPeter Zijlstra  *    from the -rt tree, where it was originally implemented for rtmutexes
1601768b42SPeter Zijlstra  *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
1701768b42SPeter Zijlstra  *    and Sven Dietrich.
1801768b42SPeter Zijlstra  *
19387b1468SMauro Carvalho Chehab  * Also see Documentation/locking/mutex-design.rst.
2001768b42SPeter Zijlstra  */
2101768b42SPeter Zijlstra #include <linux/mutex.h>
2201768b42SPeter Zijlstra #include <linux/ww_mutex.h>
23174cd4b1SIngo Molnar #include <linux/sched/signal.h>
2401768b42SPeter Zijlstra #include <linux/sched/rt.h>
2584f001e1SIngo Molnar #include <linux/sched/wake_q.h>
26b17b0153SIngo Molnar #include <linux/sched/debug.h>
2701768b42SPeter Zijlstra #include <linux/export.h>
2801768b42SPeter Zijlstra #include <linux/spinlock.h>
2901768b42SPeter Zijlstra #include <linux/interrupt.h>
3001768b42SPeter Zijlstra #include <linux/debug_locks.h>
317a215f89SDavidlohr Bueso #include <linux/osq_lock.h>
3201768b42SPeter Zijlstra 
3301768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
3401768b42SPeter Zijlstra # include "mutex-debug.h"
3501768b42SPeter Zijlstra #else
3601768b42SPeter Zijlstra # include "mutex.h"
3701768b42SPeter Zijlstra #endif
3801768b42SPeter Zijlstra 
3901768b42SPeter Zijlstra void
4001768b42SPeter Zijlstra __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
4101768b42SPeter Zijlstra {
423ca0ff57SPeter Zijlstra 	atomic_long_set(&lock->owner, 0);
4301768b42SPeter Zijlstra 	spin_lock_init(&lock->wait_lock);
4401768b42SPeter Zijlstra 	INIT_LIST_HEAD(&lock->wait_list);
4501768b42SPeter Zijlstra #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
464d9d951eSJason Low 	osq_lock_init(&lock->osq);
4701768b42SPeter Zijlstra #endif
4801768b42SPeter Zijlstra 
4901768b42SPeter Zijlstra 	debug_mutex_init(lock, name, key);
5001768b42SPeter Zijlstra }
5101768b42SPeter Zijlstra EXPORT_SYMBOL(__mutex_init);
5201768b42SPeter Zijlstra 
533ca0ff57SPeter Zijlstra /*
543ca0ff57SPeter Zijlstra  * @owner: contains: 'struct task_struct *' to the current lock owner,
553ca0ff57SPeter Zijlstra  * NULL means not owned. Since task_struct pointers are aligned at
56e274795eSPeter Zijlstra  * at least L1_CACHE_BYTES, we have low bits to store extra state.
573ca0ff57SPeter Zijlstra  *
583ca0ff57SPeter Zijlstra  * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
599d659ae1SPeter Zijlstra  * Bit1 indicates unlock needs to hand the lock to the top-waiter
60e274795eSPeter Zijlstra  * Bit2 indicates handoff has been done and we're waiting for pickup.
613ca0ff57SPeter Zijlstra  */
623ca0ff57SPeter Zijlstra #define MUTEX_FLAG_WAITERS	0x01
639d659ae1SPeter Zijlstra #define MUTEX_FLAG_HANDOFF	0x02
64e274795eSPeter Zijlstra #define MUTEX_FLAG_PICKUP	0x04
653ca0ff57SPeter Zijlstra 
66e274795eSPeter Zijlstra #define MUTEX_FLAGS		0x07
673ca0ff57SPeter Zijlstra 
685f35d5a6SMukesh Ojha /*
695f35d5a6SMukesh Ojha  * Internal helper function; C doesn't allow us to hide it :/
705f35d5a6SMukesh Ojha  *
715f35d5a6SMukesh Ojha  * DO NOT USE (outside of mutex code).
725f35d5a6SMukesh Ojha  */
735f35d5a6SMukesh Ojha static inline struct task_struct *__mutex_owner(struct mutex *lock)
745f35d5a6SMukesh Ojha {
75a037d269SMukesh Ojha 	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
765f35d5a6SMukesh Ojha }
775f35d5a6SMukesh Ojha 
783ca0ff57SPeter Zijlstra static inline struct task_struct *__owner_task(unsigned long owner)
793ca0ff57SPeter Zijlstra {
803ca0ff57SPeter Zijlstra 	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
813ca0ff57SPeter Zijlstra }
823ca0ff57SPeter Zijlstra 
835f35d5a6SMukesh Ojha bool mutex_is_locked(struct mutex *lock)
845f35d5a6SMukesh Ojha {
855f35d5a6SMukesh Ojha 	return __mutex_owner(lock) != NULL;
865f35d5a6SMukesh Ojha }
875f35d5a6SMukesh Ojha EXPORT_SYMBOL(mutex_is_locked);
885f35d5a6SMukesh Ojha 
893ca0ff57SPeter Zijlstra static inline unsigned long __owner_flags(unsigned long owner)
903ca0ff57SPeter Zijlstra {
913ca0ff57SPeter Zijlstra 	return owner & MUTEX_FLAGS;
923ca0ff57SPeter Zijlstra }
933ca0ff57SPeter Zijlstra 
943ca0ff57SPeter Zijlstra /*
95e2db7592SIngo Molnar  * Trylock variant that returns the owning task on failure.
963ca0ff57SPeter Zijlstra  */
97e274795eSPeter Zijlstra static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
983ca0ff57SPeter Zijlstra {
993ca0ff57SPeter Zijlstra 	unsigned long owner, curr = (unsigned long)current;
1003ca0ff57SPeter Zijlstra 
1013ca0ff57SPeter Zijlstra 	owner = atomic_long_read(&lock->owner);
1023ca0ff57SPeter Zijlstra 	for (;;) { /* must loop, can race against a flag */
103ab4e4d9fSPeter Zijlstra 		unsigned long flags = __owner_flags(owner);
104e274795eSPeter Zijlstra 		unsigned long task = owner & ~MUTEX_FLAGS;
1053ca0ff57SPeter Zijlstra 
106e274795eSPeter Zijlstra 		if (task) {
107e274795eSPeter Zijlstra 			if (likely(task != curr))
108e274795eSPeter Zijlstra 				break;
1099d659ae1SPeter Zijlstra 
110e274795eSPeter Zijlstra 			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
111e274795eSPeter Zijlstra 				break;
112e274795eSPeter Zijlstra 
113e274795eSPeter Zijlstra 			flags &= ~MUTEX_FLAG_PICKUP;
114e274795eSPeter Zijlstra 		} else {
115e274795eSPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
116e274795eSPeter Zijlstra 			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
117e274795eSPeter Zijlstra #endif
1189d659ae1SPeter Zijlstra 		}
1193ca0ff57SPeter Zijlstra 
1209d659ae1SPeter Zijlstra 		/*
1219d659ae1SPeter Zijlstra 		 * We set the HANDOFF bit, we must make sure it doesn't live
1229d659ae1SPeter Zijlstra 		 * past the point where we acquire it. This would be possible
1239d659ae1SPeter Zijlstra 		 * if we (accidentally) set the bit on an unlocked mutex.
1249d659ae1SPeter Zijlstra 		 */
1259d659ae1SPeter Zijlstra 		flags &= ~MUTEX_FLAG_HANDOFF;
1269d659ae1SPeter Zijlstra 
127ab4e4d9fSPeter Zijlstra 		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, curr | flags))
128e274795eSPeter Zijlstra 			return NULL;
1293ca0ff57SPeter Zijlstra 	}
130e274795eSPeter Zijlstra 
131e274795eSPeter Zijlstra 	return __owner_task(owner);
132e274795eSPeter Zijlstra }
133e274795eSPeter Zijlstra 
134e274795eSPeter Zijlstra /*
135e274795eSPeter Zijlstra  * Actual trylock that will work on any unlocked state.
136e274795eSPeter Zijlstra  */
137e274795eSPeter Zijlstra static inline bool __mutex_trylock(struct mutex *lock)
138e274795eSPeter Zijlstra {
139e274795eSPeter Zijlstra 	return !__mutex_trylock_or_owner(lock);
1403ca0ff57SPeter Zijlstra }
1413ca0ff57SPeter Zijlstra 
1423ca0ff57SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
1433ca0ff57SPeter Zijlstra /*
1443ca0ff57SPeter Zijlstra  * Lockdep annotations are contained to the slow paths for simplicity.
1453ca0ff57SPeter Zijlstra  * There is nothing that would stop spreading the lockdep annotations outwards
1463ca0ff57SPeter Zijlstra  * except more code.
1473ca0ff57SPeter Zijlstra  */
1483ca0ff57SPeter Zijlstra 
1493ca0ff57SPeter Zijlstra /*
1503ca0ff57SPeter Zijlstra  * Optimistic trylock that only works in the uncontended case. Make sure to
1513ca0ff57SPeter Zijlstra  * follow with a __mutex_trylock() before failing.
1523ca0ff57SPeter Zijlstra  */
1533ca0ff57SPeter Zijlstra static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
1543ca0ff57SPeter Zijlstra {
1553ca0ff57SPeter Zijlstra 	unsigned long curr = (unsigned long)current;
156c427f695SPeter Zijlstra 	unsigned long zero = 0UL;
1573ca0ff57SPeter Zijlstra 
158c427f695SPeter Zijlstra 	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
1593ca0ff57SPeter Zijlstra 		return true;
1603ca0ff57SPeter Zijlstra 
1613ca0ff57SPeter Zijlstra 	return false;
1623ca0ff57SPeter Zijlstra }
1633ca0ff57SPeter Zijlstra 
1643ca0ff57SPeter Zijlstra static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
1653ca0ff57SPeter Zijlstra {
1663ca0ff57SPeter Zijlstra 	unsigned long curr = (unsigned long)current;
1673ca0ff57SPeter Zijlstra 
168ab4e4d9fSPeter Zijlstra 	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
1693ca0ff57SPeter Zijlstra }
1703ca0ff57SPeter Zijlstra #endif
1713ca0ff57SPeter Zijlstra 
1723ca0ff57SPeter Zijlstra static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
1733ca0ff57SPeter Zijlstra {
1743ca0ff57SPeter Zijlstra 	atomic_long_or(flag, &lock->owner);
1753ca0ff57SPeter Zijlstra }
1763ca0ff57SPeter Zijlstra 
1773ca0ff57SPeter Zijlstra static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
1783ca0ff57SPeter Zijlstra {
1793ca0ff57SPeter Zijlstra 	atomic_long_andnot(flag, &lock->owner);
1803ca0ff57SPeter Zijlstra }
1813ca0ff57SPeter Zijlstra 
1829d659ae1SPeter Zijlstra static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
1839d659ae1SPeter Zijlstra {
1849d659ae1SPeter Zijlstra 	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
1859d659ae1SPeter Zijlstra }
1869d659ae1SPeter Zijlstra 
1879d659ae1SPeter Zijlstra /*
18808295b3bSThomas Hellstrom  * Add @waiter to a given location in the lock wait_list and set the
18908295b3bSThomas Hellstrom  * FLAG_WAITERS flag if it's the first waiter.
19008295b3bSThomas Hellstrom  */
19108295b3bSThomas Hellstrom static void __sched
19208295b3bSThomas Hellstrom __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
19308295b3bSThomas Hellstrom 		   struct list_head *list)
19408295b3bSThomas Hellstrom {
19508295b3bSThomas Hellstrom 	debug_mutex_add_waiter(lock, waiter, current);
19608295b3bSThomas Hellstrom 
19708295b3bSThomas Hellstrom 	list_add_tail(&waiter->list, list);
19808295b3bSThomas Hellstrom 	if (__mutex_waiter_is_first(lock, waiter))
19908295b3bSThomas Hellstrom 		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
20008295b3bSThomas Hellstrom }
20108295b3bSThomas Hellstrom 
20208295b3bSThomas Hellstrom /*
2039d659ae1SPeter Zijlstra  * Give up ownership to a specific task, when @task = NULL, this is equivalent
204e2db7592SIngo Molnar  * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
205e274795eSPeter Zijlstra  * WAITERS. Provides RELEASE semantics like a regular unlock, the
206e274795eSPeter Zijlstra  * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
2079d659ae1SPeter Zijlstra  */
2089d659ae1SPeter Zijlstra static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
2099d659ae1SPeter Zijlstra {
2109d659ae1SPeter Zijlstra 	unsigned long owner = atomic_long_read(&lock->owner);
2119d659ae1SPeter Zijlstra 
2129d659ae1SPeter Zijlstra 	for (;;) {
213ab4e4d9fSPeter Zijlstra 		unsigned long new;
2149d659ae1SPeter Zijlstra 
2159d659ae1SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
2169d659ae1SPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
217e274795eSPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
2189d659ae1SPeter Zijlstra #endif
2199d659ae1SPeter Zijlstra 
2209d659ae1SPeter Zijlstra 		new = (owner & MUTEX_FLAG_WAITERS);
2219d659ae1SPeter Zijlstra 		new |= (unsigned long)task;
222e274795eSPeter Zijlstra 		if (task)
223e274795eSPeter Zijlstra 			new |= MUTEX_FLAG_PICKUP;
2249d659ae1SPeter Zijlstra 
225ab4e4d9fSPeter Zijlstra 		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
2269d659ae1SPeter Zijlstra 			break;
2279d659ae1SPeter Zijlstra 	}
2289d659ae1SPeter Zijlstra }
2299d659ae1SPeter Zijlstra 
23001768b42SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
23101768b42SPeter Zijlstra /*
23201768b42SPeter Zijlstra  * We split the mutex lock/unlock logic into separate fastpath and
23301768b42SPeter Zijlstra  * slowpath functions, to reduce the register pressure on the fastpath.
23401768b42SPeter Zijlstra  * We also put the fastpath first in the kernel image, to make sure the
23501768b42SPeter Zijlstra  * branch is predicted by the CPU as default-untaken.
23601768b42SPeter Zijlstra  */
2373ca0ff57SPeter Zijlstra static void __sched __mutex_lock_slowpath(struct mutex *lock);
23801768b42SPeter Zijlstra 
23901768b42SPeter Zijlstra /**
24001768b42SPeter Zijlstra  * mutex_lock - acquire the mutex
24101768b42SPeter Zijlstra  * @lock: the mutex to be acquired
24201768b42SPeter Zijlstra  *
24301768b42SPeter Zijlstra  * Lock the mutex exclusively for this task. If the mutex is not
24401768b42SPeter Zijlstra  * available right now, it will sleep until it can get it.
24501768b42SPeter Zijlstra  *
24601768b42SPeter Zijlstra  * The mutex must later on be released by the same task that
24701768b42SPeter Zijlstra  * acquired it. Recursive locking is not allowed. The task
24801768b42SPeter Zijlstra  * may not exit without first unlocking the mutex. Also, kernel
249139b6fd2SSharon Dvir  * memory where the mutex resides must not be freed with
25001768b42SPeter Zijlstra  * the mutex still locked. The mutex must first be initialized
25101768b42SPeter Zijlstra  * (or statically defined) before it can be locked. memset()-ing
25201768b42SPeter Zijlstra  * the mutex to 0 is not allowed.
25301768b42SPeter Zijlstra  *
25401768b42SPeter Zijlstra  * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
25501768b42SPeter Zijlstra  * checks that will enforce the restrictions and will also do
2567b4ff1adSMauro Carvalho Chehab  * deadlock debugging)
25701768b42SPeter Zijlstra  *
25801768b42SPeter Zijlstra  * This function is similar to (but not equivalent to) down().
25901768b42SPeter Zijlstra  */
26001768b42SPeter Zijlstra void __sched mutex_lock(struct mutex *lock)
26101768b42SPeter Zijlstra {
26201768b42SPeter Zijlstra 	might_sleep();
26301768b42SPeter Zijlstra 
2643ca0ff57SPeter Zijlstra 	if (!__mutex_trylock_fast(lock))
2653ca0ff57SPeter Zijlstra 		__mutex_lock_slowpath(lock);
2663ca0ff57SPeter Zijlstra }
26701768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_lock);
26801768b42SPeter Zijlstra #endif
26901768b42SPeter Zijlstra 
27055f036caSPeter Ziljstra /*
27155f036caSPeter Ziljstra  * Wait-Die:
27255f036caSPeter Ziljstra  *   The newer transactions are killed when:
27355f036caSPeter Ziljstra  *     It (the new transaction) makes a request for a lock being held
27455f036caSPeter Ziljstra  *     by an older transaction.
27508295b3bSThomas Hellstrom  *
27608295b3bSThomas Hellstrom  * Wound-Wait:
27708295b3bSThomas Hellstrom  *   The newer transactions are wounded when:
27808295b3bSThomas Hellstrom  *     An older transaction makes a request for a lock being held by
27908295b3bSThomas Hellstrom  *     the newer transaction.
28055f036caSPeter Ziljstra  */
28155f036caSPeter Ziljstra 
28255f036caSPeter Ziljstra /*
28355f036caSPeter Ziljstra  * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
28455f036caSPeter Ziljstra  * it.
28555f036caSPeter Ziljstra  */
286427b1820SPeter Zijlstra static __always_inline void
287427b1820SPeter Zijlstra ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
28876916515SDavidlohr Bueso {
28976916515SDavidlohr Bueso #ifdef CONFIG_DEBUG_MUTEXES
29076916515SDavidlohr Bueso 	/*
29176916515SDavidlohr Bueso 	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
29276916515SDavidlohr Bueso 	 * but released with a normal mutex_unlock in this call.
29376916515SDavidlohr Bueso 	 *
29476916515SDavidlohr Bueso 	 * This should never happen, always use ww_mutex_unlock.
29576916515SDavidlohr Bueso 	 */
29676916515SDavidlohr Bueso 	DEBUG_LOCKS_WARN_ON(ww->ctx);
29776916515SDavidlohr Bueso 
29876916515SDavidlohr Bueso 	/*
29976916515SDavidlohr Bueso 	 * Not quite done after calling ww_acquire_done() ?
30076916515SDavidlohr Bueso 	 */
30176916515SDavidlohr Bueso 	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
30276916515SDavidlohr Bueso 
30376916515SDavidlohr Bueso 	if (ww_ctx->contending_lock) {
30476916515SDavidlohr Bueso 		/*
30576916515SDavidlohr Bueso 		 * After -EDEADLK you tried to
30676916515SDavidlohr Bueso 		 * acquire a different ww_mutex? Bad!
30776916515SDavidlohr Bueso 		 */
30876916515SDavidlohr Bueso 		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
30976916515SDavidlohr Bueso 
31076916515SDavidlohr Bueso 		/*
31176916515SDavidlohr Bueso 		 * You called ww_mutex_lock after receiving -EDEADLK,
31276916515SDavidlohr Bueso 		 * but 'forgot' to unlock everything else first?
31376916515SDavidlohr Bueso 		 */
31476916515SDavidlohr Bueso 		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
31576916515SDavidlohr Bueso 		ww_ctx->contending_lock = NULL;
31676916515SDavidlohr Bueso 	}
31776916515SDavidlohr Bueso 
31876916515SDavidlohr Bueso 	/*
31976916515SDavidlohr Bueso 	 * Naughty, using a different class will lead to undefined behavior!
32076916515SDavidlohr Bueso 	 */
32176916515SDavidlohr Bueso 	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
32276916515SDavidlohr Bueso #endif
32376916515SDavidlohr Bueso 	ww_ctx->acquired++;
32455f036caSPeter Ziljstra 	ww->ctx = ww_ctx;
3253822da3eSNicolai Hähnle }
3263822da3eSNicolai Hähnle 
32776916515SDavidlohr Bueso /*
32855f036caSPeter Ziljstra  * Determine if context @a is 'after' context @b. IOW, @a is a younger
32955f036caSPeter Ziljstra  * transaction than @b and depending on algorithm either needs to wait for
33055f036caSPeter Ziljstra  * @b or die.
33155f036caSPeter Ziljstra  */
33255f036caSPeter Ziljstra static inline bool __sched
33355f036caSPeter Ziljstra __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
33455f036caSPeter Ziljstra {
33555f036caSPeter Ziljstra 
33655f036caSPeter Ziljstra 	return (signed long)(a->stamp - b->stamp) > 0;
33755f036caSPeter Ziljstra }
33855f036caSPeter Ziljstra 
33955f036caSPeter Ziljstra /*
34055f036caSPeter Ziljstra  * Wait-Die; wake a younger waiter context (when locks held) such that it can
34155f036caSPeter Ziljstra  * die.
342659cf9f5SNicolai Hähnle  *
34355f036caSPeter Ziljstra  * Among waiters with context, only the first one can have other locks acquired
34455f036caSPeter Ziljstra  * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
34555f036caSPeter Ziljstra  * __ww_mutex_check_kill() wake any but the earliest context.
34655f036caSPeter Ziljstra  */
34755f036caSPeter Ziljstra static bool __sched
34855f036caSPeter Ziljstra __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
34955f036caSPeter Ziljstra 	       struct ww_acquire_ctx *ww_ctx)
35055f036caSPeter Ziljstra {
35108295b3bSThomas Hellstrom 	if (!ww_ctx->is_wait_die)
35208295b3bSThomas Hellstrom 		return false;
35308295b3bSThomas Hellstrom 
35455f036caSPeter Ziljstra 	if (waiter->ww_ctx->acquired > 0 &&
35555f036caSPeter Ziljstra 			__ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
35655f036caSPeter Ziljstra 		debug_mutex_wake_waiter(lock, waiter);
35755f036caSPeter Ziljstra 		wake_up_process(waiter->task);
35855f036caSPeter Ziljstra 	}
35955f036caSPeter Ziljstra 
36055f036caSPeter Ziljstra 	return true;
36155f036caSPeter Ziljstra }
36255f036caSPeter Ziljstra 
36355f036caSPeter Ziljstra /*
36408295b3bSThomas Hellstrom  * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
36508295b3bSThomas Hellstrom  *
36608295b3bSThomas Hellstrom  * Wound the lock holder if there are waiters with older transactions than
36708295b3bSThomas Hellstrom  * the lock holders. Even if multiple waiters may wound the lock holder,
36808295b3bSThomas Hellstrom  * it's sufficient that only one does.
36908295b3bSThomas Hellstrom  */
37008295b3bSThomas Hellstrom static bool __ww_mutex_wound(struct mutex *lock,
37108295b3bSThomas Hellstrom 			     struct ww_acquire_ctx *ww_ctx,
37208295b3bSThomas Hellstrom 			     struct ww_acquire_ctx *hold_ctx)
37308295b3bSThomas Hellstrom {
37408295b3bSThomas Hellstrom 	struct task_struct *owner = __mutex_owner(lock);
37508295b3bSThomas Hellstrom 
37608295b3bSThomas Hellstrom 	lockdep_assert_held(&lock->wait_lock);
37708295b3bSThomas Hellstrom 
37808295b3bSThomas Hellstrom 	/*
37908295b3bSThomas Hellstrom 	 * Possible through __ww_mutex_add_waiter() when we race with
38008295b3bSThomas Hellstrom 	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
38108295b3bSThomas Hellstrom 	 * through __ww_mutex_check_waiters().
38208295b3bSThomas Hellstrom 	 */
38308295b3bSThomas Hellstrom 	if (!hold_ctx)
38408295b3bSThomas Hellstrom 		return false;
38508295b3bSThomas Hellstrom 
38608295b3bSThomas Hellstrom 	/*
38708295b3bSThomas Hellstrom 	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
38808295b3bSThomas Hellstrom 	 * it cannot go away because we'll have FLAG_WAITERS set and hold
38908295b3bSThomas Hellstrom 	 * wait_lock.
39008295b3bSThomas Hellstrom 	 */
39108295b3bSThomas Hellstrom 	if (!owner)
39208295b3bSThomas Hellstrom 		return false;
39308295b3bSThomas Hellstrom 
39408295b3bSThomas Hellstrom 	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
39508295b3bSThomas Hellstrom 		hold_ctx->wounded = 1;
39608295b3bSThomas Hellstrom 
39708295b3bSThomas Hellstrom 		/*
39808295b3bSThomas Hellstrom 		 * wake_up_process() paired with set_current_state()
39908295b3bSThomas Hellstrom 		 * inserts sufficient barriers to make sure @owner either sees
400e13e2366SThomas Hellstrom 		 * it's wounded in __ww_mutex_check_kill() or has a
40108295b3bSThomas Hellstrom 		 * wakeup pending to re-read the wounded state.
40208295b3bSThomas Hellstrom 		 */
40308295b3bSThomas Hellstrom 		if (owner != current)
40408295b3bSThomas Hellstrom 			wake_up_process(owner);
40508295b3bSThomas Hellstrom 
40608295b3bSThomas Hellstrom 		return true;
40708295b3bSThomas Hellstrom 	}
40808295b3bSThomas Hellstrom 
40908295b3bSThomas Hellstrom 	return false;
41008295b3bSThomas Hellstrom }
41108295b3bSThomas Hellstrom 
41208295b3bSThomas Hellstrom /*
41355f036caSPeter Ziljstra  * We just acquired @lock under @ww_ctx, if there are later contexts waiting
41408295b3bSThomas Hellstrom  * behind us on the wait-list, check if they need to die, or wound us.
41555f036caSPeter Ziljstra  *
41655f036caSPeter Ziljstra  * See __ww_mutex_add_waiter() for the list-order construction; basically the
41755f036caSPeter Ziljstra  * list is ordered by stamp, smallest (oldest) first.
418659cf9f5SNicolai Hähnle  *
41908295b3bSThomas Hellstrom  * This relies on never mixing wait-die/wound-wait on the same wait-list;
42008295b3bSThomas Hellstrom  * which is currently ensured by that being a ww_class property.
42108295b3bSThomas Hellstrom  *
422659cf9f5SNicolai Hähnle  * The current task must not be on the wait list.
423659cf9f5SNicolai Hähnle  */
424659cf9f5SNicolai Hähnle static void __sched
42555f036caSPeter Ziljstra __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
426659cf9f5SNicolai Hähnle {
427659cf9f5SNicolai Hähnle 	struct mutex_waiter *cur;
428659cf9f5SNicolai Hähnle 
429659cf9f5SNicolai Hähnle 	lockdep_assert_held(&lock->wait_lock);
430659cf9f5SNicolai Hähnle 
431659cf9f5SNicolai Hähnle 	list_for_each_entry(cur, &lock->wait_list, list) {
432659cf9f5SNicolai Hähnle 		if (!cur->ww_ctx)
433659cf9f5SNicolai Hähnle 			continue;
434659cf9f5SNicolai Hähnle 
43508295b3bSThomas Hellstrom 		if (__ww_mutex_die(lock, cur, ww_ctx) ||
43608295b3bSThomas Hellstrom 		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
437659cf9f5SNicolai Hähnle 			break;
438659cf9f5SNicolai Hähnle 	}
439659cf9f5SNicolai Hähnle }
440659cf9f5SNicolai Hähnle 
44176916515SDavidlohr Bueso /*
44255f036caSPeter Ziljstra  * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
44355f036caSPeter Ziljstra  * and wake up any waiters so they can recheck.
44476916515SDavidlohr Bueso  */
44576916515SDavidlohr Bueso static __always_inline void
446427b1820SPeter Zijlstra ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
44776916515SDavidlohr Bueso {
44876916515SDavidlohr Bueso 	ww_mutex_lock_acquired(lock, ctx);
44976916515SDavidlohr Bueso 
45076916515SDavidlohr Bueso 	/*
45176916515SDavidlohr Bueso 	 * The lock->ctx update should be visible on all cores before
45255f036caSPeter Ziljstra 	 * the WAITERS check is done, otherwise contended waiters might be
45376916515SDavidlohr Bueso 	 * missed. The contended waiters will either see ww_ctx == NULL
45476916515SDavidlohr Bueso 	 * and keep spinning, or it will acquire wait_lock, add itself
45576916515SDavidlohr Bueso 	 * to waiter list and sleep.
45676916515SDavidlohr Bueso 	 */
45708295b3bSThomas Hellstrom 	smp_mb(); /* See comments above and below. */
45876916515SDavidlohr Bueso 
45976916515SDavidlohr Bueso 	/*
46008295b3bSThomas Hellstrom 	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
46108295b3bSThomas Hellstrom 	 *     MB		        MB
46208295b3bSThomas Hellstrom 	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
46308295b3bSThomas Hellstrom 	 *
46408295b3bSThomas Hellstrom 	 * The memory barrier above pairs with the memory barrier in
46508295b3bSThomas Hellstrom 	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
46608295b3bSThomas Hellstrom 	 * and/or !empty list.
46776916515SDavidlohr Bueso 	 */
4683ca0ff57SPeter Zijlstra 	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
46976916515SDavidlohr Bueso 		return;
47076916515SDavidlohr Bueso 
47176916515SDavidlohr Bueso 	/*
47255f036caSPeter Ziljstra 	 * Uh oh, we raced in fastpath, check if any of the waiters need to
47308295b3bSThomas Hellstrom 	 * die or wound us.
47476916515SDavidlohr Bueso 	 */
475b9c16a0eSPeter Zijlstra 	spin_lock(&lock->base.wait_lock);
47655f036caSPeter Ziljstra 	__ww_mutex_check_waiters(&lock->base, ctx);
477b9c16a0eSPeter Zijlstra 	spin_unlock(&lock->base.wait_lock);
47876916515SDavidlohr Bueso }
47976916515SDavidlohr Bueso 
48001768b42SPeter Zijlstra #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
481c516df97SNicolai Hähnle 
482c516df97SNicolai Hähnle static inline
483c516df97SNicolai Hähnle bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
484c516df97SNicolai Hähnle 			    struct mutex_waiter *waiter)
485c516df97SNicolai Hähnle {
486c516df97SNicolai Hähnle 	struct ww_mutex *ww;
487c516df97SNicolai Hähnle 
488c516df97SNicolai Hähnle 	ww = container_of(lock, struct ww_mutex, base);
489c516df97SNicolai Hähnle 
49001768b42SPeter Zijlstra 	/*
491c516df97SNicolai Hähnle 	 * If ww->ctx is set the contents are undefined, only
492c516df97SNicolai Hähnle 	 * by acquiring wait_lock there is a guarantee that
493c516df97SNicolai Hähnle 	 * they are not invalid when reading.
494c516df97SNicolai Hähnle 	 *
495c516df97SNicolai Hähnle 	 * As such, when deadlock detection needs to be
496c516df97SNicolai Hähnle 	 * performed the optimistic spinning cannot be done.
497c516df97SNicolai Hähnle 	 *
498c516df97SNicolai Hähnle 	 * Check this in every inner iteration because we may
499c516df97SNicolai Hähnle 	 * be racing against another thread's ww_mutex_lock.
500c516df97SNicolai Hähnle 	 */
501c516df97SNicolai Hähnle 	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
502c516df97SNicolai Hähnle 		return false;
503c516df97SNicolai Hähnle 
504c516df97SNicolai Hähnle 	/*
505c516df97SNicolai Hähnle 	 * If we aren't on the wait list yet, cancel the spin
506c516df97SNicolai Hähnle 	 * if there are waiters. We want  to avoid stealing the
507c516df97SNicolai Hähnle 	 * lock from a waiter with an earlier stamp, since the
508c516df97SNicolai Hähnle 	 * other thread may already own a lock that we also
509c516df97SNicolai Hähnle 	 * need.
510c516df97SNicolai Hähnle 	 */
511c516df97SNicolai Hähnle 	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
512c516df97SNicolai Hähnle 		return false;
513c516df97SNicolai Hähnle 
514c516df97SNicolai Hähnle 	/*
515c516df97SNicolai Hähnle 	 * Similarly, stop spinning if we are no longer the
516c516df97SNicolai Hähnle 	 * first waiter.
517c516df97SNicolai Hähnle 	 */
518c516df97SNicolai Hähnle 	if (waiter && !__mutex_waiter_is_first(lock, waiter))
519c516df97SNicolai Hähnle 		return false;
520c516df97SNicolai Hähnle 
521c516df97SNicolai Hähnle 	return true;
522c516df97SNicolai Hähnle }
523c516df97SNicolai Hähnle 
52401768b42SPeter Zijlstra /*
52525f13b40SNicolai Hähnle  * Look out! "owner" is an entirely speculative pointer access and not
52625f13b40SNicolai Hähnle  * reliable.
52725f13b40SNicolai Hähnle  *
52825f13b40SNicolai Hähnle  * "noinline" so that this function shows up on perf profiles.
52901768b42SPeter Zijlstra  */
53001768b42SPeter Zijlstra static noinline
53125f13b40SNicolai Hähnle bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
532c516df97SNicolai Hähnle 			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
53301768b42SPeter Zijlstra {
53401ac33c1SJason Low 	bool ret = true;
535be1f7bf2SJason Low 
53601768b42SPeter Zijlstra 	rcu_read_lock();
5373ca0ff57SPeter Zijlstra 	while (__mutex_owner(lock) == owner) {
538be1f7bf2SJason Low 		/*
539be1f7bf2SJason Low 		 * Ensure we emit the owner->on_cpu, dereference _after_
54001ac33c1SJason Low 		 * checking lock->owner still matches owner. If that fails,
54101ac33c1SJason Low 		 * owner might point to freed memory. If it still matches,
542be1f7bf2SJason Low 		 * the rcu_read_lock() ensures the memory stays valid.
543be1f7bf2SJason Low 		 */
544be1f7bf2SJason Low 		barrier();
545be1f7bf2SJason Low 
54605ffc951SPan Xinhui 		/*
54705ffc951SPan Xinhui 		 * Use vcpu_is_preempted to detect lock holder preemption issue.
54805ffc951SPan Xinhui 		 */
54905ffc951SPan Xinhui 		if (!owner->on_cpu || need_resched() ||
55005ffc951SPan Xinhui 				vcpu_is_preempted(task_cpu(owner))) {
551be1f7bf2SJason Low 			ret = false;
552be1f7bf2SJason Low 			break;
553be1f7bf2SJason Low 		}
55401768b42SPeter Zijlstra 
555c516df97SNicolai Hähnle 		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
55625f13b40SNicolai Hähnle 			ret = false;
55725f13b40SNicolai Hähnle 			break;
55825f13b40SNicolai Hähnle 		}
55925f13b40SNicolai Hähnle 
560f2f09a4cSChristian Borntraeger 		cpu_relax();
56101768b42SPeter Zijlstra 	}
56201768b42SPeter Zijlstra 	rcu_read_unlock();
56301768b42SPeter Zijlstra 
564be1f7bf2SJason Low 	return ret;
56501768b42SPeter Zijlstra }
56601768b42SPeter Zijlstra 
56701768b42SPeter Zijlstra /*
56801768b42SPeter Zijlstra  * Initial check for entering the mutex spinning loop
56901768b42SPeter Zijlstra  */
57001768b42SPeter Zijlstra static inline int mutex_can_spin_on_owner(struct mutex *lock)
57101768b42SPeter Zijlstra {
57201768b42SPeter Zijlstra 	struct task_struct *owner;
57301768b42SPeter Zijlstra 	int retval = 1;
57401768b42SPeter Zijlstra 
57546af29e4SJason Low 	if (need_resched())
57646af29e4SJason Low 		return 0;
57746af29e4SJason Low 
57801768b42SPeter Zijlstra 	rcu_read_lock();
5793ca0ff57SPeter Zijlstra 	owner = __mutex_owner(lock);
58005ffc951SPan Xinhui 
58105ffc951SPan Xinhui 	/*
58205ffc951SPan Xinhui 	 * As lock holder preemption issue, we both skip spinning if task is not
58305ffc951SPan Xinhui 	 * on cpu or its cpu is preempted
58405ffc951SPan Xinhui 	 */
58501768b42SPeter Zijlstra 	if (owner)
58605ffc951SPan Xinhui 		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
58701768b42SPeter Zijlstra 	rcu_read_unlock();
58876916515SDavidlohr Bueso 
58976916515SDavidlohr Bueso 	/*
5903ca0ff57SPeter Zijlstra 	 * If lock->owner is not set, the mutex has been released. Return true
5913ca0ff57SPeter Zijlstra 	 * such that we'll trylock in the spin path, which is a faster option
5923ca0ff57SPeter Zijlstra 	 * than the blocking slow path.
59376916515SDavidlohr Bueso 	 */
5943ca0ff57SPeter Zijlstra 	return retval;
59576916515SDavidlohr Bueso }
59676916515SDavidlohr Bueso 
59776916515SDavidlohr Bueso /*
59876916515SDavidlohr Bueso  * Optimistic spinning.
59976916515SDavidlohr Bueso  *
60076916515SDavidlohr Bueso  * We try to spin for acquisition when we find that the lock owner
60176916515SDavidlohr Bueso  * is currently running on a (different) CPU and while we don't
60276916515SDavidlohr Bueso  * need to reschedule. The rationale is that if the lock owner is
60376916515SDavidlohr Bueso  * running, it is likely to release the lock soon.
60476916515SDavidlohr Bueso  *
60576916515SDavidlohr Bueso  * The mutex spinners are queued up using MCS lock so that only one
60676916515SDavidlohr Bueso  * spinner can compete for the mutex. However, if mutex spinning isn't
60776916515SDavidlohr Bueso  * going to happen, there is no point in going through the lock/unlock
60876916515SDavidlohr Bueso  * overhead.
60976916515SDavidlohr Bueso  *
61076916515SDavidlohr Bueso  * Returns true when the lock was taken, otherwise false, indicating
61176916515SDavidlohr Bueso  * that we need to jump to the slowpath and sleep.
612b341afb3SWaiman Long  *
613b341afb3SWaiman Long  * The waiter flag is set to true if the spinner is a waiter in the wait
614b341afb3SWaiman Long  * queue. The waiter-spinner will spin on the lock directly and concurrently
615b341afb3SWaiman Long  * with the spinner at the head of the OSQ, if present, until the owner is
616b341afb3SWaiman Long  * changed to itself.
61776916515SDavidlohr Bueso  */
618427b1820SPeter Zijlstra static __always_inline bool
619427b1820SPeter Zijlstra mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
6205de2055dSWaiman Long 		      struct mutex_waiter *waiter)
62176916515SDavidlohr Bueso {
622b341afb3SWaiman Long 	if (!waiter) {
623b341afb3SWaiman Long 		/*
624b341afb3SWaiman Long 		 * The purpose of the mutex_can_spin_on_owner() function is
625b341afb3SWaiman Long 		 * to eliminate the overhead of osq_lock() and osq_unlock()
626b341afb3SWaiman Long 		 * in case spinning isn't possible. As a waiter-spinner
627b341afb3SWaiman Long 		 * is not going to take OSQ lock anyway, there is no need
628b341afb3SWaiman Long 		 * to call mutex_can_spin_on_owner().
629b341afb3SWaiman Long 		 */
63076916515SDavidlohr Bueso 		if (!mutex_can_spin_on_owner(lock))
631b341afb3SWaiman Long 			goto fail;
63276916515SDavidlohr Bueso 
633e42f678aSDavidlohr Bueso 		/*
634e42f678aSDavidlohr Bueso 		 * In order to avoid a stampede of mutex spinners trying to
635e42f678aSDavidlohr Bueso 		 * acquire the mutex all at once, the spinners need to take a
636e42f678aSDavidlohr Bueso 		 * MCS (queued) lock first before spinning on the owner field.
637e42f678aSDavidlohr Bueso 		 */
63876916515SDavidlohr Bueso 		if (!osq_lock(&lock->osq))
639b341afb3SWaiman Long 			goto fail;
640b341afb3SWaiman Long 	}
64176916515SDavidlohr Bueso 
642b341afb3SWaiman Long 	for (;;) {
64376916515SDavidlohr Bueso 		struct task_struct *owner;
64476916515SDavidlohr Bueso 
645e274795eSPeter Zijlstra 		/* Try to acquire the mutex... */
646e274795eSPeter Zijlstra 		owner = __mutex_trylock_or_owner(lock);
647e274795eSPeter Zijlstra 		if (!owner)
648e274795eSPeter Zijlstra 			break;
64976916515SDavidlohr Bueso 
65076916515SDavidlohr Bueso 		/*
651e274795eSPeter Zijlstra 		 * There's an owner, wait for it to either
65276916515SDavidlohr Bueso 		 * release the lock or go to sleep.
65376916515SDavidlohr Bueso 		 */
654c516df97SNicolai Hähnle 		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
655b341afb3SWaiman Long 			goto fail_unlock;
65676916515SDavidlohr Bueso 
65776916515SDavidlohr Bueso 		/*
65876916515SDavidlohr Bueso 		 * The cpu_relax() call is a compiler barrier which forces
65976916515SDavidlohr Bueso 		 * everything in this loop to be re-loaded. We don't need
66076916515SDavidlohr Bueso 		 * memory barriers as we'll eventually observe the right
66176916515SDavidlohr Bueso 		 * values at the cost of a few extra spins.
66276916515SDavidlohr Bueso 		 */
663f2f09a4cSChristian Borntraeger 		cpu_relax();
66476916515SDavidlohr Bueso 	}
66576916515SDavidlohr Bueso 
666b341afb3SWaiman Long 	if (!waiter)
66776916515SDavidlohr Bueso 		osq_unlock(&lock->osq);
668b341afb3SWaiman Long 
669b341afb3SWaiman Long 	return true;
670b341afb3SWaiman Long 
671b341afb3SWaiman Long 
672b341afb3SWaiman Long fail_unlock:
673b341afb3SWaiman Long 	if (!waiter)
674b341afb3SWaiman Long 		osq_unlock(&lock->osq);
675b341afb3SWaiman Long 
676b341afb3SWaiman Long fail:
67776916515SDavidlohr Bueso 	/*
67876916515SDavidlohr Bueso 	 * If we fell out of the spin path because of need_resched(),
67976916515SDavidlohr Bueso 	 * reschedule now, before we try-lock the mutex. This avoids getting
68076916515SDavidlohr Bueso 	 * scheduled out right after we obtained the mutex.
68176916515SDavidlohr Bueso 	 */
6826f942a1fSPeter Zijlstra 	if (need_resched()) {
6836f942a1fSPeter Zijlstra 		/*
6846f942a1fSPeter Zijlstra 		 * We _should_ have TASK_RUNNING here, but just in case
6856f942a1fSPeter Zijlstra 		 * we do not, make it so, otherwise we might get stuck.
6866f942a1fSPeter Zijlstra 		 */
6876f942a1fSPeter Zijlstra 		__set_current_state(TASK_RUNNING);
68876916515SDavidlohr Bueso 		schedule_preempt_disabled();
6896f942a1fSPeter Zijlstra 	}
69076916515SDavidlohr Bueso 
69176916515SDavidlohr Bueso 	return false;
69276916515SDavidlohr Bueso }
69376916515SDavidlohr Bueso #else
694427b1820SPeter Zijlstra static __always_inline bool
695427b1820SPeter Zijlstra mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
6965de2055dSWaiman Long 		      struct mutex_waiter *waiter)
69776916515SDavidlohr Bueso {
69876916515SDavidlohr Bueso 	return false;
69976916515SDavidlohr Bueso }
70001768b42SPeter Zijlstra #endif
70101768b42SPeter Zijlstra 
7023ca0ff57SPeter Zijlstra static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
70301768b42SPeter Zijlstra 
70401768b42SPeter Zijlstra /**
70501768b42SPeter Zijlstra  * mutex_unlock - release the mutex
70601768b42SPeter Zijlstra  * @lock: the mutex to be released
70701768b42SPeter Zijlstra  *
70801768b42SPeter Zijlstra  * Unlock a mutex that has been locked by this task previously.
70901768b42SPeter Zijlstra  *
71001768b42SPeter Zijlstra  * This function must not be used in interrupt context. Unlocking
71101768b42SPeter Zijlstra  * of a not locked mutex is not allowed.
71201768b42SPeter Zijlstra  *
71301768b42SPeter Zijlstra  * This function is similar to (but not equivalent to) up().
71401768b42SPeter Zijlstra  */
71501768b42SPeter Zijlstra void __sched mutex_unlock(struct mutex *lock)
71601768b42SPeter Zijlstra {
7173ca0ff57SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
7183ca0ff57SPeter Zijlstra 	if (__mutex_unlock_fast(lock))
7193ca0ff57SPeter Zijlstra 		return;
72001768b42SPeter Zijlstra #endif
7213ca0ff57SPeter Zijlstra 	__mutex_unlock_slowpath(lock, _RET_IP_);
72201768b42SPeter Zijlstra }
72301768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_unlock);
72401768b42SPeter Zijlstra 
72501768b42SPeter Zijlstra /**
72601768b42SPeter Zijlstra  * ww_mutex_unlock - release the w/w mutex
72701768b42SPeter Zijlstra  * @lock: the mutex to be released
72801768b42SPeter Zijlstra  *
72901768b42SPeter Zijlstra  * Unlock a mutex that has been locked by this task previously with any of the
73001768b42SPeter Zijlstra  * ww_mutex_lock* functions (with or without an acquire context). It is
73101768b42SPeter Zijlstra  * forbidden to release the locks after releasing the acquire context.
73201768b42SPeter Zijlstra  *
73301768b42SPeter Zijlstra  * This function must not be used in interrupt context. Unlocking
73401768b42SPeter Zijlstra  * of a unlocked mutex is not allowed.
73501768b42SPeter Zijlstra  */
73601768b42SPeter Zijlstra void __sched ww_mutex_unlock(struct ww_mutex *lock)
73701768b42SPeter Zijlstra {
73801768b42SPeter Zijlstra 	/*
73901768b42SPeter Zijlstra 	 * The unlocking fastpath is the 0->1 transition from 'locked'
74001768b42SPeter Zijlstra 	 * into 'unlocked' state:
74101768b42SPeter Zijlstra 	 */
74201768b42SPeter Zijlstra 	if (lock->ctx) {
74301768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
74401768b42SPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
74501768b42SPeter Zijlstra #endif
74601768b42SPeter Zijlstra 		if (lock->ctx->acquired > 0)
74701768b42SPeter Zijlstra 			lock->ctx->acquired--;
74801768b42SPeter Zijlstra 		lock->ctx = NULL;
74901768b42SPeter Zijlstra 	}
75001768b42SPeter Zijlstra 
7513ca0ff57SPeter Zijlstra 	mutex_unlock(&lock->base);
75201768b42SPeter Zijlstra }
75301768b42SPeter Zijlstra EXPORT_SYMBOL(ww_mutex_unlock);
75401768b42SPeter Zijlstra 
75555f036caSPeter Ziljstra 
75655f036caSPeter Ziljstra static __always_inline int __sched
75755f036caSPeter Ziljstra __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
75855f036caSPeter Ziljstra {
75955f036caSPeter Ziljstra 	if (ww_ctx->acquired > 0) {
76055f036caSPeter Ziljstra #ifdef CONFIG_DEBUG_MUTEXES
76155f036caSPeter Ziljstra 		struct ww_mutex *ww;
76255f036caSPeter Ziljstra 
76355f036caSPeter Ziljstra 		ww = container_of(lock, struct ww_mutex, base);
76455f036caSPeter Ziljstra 		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
76555f036caSPeter Ziljstra 		ww_ctx->contending_lock = ww;
76655f036caSPeter Ziljstra #endif
76755f036caSPeter Ziljstra 		return -EDEADLK;
76855f036caSPeter Ziljstra 	}
76955f036caSPeter Ziljstra 
77055f036caSPeter Ziljstra 	return 0;
77155f036caSPeter Ziljstra }
77255f036caSPeter Ziljstra 
77355f036caSPeter Ziljstra 
77455f036caSPeter Ziljstra /*
77508295b3bSThomas Hellstrom  * Check the wound condition for the current lock acquire.
77608295b3bSThomas Hellstrom  *
77708295b3bSThomas Hellstrom  * Wound-Wait: If we're wounded, kill ourself.
77855f036caSPeter Ziljstra  *
77955f036caSPeter Ziljstra  * Wait-Die: If we're trying to acquire a lock already held by an older
78055f036caSPeter Ziljstra  *           context, kill ourselves.
78155f036caSPeter Ziljstra  *
78255f036caSPeter Ziljstra  * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
78355f036caSPeter Ziljstra  * look at waiters before us in the wait-list.
78455f036caSPeter Ziljstra  */
78501768b42SPeter Zijlstra static inline int __sched
78655f036caSPeter Ziljstra __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
787200b1874SNicolai Hähnle 		      struct ww_acquire_ctx *ctx)
78801768b42SPeter Zijlstra {
78901768b42SPeter Zijlstra 	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
7904d3199e4SDavidlohr Bueso 	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
791200b1874SNicolai Hähnle 	struct mutex_waiter *cur;
79201768b42SPeter Zijlstra 
79355f036caSPeter Ziljstra 	if (ctx->acquired == 0)
79455f036caSPeter Ziljstra 		return 0;
79555f036caSPeter Ziljstra 
79608295b3bSThomas Hellstrom 	if (!ctx->is_wait_die) {
79708295b3bSThomas Hellstrom 		if (ctx->wounded)
79808295b3bSThomas Hellstrom 			return __ww_mutex_kill(lock, ctx);
79908295b3bSThomas Hellstrom 
80008295b3bSThomas Hellstrom 		return 0;
80108295b3bSThomas Hellstrom 	}
80208295b3bSThomas Hellstrom 
803200b1874SNicolai Hähnle 	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
80455f036caSPeter Ziljstra 		return __ww_mutex_kill(lock, ctx);
805200b1874SNicolai Hähnle 
806200b1874SNicolai Hähnle 	/*
807200b1874SNicolai Hähnle 	 * If there is a waiter in front of us that has a context, then its
80855f036caSPeter Ziljstra 	 * stamp is earlier than ours and we must kill ourself.
809200b1874SNicolai Hähnle 	 */
810200b1874SNicolai Hähnle 	cur = waiter;
811200b1874SNicolai Hähnle 	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
81255f036caSPeter Ziljstra 		if (!cur->ww_ctx)
81355f036caSPeter Ziljstra 			continue;
81455f036caSPeter Ziljstra 
81555f036caSPeter Ziljstra 		return __ww_mutex_kill(lock, ctx);
816200b1874SNicolai Hähnle 	}
817200b1874SNicolai Hähnle 
81801768b42SPeter Zijlstra 	return 0;
81901768b42SPeter Zijlstra }
82001768b42SPeter Zijlstra 
82155f036caSPeter Ziljstra /*
82255f036caSPeter Ziljstra  * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
82355f036caSPeter Ziljstra  * first. Such that older contexts are preferred to acquire the lock over
82455f036caSPeter Ziljstra  * younger contexts.
82555f036caSPeter Ziljstra  *
82655f036caSPeter Ziljstra  * Waiters without context are interspersed in FIFO order.
82755f036caSPeter Ziljstra  *
82855f036caSPeter Ziljstra  * Furthermore, for Wait-Die kill ourself immediately when possible (there are
82908295b3bSThomas Hellstrom  * older contexts already waiting) to avoid unnecessary waiting and for
83008295b3bSThomas Hellstrom  * Wound-Wait ensure we wound the owning context when it is younger.
83155f036caSPeter Ziljstra  */
8326baa5c60SNicolai Hähnle static inline int __sched
8336baa5c60SNicolai Hähnle __ww_mutex_add_waiter(struct mutex_waiter *waiter,
8346baa5c60SNicolai Hähnle 		      struct mutex *lock,
8356baa5c60SNicolai Hähnle 		      struct ww_acquire_ctx *ww_ctx)
8366baa5c60SNicolai Hähnle {
8376baa5c60SNicolai Hähnle 	struct mutex_waiter *cur;
8386baa5c60SNicolai Hähnle 	struct list_head *pos;
83908295b3bSThomas Hellstrom 	bool is_wait_die;
8406baa5c60SNicolai Hähnle 
8416baa5c60SNicolai Hähnle 	if (!ww_ctx) {
84208295b3bSThomas Hellstrom 		__mutex_add_waiter(lock, waiter, &lock->wait_list);
8436baa5c60SNicolai Hähnle 		return 0;
8446baa5c60SNicolai Hähnle 	}
8456baa5c60SNicolai Hähnle 
84608295b3bSThomas Hellstrom 	is_wait_die = ww_ctx->is_wait_die;
84708295b3bSThomas Hellstrom 
8486baa5c60SNicolai Hähnle 	/*
8496baa5c60SNicolai Hähnle 	 * Add the waiter before the first waiter with a higher stamp.
8506baa5c60SNicolai Hähnle 	 * Waiters without a context are skipped to avoid starving
85108295b3bSThomas Hellstrom 	 * them. Wait-Die waiters may die here. Wound-Wait waiters
85208295b3bSThomas Hellstrom 	 * never die here, but they are sorted in stamp order and
85308295b3bSThomas Hellstrom 	 * may wound the lock holder.
8546baa5c60SNicolai Hähnle 	 */
8556baa5c60SNicolai Hähnle 	pos = &lock->wait_list;
8566baa5c60SNicolai Hähnle 	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
8576baa5c60SNicolai Hähnle 		if (!cur->ww_ctx)
8586baa5c60SNicolai Hähnle 			continue;
8596baa5c60SNicolai Hähnle 
8606baa5c60SNicolai Hähnle 		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
86155f036caSPeter Ziljstra 			/*
86255f036caSPeter Ziljstra 			 * Wait-Die: if we find an older context waiting, there
86355f036caSPeter Ziljstra 			 * is no point in queueing behind it, as we'd have to
86455f036caSPeter Ziljstra 			 * die the moment it would acquire the lock.
86555f036caSPeter Ziljstra 			 */
86608295b3bSThomas Hellstrom 			if (is_wait_die) {
86755f036caSPeter Ziljstra 				int ret = __ww_mutex_kill(lock, ww_ctx);
8686baa5c60SNicolai Hähnle 
86955f036caSPeter Ziljstra 				if (ret)
87055f036caSPeter Ziljstra 					return ret;
87108295b3bSThomas Hellstrom 			}
8726baa5c60SNicolai Hähnle 
8736baa5c60SNicolai Hähnle 			break;
8746baa5c60SNicolai Hähnle 		}
8756baa5c60SNicolai Hähnle 
8766baa5c60SNicolai Hähnle 		pos = &cur->list;
877200b1874SNicolai Hähnle 
87855f036caSPeter Ziljstra 		/* Wait-Die: ensure younger waiters die. */
87955f036caSPeter Ziljstra 		__ww_mutex_die(lock, cur, ww_ctx);
8806baa5c60SNicolai Hähnle 	}
8816baa5c60SNicolai Hähnle 
88208295b3bSThomas Hellstrom 	__mutex_add_waiter(lock, waiter, pos);
88308295b3bSThomas Hellstrom 
88408295b3bSThomas Hellstrom 	/*
88508295b3bSThomas Hellstrom 	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
88608295b3bSThomas Hellstrom 	 * wound that such that we might proceed.
88708295b3bSThomas Hellstrom 	 */
88808295b3bSThomas Hellstrom 	if (!is_wait_die) {
88908295b3bSThomas Hellstrom 		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
89008295b3bSThomas Hellstrom 
89108295b3bSThomas Hellstrom 		/*
89208295b3bSThomas Hellstrom 		 * See ww_mutex_set_context_fastpath(). Orders setting
89308295b3bSThomas Hellstrom 		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
89408295b3bSThomas Hellstrom 		 * such that either we or the fastpath will wound @ww->ctx.
89508295b3bSThomas Hellstrom 		 */
89608295b3bSThomas Hellstrom 		smp_mb();
89708295b3bSThomas Hellstrom 		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
89808295b3bSThomas Hellstrom 	}
89955f036caSPeter Ziljstra 
90001768b42SPeter Zijlstra 	return 0;
90101768b42SPeter Zijlstra }
90201768b42SPeter Zijlstra 
90301768b42SPeter Zijlstra /*
90401768b42SPeter Zijlstra  * Lock a mutex (possibly interruptible), slowpath:
90501768b42SPeter Zijlstra  */
90601768b42SPeter Zijlstra static __always_inline int __sched
90701768b42SPeter Zijlstra __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90801768b42SPeter Zijlstra 		    struct lockdep_map *nest_lock, unsigned long ip,
90901768b42SPeter Zijlstra 		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
91001768b42SPeter Zijlstra {
91101768b42SPeter Zijlstra 	struct mutex_waiter waiter;
912a40ca565SWaiman Long 	struct ww_mutex *ww;
91301768b42SPeter Zijlstra 	int ret;
91401768b42SPeter Zijlstra 
9155de2055dSWaiman Long 	if (!use_ww_ctx)
9165de2055dSWaiman Long 		ww_ctx = NULL;
9175de2055dSWaiman Long 
918427b1820SPeter Zijlstra 	might_sleep();
919ea9e0fb8SNicolai Hähnle 
9206c11c6e3SSebastian Andrzej Siewior #ifdef CONFIG_DEBUG_MUTEXES
9216c11c6e3SSebastian Andrzej Siewior 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
9226c11c6e3SSebastian Andrzej Siewior #endif
9236c11c6e3SSebastian Andrzej Siewior 
924a40ca565SWaiman Long 	ww = container_of(lock, struct ww_mutex, base);
9255de2055dSWaiman Long 	if (ww_ctx) {
9260422e83dSChris Wilson 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
9270422e83dSChris Wilson 			return -EALREADY;
92808295b3bSThomas Hellstrom 
92908295b3bSThomas Hellstrom 		/*
93008295b3bSThomas Hellstrom 		 * Reset the wounded flag after a kill. No other process can
93108295b3bSThomas Hellstrom 		 * race and wound us here since they can't have a valid owner
93208295b3bSThomas Hellstrom 		 * pointer if we don't have any locks held.
93308295b3bSThomas Hellstrom 		 */
93408295b3bSThomas Hellstrom 		if (ww_ctx->acquired == 0)
93508295b3bSThomas Hellstrom 			ww_ctx->wounded = 0;
9360422e83dSChris Wilson 	}
9370422e83dSChris Wilson 
93801768b42SPeter Zijlstra 	preempt_disable();
93901768b42SPeter Zijlstra 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
94001768b42SPeter Zijlstra 
941e274795eSPeter Zijlstra 	if (__mutex_trylock(lock) ||
9425de2055dSWaiman Long 	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
94376916515SDavidlohr Bueso 		/* got the lock, yay! */
9443ca0ff57SPeter Zijlstra 		lock_acquired(&lock->dep_map, ip);
9455de2055dSWaiman Long 		if (ww_ctx)
9463ca0ff57SPeter Zijlstra 			ww_mutex_set_context_fastpath(ww, ww_ctx);
94701768b42SPeter Zijlstra 		preempt_enable();
94801768b42SPeter Zijlstra 		return 0;
94901768b42SPeter Zijlstra 	}
95001768b42SPeter Zijlstra 
951b9c16a0eSPeter Zijlstra 	spin_lock(&lock->wait_lock);
9521e820c96SJason Low 	/*
9533ca0ff57SPeter Zijlstra 	 * After waiting to acquire the wait_lock, try again.
9541e820c96SJason Low 	 */
955659cf9f5SNicolai Hähnle 	if (__mutex_trylock(lock)) {
9565de2055dSWaiman Long 		if (ww_ctx)
95755f036caSPeter Ziljstra 			__ww_mutex_check_waiters(lock, ww_ctx);
958659cf9f5SNicolai Hähnle 
95901768b42SPeter Zijlstra 		goto skip_wait;
960659cf9f5SNicolai Hähnle 	}
96101768b42SPeter Zijlstra 
96201768b42SPeter Zijlstra 	debug_mutex_lock_common(lock, &waiter);
96301768b42SPeter Zijlstra 
9646baa5c60SNicolai Hähnle 	lock_contended(&lock->dep_map, ip);
9656baa5c60SNicolai Hähnle 
9666baa5c60SNicolai Hähnle 	if (!use_ww_ctx) {
96701768b42SPeter Zijlstra 		/* add waiting tasks to the end of the waitqueue (FIFO): */
96808295b3bSThomas Hellstrom 		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
96908295b3bSThomas Hellstrom 
970977625a6SNicolai Hähnle 
971977625a6SNicolai Hähnle #ifdef CONFIG_DEBUG_MUTEXES
972977625a6SNicolai Hähnle 		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
973977625a6SNicolai Hähnle #endif
9746baa5c60SNicolai Hähnle 	} else {
97555f036caSPeter Ziljstra 		/*
97655f036caSPeter Ziljstra 		 * Add in stamp order, waking up waiters that must kill
97755f036caSPeter Ziljstra 		 * themselves.
97855f036caSPeter Ziljstra 		 */
9796baa5c60SNicolai Hähnle 		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
9806baa5c60SNicolai Hähnle 		if (ret)
98155f036caSPeter Ziljstra 			goto err_early_kill;
9826baa5c60SNicolai Hähnle 
9836baa5c60SNicolai Hähnle 		waiter.ww_ctx = ww_ctx;
9846baa5c60SNicolai Hähnle 	}
9856baa5c60SNicolai Hähnle 
986d269a8b8SDavidlohr Bueso 	waiter.task = current;
98701768b42SPeter Zijlstra 
988642fa448SDavidlohr Bueso 	set_current_state(state);
98901768b42SPeter Zijlstra 	for (;;) {
990*048661a1SPeter Zijlstra 		bool first;
991*048661a1SPeter Zijlstra 
9925bbd7e64SPeter Zijlstra 		/*
9935bbd7e64SPeter Zijlstra 		 * Once we hold wait_lock, we're serialized against
9945bbd7e64SPeter Zijlstra 		 * mutex_unlock() handing the lock off to us, do a trylock
9955bbd7e64SPeter Zijlstra 		 * before testing the error conditions to make sure we pick up
9965bbd7e64SPeter Zijlstra 		 * the handoff.
9975bbd7e64SPeter Zijlstra 		 */
998e274795eSPeter Zijlstra 		if (__mutex_trylock(lock))
9995bbd7e64SPeter Zijlstra 			goto acquired;
100001768b42SPeter Zijlstra 
100101768b42SPeter Zijlstra 		/*
100255f036caSPeter Ziljstra 		 * Check for signals and kill conditions while holding
10035bbd7e64SPeter Zijlstra 		 * wait_lock. This ensures the lock cancellation is ordered
10045bbd7e64SPeter Zijlstra 		 * against mutex_unlock() and wake-ups do not go missing.
100501768b42SPeter Zijlstra 		 */
10063bb5f4acSDavidlohr Bueso 		if (signal_pending_state(state, current)) {
100701768b42SPeter Zijlstra 			ret = -EINTR;
100801768b42SPeter Zijlstra 			goto err;
100901768b42SPeter Zijlstra 		}
101001768b42SPeter Zijlstra 
10115de2055dSWaiman Long 		if (ww_ctx) {
101255f036caSPeter Ziljstra 			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
101301768b42SPeter Zijlstra 			if (ret)
101401768b42SPeter Zijlstra 				goto err;
101501768b42SPeter Zijlstra 		}
101601768b42SPeter Zijlstra 
1017b9c16a0eSPeter Zijlstra 		spin_unlock(&lock->wait_lock);
101801768b42SPeter Zijlstra 		schedule_preempt_disabled();
10199d659ae1SPeter Zijlstra 
10206baa5c60SNicolai Hähnle 		first = __mutex_waiter_is_first(lock, &waiter);
10216baa5c60SNicolai Hähnle 		if (first)
10229d659ae1SPeter Zijlstra 			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
10235bbd7e64SPeter Zijlstra 
1024642fa448SDavidlohr Bueso 		set_current_state(state);
10255bbd7e64SPeter Zijlstra 		/*
10265bbd7e64SPeter Zijlstra 		 * Here we order against unlock; we must either see it change
10275bbd7e64SPeter Zijlstra 		 * state back to RUNNING and fall through the next schedule(),
10285bbd7e64SPeter Zijlstra 		 * or we must see its unlock and acquire.
10295bbd7e64SPeter Zijlstra 		 */
1030e274795eSPeter Zijlstra 		if (__mutex_trylock(lock) ||
10315de2055dSWaiman Long 		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
10325bbd7e64SPeter Zijlstra 			break;
10335bbd7e64SPeter Zijlstra 
1034b9c16a0eSPeter Zijlstra 		spin_lock(&lock->wait_lock);
103501768b42SPeter Zijlstra 	}
1036b9c16a0eSPeter Zijlstra 	spin_lock(&lock->wait_lock);
10375bbd7e64SPeter Zijlstra acquired:
1038642fa448SDavidlohr Bueso 	__set_current_state(TASK_RUNNING);
103951587bcfSDavidlohr Bueso 
10405de2055dSWaiman Long 	if (ww_ctx) {
104108295b3bSThomas Hellstrom 		/*
104208295b3bSThomas Hellstrom 		 * Wound-Wait; we stole the lock (!first_waiter), check the
104308295b3bSThomas Hellstrom 		 * waiters as anyone might want to wound us.
104408295b3bSThomas Hellstrom 		 */
104508295b3bSThomas Hellstrom 		if (!ww_ctx->is_wait_die &&
104608295b3bSThomas Hellstrom 		    !__mutex_waiter_is_first(lock, &waiter))
104708295b3bSThomas Hellstrom 			__ww_mutex_check_waiters(lock, ww_ctx);
104808295b3bSThomas Hellstrom 	}
104908295b3bSThomas Hellstrom 
1050d269a8b8SDavidlohr Bueso 	mutex_remove_waiter(lock, &waiter, current);
105101768b42SPeter Zijlstra 	if (likely(list_empty(&lock->wait_list)))
10529d659ae1SPeter Zijlstra 		__mutex_clear_flag(lock, MUTEX_FLAGS);
10533ca0ff57SPeter Zijlstra 
105401768b42SPeter Zijlstra 	debug_mutex_free_waiter(&waiter);
105501768b42SPeter Zijlstra 
105601768b42SPeter Zijlstra skip_wait:
105701768b42SPeter Zijlstra 	/* got the lock - cleanup and rejoice! */
105801768b42SPeter Zijlstra 	lock_acquired(&lock->dep_map, ip);
105901768b42SPeter Zijlstra 
10605de2055dSWaiman Long 	if (ww_ctx)
106155f036caSPeter Ziljstra 		ww_mutex_lock_acquired(ww, ww_ctx);
106201768b42SPeter Zijlstra 
1063b9c16a0eSPeter Zijlstra 	spin_unlock(&lock->wait_lock);
106401768b42SPeter Zijlstra 	preempt_enable();
106501768b42SPeter Zijlstra 	return 0;
106601768b42SPeter Zijlstra 
106701768b42SPeter Zijlstra err:
1068642fa448SDavidlohr Bueso 	__set_current_state(TASK_RUNNING);
1069d269a8b8SDavidlohr Bueso 	mutex_remove_waiter(lock, &waiter, current);
107055f036caSPeter Ziljstra err_early_kill:
1071b9c16a0eSPeter Zijlstra 	spin_unlock(&lock->wait_lock);
107201768b42SPeter Zijlstra 	debug_mutex_free_waiter(&waiter);
10735facae4fSQian Cai 	mutex_release(&lock->dep_map, ip);
107401768b42SPeter Zijlstra 	preempt_enable();
107501768b42SPeter Zijlstra 	return ret;
107601768b42SPeter Zijlstra }
107701768b42SPeter Zijlstra 
1078427b1820SPeter Zijlstra static int __sched
1079427b1820SPeter Zijlstra __mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1080427b1820SPeter Zijlstra 	     struct lockdep_map *nest_lock, unsigned long ip)
1081427b1820SPeter Zijlstra {
1082427b1820SPeter Zijlstra 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
1083427b1820SPeter Zijlstra }
1084427b1820SPeter Zijlstra 
1085427b1820SPeter Zijlstra static int __sched
1086427b1820SPeter Zijlstra __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1087427b1820SPeter Zijlstra 		struct lockdep_map *nest_lock, unsigned long ip,
1088427b1820SPeter Zijlstra 		struct ww_acquire_ctx *ww_ctx)
1089427b1820SPeter Zijlstra {
1090427b1820SPeter Zijlstra 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
1091427b1820SPeter Zijlstra }
1092427b1820SPeter Zijlstra 
109301768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_LOCK_ALLOC
109401768b42SPeter Zijlstra void __sched
109501768b42SPeter Zijlstra mutex_lock_nested(struct mutex *lock, unsigned int subclass)
109601768b42SPeter Zijlstra {
1097427b1820SPeter Zijlstra 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
109801768b42SPeter Zijlstra }
109901768b42SPeter Zijlstra 
110001768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(mutex_lock_nested);
110101768b42SPeter Zijlstra 
110201768b42SPeter Zijlstra void __sched
110301768b42SPeter Zijlstra _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
110401768b42SPeter Zijlstra {
1105427b1820SPeter Zijlstra 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
110601768b42SPeter Zijlstra }
110701768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
110801768b42SPeter Zijlstra 
110901768b42SPeter Zijlstra int __sched
111001768b42SPeter Zijlstra mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
111101768b42SPeter Zijlstra {
1112427b1820SPeter Zijlstra 	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
111301768b42SPeter Zijlstra }
111401768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
111501768b42SPeter Zijlstra 
111601768b42SPeter Zijlstra int __sched
111701768b42SPeter Zijlstra mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
111801768b42SPeter Zijlstra {
1119427b1820SPeter Zijlstra 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
112001768b42SPeter Zijlstra }
112101768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
112201768b42SPeter Zijlstra 
11231460cb65STejun Heo void __sched
11241460cb65STejun Heo mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
11251460cb65STejun Heo {
11261460cb65STejun Heo 	int token;
11271460cb65STejun Heo 
11281460cb65STejun Heo 	might_sleep();
11291460cb65STejun Heo 
11301460cb65STejun Heo 	token = io_schedule_prepare();
11311460cb65STejun Heo 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
11321460cb65STejun Heo 			    subclass, NULL, _RET_IP_, NULL, 0);
11331460cb65STejun Heo 	io_schedule_finish(token);
11341460cb65STejun Heo }
11351460cb65STejun Heo EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
11361460cb65STejun Heo 
113701768b42SPeter Zijlstra static inline int
113801768b42SPeter Zijlstra ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
113901768b42SPeter Zijlstra {
114001768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
114101768b42SPeter Zijlstra 	unsigned tmp;
114201768b42SPeter Zijlstra 
114301768b42SPeter Zijlstra 	if (ctx->deadlock_inject_countdown-- == 0) {
114401768b42SPeter Zijlstra 		tmp = ctx->deadlock_inject_interval;
114501768b42SPeter Zijlstra 		if (tmp > UINT_MAX/4)
114601768b42SPeter Zijlstra 			tmp = UINT_MAX;
114701768b42SPeter Zijlstra 		else
114801768b42SPeter Zijlstra 			tmp = tmp*2 + tmp + tmp/2;
114901768b42SPeter Zijlstra 
115001768b42SPeter Zijlstra 		ctx->deadlock_inject_interval = tmp;
115101768b42SPeter Zijlstra 		ctx->deadlock_inject_countdown = tmp;
115201768b42SPeter Zijlstra 		ctx->contending_lock = lock;
115301768b42SPeter Zijlstra 
115401768b42SPeter Zijlstra 		ww_mutex_unlock(lock);
115501768b42SPeter Zijlstra 
115601768b42SPeter Zijlstra 		return -EDEADLK;
115701768b42SPeter Zijlstra 	}
115801768b42SPeter Zijlstra #endif
115901768b42SPeter Zijlstra 
116001768b42SPeter Zijlstra 	return 0;
116101768b42SPeter Zijlstra }
116201768b42SPeter Zijlstra 
116301768b42SPeter Zijlstra int __sched
1164c5470b22SNicolai Hähnle ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
116501768b42SPeter Zijlstra {
116601768b42SPeter Zijlstra 	int ret;
116701768b42SPeter Zijlstra 
116801768b42SPeter Zijlstra 	might_sleep();
1169427b1820SPeter Zijlstra 	ret =  __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
1170ea9e0fb8SNicolai Hähnle 			       0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1171427b1820SPeter Zijlstra 			       ctx);
1172ea9e0fb8SNicolai Hähnle 	if (!ret && ctx && ctx->acquired > 1)
117301768b42SPeter Zijlstra 		return ww_mutex_deadlock_injection(lock, ctx);
117401768b42SPeter Zijlstra 
117501768b42SPeter Zijlstra 	return ret;
117601768b42SPeter Zijlstra }
1177c5470b22SNicolai Hähnle EXPORT_SYMBOL_GPL(ww_mutex_lock);
117801768b42SPeter Zijlstra 
117901768b42SPeter Zijlstra int __sched
1180c5470b22SNicolai Hähnle ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
118101768b42SPeter Zijlstra {
118201768b42SPeter Zijlstra 	int ret;
118301768b42SPeter Zijlstra 
118401768b42SPeter Zijlstra 	might_sleep();
1185427b1820SPeter Zijlstra 	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
1186ea9e0fb8SNicolai Hähnle 			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1187427b1820SPeter Zijlstra 			      ctx);
118801768b42SPeter Zijlstra 
1189ea9e0fb8SNicolai Hähnle 	if (!ret && ctx && ctx->acquired > 1)
119001768b42SPeter Zijlstra 		return ww_mutex_deadlock_injection(lock, ctx);
119101768b42SPeter Zijlstra 
119201768b42SPeter Zijlstra 	return ret;
119301768b42SPeter Zijlstra }
1194c5470b22SNicolai Hähnle EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
119501768b42SPeter Zijlstra 
119601768b42SPeter Zijlstra #endif
119701768b42SPeter Zijlstra 
119801768b42SPeter Zijlstra /*
119901768b42SPeter Zijlstra  * Release the lock, slowpath:
120001768b42SPeter Zijlstra  */
12013ca0ff57SPeter Zijlstra static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
120201768b42SPeter Zijlstra {
12039d659ae1SPeter Zijlstra 	struct task_struct *next = NULL;
1204194a6b5bSWaiman Long 	DEFINE_WAKE_Q(wake_q);
1205b9c16a0eSPeter Zijlstra 	unsigned long owner;
120601768b42SPeter Zijlstra 
12075facae4fSQian Cai 	mutex_release(&lock->dep_map, ip);
12083ca0ff57SPeter Zijlstra 
120901768b42SPeter Zijlstra 	/*
12109d659ae1SPeter Zijlstra 	 * Release the lock before (potentially) taking the spinlock such that
12119d659ae1SPeter Zijlstra 	 * other contenders can get on with things ASAP.
12129d659ae1SPeter Zijlstra 	 *
12139d659ae1SPeter Zijlstra 	 * Except when HANDOFF, in that case we must not clear the owner field,
12149d659ae1SPeter Zijlstra 	 * but instead set it to the top waiter.
121501768b42SPeter Zijlstra 	 */
12169d659ae1SPeter Zijlstra 	owner = atomic_long_read(&lock->owner);
12179d659ae1SPeter Zijlstra 	for (;;) {
12189d659ae1SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
12199d659ae1SPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
1220e274795eSPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
12219d659ae1SPeter Zijlstra #endif
12229d659ae1SPeter Zijlstra 
12239d659ae1SPeter Zijlstra 		if (owner & MUTEX_FLAG_HANDOFF)
12249d659ae1SPeter Zijlstra 			break;
12259d659ae1SPeter Zijlstra 
1226ab4e4d9fSPeter Zijlstra 		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
12279d659ae1SPeter Zijlstra 			if (owner & MUTEX_FLAG_WAITERS)
12289d659ae1SPeter Zijlstra 				break;
12299d659ae1SPeter Zijlstra 
12303ca0ff57SPeter Zijlstra 			return;
12319d659ae1SPeter Zijlstra 		}
12329d659ae1SPeter Zijlstra 	}
123301768b42SPeter Zijlstra 
1234b9c16a0eSPeter Zijlstra 	spin_lock(&lock->wait_lock);
12351d8fe7dcSJason Low 	debug_mutex_unlock(lock);
123601768b42SPeter Zijlstra 	if (!list_empty(&lock->wait_list)) {
123701768b42SPeter Zijlstra 		/* get the first entry from the wait-list: */
123801768b42SPeter Zijlstra 		struct mutex_waiter *waiter =
12399d659ae1SPeter Zijlstra 			list_first_entry(&lock->wait_list,
124001768b42SPeter Zijlstra 					 struct mutex_waiter, list);
124101768b42SPeter Zijlstra 
12429d659ae1SPeter Zijlstra 		next = waiter->task;
12439d659ae1SPeter Zijlstra 
124401768b42SPeter Zijlstra 		debug_mutex_wake_waiter(lock, waiter);
12459d659ae1SPeter Zijlstra 		wake_q_add(&wake_q, next);
124601768b42SPeter Zijlstra 	}
124701768b42SPeter Zijlstra 
12489d659ae1SPeter Zijlstra 	if (owner & MUTEX_FLAG_HANDOFF)
12499d659ae1SPeter Zijlstra 		__mutex_handoff(lock, next);
12509d659ae1SPeter Zijlstra 
1251b9c16a0eSPeter Zijlstra 	spin_unlock(&lock->wait_lock);
12529d659ae1SPeter Zijlstra 
12531329ce6fSDavidlohr Bueso 	wake_up_q(&wake_q);
125401768b42SPeter Zijlstra }
125501768b42SPeter Zijlstra 
125601768b42SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
125701768b42SPeter Zijlstra /*
125801768b42SPeter Zijlstra  * Here come the less common (and hence less performance-critical) APIs:
125901768b42SPeter Zijlstra  * mutex_lock_interruptible() and mutex_trylock().
126001768b42SPeter Zijlstra  */
126101768b42SPeter Zijlstra static noinline int __sched
126201768b42SPeter Zijlstra __mutex_lock_killable_slowpath(struct mutex *lock);
126301768b42SPeter Zijlstra 
126401768b42SPeter Zijlstra static noinline int __sched
126501768b42SPeter Zijlstra __mutex_lock_interruptible_slowpath(struct mutex *lock);
126601768b42SPeter Zijlstra 
126701768b42SPeter Zijlstra /**
126845dbac0eSMatthew Wilcox  * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
126945dbac0eSMatthew Wilcox  * @lock: The mutex to be acquired.
127001768b42SPeter Zijlstra  *
127145dbac0eSMatthew Wilcox  * Lock the mutex like mutex_lock().  If a signal is delivered while the
127245dbac0eSMatthew Wilcox  * process is sleeping, this function will return without acquiring the
127345dbac0eSMatthew Wilcox  * mutex.
127401768b42SPeter Zijlstra  *
127545dbac0eSMatthew Wilcox  * Context: Process context.
127645dbac0eSMatthew Wilcox  * Return: 0 if the lock was successfully acquired or %-EINTR if a
127745dbac0eSMatthew Wilcox  * signal arrived.
127801768b42SPeter Zijlstra  */
127901768b42SPeter Zijlstra int __sched mutex_lock_interruptible(struct mutex *lock)
128001768b42SPeter Zijlstra {
128101768b42SPeter Zijlstra 	might_sleep();
12823ca0ff57SPeter Zijlstra 
12833ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(lock))
128401768b42SPeter Zijlstra 		return 0;
12853ca0ff57SPeter Zijlstra 
128601768b42SPeter Zijlstra 	return __mutex_lock_interruptible_slowpath(lock);
128701768b42SPeter Zijlstra }
128801768b42SPeter Zijlstra 
128901768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_lock_interruptible);
129001768b42SPeter Zijlstra 
129145dbac0eSMatthew Wilcox /**
129245dbac0eSMatthew Wilcox  * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
129345dbac0eSMatthew Wilcox  * @lock: The mutex to be acquired.
129445dbac0eSMatthew Wilcox  *
129545dbac0eSMatthew Wilcox  * Lock the mutex like mutex_lock().  If a signal which will be fatal to
129645dbac0eSMatthew Wilcox  * the current process is delivered while the process is sleeping, this
129745dbac0eSMatthew Wilcox  * function will return without acquiring the mutex.
129845dbac0eSMatthew Wilcox  *
129945dbac0eSMatthew Wilcox  * Context: Process context.
130045dbac0eSMatthew Wilcox  * Return: 0 if the lock was successfully acquired or %-EINTR if a
130145dbac0eSMatthew Wilcox  * fatal signal arrived.
130245dbac0eSMatthew Wilcox  */
130301768b42SPeter Zijlstra int __sched mutex_lock_killable(struct mutex *lock)
130401768b42SPeter Zijlstra {
130501768b42SPeter Zijlstra 	might_sleep();
13063ca0ff57SPeter Zijlstra 
13073ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(lock))
130801768b42SPeter Zijlstra 		return 0;
13093ca0ff57SPeter Zijlstra 
131001768b42SPeter Zijlstra 	return __mutex_lock_killable_slowpath(lock);
131101768b42SPeter Zijlstra }
131201768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_lock_killable);
131301768b42SPeter Zijlstra 
131445dbac0eSMatthew Wilcox /**
131545dbac0eSMatthew Wilcox  * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
131645dbac0eSMatthew Wilcox  * @lock: The mutex to be acquired.
131745dbac0eSMatthew Wilcox  *
131845dbac0eSMatthew Wilcox  * Lock the mutex like mutex_lock().  While the task is waiting for this
131945dbac0eSMatthew Wilcox  * mutex, it will be accounted as being in the IO wait state by the
132045dbac0eSMatthew Wilcox  * scheduler.
132145dbac0eSMatthew Wilcox  *
132245dbac0eSMatthew Wilcox  * Context: Process context.
132345dbac0eSMatthew Wilcox  */
13241460cb65STejun Heo void __sched mutex_lock_io(struct mutex *lock)
13251460cb65STejun Heo {
13261460cb65STejun Heo 	int token;
13271460cb65STejun Heo 
13281460cb65STejun Heo 	token = io_schedule_prepare();
13291460cb65STejun Heo 	mutex_lock(lock);
13301460cb65STejun Heo 	io_schedule_finish(token);
13311460cb65STejun Heo }
13321460cb65STejun Heo EXPORT_SYMBOL_GPL(mutex_lock_io);
13331460cb65STejun Heo 
13343ca0ff57SPeter Zijlstra static noinline void __sched
13353ca0ff57SPeter Zijlstra __mutex_lock_slowpath(struct mutex *lock)
133601768b42SPeter Zijlstra {
1337427b1820SPeter Zijlstra 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
133801768b42SPeter Zijlstra }
133901768b42SPeter Zijlstra 
134001768b42SPeter Zijlstra static noinline int __sched
134101768b42SPeter Zijlstra __mutex_lock_killable_slowpath(struct mutex *lock)
134201768b42SPeter Zijlstra {
1343427b1820SPeter Zijlstra 	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
134401768b42SPeter Zijlstra }
134501768b42SPeter Zijlstra 
134601768b42SPeter Zijlstra static noinline int __sched
134701768b42SPeter Zijlstra __mutex_lock_interruptible_slowpath(struct mutex *lock)
134801768b42SPeter Zijlstra {
1349427b1820SPeter Zijlstra 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
135001768b42SPeter Zijlstra }
135101768b42SPeter Zijlstra 
135201768b42SPeter Zijlstra static noinline int __sched
135301768b42SPeter Zijlstra __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
135401768b42SPeter Zijlstra {
1355427b1820SPeter Zijlstra 	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
1356427b1820SPeter Zijlstra 			       _RET_IP_, ctx);
135701768b42SPeter Zijlstra }
135801768b42SPeter Zijlstra 
135901768b42SPeter Zijlstra static noinline int __sched
136001768b42SPeter Zijlstra __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
136101768b42SPeter Zijlstra 					    struct ww_acquire_ctx *ctx)
136201768b42SPeter Zijlstra {
1363427b1820SPeter Zijlstra 	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
1364427b1820SPeter Zijlstra 			       _RET_IP_, ctx);
136501768b42SPeter Zijlstra }
136601768b42SPeter Zijlstra 
136701768b42SPeter Zijlstra #endif
136801768b42SPeter Zijlstra 
136901768b42SPeter Zijlstra /**
137001768b42SPeter Zijlstra  * mutex_trylock - try to acquire the mutex, without waiting
137101768b42SPeter Zijlstra  * @lock: the mutex to be acquired
137201768b42SPeter Zijlstra  *
137301768b42SPeter Zijlstra  * Try to acquire the mutex atomically. Returns 1 if the mutex
137401768b42SPeter Zijlstra  * has been acquired successfully, and 0 on contention.
137501768b42SPeter Zijlstra  *
137601768b42SPeter Zijlstra  * NOTE: this function follows the spin_trylock() convention, so
137701768b42SPeter Zijlstra  * it is negated from the down_trylock() return values! Be careful
137801768b42SPeter Zijlstra  * about this when converting semaphore users to mutexes.
137901768b42SPeter Zijlstra  *
138001768b42SPeter Zijlstra  * This function must not be used in interrupt context. The
138101768b42SPeter Zijlstra  * mutex must be released by the same task that acquired it.
138201768b42SPeter Zijlstra  */
138301768b42SPeter Zijlstra int __sched mutex_trylock(struct mutex *lock)
138401768b42SPeter Zijlstra {
13856c11c6e3SSebastian Andrzej Siewior 	bool locked;
138601768b42SPeter Zijlstra 
13876c11c6e3SSebastian Andrzej Siewior #ifdef CONFIG_DEBUG_MUTEXES
13886c11c6e3SSebastian Andrzej Siewior 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
13896c11c6e3SSebastian Andrzej Siewior #endif
13906c11c6e3SSebastian Andrzej Siewior 
13916c11c6e3SSebastian Andrzej Siewior 	locked = __mutex_trylock(lock);
13923ca0ff57SPeter Zijlstra 	if (locked)
13933ca0ff57SPeter Zijlstra 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
139401768b42SPeter Zijlstra 
13953ca0ff57SPeter Zijlstra 	return locked;
139601768b42SPeter Zijlstra }
139701768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_trylock);
139801768b42SPeter Zijlstra 
139901768b42SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
140001768b42SPeter Zijlstra int __sched
1401c5470b22SNicolai Hähnle ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
140201768b42SPeter Zijlstra {
140301768b42SPeter Zijlstra 	might_sleep();
140401768b42SPeter Zijlstra 
14053ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(&lock->base)) {
1406ea9e0fb8SNicolai Hähnle 		if (ctx)
140701768b42SPeter Zijlstra 			ww_mutex_set_context_fastpath(lock, ctx);
14083ca0ff57SPeter Zijlstra 		return 0;
14093ca0ff57SPeter Zijlstra 	}
14103ca0ff57SPeter Zijlstra 
14113ca0ff57SPeter Zijlstra 	return __ww_mutex_lock_slowpath(lock, ctx);
141201768b42SPeter Zijlstra }
1413c5470b22SNicolai Hähnle EXPORT_SYMBOL(ww_mutex_lock);
141401768b42SPeter Zijlstra 
141501768b42SPeter Zijlstra int __sched
1416c5470b22SNicolai Hähnle ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
141701768b42SPeter Zijlstra {
141801768b42SPeter Zijlstra 	might_sleep();
141901768b42SPeter Zijlstra 
14203ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(&lock->base)) {
1421ea9e0fb8SNicolai Hähnle 		if (ctx)
142201768b42SPeter Zijlstra 			ww_mutex_set_context_fastpath(lock, ctx);
14233ca0ff57SPeter Zijlstra 		return 0;
14243ca0ff57SPeter Zijlstra 	}
14253ca0ff57SPeter Zijlstra 
14263ca0ff57SPeter Zijlstra 	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
142701768b42SPeter Zijlstra }
1428c5470b22SNicolai Hähnle EXPORT_SYMBOL(ww_mutex_lock_interruptible);
142901768b42SPeter Zijlstra 
143001768b42SPeter Zijlstra #endif
143101768b42SPeter Zijlstra 
143201768b42SPeter Zijlstra /**
143301768b42SPeter Zijlstra  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
143401768b42SPeter Zijlstra  * @cnt: the atomic which we are to dec
143501768b42SPeter Zijlstra  * @lock: the mutex to return holding if we dec to 0
143601768b42SPeter Zijlstra  *
143701768b42SPeter Zijlstra  * return true and hold lock if we dec to 0, return false otherwise
143801768b42SPeter Zijlstra  */
143901768b42SPeter Zijlstra int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
144001768b42SPeter Zijlstra {
144101768b42SPeter Zijlstra 	/* dec if we can't possibly hit 0 */
144201768b42SPeter Zijlstra 	if (atomic_add_unless(cnt, -1, 1))
144301768b42SPeter Zijlstra 		return 0;
144401768b42SPeter Zijlstra 	/* we might hit 0, so take the lock */
144501768b42SPeter Zijlstra 	mutex_lock(lock);
144601768b42SPeter Zijlstra 	if (!atomic_dec_and_test(cnt)) {
144701768b42SPeter Zijlstra 		/* when we actually did the dec, we didn't hit 0 */
144801768b42SPeter Zijlstra 		mutex_unlock(lock);
144901768b42SPeter Zijlstra 		return 0;
145001768b42SPeter Zijlstra 	}
145101768b42SPeter Zijlstra 	/* we hit 0, and we hold the lock */
145201768b42SPeter Zijlstra 	return 1;
145301768b42SPeter Zijlstra }
145401768b42SPeter Zijlstra EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
1455