xref: /openbmc/linux/kernel/locking/mutex.c (revision 6c11c6e3d5e9e5caf8686cd6a5e4552cfc3ea326)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
201768b42SPeter Zijlstra /*
367a6de49SPeter Zijlstra  * kernel/locking/mutex.c
401768b42SPeter Zijlstra  *
501768b42SPeter Zijlstra  * Mutexes: blocking mutual exclusion locks
601768b42SPeter Zijlstra  *
701768b42SPeter Zijlstra  * Started by Ingo Molnar:
801768b42SPeter Zijlstra  *
901768b42SPeter Zijlstra  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
1001768b42SPeter Zijlstra  *
1101768b42SPeter Zijlstra  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
1201768b42SPeter Zijlstra  * David Howells for suggestions and improvements.
1301768b42SPeter Zijlstra  *
1401768b42SPeter Zijlstra  *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
1501768b42SPeter Zijlstra  *    from the -rt tree, where it was originally implemented for rtmutexes
1601768b42SPeter Zijlstra  *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
1701768b42SPeter Zijlstra  *    and Sven Dietrich.
1801768b42SPeter Zijlstra  *
19387b1468SMauro Carvalho Chehab  * Also see Documentation/locking/mutex-design.rst.
2001768b42SPeter Zijlstra  */
2101768b42SPeter Zijlstra #include <linux/mutex.h>
2201768b42SPeter Zijlstra #include <linux/ww_mutex.h>
23174cd4b1SIngo Molnar #include <linux/sched/signal.h>
2401768b42SPeter Zijlstra #include <linux/sched/rt.h>
2584f001e1SIngo Molnar #include <linux/sched/wake_q.h>
26b17b0153SIngo Molnar #include <linux/sched/debug.h>
2701768b42SPeter Zijlstra #include <linux/export.h>
2801768b42SPeter Zijlstra #include <linux/spinlock.h>
2901768b42SPeter Zijlstra #include <linux/interrupt.h>
3001768b42SPeter Zijlstra #include <linux/debug_locks.h>
317a215f89SDavidlohr Bueso #include <linux/osq_lock.h>
3201768b42SPeter Zijlstra 
3301768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
3401768b42SPeter Zijlstra # include "mutex-debug.h"
3501768b42SPeter Zijlstra #else
3601768b42SPeter Zijlstra # include "mutex.h"
3701768b42SPeter Zijlstra #endif
3801768b42SPeter Zijlstra 
3901768b42SPeter Zijlstra void
4001768b42SPeter Zijlstra __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
4101768b42SPeter Zijlstra {
423ca0ff57SPeter Zijlstra 	atomic_long_set(&lock->owner, 0);
4301768b42SPeter Zijlstra 	spin_lock_init(&lock->wait_lock);
4401768b42SPeter Zijlstra 	INIT_LIST_HEAD(&lock->wait_list);
4501768b42SPeter Zijlstra #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
464d9d951eSJason Low 	osq_lock_init(&lock->osq);
4701768b42SPeter Zijlstra #endif
4801768b42SPeter Zijlstra 
4901768b42SPeter Zijlstra 	debug_mutex_init(lock, name, key);
5001768b42SPeter Zijlstra }
5101768b42SPeter Zijlstra EXPORT_SYMBOL(__mutex_init);
5201768b42SPeter Zijlstra 
533ca0ff57SPeter Zijlstra /*
543ca0ff57SPeter Zijlstra  * @owner: contains a 'struct task_struct *' pointing to the current lock
553ca0ff57SPeter Zijlstra  * owner, NULL means not owned. Since task_struct pointers are aligned to
56e274795eSPeter Zijlstra  * at least L1_CACHE_BYTES, we have low bits available to store extra state.
573ca0ff57SPeter Zijlstra  *
583ca0ff57SPeter Zijlstra  * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
599d659ae1SPeter Zijlstra  * Bit1 indicates unlock needs to hand the lock to the top-waiter.
60e274795eSPeter Zijlstra  * Bit2 indicates handoff has been done and we're waiting for pickup.
613ca0ff57SPeter Zijlstra  */
623ca0ff57SPeter Zijlstra #define MUTEX_FLAG_WAITERS	0x01
639d659ae1SPeter Zijlstra #define MUTEX_FLAG_HANDOFF	0x02
64e274795eSPeter Zijlstra #define MUTEX_FLAG_PICKUP	0x04
653ca0ff57SPeter Zijlstra 
66e274795eSPeter Zijlstra #define MUTEX_FLAGS		0x07
673ca0ff57SPeter Zijlstra 
683ca0ff57SPeter Zijlstra static inline struct task_struct *__owner_task(unsigned long owner)
693ca0ff57SPeter Zijlstra {
703ca0ff57SPeter Zijlstra 	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
713ca0ff57SPeter Zijlstra }
723ca0ff57SPeter Zijlstra 
733ca0ff57SPeter Zijlstra static inline unsigned long __owner_flags(unsigned long owner)
743ca0ff57SPeter Zijlstra {
753ca0ff57SPeter Zijlstra 	return owner & MUTEX_FLAGS;
763ca0ff57SPeter Zijlstra }
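
/*
 * Illustrative sketch, not part of this file: how a task pointer and the
 * flag bits share the owner word. The task address below is made up; any
 * task_struct is aligned to at least L1_CACHE_BYTES, so the low bits of
 * a real pointer are always zero and free to carry the flags:
 *
 *	owner = 0xffff888012345600UL | MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF;
 *	__owner_task(owner);	// (struct task_struct *)0xffff888012345600
 *	__owner_flags(owner);	// 0x03 == WAITERS | HANDOFF
 */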
773ca0ff57SPeter Zijlstra 
783ca0ff57SPeter Zijlstra /*
79e274795eSPeter Zijlstra  * Trylock variant that returns the owning task on failure.
803ca0ff57SPeter Zijlstra  */
81e274795eSPeter Zijlstra static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
823ca0ff57SPeter Zijlstra {
833ca0ff57SPeter Zijlstra 	unsigned long owner, curr = (unsigned long)current;
843ca0ff57SPeter Zijlstra 
853ca0ff57SPeter Zijlstra 	owner = atomic_long_read(&lock->owner);
863ca0ff57SPeter Zijlstra 	for (;;) { /* must loop, can race against a flag */
879d659ae1SPeter Zijlstra 		unsigned long old, flags = __owner_flags(owner);
88e274795eSPeter Zijlstra 		unsigned long task = owner & ~MUTEX_FLAGS;
893ca0ff57SPeter Zijlstra 
90e274795eSPeter Zijlstra 		if (task) {
91e274795eSPeter Zijlstra 			if (likely(task != curr))
92e274795eSPeter Zijlstra 				break;
939d659ae1SPeter Zijlstra 
94e274795eSPeter Zijlstra 			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
95e274795eSPeter Zijlstra 				break;
96e274795eSPeter Zijlstra 
97e274795eSPeter Zijlstra 			flags &= ~MUTEX_FLAG_PICKUP;
98e274795eSPeter Zijlstra 		} else {
99e274795eSPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
100e274795eSPeter Zijlstra 			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
101e274795eSPeter Zijlstra #endif
1029d659ae1SPeter Zijlstra 		}
1033ca0ff57SPeter Zijlstra 
1049d659ae1SPeter Zijlstra 		/*
1059d659ae1SPeter Zijlstra 		 * We set the HANDOFF bit, we must make sure it doesn't live
1069d659ae1SPeter Zijlstra 		 * past the point where we acquire it. This would be possible
1079d659ae1SPeter Zijlstra 		 * if we (accidentally) set the bit on an unlocked mutex.
1089d659ae1SPeter Zijlstra 		 */
1099d659ae1SPeter Zijlstra 		flags &= ~MUTEX_FLAG_HANDOFF;
1109d659ae1SPeter Zijlstra 
1119d659ae1SPeter Zijlstra 		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
1123ca0ff57SPeter Zijlstra 		if (old == owner)
113e274795eSPeter Zijlstra 			return NULL;
1143ca0ff57SPeter Zijlstra 
1153ca0ff57SPeter Zijlstra 		owner = old;
1163ca0ff57SPeter Zijlstra 	}
117e274795eSPeter Zijlstra 
118e274795eSPeter Zijlstra 	return __owner_task(owner);
119e274795eSPeter Zijlstra }
120e274795eSPeter Zijlstra 
121e274795eSPeter Zijlstra /*
122e274795eSPeter Zijlstra  * Actual trylock that will work on any unlocked state.
123e274795eSPeter Zijlstra  */
124e274795eSPeter Zijlstra static inline bool __mutex_trylock(struct mutex *lock)
125e274795eSPeter Zijlstra {
126e274795eSPeter Zijlstra 	return !__mutex_trylock_or_owner(lock);
1273ca0ff57SPeter Zijlstra }
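
/*
 * Illustrative use of the return convention above, not part of this file
 * (the spin loop in mutex_optimistic_spin() is the real user):
 *
 *	struct task_struct *owner = __mutex_trylock_or_owner(lock);
 *
 *	if (!owner) {
 *		// success: we now own the lock
 *	} else {
 *		// failure: 'owner' holds the lock; e.g. spin while it runs
 *	}
 */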
1283ca0ff57SPeter Zijlstra 
1293ca0ff57SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
1303ca0ff57SPeter Zijlstra /*
1313ca0ff57SPeter Zijlstra  * Lockdep annotations are contained to the slow paths for simplicity.
1323ca0ff57SPeter Zijlstra  * There is nothing that would stop spreading the lockdep annotations outwards
1333ca0ff57SPeter Zijlstra  * except more code.
1343ca0ff57SPeter Zijlstra  */
1353ca0ff57SPeter Zijlstra 
1363ca0ff57SPeter Zijlstra /*
1373ca0ff57SPeter Zijlstra  * Optimistic trylock that only works in the uncontended case. Make sure to
1383ca0ff57SPeter Zijlstra  * follow with a __mutex_trylock() before failing.
1393ca0ff57SPeter Zijlstra  */
1403ca0ff57SPeter Zijlstra static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
1413ca0ff57SPeter Zijlstra {
1423ca0ff57SPeter Zijlstra 	unsigned long curr = (unsigned long)current;
143c427f695SPeter Zijlstra 	unsigned long zero = 0UL;
1443ca0ff57SPeter Zijlstra 
145c427f695SPeter Zijlstra 	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
1463ca0ff57SPeter Zijlstra 		return true;
1473ca0ff57SPeter Zijlstra 
1483ca0ff57SPeter Zijlstra 	return false;
1493ca0ff57SPeter Zijlstra }
1503ca0ff57SPeter Zijlstra 
1513ca0ff57SPeter Zijlstra static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
1523ca0ff57SPeter Zijlstra {
1533ca0ff57SPeter Zijlstra 	unsigned long curr = (unsigned long)current;
1543ca0ff57SPeter Zijlstra 
1553ca0ff57SPeter Zijlstra 	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
1563ca0ff57SPeter Zijlstra 		return true;
1573ca0ff57SPeter Zijlstra 
1583ca0ff57SPeter Zijlstra 	return false;
1593ca0ff57SPeter Zijlstra }
1603ca0ff57SPeter Zijlstra #endif
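
/*
 * Illustrative owner-word transitions for the two fast paths above, not
 * part of this file:
 *
 *	lock fast path:		0UL -> (unsigned long)current
 *	unlock fast path:	(unsigned long)current -> 0UL
 *
 * Any flag bit makes the cmpxchg comparison fail, so e.g. an owner word
 * of 'current | MUTEX_FLAG_WAITERS' forces the unlock slow path, which
 * is what guarantees the required wakeup.
 */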
1613ca0ff57SPeter Zijlstra 
1623ca0ff57SPeter Zijlstra static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
1633ca0ff57SPeter Zijlstra {
1643ca0ff57SPeter Zijlstra 	atomic_long_or(flag, &lock->owner);
1653ca0ff57SPeter Zijlstra }
1663ca0ff57SPeter Zijlstra 
1673ca0ff57SPeter Zijlstra static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
1683ca0ff57SPeter Zijlstra {
1693ca0ff57SPeter Zijlstra 	atomic_long_andnot(flag, &lock->owner);
1703ca0ff57SPeter Zijlstra }
1713ca0ff57SPeter Zijlstra 
1729d659ae1SPeter Zijlstra static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
1739d659ae1SPeter Zijlstra {
1749d659ae1SPeter Zijlstra 	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
1759d659ae1SPeter Zijlstra }
1769d659ae1SPeter Zijlstra 
1779d659ae1SPeter Zijlstra /*
17808295b3bSThomas Hellstrom  * Add @waiter to a given location in the lock wait_list and set the
17908295b3bSThomas Hellstrom  * FLAG_WAITERS flag if it's the first waiter.
18008295b3bSThomas Hellstrom  */
18108295b3bSThomas Hellstrom static void __sched
18208295b3bSThomas Hellstrom __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
18308295b3bSThomas Hellstrom 		   struct list_head *list)
18408295b3bSThomas Hellstrom {
18508295b3bSThomas Hellstrom 	debug_mutex_add_waiter(lock, waiter, current);
18608295b3bSThomas Hellstrom 
18708295b3bSThomas Hellstrom 	list_add_tail(&waiter->list, list);
18808295b3bSThomas Hellstrom 	if (__mutex_waiter_is_first(lock, waiter))
18908295b3bSThomas Hellstrom 		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
19008295b3bSThomas Hellstrom }
19108295b3bSThomas Hellstrom 
19208295b3bSThomas Hellstrom /*
1939d659ae1SPeter Zijlstra  * Give up ownership to a specific task; when @task = NULL, this is equivalent
194e274795eSPeter Zijlstra  * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF and preserves
195e274795eSPeter Zijlstra  * WAITERS. Provides RELEASE semantics like a regular unlock; the matching
196e274795eSPeter Zijlstra  * ACQUIRE semantics for the handoff are provided by __mutex_trylock().
1979d659ae1SPeter Zijlstra  */
1989d659ae1SPeter Zijlstra static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
1999d659ae1SPeter Zijlstra {
2009d659ae1SPeter Zijlstra 	unsigned long owner = atomic_long_read(&lock->owner);
2019d659ae1SPeter Zijlstra 
2029d659ae1SPeter Zijlstra 	for (;;) {
2039d659ae1SPeter Zijlstra 		unsigned long old, new;
2049d659ae1SPeter Zijlstra 
2059d659ae1SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
2069d659ae1SPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
207e274795eSPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
2089d659ae1SPeter Zijlstra #endif
2099d659ae1SPeter Zijlstra 
2109d659ae1SPeter Zijlstra 		new = (owner & MUTEX_FLAG_WAITERS);
2119d659ae1SPeter Zijlstra 		new |= (unsigned long)task;
212e274795eSPeter Zijlstra 		if (task)
213e274795eSPeter Zijlstra 			new |= MUTEX_FLAG_PICKUP;
2149d659ae1SPeter Zijlstra 
2159d659ae1SPeter Zijlstra 		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
2169d659ae1SPeter Zijlstra 		if (old == owner)
2179d659ae1SPeter Zijlstra 			break;
2189d659ae1SPeter Zijlstra 
2199d659ae1SPeter Zijlstra 		owner = old;
2209d659ae1SPeter Zijlstra 	}
2219d659ae1SPeter Zijlstra }
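
/*
 * Illustrative owner-word transitions for a handoff, not part of this
 * file; T0 and T1 are hypothetical tasks:
 *
 *	T0 holds the lock, T1 waits:	owner == T0 | WAITERS
 *	T1 requests a handoff:		owner == T0 | WAITERS | HANDOFF
 *	T0 unlocks via handoff:		owner == T1 | WAITERS | PICKUP
 *	T1's trylock picks it up:	owner == T1 | WAITERS
 *
 * WAITERS is cleared only once the wait list drains.
 */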
2229d659ae1SPeter Zijlstra 
22301768b42SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
22401768b42SPeter Zijlstra /*
22501768b42SPeter Zijlstra  * We split the mutex lock/unlock logic into separate fastpath and
22601768b42SPeter Zijlstra  * slowpath functions, to reduce the register pressure on the fastpath.
22701768b42SPeter Zijlstra  * We also put the fastpath first in the kernel image, to make sure the
22801768b42SPeter Zijlstra  * branch is predicted by the CPU as default-untaken.
22901768b42SPeter Zijlstra  */
2303ca0ff57SPeter Zijlstra static void __sched __mutex_lock_slowpath(struct mutex *lock);
23101768b42SPeter Zijlstra 
23201768b42SPeter Zijlstra /**
23301768b42SPeter Zijlstra  * mutex_lock - acquire the mutex
23401768b42SPeter Zijlstra  * @lock: the mutex to be acquired
23501768b42SPeter Zijlstra  *
23601768b42SPeter Zijlstra  * Lock the mutex exclusively for this task. If the mutex is not
23701768b42SPeter Zijlstra  * available right now, it will sleep until it can get it.
23801768b42SPeter Zijlstra  *
23901768b42SPeter Zijlstra  * The mutex must later on be released by the same task that
24001768b42SPeter Zijlstra  * acquired it. Recursive locking is not allowed. The task
24101768b42SPeter Zijlstra  * may not exit without first unlocking the mutex. Also, kernel
242139b6fd2SSharon Dvir  * memory where the mutex resides must not be freed with
24301768b42SPeter Zijlstra  * the mutex still locked. The mutex must first be initialized
24401768b42SPeter Zijlstra  * (or statically defined) before it can be locked. memset()-ing
24501768b42SPeter Zijlstra  * the mutex to 0 is not allowed.
24601768b42SPeter Zijlstra  *
24701768b42SPeter Zijlstra  * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
24801768b42SPeter Zijlstra  * checks that will enforce the restrictions and will also do
2497b4ff1adSMauro Carvalho Chehab  * deadlock debugging)
25001768b42SPeter Zijlstra  *
25101768b42SPeter Zijlstra  * This function is similar to (but not equivalent to) down().
25201768b42SPeter Zijlstra  */
25301768b42SPeter Zijlstra void __sched mutex_lock(struct mutex *lock)
25401768b42SPeter Zijlstra {
25501768b42SPeter Zijlstra 	might_sleep();
25601768b42SPeter Zijlstra 
2573ca0ff57SPeter Zijlstra 	if (!__mutex_trylock_fast(lock))
2583ca0ff57SPeter Zijlstra 		__mutex_lock_slowpath(lock);
2593ca0ff57SPeter Zijlstra }
26001768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_lock);
26101768b42SPeter Zijlstra #endif
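
/*
 * Minimal usage sketch, not part of this file; 'my_dev' and its fields
 * are made-up names:
 *
 *	static DEFINE_MUTEX(my_dev_lock);
 *
 *	static void my_dev_update(struct my_dev *dev, int val)
 *	{
 *		mutex_lock(&my_dev_lock);
 *		dev->state = val;		// protected section
 *		mutex_unlock(&my_dev_lock);	// same task must unlock
 *	}
 */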
26201768b42SPeter Zijlstra 
26355f036caSPeter Ziljstra /*
26455f036caSPeter Ziljstra  * Wait-Die:
26555f036caSPeter Ziljstra  *   The newer transactions are killed when:
26655f036caSPeter Ziljstra  *     It (the new transaction) makes a request for a lock being held
26755f036caSPeter Ziljstra  *     by an older transaction.
26808295b3bSThomas Hellstrom  *
26908295b3bSThomas Hellstrom  * Wound-Wait:
27008295b3bSThomas Hellstrom  *   The newer transactions are wounded when:
27108295b3bSThomas Hellstrom  *     An older transaction makes a request for a lock being held by
27208295b3bSThomas Hellstrom  *     the newer transaction.
27355f036caSPeter Ziljstra  */
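
/*
 * Illustrative scenario, not part of this file. With two hypothetical
 * transactions where A is older (smaller stamp) than B:
 *
 *	Wait-Die:   B requests a lock held by A -> B is killed (it backs
 *		    off, drops its locks and restarts); A requesting a
 *		    lock held by B simply waits.
 *	Wound-Wait: A requests a lock held by B -> B is wounded and dies
 *		    as soon as it blocks on another lock; B requesting a
 *		    lock held by A simply waits.
 */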
27455f036caSPeter Ziljstra 
27555f036caSPeter Ziljstra /*
27655f036caSPeter Ziljstra  * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
27755f036caSPeter Ziljstra  * it.
27855f036caSPeter Ziljstra  */
279427b1820SPeter Zijlstra static __always_inline void
280427b1820SPeter Zijlstra ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
28176916515SDavidlohr Bueso {
28276916515SDavidlohr Bueso #ifdef CONFIG_DEBUG_MUTEXES
28376916515SDavidlohr Bueso 	/*
28476916515SDavidlohr Bueso 	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
28576916515SDavidlohr Bueso 	 * but released with a normal mutex_unlock in this call.
28676916515SDavidlohr Bueso 	 *
28776916515SDavidlohr Bueso 	 * This should never happen, always use ww_mutex_unlock.
28876916515SDavidlohr Bueso 	 */
28976916515SDavidlohr Bueso 	DEBUG_LOCKS_WARN_ON(ww->ctx);
29076916515SDavidlohr Bueso 
29176916515SDavidlohr Bueso 	/*
29276916515SDavidlohr Bueso 	 * Not quite done after calling ww_acquire_done()?
29376916515SDavidlohr Bueso 	 */
29476916515SDavidlohr Bueso 	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
29576916515SDavidlohr Bueso 
29676916515SDavidlohr Bueso 	if (ww_ctx->contending_lock) {
29776916515SDavidlohr Bueso 		/*
29876916515SDavidlohr Bueso 		 * After -EDEADLK you tried to
29976916515SDavidlohr Bueso 		 * acquire a different ww_mutex? Bad!
30076916515SDavidlohr Bueso 		 */
30176916515SDavidlohr Bueso 		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
30276916515SDavidlohr Bueso 
30376916515SDavidlohr Bueso 		/*
30476916515SDavidlohr Bueso 		 * You called ww_mutex_lock after receiving -EDEADLK,
30576916515SDavidlohr Bueso 		 * but 'forgot' to unlock everything else first?
30676916515SDavidlohr Bueso 		 */
30776916515SDavidlohr Bueso 		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
30876916515SDavidlohr Bueso 		ww_ctx->contending_lock = NULL;
30976916515SDavidlohr Bueso 	}
31076916515SDavidlohr Bueso 
31176916515SDavidlohr Bueso 	/*
31276916515SDavidlohr Bueso 	 * Naughty, using a different class will lead to undefined behavior!
31376916515SDavidlohr Bueso 	 */
31476916515SDavidlohr Bueso 	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
31576916515SDavidlohr Bueso #endif
31676916515SDavidlohr Bueso 	ww_ctx->acquired++;
31755f036caSPeter Ziljstra 	ww->ctx = ww_ctx;
3183822da3eSNicolai Hähnle }
3193822da3eSNicolai Hähnle 
32076916515SDavidlohr Bueso /*
32155f036caSPeter Ziljstra  * Determine if context @a is 'after' context @b. IOW, @a is a younger
32255f036caSPeter Ziljstra  * transaction than @b and depending on algorithm either needs to wait for
32355f036caSPeter Ziljstra  * @b or die.
32455f036caSPeter Ziljstra  */
32555f036caSPeter Ziljstra static inline bool __sched
32655f036caSPeter Ziljstra __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
32755f036caSPeter Ziljstra {
32855f036caSPeter Ziljstra 
32955f036caSPeter Ziljstra 	return (signed long)(a->stamp - b->stamp) > 0;
33055f036caSPeter Ziljstra }
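
/*
 * The signed subtraction above is wraparound-safe in the usual
 * sequence-number way. Illustrative values (assuming stamps near the
 * counter wrap), not part of this file:
 *
 *	a->stamp == 2, b->stamp == ULONG_MAX	// a allocated just after wrap
 *	a->stamp - b->stamp == 3		// (signed long)3 > 0: a is younger
 */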
33155f036caSPeter Ziljstra 
33255f036caSPeter Ziljstra /*
33355f036caSPeter Ziljstra  * Wait-Die; wake a younger waiter context (when locks held) such that it can
33455f036caSPeter Ziljstra  * die.
335659cf9f5SNicolai Hähnle  *
33655f036caSPeter Ziljstra  * Among waiters with context, only the first one can have other locks acquired
33755f036caSPeter Ziljstra  * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
33855f036caSPeter Ziljstra  * __ww_mutex_check_kill() wake any but the earliest context.
33955f036caSPeter Ziljstra  */
34055f036caSPeter Ziljstra static bool __sched
34155f036caSPeter Ziljstra __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
34255f036caSPeter Ziljstra 	       struct ww_acquire_ctx *ww_ctx)
34355f036caSPeter Ziljstra {
34408295b3bSThomas Hellstrom 	if (!ww_ctx->is_wait_die)
34508295b3bSThomas Hellstrom 		return false;
34608295b3bSThomas Hellstrom 
34755f036caSPeter Ziljstra 	if (waiter->ww_ctx->acquired > 0 &&
34855f036caSPeter Ziljstra 			__ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
34955f036caSPeter Ziljstra 		debug_mutex_wake_waiter(lock, waiter);
35055f036caSPeter Ziljstra 		wake_up_process(waiter->task);
35155f036caSPeter Ziljstra 	}
35255f036caSPeter Ziljstra 
35355f036caSPeter Ziljstra 	return true;
35455f036caSPeter Ziljstra }
35555f036caSPeter Ziljstra 
35655f036caSPeter Ziljstra /*
35708295b3bSThomas Hellstrom  * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
35808295b3bSThomas Hellstrom  *
35908295b3bSThomas Hellstrom  * Wound the lock holder if there are waiters with older transactions than
36008295b3bSThomas Hellstrom  * the lock holder's. Even though multiple waiters may wound the lock holder,
36108295b3bSThomas Hellstrom  * it's sufficient that only one does.
36208295b3bSThomas Hellstrom  */
36308295b3bSThomas Hellstrom static bool __ww_mutex_wound(struct mutex *lock,
36408295b3bSThomas Hellstrom 			     struct ww_acquire_ctx *ww_ctx,
36508295b3bSThomas Hellstrom 			     struct ww_acquire_ctx *hold_ctx)
36608295b3bSThomas Hellstrom {
36708295b3bSThomas Hellstrom 	struct task_struct *owner = __mutex_owner(lock);
36808295b3bSThomas Hellstrom 
36908295b3bSThomas Hellstrom 	lockdep_assert_held(&lock->wait_lock);
37008295b3bSThomas Hellstrom 
37108295b3bSThomas Hellstrom 	/*
37208295b3bSThomas Hellstrom 	 * Possible through __ww_mutex_add_waiter() when we race with
37308295b3bSThomas Hellstrom 	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
37408295b3bSThomas Hellstrom 	 * through __ww_mutex_check_waiters().
37508295b3bSThomas Hellstrom 	 */
37608295b3bSThomas Hellstrom 	if (!hold_ctx)
37708295b3bSThomas Hellstrom 		return false;
37808295b3bSThomas Hellstrom 
37908295b3bSThomas Hellstrom 	/*
38008295b3bSThomas Hellstrom 	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
38108295b3bSThomas Hellstrom 	 * it cannot go away because we'll have FLAG_WAITERS set and hold
38208295b3bSThomas Hellstrom 	 * wait_lock.
38308295b3bSThomas Hellstrom 	 */
38408295b3bSThomas Hellstrom 	if (!owner)
38508295b3bSThomas Hellstrom 		return false;
38608295b3bSThomas Hellstrom 
38708295b3bSThomas Hellstrom 	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
38808295b3bSThomas Hellstrom 		hold_ctx->wounded = 1;
38908295b3bSThomas Hellstrom 
39008295b3bSThomas Hellstrom 		/*
39108295b3bSThomas Hellstrom 		 * wake_up_process() paired with set_current_state()
39208295b3bSThomas Hellstrom 		 * inserts sufficient barriers to make sure @owner either sees
393e13e2366SThomas Hellstrom 		 * it's wounded in __ww_mutex_check_kill() or has a
39408295b3bSThomas Hellstrom 		 * wakeup pending to re-read the wounded state.
39508295b3bSThomas Hellstrom 		 */
39608295b3bSThomas Hellstrom 		if (owner != current)
39708295b3bSThomas Hellstrom 			wake_up_process(owner);
39808295b3bSThomas Hellstrom 
39908295b3bSThomas Hellstrom 		return true;
40008295b3bSThomas Hellstrom 	}
40108295b3bSThomas Hellstrom 
40208295b3bSThomas Hellstrom 	return false;
40308295b3bSThomas Hellstrom }
40408295b3bSThomas Hellstrom 
40508295b3bSThomas Hellstrom /*
40655f036caSPeter Ziljstra  * We just acquired @lock under @ww_ctx; if there are later contexts waiting
40708295b3bSThomas Hellstrom  * behind us on the wait-list, check if they need to die, or wound us.
40855f036caSPeter Ziljstra  *
40955f036caSPeter Ziljstra  * See __ww_mutex_add_waiter() for the list-order construction; basically the
41055f036caSPeter Ziljstra  * list is ordered by stamp, smallest (oldest) first.
411659cf9f5SNicolai Hähnle  *
41208295b3bSThomas Hellstrom  * This relies on never mixing wait-die/wound-wait on the same wait-list,
41308295b3bSThomas Hellstrom  * which is currently ensured by that being a ww_class property.
41408295b3bSThomas Hellstrom  *
415659cf9f5SNicolai Hähnle  * The current task must not be on the wait list.
416659cf9f5SNicolai Hähnle  */
417659cf9f5SNicolai Hähnle static void __sched
41855f036caSPeter Ziljstra __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
419659cf9f5SNicolai Hähnle {
420659cf9f5SNicolai Hähnle 	struct mutex_waiter *cur;
421659cf9f5SNicolai Hähnle 
422659cf9f5SNicolai Hähnle 	lockdep_assert_held(&lock->wait_lock);
423659cf9f5SNicolai Hähnle 
424659cf9f5SNicolai Hähnle 	list_for_each_entry(cur, &lock->wait_list, list) {
425659cf9f5SNicolai Hähnle 		if (!cur->ww_ctx)
426659cf9f5SNicolai Hähnle 			continue;
427659cf9f5SNicolai Hähnle 
42808295b3bSThomas Hellstrom 		if (__ww_mutex_die(lock, cur, ww_ctx) ||
42908295b3bSThomas Hellstrom 		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
430659cf9f5SNicolai Hähnle 			break;
431659cf9f5SNicolai Hähnle 	}
432659cf9f5SNicolai Hähnle }
433659cf9f5SNicolai Hähnle 
43476916515SDavidlohr Bueso /*
43555f036caSPeter Ziljstra  * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
43655f036caSPeter Ziljstra  * and wake up any waiters so they can recheck.
43776916515SDavidlohr Bueso  */
43876916515SDavidlohr Bueso static __always_inline void
439427b1820SPeter Zijlstra ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
44076916515SDavidlohr Bueso {
44176916515SDavidlohr Bueso 	ww_mutex_lock_acquired(lock, ctx);
44276916515SDavidlohr Bueso 
44376916515SDavidlohr Bueso 	/*
44476916515SDavidlohr Bueso 	 * The lock->ctx update should be visible on all cores before
44555f036caSPeter Ziljstra 	 * the WAITERS check is done, otherwise contended waiters might be
44676916515SDavidlohr Bueso 	 * missed. The contended waiters will either see ww_ctx == NULL
44776916515SDavidlohr Bueso 	 * and keep spinning, or they will acquire wait_lock, add themselves
44876916515SDavidlohr Bueso 	 * to the waiter list and sleep.
44976916515SDavidlohr Bueso 	 */
45008295b3bSThomas Hellstrom 	smp_mb(); /* See comments above and below. */
45176916515SDavidlohr Bueso 
45276916515SDavidlohr Bueso 	/*
45308295b3bSThomas Hellstrom 	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
45408295b3bSThomas Hellstrom 	 *     MB		        MB
45508295b3bSThomas Hellstrom 	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
45608295b3bSThomas Hellstrom 	 *
45708295b3bSThomas Hellstrom 	 * The memory barrier above pairs with the memory barrier in
45808295b3bSThomas Hellstrom 	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
45908295b3bSThomas Hellstrom 	 * and/or !empty list.
46076916515SDavidlohr Bueso 	 */
4613ca0ff57SPeter Zijlstra 	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
46276916515SDavidlohr Bueso 		return;
46376916515SDavidlohr Bueso 
46476916515SDavidlohr Bueso 	/*
46555f036caSPeter Ziljstra 	 * Uh oh, we raced in fastpath, check if any of the waiters need to
46608295b3bSThomas Hellstrom 	 * die or wound us.
46776916515SDavidlohr Bueso 	 */
468b9c16a0eSPeter Zijlstra 	spin_lock(&lock->base.wait_lock);
46955f036caSPeter Ziljstra 	__ww_mutex_check_waiters(&lock->base, ctx);
470b9c16a0eSPeter Zijlstra 	spin_unlock(&lock->base.wait_lock);
47176916515SDavidlohr Bueso }
47276916515SDavidlohr Bueso 
47301768b42SPeter Zijlstra #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
474c516df97SNicolai Hähnle 
475c516df97SNicolai Hähnle static inline
476c516df97SNicolai Hähnle bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
477c516df97SNicolai Hähnle 			    struct mutex_waiter *waiter)
478c516df97SNicolai Hähnle {
479c516df97SNicolai Hähnle 	struct ww_mutex *ww;
480c516df97SNicolai Hähnle 
481c516df97SNicolai Hähnle 	ww = container_of(lock, struct ww_mutex, base);
482c516df97SNicolai Hähnle 
48301768b42SPeter Zijlstra 	/*
484c516df97SNicolai Hähnle 	 * If ww->ctx is set the contents are undefined; only
485c516df97SNicolai Hähnle 	 * acquiring wait_lock guarantees that they are valid
486c516df97SNicolai Hähnle 	 * when read.
487c516df97SNicolai Hähnle 	 *
488c516df97SNicolai Hähnle 	 * As such, when deadlock detection needs to be
489c516df97SNicolai Hähnle 	 * performed the optimistic spinning cannot be done.
490c516df97SNicolai Hähnle 	 *
491c516df97SNicolai Hähnle 	 * Check this in every inner iteration because we may
492c516df97SNicolai Hähnle 	 * be racing against another thread's ww_mutex_lock.
493c516df97SNicolai Hähnle 	 */
494c516df97SNicolai Hähnle 	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
495c516df97SNicolai Hähnle 		return false;
496c516df97SNicolai Hähnle 
497c516df97SNicolai Hähnle 	/*
498c516df97SNicolai Hähnle 	 * If we aren't on the wait list yet, cancel the spin
499c516df97SNicolai Hähnle 	 * if there are waiters. We want to avoid stealing the
500c516df97SNicolai Hähnle 	 * lock from a waiter with an earlier stamp, since the
501c516df97SNicolai Hähnle 	 * other thread may already own a lock that we also
502c516df97SNicolai Hähnle 	 * need.
503c516df97SNicolai Hähnle 	 */
504c516df97SNicolai Hähnle 	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
505c516df97SNicolai Hähnle 		return false;
506c516df97SNicolai Hähnle 
507c516df97SNicolai Hähnle 	/*
508c516df97SNicolai Hähnle 	 * Similarly, stop spinning if we are no longer the
509c516df97SNicolai Hähnle 	 * first waiter.
510c516df97SNicolai Hähnle 	 */
511c516df97SNicolai Hähnle 	if (waiter && !__mutex_waiter_is_first(lock, waiter))
512c516df97SNicolai Hähnle 		return false;
513c516df97SNicolai Hähnle 
514c516df97SNicolai Hähnle 	return true;
515c516df97SNicolai Hähnle }
516c516df97SNicolai Hähnle 
51701768b42SPeter Zijlstra /*
51825f13b40SNicolai Hähnle  * Look out! "owner" is an entirely speculative pointer access and not
51925f13b40SNicolai Hähnle  * reliable.
52025f13b40SNicolai Hähnle  *
52125f13b40SNicolai Hähnle  * "noinline" so that this function shows up on perf profiles.
52201768b42SPeter Zijlstra  */
52301768b42SPeter Zijlstra static noinline
52425f13b40SNicolai Hähnle bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
525c516df97SNicolai Hähnle 			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
52601768b42SPeter Zijlstra {
52701ac33c1SJason Low 	bool ret = true;
528be1f7bf2SJason Low 
52901768b42SPeter Zijlstra 	rcu_read_lock();
5303ca0ff57SPeter Zijlstra 	while (__mutex_owner(lock) == owner) {
531be1f7bf2SJason Low 		/*
532be1f7bf2SJason Low 		 * Ensure we emit the owner->on_cpu dereference _after_
53301ac33c1SJason Low 		 * checking lock->owner still matches owner. If that fails,
53401ac33c1SJason Low 		 * owner might point to freed memory. If it still matches,
535be1f7bf2SJason Low 		 * the rcu_read_lock() ensures the memory stays valid.
536be1f7bf2SJason Low 		 */
537be1f7bf2SJason Low 		barrier();
538be1f7bf2SJason Low 
53905ffc951SPan Xinhui 		/*
54005ffc951SPan Xinhui 		 * Use vcpu_is_preempted() to detect lock holder preemption.
54105ffc951SPan Xinhui 		 */
54205ffc951SPan Xinhui 		if (!owner->on_cpu || need_resched() ||
54305ffc951SPan Xinhui 				vcpu_is_preempted(task_cpu(owner))) {
544be1f7bf2SJason Low 			ret = false;
545be1f7bf2SJason Low 			break;
546be1f7bf2SJason Low 		}
54701768b42SPeter Zijlstra 
548c516df97SNicolai Hähnle 		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
54925f13b40SNicolai Hähnle 			ret = false;
55025f13b40SNicolai Hähnle 			break;
55125f13b40SNicolai Hähnle 		}
55225f13b40SNicolai Hähnle 
553f2f09a4cSChristian Borntraeger 		cpu_relax();
55401768b42SPeter Zijlstra 	}
55501768b42SPeter Zijlstra 	rcu_read_unlock();
55601768b42SPeter Zijlstra 
557be1f7bf2SJason Low 	return ret;
55801768b42SPeter Zijlstra }
55901768b42SPeter Zijlstra 
56001768b42SPeter Zijlstra /*
56101768b42SPeter Zijlstra  * Initial check for entering the mutex spinning loop
56201768b42SPeter Zijlstra  */
56301768b42SPeter Zijlstra static inline int mutex_can_spin_on_owner(struct mutex *lock)
56401768b42SPeter Zijlstra {
56501768b42SPeter Zijlstra 	struct task_struct *owner;
56601768b42SPeter Zijlstra 	int retval = 1;
56701768b42SPeter Zijlstra 
56846af29e4SJason Low 	if (need_resched())
56946af29e4SJason Low 		return 0;
57046af29e4SJason Low 
57101768b42SPeter Zijlstra 	rcu_read_lock();
5723ca0ff57SPeter Zijlstra 	owner = __mutex_owner(lock);
57305ffc951SPan Xinhui 
57405ffc951SPan Xinhui 	/*
57505ffc951SPan Xinhui 	 * Due to lock holder preemption, skip spinning if the task is not
57605ffc951SPan Xinhui 	 * on a CPU, or if its CPU is preempted.
57705ffc951SPan Xinhui 	 */
57801768b42SPeter Zijlstra 	if (owner)
57905ffc951SPan Xinhui 		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
58001768b42SPeter Zijlstra 	rcu_read_unlock();
58176916515SDavidlohr Bueso 
58276916515SDavidlohr Bueso 	/*
5833ca0ff57SPeter Zijlstra 	 * If lock->owner is not set, the mutex has been released. Return true
5843ca0ff57SPeter Zijlstra 	 * such that we'll trylock in the spin path, which is a faster option
5853ca0ff57SPeter Zijlstra 	 * than the blocking slow path.
58676916515SDavidlohr Bueso 	 */
5873ca0ff57SPeter Zijlstra 	return retval;
58876916515SDavidlohr Bueso }
58976916515SDavidlohr Bueso 
59076916515SDavidlohr Bueso /*
59176916515SDavidlohr Bueso  * Optimistic spinning.
59276916515SDavidlohr Bueso  *
59376916515SDavidlohr Bueso  * We try to spin for acquisition when we find that the lock owner
59476916515SDavidlohr Bueso  * is currently running on a (different) CPU and while we don't
59576916515SDavidlohr Bueso  * need to reschedule. The rationale is that if the lock owner is
59676916515SDavidlohr Bueso  * running, it is likely to release the lock soon.
59776916515SDavidlohr Bueso  *
59876916515SDavidlohr Bueso  * The mutex spinners are queued up using MCS lock so that only one
59976916515SDavidlohr Bueso  * spinner can compete for the mutex. However, if mutex spinning isn't
60076916515SDavidlohr Bueso  * going to happen, there is no point in going through the lock/unlock
60176916515SDavidlohr Bueso  * overhead.
60276916515SDavidlohr Bueso  *
60376916515SDavidlohr Bueso  * Returns true when the lock was taken, otherwise false, indicating
60476916515SDavidlohr Bueso  * that we need to jump to the slowpath and sleep.
605b341afb3SWaiman Long  *
606b341afb3SWaiman Long  * The waiter flag is set to true if the spinner is a waiter in the wait
607b341afb3SWaiman Long  * queue. The waiter-spinner will spin on the lock directly and concurrently
608b341afb3SWaiman Long  * with the spinner at the head of the OSQ, if present, until the owner is
609b341afb3SWaiman Long  * changed to itself.
61076916515SDavidlohr Bueso  */
611427b1820SPeter Zijlstra static __always_inline bool
612427b1820SPeter Zijlstra mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
613c516df97SNicolai Hähnle 		      const bool use_ww_ctx, struct mutex_waiter *waiter)
61476916515SDavidlohr Bueso {
615b341afb3SWaiman Long 	if (!waiter) {
616b341afb3SWaiman Long 		/*
617b341afb3SWaiman Long 		 * The purpose of the mutex_can_spin_on_owner() function is
618b341afb3SWaiman Long 		 * to eliminate the overhead of osq_lock() and osq_unlock()
619b341afb3SWaiman Long 		 * in case spinning isn't possible. As a waiter-spinner
620b341afb3SWaiman Long 		 * is not going to take OSQ lock anyway, there is no need
621b341afb3SWaiman Long 		 * to call mutex_can_spin_on_owner().
622b341afb3SWaiman Long 		 */
62376916515SDavidlohr Bueso 		if (!mutex_can_spin_on_owner(lock))
624b341afb3SWaiman Long 			goto fail;
62576916515SDavidlohr Bueso 
626e42f678aSDavidlohr Bueso 		/*
627e42f678aSDavidlohr Bueso 		 * In order to avoid a stampede of mutex spinners trying to
628e42f678aSDavidlohr Bueso 		 * acquire the mutex all at once, the spinners need to take a
629e42f678aSDavidlohr Bueso 		 * MCS (queued) lock first before spinning on the owner field.
630e42f678aSDavidlohr Bueso 		 */
63176916515SDavidlohr Bueso 		if (!osq_lock(&lock->osq))
632b341afb3SWaiman Long 			goto fail;
633b341afb3SWaiman Long 	}
63476916515SDavidlohr Bueso 
635b341afb3SWaiman Long 	for (;;) {
63676916515SDavidlohr Bueso 		struct task_struct *owner;
63776916515SDavidlohr Bueso 
638e274795eSPeter Zijlstra 		/* Try to acquire the mutex... */
639e274795eSPeter Zijlstra 		owner = __mutex_trylock_or_owner(lock);
640e274795eSPeter Zijlstra 		if (!owner)
641e274795eSPeter Zijlstra 			break;
64276916515SDavidlohr Bueso 
64376916515SDavidlohr Bueso 		/*
644e274795eSPeter Zijlstra 		 * There's an owner, wait for it to either
64576916515SDavidlohr Bueso 		 * release the lock or go to sleep.
64676916515SDavidlohr Bueso 		 */
647c516df97SNicolai Hähnle 		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
648b341afb3SWaiman Long 			goto fail_unlock;
64976916515SDavidlohr Bueso 
65076916515SDavidlohr Bueso 		/*
65176916515SDavidlohr Bueso 		 * The cpu_relax() call is a compiler barrier which forces
65276916515SDavidlohr Bueso 		 * everything in this loop to be re-loaded. We don't need
65376916515SDavidlohr Bueso 		 * memory barriers as we'll eventually observe the right
65476916515SDavidlohr Bueso 		 * values at the cost of a few extra spins.
65576916515SDavidlohr Bueso 		 */
656f2f09a4cSChristian Borntraeger 		cpu_relax();
65776916515SDavidlohr Bueso 	}
65876916515SDavidlohr Bueso 
659b341afb3SWaiman Long 	if (!waiter)
66076916515SDavidlohr Bueso 		osq_unlock(&lock->osq);
661b341afb3SWaiman Long 
662b341afb3SWaiman Long 	return true;
663b341afb3SWaiman Long 
664b341afb3SWaiman Long 
665b341afb3SWaiman Long fail_unlock:
666b341afb3SWaiman Long 	if (!waiter)
667b341afb3SWaiman Long 		osq_unlock(&lock->osq);
668b341afb3SWaiman Long 
669b341afb3SWaiman Long fail:
67076916515SDavidlohr Bueso 	/*
67176916515SDavidlohr Bueso 	 * If we fell out of the spin path because of need_resched(),
67276916515SDavidlohr Bueso 	 * reschedule now, before we try-lock the mutex. This avoids getting
67376916515SDavidlohr Bueso 	 * scheduled out right after we obtained the mutex.
67476916515SDavidlohr Bueso 	 */
6756f942a1fSPeter Zijlstra 	if (need_resched()) {
6766f942a1fSPeter Zijlstra 		/*
6776f942a1fSPeter Zijlstra 		 * We _should_ have TASK_RUNNING here, but just in case
6786f942a1fSPeter Zijlstra 		 * we do not, make it so, otherwise we might get stuck.
6796f942a1fSPeter Zijlstra 		 */
6806f942a1fSPeter Zijlstra 		__set_current_state(TASK_RUNNING);
68176916515SDavidlohr Bueso 		schedule_preempt_disabled();
6826f942a1fSPeter Zijlstra 	}
68376916515SDavidlohr Bueso 
68476916515SDavidlohr Bueso 	return false;
68576916515SDavidlohr Bueso }
68676916515SDavidlohr Bueso #else
687427b1820SPeter Zijlstra static __always_inline bool
688427b1820SPeter Zijlstra mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
689c516df97SNicolai Hähnle 		      const bool use_ww_ctx, struct mutex_waiter *waiter)
69076916515SDavidlohr Bueso {
69176916515SDavidlohr Bueso 	return false;
69276916515SDavidlohr Bueso }
69301768b42SPeter Zijlstra #endif
69401768b42SPeter Zijlstra 
6953ca0ff57SPeter Zijlstra static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
69601768b42SPeter Zijlstra 
69701768b42SPeter Zijlstra /**
69801768b42SPeter Zijlstra  * mutex_unlock - release the mutex
69901768b42SPeter Zijlstra  * @lock: the mutex to be released
70001768b42SPeter Zijlstra  *
70101768b42SPeter Zijlstra  * Unlock a mutex that has been locked by this task previously.
70201768b42SPeter Zijlstra  *
70301768b42SPeter Zijlstra  * This function must not be used in interrupt context. Unlocking
70401768b42SPeter Zijlstra  * of a mutex that is not locked is not allowed.
70501768b42SPeter Zijlstra  *
70601768b42SPeter Zijlstra  * This function is similar to (but not equivalent to) up().
70701768b42SPeter Zijlstra  */
70801768b42SPeter Zijlstra void __sched mutex_unlock(struct mutex *lock)
70901768b42SPeter Zijlstra {
7103ca0ff57SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
7113ca0ff57SPeter Zijlstra 	if (__mutex_unlock_fast(lock))
7123ca0ff57SPeter Zijlstra 		return;
71301768b42SPeter Zijlstra #endif
7143ca0ff57SPeter Zijlstra 	__mutex_unlock_slowpath(lock, _RET_IP_);
71501768b42SPeter Zijlstra }
71601768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_unlock);
71701768b42SPeter Zijlstra 
71801768b42SPeter Zijlstra /**
71901768b42SPeter Zijlstra  * ww_mutex_unlock - release the w/w mutex
72001768b42SPeter Zijlstra  * @lock: the mutex to be released
72101768b42SPeter Zijlstra  *
72201768b42SPeter Zijlstra  * Unlock a mutex that has been locked by this task previously with any of the
72301768b42SPeter Zijlstra  * ww_mutex_lock* functions (with or without an acquire context). It is
72401768b42SPeter Zijlstra  * forbidden to release the locks after releasing the acquire context.
72501768b42SPeter Zijlstra  *
72601768b42SPeter Zijlstra  * This function must not be used in interrupt context. Unlocking
72701768b42SPeter Zijlstra  * of an unlocked mutex is not allowed.
72801768b42SPeter Zijlstra  */
72901768b42SPeter Zijlstra void __sched ww_mutex_unlock(struct ww_mutex *lock)
73001768b42SPeter Zijlstra {
73101768b42SPeter Zijlstra 	/*
73201768b42SPeter Zijlstra 	 * The unlocking fastpath is the 0->1 transition from 'locked'
73301768b42SPeter Zijlstra 	 * into 'unlocked' state:
73401768b42SPeter Zijlstra 	 */
73501768b42SPeter Zijlstra 	if (lock->ctx) {
73601768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
73701768b42SPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
73801768b42SPeter Zijlstra #endif
73901768b42SPeter Zijlstra 		if (lock->ctx->acquired > 0)
74001768b42SPeter Zijlstra 			lock->ctx->acquired--;
74101768b42SPeter Zijlstra 		lock->ctx = NULL;
74201768b42SPeter Zijlstra 	}
74301768b42SPeter Zijlstra 
7443ca0ff57SPeter Zijlstra 	mutex_unlock(&lock->base);
74501768b42SPeter Zijlstra }
74601768b42SPeter Zijlstra EXPORT_SYMBOL(ww_mutex_unlock);
74701768b42SPeter Zijlstra 
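
/*
 * Simplified two-lock usage sketch, not part of this file; 'my_class',
 * 'a' and 'b' are made-up names (see Documentation/locking/ for the
 * full ww_mutex pattern):
 *
 *	static DEFINE_WW_CLASS(my_class);
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &my_class);
 *	ret = ww_mutex_lock(&a->lock, &ctx);
 *	if (!ret)
 *		ret = ww_mutex_lock(&b->lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		// killed: back off, then sleep on the contended lock
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_lock_slow(&b->lock, &ctx);
 *		// ...then retry a->lock and any remaining locks
 *	}
 *	// ...use the protected objects...
 *	ww_mutex_unlock(&b->lock);
 *	ww_mutex_unlock(&a->lock);
 *	ww_acquire_fini(&ctx);	// only after all ww_mutex_unlock() calls
 */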
74855f036caSPeter Ziljstra 
74955f036caSPeter Ziljstra static __always_inline int __sched
75055f036caSPeter Ziljstra __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
75155f036caSPeter Ziljstra {
75255f036caSPeter Ziljstra 	if (ww_ctx->acquired > 0) {
75355f036caSPeter Ziljstra #ifdef CONFIG_DEBUG_MUTEXES
75455f036caSPeter Ziljstra 		struct ww_mutex *ww;
75555f036caSPeter Ziljstra 
75655f036caSPeter Ziljstra 		ww = container_of(lock, struct ww_mutex, base);
75755f036caSPeter Ziljstra 		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
75855f036caSPeter Ziljstra 		ww_ctx->contending_lock = ww;
75955f036caSPeter Ziljstra #endif
76055f036caSPeter Ziljstra 		return -EDEADLK;
76155f036caSPeter Ziljstra 	}
76255f036caSPeter Ziljstra 
76355f036caSPeter Ziljstra 	return 0;
76455f036caSPeter Ziljstra }
76555f036caSPeter Ziljstra 
76655f036caSPeter Ziljstra 
76755f036caSPeter Ziljstra /*
76808295b3bSThomas Hellstrom  * Check the wound condition for the current lock acquire.
76908295b3bSThomas Hellstrom  *
77008295b3bSThomas Hellstrom  * Wound-Wait: If we're wounded, kill ourself.
77155f036caSPeter Ziljstra  *
77255f036caSPeter Ziljstra  * Wait-Die: If we're trying to acquire a lock already held by an older
77355f036caSPeter Ziljstra  *           context, kill ourselves.
77455f036caSPeter Ziljstra  *
77555f036caSPeter Ziljstra  * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
77655f036caSPeter Ziljstra  * look at waiters before us in the wait-list.
77755f036caSPeter Ziljstra  */
77801768b42SPeter Zijlstra static inline int __sched
77955f036caSPeter Ziljstra __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
780200b1874SNicolai Hähnle 		      struct ww_acquire_ctx *ctx)
78101768b42SPeter Zijlstra {
78201768b42SPeter Zijlstra 	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
7834d3199e4SDavidlohr Bueso 	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
784200b1874SNicolai Hähnle 	struct mutex_waiter *cur;
78501768b42SPeter Zijlstra 
78655f036caSPeter Ziljstra 	if (ctx->acquired == 0)
78755f036caSPeter Ziljstra 		return 0;
78855f036caSPeter Ziljstra 
78908295b3bSThomas Hellstrom 	if (!ctx->is_wait_die) {
79008295b3bSThomas Hellstrom 		if (ctx->wounded)
79108295b3bSThomas Hellstrom 			return __ww_mutex_kill(lock, ctx);
79208295b3bSThomas Hellstrom 
79308295b3bSThomas Hellstrom 		return 0;
79408295b3bSThomas Hellstrom 	}
79508295b3bSThomas Hellstrom 
796200b1874SNicolai Hähnle 	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
79755f036caSPeter Ziljstra 		return __ww_mutex_kill(lock, ctx);
798200b1874SNicolai Hähnle 
799200b1874SNicolai Hähnle 	/*
800200b1874SNicolai Hähnle 	 * If there is a waiter in front of us that has a context, then its
80155f036caSPeter Ziljstra 	 * stamp is earlier than ours and we must kill ourself.
802200b1874SNicolai Hähnle 	 */
803200b1874SNicolai Hähnle 	cur = waiter;
804200b1874SNicolai Hähnle 	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
80555f036caSPeter Ziljstra 		if (!cur->ww_ctx)
80655f036caSPeter Ziljstra 			continue;
80755f036caSPeter Ziljstra 
80855f036caSPeter Ziljstra 		return __ww_mutex_kill(lock, ctx);
809200b1874SNicolai Hähnle 	}
810200b1874SNicolai Hähnle 
81101768b42SPeter Zijlstra 	return 0;
81201768b42SPeter Zijlstra }
81301768b42SPeter Zijlstra 
81455f036caSPeter Ziljstra /*
81555f036caSPeter Ziljstra  * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
81655f036caSPeter Ziljstra  * first. Such that older contexts are preferred to acquire the lock over
81755f036caSPeter Ziljstra  * younger contexts.
81855f036caSPeter Ziljstra  *
81955f036caSPeter Ziljstra  * Waiters without context are interspersed in FIFO order.
82055f036caSPeter Ziljstra  *
82155f036caSPeter Ziljstra  * Furthermore, for Wait-Die kill ourself immediately when possible (there are
82208295b3bSThomas Hellstrom  * older contexts already waiting) to avoid unnecessary waiting and for
82308295b3bSThomas Hellstrom  * Wound-Wait ensure we wound the owning context when it is younger.
82455f036caSPeter Ziljstra  */
8256baa5c60SNicolai Hähnle static inline int __sched
8266baa5c60SNicolai Hähnle __ww_mutex_add_waiter(struct mutex_waiter *waiter,
8276baa5c60SNicolai Hähnle 		      struct mutex *lock,
8286baa5c60SNicolai Hähnle 		      struct ww_acquire_ctx *ww_ctx)
8296baa5c60SNicolai Hähnle {
8306baa5c60SNicolai Hähnle 	struct mutex_waiter *cur;
8316baa5c60SNicolai Hähnle 	struct list_head *pos;
83208295b3bSThomas Hellstrom 	bool is_wait_die;
8336baa5c60SNicolai Hähnle 
8346baa5c60SNicolai Hähnle 	if (!ww_ctx) {
83508295b3bSThomas Hellstrom 		__mutex_add_waiter(lock, waiter, &lock->wait_list);
8366baa5c60SNicolai Hähnle 		return 0;
8376baa5c60SNicolai Hähnle 	}
8386baa5c60SNicolai Hähnle 
83908295b3bSThomas Hellstrom 	is_wait_die = ww_ctx->is_wait_die;
84008295b3bSThomas Hellstrom 
8416baa5c60SNicolai Hähnle 	/*
8426baa5c60SNicolai Hähnle 	 * Add the waiter before the first waiter with a higher stamp.
8436baa5c60SNicolai Hähnle 	 * Waiters without a context are skipped to avoid starving
84408295b3bSThomas Hellstrom 	 * them. Wait-Die waiters may die here. Wound-Wait waiters
84508295b3bSThomas Hellstrom 	 * never die here, but they are sorted in stamp order and
84608295b3bSThomas Hellstrom 	 * may wound the lock holder.
8476baa5c60SNicolai Hähnle 	 */
8486baa5c60SNicolai Hähnle 	pos = &lock->wait_list;
8496baa5c60SNicolai Hähnle 	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
8506baa5c60SNicolai Hähnle 		if (!cur->ww_ctx)
8516baa5c60SNicolai Hähnle 			continue;
8526baa5c60SNicolai Hähnle 
8536baa5c60SNicolai Hähnle 		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
85455f036caSPeter Ziljstra 			/*
85555f036caSPeter Ziljstra 			 * Wait-Die: if we find an older context waiting, there
85655f036caSPeter Ziljstra 			 * is no point in queueing behind it, as we'd have to
85755f036caSPeter Ziljstra 			 * die the moment it would acquire the lock.
85855f036caSPeter Ziljstra 			 */
85908295b3bSThomas Hellstrom 			if (is_wait_die) {
86055f036caSPeter Ziljstra 				int ret = __ww_mutex_kill(lock, ww_ctx);
8616baa5c60SNicolai Hähnle 
86255f036caSPeter Ziljstra 				if (ret)
86355f036caSPeter Ziljstra 					return ret;
86408295b3bSThomas Hellstrom 			}
8656baa5c60SNicolai Hähnle 
8666baa5c60SNicolai Hähnle 			break;
8676baa5c60SNicolai Hähnle 		}
8686baa5c60SNicolai Hähnle 
8696baa5c60SNicolai Hähnle 		pos = &cur->list;
870200b1874SNicolai Hähnle 
87155f036caSPeter Ziljstra 		/* Wait-Die: ensure younger waiters die. */
87255f036caSPeter Ziljstra 		__ww_mutex_die(lock, cur, ww_ctx);
8736baa5c60SNicolai Hähnle 	}
8746baa5c60SNicolai Hähnle 
87508295b3bSThomas Hellstrom 	__mutex_add_waiter(lock, waiter, pos);
87608295b3bSThomas Hellstrom 
87708295b3bSThomas Hellstrom 	/*
87808295b3bSThomas Hellstrom 	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
87908295b3bSThomas Hellstrom 	 * wound it such that we might proceed.
88008295b3bSThomas Hellstrom 	 */
88108295b3bSThomas Hellstrom 	if (!is_wait_die) {
88208295b3bSThomas Hellstrom 		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
88308295b3bSThomas Hellstrom 
88408295b3bSThomas Hellstrom 		/*
88508295b3bSThomas Hellstrom 		 * See ww_mutex_set_context_fastpath(). Orders setting
88608295b3bSThomas Hellstrom 		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
88708295b3bSThomas Hellstrom 		 * such that either we or the fastpath will wound @ww->ctx.
88808295b3bSThomas Hellstrom 		 */
88908295b3bSThomas Hellstrom 		smp_mb();
89008295b3bSThomas Hellstrom 		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
89108295b3bSThomas Hellstrom 	}
89255f036caSPeter Ziljstra 
89301768b42SPeter Zijlstra 	return 0;
89401768b42SPeter Zijlstra }
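
/*
 * Illustrative wait-list shape after several __ww_mutex_add_waiter()
 * calls, not part of this file. W(n) is a waiter with stamp n, W(-) one
 * without a context; the list is kept oldest (smallest stamp) first,
 * with context-less waiters left in FIFO order where they were queued:
 *
 *	wait_list: W(3) -> W(-) -> W(7) -> W(-) -> W(12)
 *
 * A new W(9) is inserted between W(-) and W(12); under Wait-Die a new
 * W(9) that already held other locks would instead die upon finding the
 * older W(7) ahead of it.
 */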
89501768b42SPeter Zijlstra 
89601768b42SPeter Zijlstra /*
89701768b42SPeter Zijlstra  * Lock a mutex (possibly interruptible), slowpath:
89801768b42SPeter Zijlstra  */
89901768b42SPeter Zijlstra static __always_inline int __sched
90001768b42SPeter Zijlstra __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90101768b42SPeter Zijlstra 		    struct lockdep_map *nest_lock, unsigned long ip,
90201768b42SPeter Zijlstra 		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
90301768b42SPeter Zijlstra {
90401768b42SPeter Zijlstra 	struct mutex_waiter waiter;
9059d659ae1SPeter Zijlstra 	bool first = false;
906a40ca565SWaiman Long 	struct ww_mutex *ww;
90701768b42SPeter Zijlstra 	int ret;
90801768b42SPeter Zijlstra 
909427b1820SPeter Zijlstra 	might_sleep();
910ea9e0fb8SNicolai Hähnle 
911*6c11c6e3SSebastian Andrzej Siewior #ifdef CONFIG_DEBUG_MUTEXES
912*6c11c6e3SSebastian Andrzej Siewior 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
913*6c11c6e3SSebastian Andrzej Siewior #endif
914*6c11c6e3SSebastian Andrzej Siewior 
915a40ca565SWaiman Long 	ww = container_of(lock, struct ww_mutex, base);
916ea9e0fb8SNicolai Hähnle 	if (use_ww_ctx && ww_ctx) {
9170422e83dSChris Wilson 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
9180422e83dSChris Wilson 			return -EALREADY;
91908295b3bSThomas Hellstrom 
92008295b3bSThomas Hellstrom 		/*
92108295b3bSThomas Hellstrom 		 * Reset the wounded flag after a kill. No other process can
92208295b3bSThomas Hellstrom 		 * race and wound us here since they can't have a valid owner
92308295b3bSThomas Hellstrom 		 * pointer if we don't have any locks held.
92408295b3bSThomas Hellstrom 		 */
92508295b3bSThomas Hellstrom 		if (ww_ctx->acquired == 0)
92608295b3bSThomas Hellstrom 			ww_ctx->wounded = 0;
9270422e83dSChris Wilson 	}
9280422e83dSChris Wilson 
92901768b42SPeter Zijlstra 	preempt_disable();
93001768b42SPeter Zijlstra 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
93101768b42SPeter Zijlstra 
932e274795eSPeter Zijlstra 	if (__mutex_trylock(lock) ||
933c516df97SNicolai Hähnle 	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
93476916515SDavidlohr Bueso 		/* got the lock, yay! */
9353ca0ff57SPeter Zijlstra 		lock_acquired(&lock->dep_map, ip);
936ea9e0fb8SNicolai Hähnle 		if (use_ww_ctx && ww_ctx)
9373ca0ff57SPeter Zijlstra 			ww_mutex_set_context_fastpath(ww, ww_ctx);
93801768b42SPeter Zijlstra 		preempt_enable();
93901768b42SPeter Zijlstra 		return 0;
94001768b42SPeter Zijlstra 	}
94101768b42SPeter Zijlstra 
942b9c16a0eSPeter Zijlstra 	spin_lock(&lock->wait_lock);
9431e820c96SJason Low 	/*
9443ca0ff57SPeter Zijlstra 	 * After waiting to acquire the wait_lock, try again.
9451e820c96SJason Low 	 */
946659cf9f5SNicolai Hähnle 	if (__mutex_trylock(lock)) {
947659cf9f5SNicolai Hähnle 		if (use_ww_ctx && ww_ctx)
94855f036caSPeter Ziljstra 			__ww_mutex_check_waiters(lock, ww_ctx);
949659cf9f5SNicolai Hähnle 
95001768b42SPeter Zijlstra 		goto skip_wait;
951659cf9f5SNicolai Hähnle 	}
95201768b42SPeter Zijlstra 
95301768b42SPeter Zijlstra 	debug_mutex_lock_common(lock, &waiter);
95401768b42SPeter Zijlstra 
9556baa5c60SNicolai Hähnle 	lock_contended(&lock->dep_map, ip);
9566baa5c60SNicolai Hähnle 
9576baa5c60SNicolai Hähnle 	if (!use_ww_ctx) {
95801768b42SPeter Zijlstra 		/* add waiting tasks to the end of the waitqueue (FIFO): */
95908295b3bSThomas Hellstrom 		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
96008295b3bSThomas Hellstrom 
961977625a6SNicolai Hähnle 
962977625a6SNicolai Hähnle #ifdef CONFIG_DEBUG_MUTEXES
963977625a6SNicolai Hähnle 		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
964977625a6SNicolai Hähnle #endif
9656baa5c60SNicolai Hähnle 	} else {
96655f036caSPeter Ziljstra 		/*
96755f036caSPeter Ziljstra 		 * Add in stamp order, waking up waiters that must kill
96855f036caSPeter Ziljstra 		 * themselves.
96955f036caSPeter Ziljstra 		 */
9706baa5c60SNicolai Hähnle 		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
9716baa5c60SNicolai Hähnle 		if (ret)
97255f036caSPeter Ziljstra 			goto err_early_kill;
9736baa5c60SNicolai Hähnle 
9746baa5c60SNicolai Hähnle 		waiter.ww_ctx = ww_ctx;
9756baa5c60SNicolai Hähnle 	}
9766baa5c60SNicolai Hähnle 
977d269a8b8SDavidlohr Bueso 	waiter.task = current;
97801768b42SPeter Zijlstra 
979642fa448SDavidlohr Bueso 	set_current_state(state);
98001768b42SPeter Zijlstra 	for (;;) {
9815bbd7e64SPeter Zijlstra 		/*
9825bbd7e64SPeter Zijlstra 		 * Once we hold wait_lock, we're serialized against
9835bbd7e64SPeter Zijlstra 		 * mutex_unlock() handing the lock off to us, do a trylock
9845bbd7e64SPeter Zijlstra 		 * before testing the error conditions to make sure we pick up
9855bbd7e64SPeter Zijlstra 		 * the handoff.
9865bbd7e64SPeter Zijlstra 		 */
987e274795eSPeter Zijlstra 		if (__mutex_trylock(lock))
9885bbd7e64SPeter Zijlstra 			goto acquired;
98901768b42SPeter Zijlstra 
99001768b42SPeter Zijlstra 		/*
99155f036caSPeter Ziljstra 		 * Check for signals and kill conditions while holding
9925bbd7e64SPeter Zijlstra 		 * wait_lock. This ensures the lock cancellation is ordered
9935bbd7e64SPeter Zijlstra 		 * against mutex_unlock() and wake-ups do not go missing.
99401768b42SPeter Zijlstra 		 */
9953bb5f4acSDavidlohr Bueso 		if (signal_pending_state(state, current)) {
99601768b42SPeter Zijlstra 			ret = -EINTR;
99701768b42SPeter Zijlstra 			goto err;
99801768b42SPeter Zijlstra 		}
99901768b42SPeter Zijlstra 
100055f036caSPeter Ziljstra 		if (use_ww_ctx && ww_ctx) {
100155f036caSPeter Ziljstra 			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
100201768b42SPeter Zijlstra 			if (ret)
100301768b42SPeter Zijlstra 				goto err;
100401768b42SPeter Zijlstra 		}
100501768b42SPeter Zijlstra 
1006b9c16a0eSPeter Zijlstra 		spin_unlock(&lock->wait_lock);
100701768b42SPeter Zijlstra 		schedule_preempt_disabled();
10089d659ae1SPeter Zijlstra 
10096baa5c60SNicolai Hähnle 		/*
10106baa5c60SNicolai Hähnle 		 * ww_mutex needs to always recheck its position since its waiter
10116baa5c60SNicolai Hähnle 		 * list is not FIFO ordered.
10126baa5c60SNicolai Hähnle 		 */
10136baa5c60SNicolai Hähnle 		if ((use_ww_ctx && ww_ctx) || !first) {
10146baa5c60SNicolai Hähnle 			first = __mutex_waiter_is_first(lock, &waiter);
10156baa5c60SNicolai Hähnle 			if (first)
10169d659ae1SPeter Zijlstra 				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
10179d659ae1SPeter Zijlstra 		}
10185bbd7e64SPeter Zijlstra 
1019642fa448SDavidlohr Bueso 		set_current_state(state);
10205bbd7e64SPeter Zijlstra 		/*
10215bbd7e64SPeter Zijlstra 		 * Here we order against unlock; we must either see it change
10225bbd7e64SPeter Zijlstra 		 * state back to RUNNING and fall through the next schedule(),
10235bbd7e64SPeter Zijlstra 		 * or we must see its unlock and acquire.
10245bbd7e64SPeter Zijlstra 		 */
1025e274795eSPeter Zijlstra 		if (__mutex_trylock(lock) ||
1026c516df97SNicolai Hähnle 		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
10275bbd7e64SPeter Zijlstra 			break;
10285bbd7e64SPeter Zijlstra 
1029b9c16a0eSPeter Zijlstra 		spin_lock(&lock->wait_lock);
103001768b42SPeter Zijlstra 	}
1031b9c16a0eSPeter Zijlstra 	spin_lock(&lock->wait_lock);
10325bbd7e64SPeter Zijlstra acquired:
1033642fa448SDavidlohr Bueso 	__set_current_state(TASK_RUNNING);
103451587bcfSDavidlohr Bueso 
103508295b3bSThomas Hellstrom 	if (use_ww_ctx && ww_ctx) {
103608295b3bSThomas Hellstrom 		/*
103708295b3bSThomas Hellstrom 		 * Wound-Wait; we stole the lock (!first_waiter), so check the
103808295b3bSThomas Hellstrom 		 * waiters, as anyone might want to wound us.
103908295b3bSThomas Hellstrom 		 */
104008295b3bSThomas Hellstrom 		if (!ww_ctx->is_wait_die &&
104108295b3bSThomas Hellstrom 		    !__mutex_waiter_is_first(lock, &waiter))
104208295b3bSThomas Hellstrom 			__ww_mutex_check_waiters(lock, ww_ctx);
104308295b3bSThomas Hellstrom 	}
104408295b3bSThomas Hellstrom 
1045d269a8b8SDavidlohr Bueso 	mutex_remove_waiter(lock, &waiter, current);
104601768b42SPeter Zijlstra 	if (likely(list_empty(&lock->wait_list)))
10479d659ae1SPeter Zijlstra 		__mutex_clear_flag(lock, MUTEX_FLAGS);
10483ca0ff57SPeter Zijlstra 
104901768b42SPeter Zijlstra 	debug_mutex_free_waiter(&waiter);
105001768b42SPeter Zijlstra 
105101768b42SPeter Zijlstra skip_wait:
105201768b42SPeter Zijlstra 	/* got the lock - cleanup and rejoice! */
105301768b42SPeter Zijlstra 	lock_acquired(&lock->dep_map, ip);
105401768b42SPeter Zijlstra 
1055ea9e0fb8SNicolai Hähnle 	if (use_ww_ctx && ww_ctx)
105655f036caSPeter Ziljstra 		ww_mutex_lock_acquired(ww, ww_ctx);
105701768b42SPeter Zijlstra 
1058b9c16a0eSPeter Zijlstra 	spin_unlock(&lock->wait_lock);
105901768b42SPeter Zijlstra 	preempt_enable();
106001768b42SPeter Zijlstra 	return 0;
106101768b42SPeter Zijlstra 
106201768b42SPeter Zijlstra err:
1063642fa448SDavidlohr Bueso 	__set_current_state(TASK_RUNNING);
1064d269a8b8SDavidlohr Bueso 	mutex_remove_waiter(lock, &waiter, current);
106555f036caSPeter Ziljstra err_early_kill:
1066b9c16a0eSPeter Zijlstra 	spin_unlock(&lock->wait_lock);
106701768b42SPeter Zijlstra 	debug_mutex_free_waiter(&waiter);
106801768b42SPeter Zijlstra 	mutex_release(&lock->dep_map, 1, ip);
106901768b42SPeter Zijlstra 	preempt_enable();
107001768b42SPeter Zijlstra 	return ret;
107101768b42SPeter Zijlstra }
107201768b42SPeter Zijlstra 
1073427b1820SPeter Zijlstra static int __sched
1074427b1820SPeter Zijlstra __mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1075427b1820SPeter Zijlstra 	     struct lockdep_map *nest_lock, unsigned long ip)
1076427b1820SPeter Zijlstra {
1077427b1820SPeter Zijlstra 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
1078427b1820SPeter Zijlstra }
1079427b1820SPeter Zijlstra 
1080427b1820SPeter Zijlstra static int __sched
1081427b1820SPeter Zijlstra __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1082427b1820SPeter Zijlstra 		struct lockdep_map *nest_lock, unsigned long ip,
1083427b1820SPeter Zijlstra 		struct ww_acquire_ctx *ww_ctx)
1084427b1820SPeter Zijlstra {
1085427b1820SPeter Zijlstra 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
1086427b1820SPeter Zijlstra }
1087427b1820SPeter Zijlstra 
108801768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_LOCK_ALLOC
108901768b42SPeter Zijlstra void __sched
109001768b42SPeter Zijlstra mutex_lock_nested(struct mutex *lock, unsigned int subclass)
109101768b42SPeter Zijlstra {
1092427b1820SPeter Zijlstra 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
109301768b42SPeter Zijlstra }
109401768b42SPeter Zijlstra 
109501768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(mutex_lock_nested);
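
/*
 * A sketch of the annotation this exists for (hypothetical parent/child
 * objects whose locks share one lock class): the subclass tells lockdep
 * the nesting is intentional rather than a self-deadlock. For one level
 * of nesting, SINGLE_DEPTH_NESTING is the conventional subclass:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */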
109601768b42SPeter Zijlstra 
109701768b42SPeter Zijlstra void __sched
109801768b42SPeter Zijlstra _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
109901768b42SPeter Zijlstra {
1100427b1820SPeter Zijlstra 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
110101768b42SPeter Zijlstra }
110201768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
110301768b42SPeter Zijlstra 
110401768b42SPeter Zijlstra int __sched
110501768b42SPeter Zijlstra mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
110601768b42SPeter Zijlstra {
1107427b1820SPeter Zijlstra 	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
110801768b42SPeter Zijlstra }
110901768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
111001768b42SPeter Zijlstra 
111101768b42SPeter Zijlstra int __sched
111201768b42SPeter Zijlstra mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
111301768b42SPeter Zijlstra {
1114427b1820SPeter Zijlstra 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
111501768b42SPeter Zijlstra }
111601768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
111701768b42SPeter Zijlstra 
11181460cb65STejun Heo void __sched
11191460cb65STejun Heo mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
11201460cb65STejun Heo {
11211460cb65STejun Heo 	int token;
11221460cb65STejun Heo 
11231460cb65STejun Heo 	might_sleep();
11241460cb65STejun Heo 
11251460cb65STejun Heo 	token = io_schedule_prepare();
11261460cb65STejun Heo 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
11271460cb65STejun Heo 			    subclass, NULL, _RET_IP_, NULL, 0);
11281460cb65STejun Heo 	io_schedule_finish(token);
11291460cb65STejun Heo }
11301460cb65STejun Heo EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
11311460cb65STejun Heo 
113201768b42SPeter Zijlstra static inline int
113301768b42SPeter Zijlstra ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
113401768b42SPeter Zijlstra {
113501768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
113601768b42SPeter Zijlstra 	unsigned tmp;
113701768b42SPeter Zijlstra 
113801768b42SPeter Zijlstra 	if (ctx->deadlock_inject_countdown-- == 0) {
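		/*
		 * Re-arm with a geometrically growing interval (roughly
		 * 3.5x per trigger, saturating at UINT_MAX) so injected
		 * -EDEADLK failures become rarer over a long run.
		 */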
113901768b42SPeter Zijlstra 		tmp = ctx->deadlock_inject_interval;
114001768b42SPeter Zijlstra 		if (tmp > UINT_MAX/4)
114101768b42SPeter Zijlstra 			tmp = UINT_MAX;
114201768b42SPeter Zijlstra 		else
114301768b42SPeter Zijlstra 			tmp = tmp*2 + tmp + tmp/2;
114401768b42SPeter Zijlstra 
114501768b42SPeter Zijlstra 		ctx->deadlock_inject_interval = tmp;
114601768b42SPeter Zijlstra 		ctx->deadlock_inject_countdown = tmp;
114701768b42SPeter Zijlstra 		ctx->contending_lock = lock;
114801768b42SPeter Zijlstra 
114901768b42SPeter Zijlstra 		ww_mutex_unlock(lock);
115001768b42SPeter Zijlstra 
115101768b42SPeter Zijlstra 		return -EDEADLK;
115201768b42SPeter Zijlstra 	}
115301768b42SPeter Zijlstra #endif
115401768b42SPeter Zijlstra 
115501768b42SPeter Zijlstra 	return 0;
115601768b42SPeter Zijlstra }
115701768b42SPeter Zijlstra 
115801768b42SPeter Zijlstra int __sched
1159c5470b22SNicolai Hähnle ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
116001768b42SPeter Zijlstra {
116101768b42SPeter Zijlstra 	int ret;
116201768b42SPeter Zijlstra 
116301768b42SPeter Zijlstra 	might_sleep();
1164427b1820SPeter Zijlstra 	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
1165ea9e0fb8SNicolai Hähnle 			       0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1166427b1820SPeter Zijlstra 			       ctx);
1167ea9e0fb8SNicolai Hähnle 	if (!ret && ctx && ctx->acquired > 1)
116801768b42SPeter Zijlstra 		return ww_mutex_deadlock_injection(lock, ctx);
116901768b42SPeter Zijlstra 
117001768b42SPeter Zijlstra 	return ret;
117101768b42SPeter Zijlstra }
1172c5470b22SNicolai Hähnle EXPORT_SYMBOL_GPL(ww_mutex_lock);
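
/*
 * A caller-side sketch of the -EDEADLK protocol this implements (cf.
 * Documentation/locking/ww-mutex-design.rst); @a and @b are hypothetical
 * objects whose locks share one ww_class (DEFINE_WW_CLASS). The first
 * lock in a context cannot fail with -EDEADLK, only later ones can:
 *
 *	ww_acquire_init(&ctx, &ww_class);
 *	ret = ww_mutex_lock(&a->lock, &ctx);
 *	if (!ret)
 *		ret = ww_mutex_lock(&b->lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_lock_slow(&b->lock, &ctx);
 *		(then retake a->lock with ww_mutex_lock() and continue)
 *	}
 *	...
 *	ww_acquire_fini(&ctx);
 */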
117301768b42SPeter Zijlstra 
117401768b42SPeter Zijlstra int __sched
1175c5470b22SNicolai Hähnle ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
117601768b42SPeter Zijlstra {
117701768b42SPeter Zijlstra 	int ret;
117801768b42SPeter Zijlstra 
117901768b42SPeter Zijlstra 	might_sleep();
1180427b1820SPeter Zijlstra 	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
1181ea9e0fb8SNicolai Hähnle 			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1182427b1820SPeter Zijlstra 			      ctx);
118301768b42SPeter Zijlstra 
1184ea9e0fb8SNicolai Hähnle 	if (!ret && ctx && ctx->acquired > 1)
118501768b42SPeter Zijlstra 		return ww_mutex_deadlock_injection(lock, ctx);
118601768b42SPeter Zijlstra 
118701768b42SPeter Zijlstra 	return ret;
118801768b42SPeter Zijlstra }
1189c5470b22SNicolai Hähnle EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
119001768b42SPeter Zijlstra 
119101768b42SPeter Zijlstra #endif
119201768b42SPeter Zijlstra 
119301768b42SPeter Zijlstra /*
119401768b42SPeter Zijlstra  * Release the lock, slowpath:
119501768b42SPeter Zijlstra  */
11963ca0ff57SPeter Zijlstra static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
119701768b42SPeter Zijlstra {
11989d659ae1SPeter Zijlstra 	struct task_struct *next = NULL;
1199194a6b5bSWaiman Long 	DEFINE_WAKE_Q(wake_q);
1200b9c16a0eSPeter Zijlstra 	unsigned long owner;
120101768b42SPeter Zijlstra 
12023ca0ff57SPeter Zijlstra 	mutex_release(&lock->dep_map, 1, ip);
12033ca0ff57SPeter Zijlstra 
120401768b42SPeter Zijlstra 	/*
12059d659ae1SPeter Zijlstra 	 * Release the lock before (potentially) taking the spinlock such that
12069d659ae1SPeter Zijlstra 	 * other contenders can get on with things ASAP.
12079d659ae1SPeter Zijlstra 	 *
12089d659ae1SPeter Zijlstra 	 * Except when HANDOFF is set; in that case we must not clear the owner
12099d659ae1SPeter Zijlstra 	 * field, but instead set it to the top waiter.
121001768b42SPeter Zijlstra 	 */
12119d659ae1SPeter Zijlstra 	owner = atomic_long_read(&lock->owner);
12129d659ae1SPeter Zijlstra 	for (;;) {
12139d659ae1SPeter Zijlstra 		unsigned long old;
12149d659ae1SPeter Zijlstra 
12159d659ae1SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
12169d659ae1SPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
1217e274795eSPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
12189d659ae1SPeter Zijlstra #endif
12199d659ae1SPeter Zijlstra 
12209d659ae1SPeter Zijlstra 		if (owner & MUTEX_FLAG_HANDOFF)
12219d659ae1SPeter Zijlstra 			break;
12229d659ae1SPeter Zijlstra 
12239d659ae1SPeter Zijlstra 		old = atomic_long_cmpxchg_release(&lock->owner, owner,
12249d659ae1SPeter Zijlstra 						  __owner_flags(owner));
12259d659ae1SPeter Zijlstra 		if (old == owner) {
12269d659ae1SPeter Zijlstra 			if (owner & MUTEX_FLAG_WAITERS)
12279d659ae1SPeter Zijlstra 				break;
12289d659ae1SPeter Zijlstra 
12293ca0ff57SPeter Zijlstra 			return;
12309d659ae1SPeter Zijlstra 		}
12319d659ae1SPeter Zijlstra 
12329d659ae1SPeter Zijlstra 		owner = old;
12339d659ae1SPeter Zijlstra 	}
123401768b42SPeter Zijlstra 
1235b9c16a0eSPeter Zijlstra 	spin_lock(&lock->wait_lock);
12361d8fe7dcSJason Low 	debug_mutex_unlock(lock);
123701768b42SPeter Zijlstra 	if (!list_empty(&lock->wait_list)) {
123801768b42SPeter Zijlstra 		/* get the first entry from the wait-list: */
123901768b42SPeter Zijlstra 		struct mutex_waiter *waiter =
12409d659ae1SPeter Zijlstra 			list_first_entry(&lock->wait_list,
124101768b42SPeter Zijlstra 					 struct mutex_waiter, list);
124201768b42SPeter Zijlstra 
12439d659ae1SPeter Zijlstra 		next = waiter->task;
12449d659ae1SPeter Zijlstra 
124501768b42SPeter Zijlstra 		debug_mutex_wake_waiter(lock, waiter);
12469d659ae1SPeter Zijlstra 		wake_q_add(&wake_q, next);
124701768b42SPeter Zijlstra 	}
124801768b42SPeter Zijlstra 
12499d659ae1SPeter Zijlstra 	if (owner & MUTEX_FLAG_HANDOFF)
12509d659ae1SPeter Zijlstra 		__mutex_handoff(lock, next);
12519d659ae1SPeter Zijlstra 
1252b9c16a0eSPeter Zijlstra 	spin_unlock(&lock->wait_lock);
12539d659ae1SPeter Zijlstra 
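	/*
	 * Issue the wakeup(s) only after dropping wait_lock, so a woken
	 * waiter doesn't immediately block on the lock we still hold.
	 */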
12541329ce6fSDavidlohr Bueso 	wake_up_q(&wake_q);
125501768b42SPeter Zijlstra }
125601768b42SPeter Zijlstra 
125701768b42SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
125801768b42SPeter Zijlstra /*
125901768b42SPeter Zijlstra  * Here come the less common (and hence less performance-critical) APIs:
126001768b42SPeter Zijlstra  * mutex_lock_interruptible() and mutex_trylock().
126101768b42SPeter Zijlstra  */
126201768b42SPeter Zijlstra static noinline int __sched
126301768b42SPeter Zijlstra __mutex_lock_killable_slowpath(struct mutex *lock);
126401768b42SPeter Zijlstra 
126501768b42SPeter Zijlstra static noinline int __sched
126601768b42SPeter Zijlstra __mutex_lock_interruptible_slowpath(struct mutex *lock);
126701768b42SPeter Zijlstra 
126801768b42SPeter Zijlstra /**
126945dbac0eSMatthew Wilcox  * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
127045dbac0eSMatthew Wilcox  * @lock: The mutex to be acquired.
127101768b42SPeter Zijlstra  *
127245dbac0eSMatthew Wilcox  * Lock the mutex like mutex_lock().  If a signal is delivered while the
127345dbac0eSMatthew Wilcox  * process is sleeping, this function will return without acquiring the
127445dbac0eSMatthew Wilcox  * mutex.
127501768b42SPeter Zijlstra  *
127645dbac0eSMatthew Wilcox  * Context: Process context.
127745dbac0eSMatthew Wilcox  * Return: 0 if the lock was successfully acquired or %-EINTR if a
127845dbac0eSMatthew Wilcox  * signal arrived.
127901768b42SPeter Zijlstra  */
128001768b42SPeter Zijlstra int __sched mutex_lock_interruptible(struct mutex *lock)
128101768b42SPeter Zijlstra {
128201768b42SPeter Zijlstra 	might_sleep();
12833ca0ff57SPeter Zijlstra 
12843ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(lock))
128501768b42SPeter Zijlstra 		return 0;
12863ca0ff57SPeter Zijlstra 
128701768b42SPeter Zijlstra 	return __mutex_lock_interruptible_slowpath(lock);
128801768b42SPeter Zijlstra }
128901768b42SPeter Zijlstra 
129001768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_lock_interruptible);
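
/*
 * A typical caller-side sketch (hypothetical @dev): returning
 * -ERESTARTSYS from a syscall path lets the signal machinery restart
 * the call transparently where that is permitted:
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;
 *	...
 *	mutex_unlock(&dev->lock);
 */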
129101768b42SPeter Zijlstra 
129245dbac0eSMatthew Wilcox /**
129345dbac0eSMatthew Wilcox  * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
129445dbac0eSMatthew Wilcox  * @lock: The mutex to be acquired.
129545dbac0eSMatthew Wilcox  *
129645dbac0eSMatthew Wilcox  * Lock the mutex like mutex_lock().  If a signal which will be fatal to
129745dbac0eSMatthew Wilcox  * the current process is delivered while the process is sleeping, this
129845dbac0eSMatthew Wilcox  * function will return without acquiring the mutex.
129945dbac0eSMatthew Wilcox  *
130045dbac0eSMatthew Wilcox  * Context: Process context.
130145dbac0eSMatthew Wilcox  * Return: 0 if the lock was successfully acquired or %-EINTR if a
130245dbac0eSMatthew Wilcox  * fatal signal arrived.
130345dbac0eSMatthew Wilcox  */
130401768b42SPeter Zijlstra int __sched mutex_lock_killable(struct mutex *lock)
130501768b42SPeter Zijlstra {
130601768b42SPeter Zijlstra 	might_sleep();
13073ca0ff57SPeter Zijlstra 
13083ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(lock))
130901768b42SPeter Zijlstra 		return 0;
13103ca0ff57SPeter Zijlstra 
131101768b42SPeter Zijlstra 	return __mutex_lock_killable_slowpath(lock);
131201768b42SPeter Zijlstra }
131301768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_lock_killable);
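
/*
 * A sketch (hypothetical @dev) for paths that should ride out ordinary
 * signals but still die promptly on SIGKILL:
 *
 *	if (mutex_lock_killable(&dev->lock))
 *		return -EINTR;
 *	...
 *	mutex_unlock(&dev->lock);
 */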
131401768b42SPeter Zijlstra 
131545dbac0eSMatthew Wilcox /**
131645dbac0eSMatthew Wilcox  * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
131745dbac0eSMatthew Wilcox  * @lock: The mutex to be acquired.
131845dbac0eSMatthew Wilcox  *
131945dbac0eSMatthew Wilcox  * Lock the mutex like mutex_lock().  While the task is waiting for this
132045dbac0eSMatthew Wilcox  * mutex, it will be accounted as being in the IO wait state by the
132145dbac0eSMatthew Wilcox  * scheduler.
132245dbac0eSMatthew Wilcox  *
132345dbac0eSMatthew Wilcox  * Context: Process context.
132445dbac0eSMatthew Wilcox  */
13251460cb65STejun Heo void __sched mutex_lock_io(struct mutex *lock)
13261460cb65STejun Heo {
13271460cb65STejun Heo 	int token;
13281460cb65STejun Heo 
13291460cb65STejun Heo 	token = io_schedule_prepare();
13301460cb65STejun Heo 	mutex_lock(lock);
13311460cb65STejun Heo 	io_schedule_finish(token);
13321460cb65STejun Heo }
13331460cb65STejun Heo EXPORT_SYMBOL_GPL(mutex_lock_io);
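
/*
 * A sketch of when this variant matters (hypothetical @jrn): if the
 * mutex is typically held across storage I/O, tasks sleeping on it are
 * really waiting for that I/O, and marking them iowait keeps the
 * accounting (e.g. in top/vmstat) truthful:
 *
 *	mutex_lock_io(&jrn->commit_mutex);
 *	...
 *	mutex_unlock(&jrn->commit_mutex);
 */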
13341460cb65STejun Heo 
13353ca0ff57SPeter Zijlstra static noinline void __sched
13363ca0ff57SPeter Zijlstra __mutex_lock_slowpath(struct mutex *lock)
133701768b42SPeter Zijlstra {
1338427b1820SPeter Zijlstra 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
133901768b42SPeter Zijlstra }
134001768b42SPeter Zijlstra 
134101768b42SPeter Zijlstra static noinline int __sched
134201768b42SPeter Zijlstra __mutex_lock_killable_slowpath(struct mutex *lock)
134301768b42SPeter Zijlstra {
1344427b1820SPeter Zijlstra 	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
134501768b42SPeter Zijlstra }
134601768b42SPeter Zijlstra 
134701768b42SPeter Zijlstra static noinline int __sched
134801768b42SPeter Zijlstra __mutex_lock_interruptible_slowpath(struct mutex *lock)
134901768b42SPeter Zijlstra {
1350427b1820SPeter Zijlstra 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
135101768b42SPeter Zijlstra }
135201768b42SPeter Zijlstra 
135301768b42SPeter Zijlstra static noinline int __sched
135401768b42SPeter Zijlstra __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
135501768b42SPeter Zijlstra {
1356427b1820SPeter Zijlstra 	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
1357427b1820SPeter Zijlstra 			       _RET_IP_, ctx);
135801768b42SPeter Zijlstra }
135901768b42SPeter Zijlstra 
136001768b42SPeter Zijlstra static noinline int __sched
136101768b42SPeter Zijlstra __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
136201768b42SPeter Zijlstra 					    struct ww_acquire_ctx *ctx)
136301768b42SPeter Zijlstra {
1364427b1820SPeter Zijlstra 	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
1365427b1820SPeter Zijlstra 			       _RET_IP_, ctx);
136601768b42SPeter Zijlstra }
136701768b42SPeter Zijlstra 
136801768b42SPeter Zijlstra #endif
136901768b42SPeter Zijlstra 
137001768b42SPeter Zijlstra /**
137101768b42SPeter Zijlstra  * mutex_trylock - try to acquire the mutex, without waiting
137201768b42SPeter Zijlstra  * @lock: the mutex to be acquired
137301768b42SPeter Zijlstra  *
137401768b42SPeter Zijlstra  * Try to acquire the mutex atomically. Returns 1 if the mutex
137501768b42SPeter Zijlstra  * has been acquired successfully, and 0 on contention.
137601768b42SPeter Zijlstra  *
137701768b42SPeter Zijlstra  * NOTE: this function follows the spin_trylock() convention, so
137801768b42SPeter Zijlstra  * it is negated from the down_trylock() return values! Be careful
137901768b42SPeter Zijlstra  * about this when converting semaphore users to mutexes.
138001768b42SPeter Zijlstra  *
138101768b42SPeter Zijlstra  * This function must not be used in interrupt context. The
138201768b42SPeter Zijlstra  * mutex must be released by the same task that acquired it.
138301768b42SPeter Zijlstra  */
138401768b42SPeter Zijlstra int __sched mutex_trylock(struct mutex *lock)
138501768b42SPeter Zijlstra {
1386*6c11c6e3SSebastian Andrzej Siewior 	bool locked;
138701768b42SPeter Zijlstra 
1388*6c11c6e3SSebastian Andrzej Siewior #ifdef CONFIG_DEBUG_MUTEXES
1389*6c11c6e3SSebastian Andrzej Siewior 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
1390*6c11c6e3SSebastian Andrzej Siewior #endif
1391*6c11c6e3SSebastian Andrzej Siewior 
1392*6c11c6e3SSebastian Andrzej Siewior 	locked = __mutex_trylock(lock);
13933ca0ff57SPeter Zijlstra 	if (locked)
13943ca0ff57SPeter Zijlstra 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
139501768b42SPeter Zijlstra 
13963ca0ff57SPeter Zijlstra 	return locked;
139701768b42SPeter Zijlstra }
139801768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_trylock);
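
/*
 * Because the return convention matches spin_trylock(), the idiomatic
 * caller reads "if we got the lock"; a sketch with a hypothetical
 * @cache, falling back when contended rather than sleeping:
 *
 *	if (mutex_trylock(&cache->lock)) {
 *		prune(cache);		(hypothetical helper)
 *		mutex_unlock(&cache->lock);
 *	}
 */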
139901768b42SPeter Zijlstra 
140001768b42SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
140101768b42SPeter Zijlstra int __sched
1402c5470b22SNicolai Hähnle ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
140301768b42SPeter Zijlstra {
140401768b42SPeter Zijlstra 	might_sleep();
140501768b42SPeter Zijlstra 
14063ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(&lock->base)) {
1407ea9e0fb8SNicolai Hähnle 		if (ctx)
140801768b42SPeter Zijlstra 			ww_mutex_set_context_fastpath(lock, ctx);
14093ca0ff57SPeter Zijlstra 		return 0;
14103ca0ff57SPeter Zijlstra 	}
14113ca0ff57SPeter Zijlstra 
14123ca0ff57SPeter Zijlstra 	return __ww_mutex_lock_slowpath(lock, ctx);
141301768b42SPeter Zijlstra }
1414c5470b22SNicolai Hähnle EXPORT_SYMBOL(ww_mutex_lock);
141501768b42SPeter Zijlstra 
141601768b42SPeter Zijlstra int __sched
1417c5470b22SNicolai Hähnle ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
141801768b42SPeter Zijlstra {
141901768b42SPeter Zijlstra 	might_sleep();
142001768b42SPeter Zijlstra 
14213ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(&lock->base)) {
1422ea9e0fb8SNicolai Hähnle 		if (ctx)
142301768b42SPeter Zijlstra 			ww_mutex_set_context_fastpath(lock, ctx);
14243ca0ff57SPeter Zijlstra 		return 0;
14253ca0ff57SPeter Zijlstra 	}
14263ca0ff57SPeter Zijlstra 
14273ca0ff57SPeter Zijlstra 	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
142801768b42SPeter Zijlstra }
1429c5470b22SNicolai Hähnle EXPORT_SYMBOL(ww_mutex_lock_interruptible);
143001768b42SPeter Zijlstra 
143101768b42SPeter Zijlstra #endif
143201768b42SPeter Zijlstra 
143301768b42SPeter Zijlstra /**
143401768b42SPeter Zijlstra  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
143501768b42SPeter Zijlstra  * @cnt: the atomic counter to decrement
143601768b42SPeter Zijlstra  * @lock: the mutex to acquire and return holding if @cnt reaches 0
143701768b42SPeter Zijlstra  *
143801768b42SPeter Zijlstra  * Return: 1 if the decrement reached 0, with @lock held; 0 otherwise.
143901768b42SPeter Zijlstra  */
144001768b42SPeter Zijlstra int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
144101768b42SPeter Zijlstra {
144201768b42SPeter Zijlstra 	/* dec if we can't possibly hit 0 */
144301768b42SPeter Zijlstra 	if (atomic_add_unless(cnt, -1, 1))
144401768b42SPeter Zijlstra 		return 0;
144501768b42SPeter Zijlstra 	/* we might hit 0, so take the lock */
144601768b42SPeter Zijlstra 	mutex_lock(lock);
144701768b42SPeter Zijlstra 	if (!atomic_dec_and_test(cnt)) {
144801768b42SPeter Zijlstra 		/* when we actually did the dec, we didn't hit 0 */
144901768b42SPeter Zijlstra 		mutex_unlock(lock);
145001768b42SPeter Zijlstra 		return 0;
145101768b42SPeter Zijlstra 	}
145201768b42SPeter Zijlstra 	/* we hit 0, and we hold the lock */
145301768b42SPeter Zijlstra 	return 1;
145401768b42SPeter Zijlstra }
145501768b42SPeter Zijlstra EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
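
/*
 * A sketch of the refcount-teardown pattern this helper serves
 * (hypothetical @obj on a @table list): only the thread dropping the
 * final reference reaches the teardown, and it gets there holding the
 * mutex so no new lookup can revive @obj concurrently:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &table->lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&table->lock);
 *		kfree(obj);
 *	}
 */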
1456