// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
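
/*
 * Illustrative initialization sketch (hypothetical 'example_*' names,
 * not part of this file): mutexes are created either statically with
 * DEFINE_MUTEX() or at runtime with mutex_init(), which expands to
 * __mutex_init() above with a static lockdep class key.
 */
static DEFINE_MUTEX(example_static_lock);

static void example_init(struct mutex *m)
{
	mutex_init(m);
}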

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

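/*
 * Illustrative encoding (hypothetical task address): if current ==
 * 0xffff888012345600 owns the lock and the wait_list is non-empty,
 * ->owner reads 0xffff888012345601; masking with ~MUTEX_FLAGS yields
 * the task_struct pointer, masking with MUTEX_FLAGS yields
 * MUTEX_FLAG_WAITERS.
 */
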
/*
 * Internal helper function; C doesn't allow us to hide it :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

__must_check enum mutex_trylock_recursive_enum
mutex_trylock_recursive(struct mutex *lock)
{
	if (unlikely(__mutex_owner(lock) == current))
		return MUTEX_TRYLOCK_RECURSIVE;

	return mutex_trylock(lock);
}
EXPORT_SYMBOL(mutex_trylock_recursive);
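
/*
 * Illustrative sketch (hypothetical caller, assuming the
 * MUTEX_TRYLOCK_* enum values from <linux/mutex.h>; not part of this
 * file): mutex_trylock_recursive() lets a path that may re-enter with
 * the lock already held detect that instead of deadlocking. On
 * MUTEX_TRYLOCK_RECURSIVE the caller must not unlock again.
 */
static bool example_try(struct mutex *lock, bool *already_held)
{
	switch (mutex_trylock_recursive(lock)) {
	case MUTEX_TRYLOCK_RECURSIVE:
		*already_held = true;	/* owned by an outer frame */
		return true;
	case MUTEX_TRYLOCK_SUCCESS:
		*already_held = false;	/* acquired here, unlock when done */
		return true;
	default:
		return false;		/* MUTEX_TRYLOCK_FAILED */
	}
}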

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * A waiter may have set the HANDOFF bit; we must make sure it
		 * doesn't live past the point where we acquire the lock, as
		 * that could (accidentally) leave the bit set on an unlocked
		 * mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif
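
/*
 * Illustrative fast-path transitions (hypothetical task pointer
 * 0xffff888012345600):
 *
 *   lock:   ->owner 0x0                -> 0xffff888012345600  (ACQUIRE)
 *   unlock: ->owner 0xffff888012345600 -> 0x0                 (RELEASE)
 *
 * Any flag bit set in ->owner makes both cmpxchg()s fail and forces
 * the slowpath, which is exactly what the flags are for.
 */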

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void __sched
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock;
 * __mutex_trylock() provides the matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}
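
/*
 * Illustrative handoff sequence (conceptual): with T1 owning and T2 the
 * top-waiter having set MUTEX_FLAG_HANDOFF, unlock does
 *
 *   T1|WAITERS|HANDOFF --__mutex_handoff(T2)--> T2|WAITERS|PICKUP
 *
 * and T2 later clears PICKUP in __mutex_trylock_or_owner() when it
 * actually picks the lock up.
 */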

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
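
/*
 * Illustrative usage sketch (hypothetical 'example_lock' and
 * 'example_list'; not part of this file): the canonical pattern is a
 * short critical section with lock and unlock in the same task.
 */
static DEFINE_MUTEX(example_lock);
static LIST_HEAD(example_list);

static void example_add(struct list_head *entry)
{
	mutex_lock(&example_lock);
	list_add(entry, &example_list);	/* protected by example_lock */
	mutex_unlock(&example_lock);
}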

/*
 * Wait-Die:
 *   A newer transaction is killed when:
 *     it (the new transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   A newer transaction is wounded when:
 *     an older transaction makes a request for a lock being held by
 *     the newer transaction.
 */
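
/*
 * Illustrative example: transaction A has stamp 1 (older), transaction
 * B has stamp 2 (younger). If B requests a lock held by A, then under
 * Wait-Die B is killed (backs off and restarts). If A requests a lock
 * held by B, then under Wound-Wait B is wounded and must release once
 * it reaches a kill point.
 */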

/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done()?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}

/*
 * Determine if context @a is 'after' context @b. IOW, @a is a younger
 * transaction than @b and depending on algorithm either needs to wait for
 * @b or die.
 */
static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	return (signed long)(a->stamp - b->stamp) > 0;
}
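
/*
 * Note (illustrative): the signed comparison above is wraparound-safe
 * serial-number arithmetic; e.g. a stamp of 1 still compares as 'after'
 * a stamp of ULONG_MAX, so stamp counter overflow does not invert the
 * ordering.
 */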

/*
 * Wait-Die; wake a younger waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool __sched
__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	if (waiter->ww_ctx->acquired > 0 &&
			__ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
		debug_mutex_wake_waiter(lock, waiter);
		wake_up_process(waiter->task);
	}

	return true;
}

/*
 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with older transactions than
 * the lock holder's. Even though multiple waiters may wound the lock holder,
 * it is sufficient that only one does.
 */
static bool __ww_mutex_wound(struct mutex *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __mutex_owner(lock);

	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Possible through __ww_mutex_add_waiter() when we race with
	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
	 * through __ww_mutex_check_waiters().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have FLAG_WAITERS set and hold
	 * wait_lock.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state()
		 * inserts sufficient barriers to make sure @owner either sees
		 * it's wounded in __ww_mutex_check_kill() or has a
		 * wakeup pending to re-read the wounded state.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}

/*
 * We just acquired @lock under @ww_ctx, if there are later contexts waiting
 * behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list;
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;

	lockdep_assert_held(&lock->wait_lock);

	list_for_each_entry(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}

/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
	 *     MB		        MB
	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
	 *
	 * The memory barrier above pairs with the memory barrier in
	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
	 * and/or !empty list.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	spin_lock(&lock->base.wait_lock);
	__ww_mutex_check_waiters(&lock->base, ctx);
	spin_unlock(&lock->base.wait_lock);
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set the contents are undefined; only by acquiring
	 * wait_lock is there a guarantee that they are valid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted() to detect lock holder preemption.
		 */
		if (!owner->on_cpu || need_resched() ||
				vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * To guard against lock holder preemption, do not spin if the owner
	 * task is not running on a CPU or if its CPU is preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The @waiter parameter is non-NULL if the spinner is a waiter in the
 * wait queue. The waiter-spinner will spin on the lock directly and
 * concurrently with the spinner at the head of the OSQ, if present,
 * until the owner is changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
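
/*
 * Illustrative sketch of the ww_mutex acquire/backoff pattern
 * (hypothetical locks 'a' and 'b' and class 'example_ww_class'; see
 * Documentation/locking/ww-mutex-design.rst for the real recipe). On
 * -EDEADLK we drop what we hold, sleep on the contended lock with
 * ww_mutex_lock_slow(), and retry in the new order.
 */
static DEFINE_WW_CLASS(example_ww_class);

static int example_lock_both(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &example_ww_class);

	ret = ww_mutex_lock(a, &ctx);
	if (ret)
		goto err_fini;
retry:
	ret = ww_mutex_lock(b, &ctx);
	if (ret == -EDEADLK) {
		/* Back off: drop 'a', sleep until 'b' is ours, then retry. */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		swap(a, b);	/* 'a' now names the lock we already hold */
		goto retry;
	}
	if (ret)
		goto err_unlock_a;

	ww_acquire_done(&ctx);
	/* ... both locks held: do the work ... */
	ww_mutex_unlock(b);
	ww_mutex_unlock(a);
	ww_acquire_fini(&ctx);
	return 0;

err_unlock_a:
	ww_mutex_unlock(a);
err_fini:
	ww_acquire_fini(&ctx);
	return ret;
}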

static __always_inline int __sched
__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourself.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int __sched
__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct mutex_waiter *cur;

	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must kill ourself.
	 */
	cur = waiter;
	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}

/*
 * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
 * first. Such that older contexts are preferred to acquire the lock over
 * younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die, kill ourself immediately when possible (there
 * are older contexts already waiting) to avoid unnecessary waiting; for
 * Wound-Wait, ensure we wound the owning context when it is younger.
 */
static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
		      struct mutex *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos;
	bool is_wait_die;

	if (!ww_ctx) {
		__mutex_add_waiter(lock, waiter, &lock->wait_list);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them. Wait-Die waiters may die here. Wound-Wait waiters
	 * never die here, but they are sorted in stamp order and
	 * may wound the lock holder.
	 */
	pos = &lock->wait_list;
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: if we find an older context waiting, there
			 * is no point in queueing behind it, as we'd have to
			 * die the moment it would acquire the lock.
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = &cur->list;

		/* Wait-Die: ensure younger waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx);
	}

	__mutex_add_waiter(lock, waiter, pos);

	/*
	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
	 * wound it such that we might proceed.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * See ww_mutex_set_context_fastpath(). Orders setting
		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
		 * such that either we or the fastpath will wound @ww->ctx.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
	}

	return 0;
}
92201768b42SPeter Zijlstra /*
92301768b42SPeter Zijlstra  * Lock a mutex (possibly interruptible), slowpath:
92401768b42SPeter Zijlstra  */
92501768b42SPeter Zijlstra static __always_inline int __sched
92601768b42SPeter Zijlstra __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92701768b42SPeter Zijlstra 		    struct lockdep_map *nest_lock, unsigned long ip,
92801768b42SPeter Zijlstra 		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
92901768b42SPeter Zijlstra {
93001768b42SPeter Zijlstra 	struct mutex_waiter waiter;
9319d659ae1SPeter Zijlstra 	bool first = false;
932a40ca565SWaiman Long 	struct ww_mutex *ww;
93301768b42SPeter Zijlstra 	int ret;
93401768b42SPeter Zijlstra 
935427b1820SPeter Zijlstra 	might_sleep();
936ea9e0fb8SNicolai Hähnle 
9376c11c6e3SSebastian Andrzej Siewior #ifdef CONFIG_DEBUG_MUTEXES
9386c11c6e3SSebastian Andrzej Siewior 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
9396c11c6e3SSebastian Andrzej Siewior #endif
9406c11c6e3SSebastian Andrzej Siewior 
941a40ca565SWaiman Long 	ww = container_of(lock, struct ww_mutex, base);
942ea9e0fb8SNicolai Hähnle 	if (use_ww_ctx && ww_ctx) {
9430422e83dSChris Wilson 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
9440422e83dSChris Wilson 			return -EALREADY;
94508295b3bSThomas Hellstrom 
94608295b3bSThomas Hellstrom 		/*
94708295b3bSThomas Hellstrom 		 * Reset the wounded flag after a kill. No other process can
94808295b3bSThomas Hellstrom 		 * race and wound us here since they can't have a valid owner
94908295b3bSThomas Hellstrom 		 * pointer if we don't have any locks held.
95008295b3bSThomas Hellstrom 		 */
95108295b3bSThomas Hellstrom 		if (ww_ctx->acquired == 0)
95208295b3bSThomas Hellstrom 			ww_ctx->wounded = 0;
9530422e83dSChris Wilson 	}
9540422e83dSChris Wilson 
95501768b42SPeter Zijlstra 	preempt_disable();
95601768b42SPeter Zijlstra 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
95701768b42SPeter Zijlstra 
958e274795eSPeter Zijlstra 	if (__mutex_trylock(lock) ||
959c516df97SNicolai Hähnle 	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
96076916515SDavidlohr Bueso 		/* got the lock, yay! */
9613ca0ff57SPeter Zijlstra 		lock_acquired(&lock->dep_map, ip);
962ea9e0fb8SNicolai Hähnle 		if (use_ww_ctx && ww_ctx)
9633ca0ff57SPeter Zijlstra 			ww_mutex_set_context_fastpath(ww, ww_ctx);
96401768b42SPeter Zijlstra 		preempt_enable();
96501768b42SPeter Zijlstra 		return 0;
96601768b42SPeter Zijlstra 	}
96701768b42SPeter Zijlstra 
968b9c16a0eSPeter Zijlstra 	spin_lock(&lock->wait_lock);
9691e820c96SJason Low 	/*
9703ca0ff57SPeter Zijlstra 	 * After waiting to acquire the wait_lock, try again.
9711e820c96SJason Low 	 */
972659cf9f5SNicolai Hähnle 	if (__mutex_trylock(lock)) {
973659cf9f5SNicolai Hähnle 		if (use_ww_ctx && ww_ctx)
97455f036caSPeter Ziljstra 			__ww_mutex_check_waiters(lock, ww_ctx);
975659cf9f5SNicolai Hähnle 
97601768b42SPeter Zijlstra 		goto skip_wait;
977659cf9f5SNicolai Hähnle 	}
97801768b42SPeter Zijlstra 
97901768b42SPeter Zijlstra 	debug_mutex_lock_common(lock, &waiter);
98001768b42SPeter Zijlstra 
9816baa5c60SNicolai Hähnle 	lock_contended(&lock->dep_map, ip);
9826baa5c60SNicolai Hähnle 
9836baa5c60SNicolai Hähnle 	if (!use_ww_ctx) {
98401768b42SPeter Zijlstra 		/* add waiting tasks to the end of the waitqueue (FIFO): */
98508295b3bSThomas Hellstrom 		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
98608295b3bSThomas Hellstrom 
987977625a6SNicolai Hähnle 
988977625a6SNicolai Hähnle #ifdef CONFIG_DEBUG_MUTEXES
989977625a6SNicolai Hähnle 		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
990977625a6SNicolai Hähnle #endif
9916baa5c60SNicolai Hähnle 	} else {
99255f036caSPeter Ziljstra 		/*
99355f036caSPeter Ziljstra 		 * Add in stamp order, waking up waiters that must kill
99455f036caSPeter Ziljstra 		 * themselves.
99555f036caSPeter Ziljstra 		 */
9966baa5c60SNicolai Hähnle 		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
9976baa5c60SNicolai Hähnle 		if (ret)
99855f036caSPeter Ziljstra 			goto err_early_kill;
9996baa5c60SNicolai Hähnle 
10006baa5c60SNicolai Hähnle 		waiter.ww_ctx = ww_ctx;
10016baa5c60SNicolai Hähnle 	}
10026baa5c60SNicolai Hähnle 
1003d269a8b8SDavidlohr Bueso 	waiter.task = current;
100401768b42SPeter Zijlstra 
1005642fa448SDavidlohr Bueso 	set_current_state(state);
100601768b42SPeter Zijlstra 	for (;;) {
10075bbd7e64SPeter Zijlstra 		/*
10085bbd7e64SPeter Zijlstra 		 * Once we hold wait_lock, we're serialized against
10095bbd7e64SPeter Zijlstra 		 * mutex_unlock() handing the lock off to us, do a trylock
10105bbd7e64SPeter Zijlstra 		 * before testing the error conditions to make sure we pick up
10115bbd7e64SPeter Zijlstra 		 * the handoff.
10125bbd7e64SPeter Zijlstra 		 */
1013e274795eSPeter Zijlstra 		if (__mutex_trylock(lock))
10145bbd7e64SPeter Zijlstra 			goto acquired;
101501768b42SPeter Zijlstra 
101601768b42SPeter Zijlstra 		/*
101755f036caSPeter Ziljstra 		 * Check for signals and kill conditions while holding
10185bbd7e64SPeter Zijlstra 		 * wait_lock. This ensures the lock cancellation is ordered
10195bbd7e64SPeter Zijlstra 		 * against mutex_unlock() and wake-ups do not go missing.
102001768b42SPeter Zijlstra 		 */
10213bb5f4acSDavidlohr Bueso 		if (signal_pending_state(state, current)) {
102201768b42SPeter Zijlstra 			ret = -EINTR;
102301768b42SPeter Zijlstra 			goto err;
102401768b42SPeter Zijlstra 		}
102501768b42SPeter Zijlstra 
102655f036caSPeter Ziljstra 		if (use_ww_ctx && ww_ctx) {
102755f036caSPeter Ziljstra 			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
102801768b42SPeter Zijlstra 			if (ret)
102901768b42SPeter Zijlstra 				goto err;
103001768b42SPeter Zijlstra 		}
103101768b42SPeter Zijlstra 
1032b9c16a0eSPeter Zijlstra 		spin_unlock(&lock->wait_lock);
103301768b42SPeter Zijlstra 		schedule_preempt_disabled();
10349d659ae1SPeter Zijlstra 
10356baa5c60SNicolai Hähnle 		/*
10366baa5c60SNicolai Hähnle 		 * ww_mutex needs to always recheck its position since its waiter
10376baa5c60SNicolai Hähnle 		 * list is not FIFO ordered.
10386baa5c60SNicolai Hähnle 		 */
10396baa5c60SNicolai Hähnle 		if ((use_ww_ctx && ww_ctx) || !first) {
10406baa5c60SNicolai Hähnle 			first = __mutex_waiter_is_first(lock, &waiter);
10416baa5c60SNicolai Hähnle 			if (first)
10429d659ae1SPeter Zijlstra 				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
10439d659ae1SPeter Zijlstra 		}
10445bbd7e64SPeter Zijlstra 
1045642fa448SDavidlohr Bueso 		set_current_state(state);
10465bbd7e64SPeter Zijlstra 		/*
10475bbd7e64SPeter Zijlstra 		 * Here we order against unlock; we must either see it change
10485bbd7e64SPeter Zijlstra 		 * state back to RUNNING and fall through the next schedule(),
10495bbd7e64SPeter Zijlstra 		 * or we must see its unlock and acquire.
10505bbd7e64SPeter Zijlstra 		 */
1051e274795eSPeter Zijlstra 		if (__mutex_trylock(lock) ||
1052c516df97SNicolai Hähnle 		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
10535bbd7e64SPeter Zijlstra 			break;
10545bbd7e64SPeter Zijlstra 
1055b9c16a0eSPeter Zijlstra 		spin_lock(&lock->wait_lock);
105601768b42SPeter Zijlstra 	}
1057b9c16a0eSPeter Zijlstra 	spin_lock(&lock->wait_lock);
10585bbd7e64SPeter Zijlstra acquired:
1059642fa448SDavidlohr Bueso 	__set_current_state(TASK_RUNNING);
106051587bcfSDavidlohr Bueso 
106108295b3bSThomas Hellstrom 	if (use_ww_ctx && ww_ctx) {
106208295b3bSThomas Hellstrom 		/*
106308295b3bSThomas Hellstrom 		 * Wound-Wait: we stole the lock (we are not the first waiter);
106408295b3bSThomas Hellstrom 		 * check the remaining waiters, as any of them may want to wound us.
106508295b3bSThomas Hellstrom 		 */
106608295b3bSThomas Hellstrom 		if (!ww_ctx->is_wait_die &&
106708295b3bSThomas Hellstrom 		    !__mutex_waiter_is_first(lock, &waiter))
106808295b3bSThomas Hellstrom 			__ww_mutex_check_waiters(lock, ww_ctx);
106908295b3bSThomas Hellstrom 	}
107008295b3bSThomas Hellstrom 
1071d269a8b8SDavidlohr Bueso 	mutex_remove_waiter(lock, &waiter, current);
107201768b42SPeter Zijlstra 	if (likely(list_empty(&lock->wait_list)))
10739d659ae1SPeter Zijlstra 		__mutex_clear_flag(lock, MUTEX_FLAGS);
10743ca0ff57SPeter Zijlstra 
107501768b42SPeter Zijlstra 	debug_mutex_free_waiter(&waiter);
107601768b42SPeter Zijlstra 
107701768b42SPeter Zijlstra skip_wait:
107801768b42SPeter Zijlstra 	/* got the lock - cleanup and rejoice! */
107901768b42SPeter Zijlstra 	lock_acquired(&lock->dep_map, ip);
108001768b42SPeter Zijlstra 
1081ea9e0fb8SNicolai Hähnle 	if (use_ww_ctx && ww_ctx)
108255f036caSPeter Ziljstra 		ww_mutex_lock_acquired(ww, ww_ctx);
108301768b42SPeter Zijlstra 
1084b9c16a0eSPeter Zijlstra 	spin_unlock(&lock->wait_lock);
108501768b42SPeter Zijlstra 	preempt_enable();
108601768b42SPeter Zijlstra 	return 0;
108701768b42SPeter Zijlstra 
108801768b42SPeter Zijlstra err:
1089642fa448SDavidlohr Bueso 	__set_current_state(TASK_RUNNING);
1090d269a8b8SDavidlohr Bueso 	mutex_remove_waiter(lock, &waiter, current);
109155f036caSPeter Ziljstra err_early_kill:
1092b9c16a0eSPeter Zijlstra 	spin_unlock(&lock->wait_lock);
109301768b42SPeter Zijlstra 	debug_mutex_free_waiter(&waiter);
1094*5facae4fSQian Cai 	mutex_release(&lock->dep_map, ip);
109501768b42SPeter Zijlstra 	preempt_enable();
109601768b42SPeter Zijlstra 	return ret;
109701768b42SPeter Zijlstra }
109801768b42SPeter Zijlstra 
1099427b1820SPeter Zijlstra static int __sched
1100427b1820SPeter Zijlstra __mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1101427b1820SPeter Zijlstra 	     struct lockdep_map *nest_lock, unsigned long ip)
1102427b1820SPeter Zijlstra {
1103427b1820SPeter Zijlstra 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
1104427b1820SPeter Zijlstra }
1105427b1820SPeter Zijlstra 
1106427b1820SPeter Zijlstra static int __sched
1107427b1820SPeter Zijlstra __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1108427b1820SPeter Zijlstra 		struct lockdep_map *nest_lock, unsigned long ip,
1109427b1820SPeter Zijlstra 		struct ww_acquire_ctx *ww_ctx)
1110427b1820SPeter Zijlstra {
1111427b1820SPeter Zijlstra 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
1112427b1820SPeter Zijlstra }
1113427b1820SPeter Zijlstra 
111401768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_LOCK_ALLOC
111501768b42SPeter Zijlstra void __sched
111601768b42SPeter Zijlstra mutex_lock_nested(struct mutex *lock, unsigned int subclass)
111701768b42SPeter Zijlstra {
1118427b1820SPeter Zijlstra 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
111901768b42SPeter Zijlstra }
112001768b42SPeter Zijlstra 
112101768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(mutex_lock_nested);
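
/*
 * Editorial sketch (not part of this file): when two locks of the same
 * class must nest, an explicit subclass tells lockdep the ordering is
 * intentional; 'parent' and 'child' below are hypothetical objects:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */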
112201768b42SPeter Zijlstra 
112301768b42SPeter Zijlstra void __sched
112401768b42SPeter Zijlstra _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
112501768b42SPeter Zijlstra {
1126427b1820SPeter Zijlstra 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
112701768b42SPeter Zijlstra }
112801768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
112901768b42SPeter Zijlstra 
113001768b42SPeter Zijlstra int __sched
113101768b42SPeter Zijlstra mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
113201768b42SPeter Zijlstra {
1133427b1820SPeter Zijlstra 	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
113401768b42SPeter Zijlstra }
113501768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
113601768b42SPeter Zijlstra 
113701768b42SPeter Zijlstra int __sched
113801768b42SPeter Zijlstra mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
113901768b42SPeter Zijlstra {
1140427b1820SPeter Zijlstra 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
114101768b42SPeter Zijlstra }
114201768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
114301768b42SPeter Zijlstra 
11441460cb65STejun Heo void __sched
11451460cb65STejun Heo mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
11461460cb65STejun Heo {
11471460cb65STejun Heo 	int token;
11481460cb65STejun Heo 
11491460cb65STejun Heo 	might_sleep();
11501460cb65STejun Heo 
11511460cb65STejun Heo 	token = io_schedule_prepare();
11521460cb65STejun Heo 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
11531460cb65STejun Heo 			    subclass, NULL, _RET_IP_, NULL, 0);
11541460cb65STejun Heo 	io_schedule_finish(token);
11551460cb65STejun Heo }
11561460cb65STejun Heo EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
11571460cb65STejun Heo 
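/*
 * With CONFIG_DEBUG_WW_MUTEX_SLOWPATH, periodically inject a spurious
 * -EDEADLK into otherwise successful acquisitions so that callers'
 * backoff/retry paths get exercised; the interval between injections
 * grows over time to bound the overhead.
 */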
115801768b42SPeter Zijlstra static inline int
115901768b42SPeter Zijlstra ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
116001768b42SPeter Zijlstra {
116101768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
116201768b42SPeter Zijlstra 	unsigned tmp;
116301768b42SPeter Zijlstra 
116401768b42SPeter Zijlstra 	if (ctx->deadlock_inject_countdown-- == 0) {
116501768b42SPeter Zijlstra 		tmp = ctx->deadlock_inject_interval;
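		/* Grow the interval by ~3.5x (2t + t + t/2), saturating at UINT_MAX. */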
116601768b42SPeter Zijlstra 		if (tmp > UINT_MAX/4)
116701768b42SPeter Zijlstra 			tmp = UINT_MAX;
116801768b42SPeter Zijlstra 		else
116901768b42SPeter Zijlstra 			tmp = tmp*2 + tmp + tmp/2;
117001768b42SPeter Zijlstra 
117101768b42SPeter Zijlstra 		ctx->deadlock_inject_interval = tmp;
117201768b42SPeter Zijlstra 		ctx->deadlock_inject_countdown = tmp;
117301768b42SPeter Zijlstra 		ctx->contending_lock = lock;
117401768b42SPeter Zijlstra 
117501768b42SPeter Zijlstra 		ww_mutex_unlock(lock);
117601768b42SPeter Zijlstra 
117701768b42SPeter Zijlstra 		return -EDEADLK;
117801768b42SPeter Zijlstra 	}
117901768b42SPeter Zijlstra #endif
118001768b42SPeter Zijlstra 
118101768b42SPeter Zijlstra 	return 0;
118201768b42SPeter Zijlstra }
118301768b42SPeter Zijlstra 
118401768b42SPeter Zijlstra int __sched
1185c5470b22SNicolai Hähnle ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
118601768b42SPeter Zijlstra {
118701768b42SPeter Zijlstra 	int ret;
118801768b42SPeter Zijlstra 
118901768b42SPeter Zijlstra 	might_sleep();
1190427b1820SPeter Zijlstra 	ret =  __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
1191ea9e0fb8SNicolai Hähnle 			       0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1192427b1820SPeter Zijlstra 			       ctx);
1193ea9e0fb8SNicolai Hähnle 	if (!ret && ctx && ctx->acquired > 1)
119401768b42SPeter Zijlstra 		return ww_mutex_deadlock_injection(lock, ctx);
119501768b42SPeter Zijlstra 
119601768b42SPeter Zijlstra 	return ret;
119701768b42SPeter Zijlstra }
1198c5470b22SNicolai Hähnle EXPORT_SYMBOL_GPL(ww_mutex_lock);
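
/*
 * Editorial sketch (not part of this file) of the caller-side backoff
 * dance; 'my_ww_class', 'obj_a' and 'obj_b' are hypothetical. On
 * -EDEADLK the caller drops everything it holds in the class, waits on
 * the contended lock with ww_mutex_lock_slow() and retries (a real
 * caller loops, since the retry can again return -EDEADLK):
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *	ww_mutex_lock(&obj_a->lock, &ctx);
 *	ret = ww_mutex_lock(&obj_b->lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		ww_mutex_unlock(&obj_a->lock);
 *		ww_mutex_lock_slow(&obj_b->lock, &ctx);
 *		ret = ww_mutex_lock(&obj_a->lock, &ctx);
 *	}
 *	ww_acquire_done(&ctx);
 *	...
 *	ww_mutex_unlock(&obj_a->lock);
 *	ww_mutex_unlock(&obj_b->lock);
 *	ww_acquire_fini(&ctx);
 */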
119901768b42SPeter Zijlstra 
120001768b42SPeter Zijlstra int __sched
1201c5470b22SNicolai Hähnle ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
120201768b42SPeter Zijlstra {
120301768b42SPeter Zijlstra 	int ret;
120401768b42SPeter Zijlstra 
120501768b42SPeter Zijlstra 	might_sleep();
1206427b1820SPeter Zijlstra 	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
1207ea9e0fb8SNicolai Hähnle 			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1208427b1820SPeter Zijlstra 			      ctx);
120901768b42SPeter Zijlstra 
1210ea9e0fb8SNicolai Hähnle 	if (!ret && ctx && ctx->acquired > 1)
121101768b42SPeter Zijlstra 		return ww_mutex_deadlock_injection(lock, ctx);
121201768b42SPeter Zijlstra 
121301768b42SPeter Zijlstra 	return ret;
121401768b42SPeter Zijlstra }
1215c5470b22SNicolai Hähnle EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
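
/*
 * The interruptible variant follows the same backoff pattern as above,
 * with -EINTR as an additional outcome; the slow wait then uses
 * ww_mutex_lock_slow_interruptible().
 */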
121601768b42SPeter Zijlstra 
121701768b42SPeter Zijlstra #endif
121801768b42SPeter Zijlstra 
121901768b42SPeter Zijlstra /*
122001768b42SPeter Zijlstra  * Release the lock, slowpath:
122101768b42SPeter Zijlstra  */
12223ca0ff57SPeter Zijlstra static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
122301768b42SPeter Zijlstra {
12249d659ae1SPeter Zijlstra 	struct task_struct *next = NULL;
1225194a6b5bSWaiman Long 	DEFINE_WAKE_Q(wake_q);
1226b9c16a0eSPeter Zijlstra 	unsigned long owner;
122701768b42SPeter Zijlstra 
1228*5facae4fSQian Cai 	mutex_release(&lock->dep_map, ip);
12293ca0ff57SPeter Zijlstra 
123001768b42SPeter Zijlstra 	/*
12319d659ae1SPeter Zijlstra 	 * Release the lock before (potentially) taking the spinlock such that
12329d659ae1SPeter Zijlstra 	 * other contenders can get on with things ASAP.
12339d659ae1SPeter Zijlstra 	 *
12349d659ae1SPeter Zijlstra 	 * Except when HANDOFF is set: in that case we must not clear the
12359d659ae1SPeter Zijlstra 	 * owner field, but instead set it to the top waiter.
123601768b42SPeter Zijlstra 	 */
12379d659ae1SPeter Zijlstra 	owner = atomic_long_read(&lock->owner);
12389d659ae1SPeter Zijlstra 	for (;;) {
12399d659ae1SPeter Zijlstra 		unsigned long old;
12409d659ae1SPeter Zijlstra 
12419d659ae1SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
12429d659ae1SPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
1243e274795eSPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
12449d659ae1SPeter Zijlstra #endif
12459d659ae1SPeter Zijlstra 
12469d659ae1SPeter Zijlstra 		if (owner & MUTEX_FLAG_HANDOFF)
12479d659ae1SPeter Zijlstra 			break;
12489d659ae1SPeter Zijlstra 
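		/*
		 * Clear the owner task but keep the flag bits; if the
		 * cmpxchg races against a concurrent flag update, go
		 * around again with the freshly observed value.
		 */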
12499d659ae1SPeter Zijlstra 		old = atomic_long_cmpxchg_release(&lock->owner, owner,
12509d659ae1SPeter Zijlstra 						  __owner_flags(owner));
12519d659ae1SPeter Zijlstra 		if (old == owner) {
12529d659ae1SPeter Zijlstra 			if (owner & MUTEX_FLAG_WAITERS)
12539d659ae1SPeter Zijlstra 				break;
12549d659ae1SPeter Zijlstra 
12553ca0ff57SPeter Zijlstra 			return;
12569d659ae1SPeter Zijlstra 		}
12579d659ae1SPeter Zijlstra 
12589d659ae1SPeter Zijlstra 		owner = old;
12599d659ae1SPeter Zijlstra 	}
126001768b42SPeter Zijlstra 
1261b9c16a0eSPeter Zijlstra 	spin_lock(&lock->wait_lock);
12621d8fe7dcSJason Low 	debug_mutex_unlock(lock);
126301768b42SPeter Zijlstra 	if (!list_empty(&lock->wait_list)) {
126401768b42SPeter Zijlstra 		/* get the first entry from the wait-list: */
126501768b42SPeter Zijlstra 		struct mutex_waiter *waiter =
12669d659ae1SPeter Zijlstra 			list_first_entry(&lock->wait_list,
126701768b42SPeter Zijlstra 					 struct mutex_waiter, list);
126801768b42SPeter Zijlstra 
12699d659ae1SPeter Zijlstra 		next = waiter->task;
12709d659ae1SPeter Zijlstra 
127101768b42SPeter Zijlstra 		debug_mutex_wake_waiter(lock, waiter);
12729d659ae1SPeter Zijlstra 		wake_q_add(&wake_q, next);
127301768b42SPeter Zijlstra 	}
127401768b42SPeter Zijlstra 
12759d659ae1SPeter Zijlstra 	if (owner & MUTEX_FLAG_HANDOFF)
12769d659ae1SPeter Zijlstra 		__mutex_handoff(lock, next);
12779d659ae1SPeter Zijlstra 
1278b9c16a0eSPeter Zijlstra 	spin_unlock(&lock->wait_lock);
12799d659ae1SPeter Zijlstra 
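	/* Do the wakeup(s) after dropping wait_lock to keep its hold time short. */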
12801329ce6fSDavidlohr Bueso 	wake_up_q(&wake_q);
128101768b42SPeter Zijlstra }
128201768b42SPeter Zijlstra 
128301768b42SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
128401768b42SPeter Zijlstra /*
128501768b42SPeter Zijlstra  * Here come the less common (and hence less performance-critical) APIs:
128601768b42SPeter Zijlstra  * mutex_lock_interruptible() and mutex_trylock().
128701768b42SPeter Zijlstra  */
128801768b42SPeter Zijlstra static noinline int __sched
128901768b42SPeter Zijlstra __mutex_lock_killable_slowpath(struct mutex *lock);
129001768b42SPeter Zijlstra 
129101768b42SPeter Zijlstra static noinline int __sched
129201768b42SPeter Zijlstra __mutex_lock_interruptible_slowpath(struct mutex *lock);
129301768b42SPeter Zijlstra 
129401768b42SPeter Zijlstra /**
129545dbac0eSMatthew Wilcox  * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
129645dbac0eSMatthew Wilcox  * @lock: The mutex to be acquired.
129701768b42SPeter Zijlstra  *
129845dbac0eSMatthew Wilcox  * Lock the mutex like mutex_lock().  If a signal is delivered while the
129945dbac0eSMatthew Wilcox  * process is sleeping, this function will return without acquiring the
130045dbac0eSMatthew Wilcox  * mutex.
130101768b42SPeter Zijlstra  *
130245dbac0eSMatthew Wilcox  * Context: Process context.
130345dbac0eSMatthew Wilcox  * Return: 0 if the lock was successfully acquired or %-EINTR if a
130445dbac0eSMatthew Wilcox  * signal arrived.
130501768b42SPeter Zijlstra  */
130601768b42SPeter Zijlstra int __sched mutex_lock_interruptible(struct mutex *lock)
130701768b42SPeter Zijlstra {
130801768b42SPeter Zijlstra 	might_sleep();
13093ca0ff57SPeter Zijlstra 
13103ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(lock))
131101768b42SPeter Zijlstra 		return 0;
13123ca0ff57SPeter Zijlstra 
131301768b42SPeter Zijlstra 	return __mutex_lock_interruptible_slowpath(lock);
131401768b42SPeter Zijlstra }
131501768b42SPeter Zijlstra 
131601768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_lock_interruptible);
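
/*
 * Editorial sketch (not part of this file): callers typically translate
 * -EINTR into -ERESTARTSYS so the interrupted syscall is transparently
 * restarted; 'edev' is a hypothetical device:
 *
 *	if (mutex_lock_interruptible(&edev->lock))
 *		return -ERESTARTSYS;
 *	...
 *	mutex_unlock(&edev->lock);
 */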
131701768b42SPeter Zijlstra 
131845dbac0eSMatthew Wilcox /**
131945dbac0eSMatthew Wilcox  * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
132045dbac0eSMatthew Wilcox  * @lock: The mutex to be acquired.
132145dbac0eSMatthew Wilcox  *
132245dbac0eSMatthew Wilcox  * Lock the mutex like mutex_lock().  If a signal which will be fatal to
132345dbac0eSMatthew Wilcox  * the current process is delivered while the process is sleeping, this
132445dbac0eSMatthew Wilcox  * function will return without acquiring the mutex.
132545dbac0eSMatthew Wilcox  *
132645dbac0eSMatthew Wilcox  * Context: Process context.
132745dbac0eSMatthew Wilcox  * Return: 0 if the lock was successfully acquired or %-EINTR if a
132845dbac0eSMatthew Wilcox  * fatal signal arrived.
132945dbac0eSMatthew Wilcox  */
133001768b42SPeter Zijlstra int __sched mutex_lock_killable(struct mutex *lock)
133101768b42SPeter Zijlstra {
133201768b42SPeter Zijlstra 	might_sleep();
13333ca0ff57SPeter Zijlstra 
13343ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(lock))
133501768b42SPeter Zijlstra 		return 0;
13363ca0ff57SPeter Zijlstra 
133701768b42SPeter Zijlstra 	return __mutex_lock_killable_slowpath(lock);
133801768b42SPeter Zijlstra }
133901768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_lock_killable);
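
/*
 * Editorial sketch (not part of this file): the killable variant suits
 * paths that must not abort on ordinary signals but should not block a
 * SIGKILL'ed task forever; 'edev' is hypothetical:
 *
 *	if (mutex_lock_killable(&edev->lock))
 *		return -EINTR;
 *	...
 *	mutex_unlock(&edev->lock);
 */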
134001768b42SPeter Zijlstra 
134145dbac0eSMatthew Wilcox /**
134245dbac0eSMatthew Wilcox  * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
134345dbac0eSMatthew Wilcox  * @lock: The mutex to be acquired.
134445dbac0eSMatthew Wilcox  *
134545dbac0eSMatthew Wilcox  * Lock the mutex like mutex_lock().  While the task is waiting for this
134645dbac0eSMatthew Wilcox  * mutex, it will be accounted as being in the IO wait state by the
134745dbac0eSMatthew Wilcox  * scheduler.
134845dbac0eSMatthew Wilcox  *
134945dbac0eSMatthew Wilcox  * Context: Process context.
135045dbac0eSMatthew Wilcox  */
13511460cb65STejun Heo void __sched mutex_lock_io(struct mutex *lock)
13521460cb65STejun Heo {
13531460cb65STejun Heo 	int token;
13541460cb65STejun Heo 
13551460cb65STejun Heo 	token = io_schedule_prepare();
13561460cb65STejun Heo 	mutex_lock(lock);
13571460cb65STejun Heo 	io_schedule_finish(token);
13581460cb65STejun Heo }
13591460cb65STejun Heo EXPORT_SYMBOL_GPL(mutex_lock_io);
13601460cb65STejun Heo 
13613ca0ff57SPeter Zijlstra static noinline void __sched
13623ca0ff57SPeter Zijlstra __mutex_lock_slowpath(struct mutex *lock)
136301768b42SPeter Zijlstra {
1364427b1820SPeter Zijlstra 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
136501768b42SPeter Zijlstra }
136601768b42SPeter Zijlstra 
136701768b42SPeter Zijlstra static noinline int __sched
136801768b42SPeter Zijlstra __mutex_lock_killable_slowpath(struct mutex *lock)
136901768b42SPeter Zijlstra {
1370427b1820SPeter Zijlstra 	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
137101768b42SPeter Zijlstra }
137201768b42SPeter Zijlstra 
137301768b42SPeter Zijlstra static noinline int __sched
137401768b42SPeter Zijlstra __mutex_lock_interruptible_slowpath(struct mutex *lock)
137501768b42SPeter Zijlstra {
1376427b1820SPeter Zijlstra 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
137701768b42SPeter Zijlstra }
137801768b42SPeter Zijlstra 
137901768b42SPeter Zijlstra static noinline int __sched
138001768b42SPeter Zijlstra __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
138101768b42SPeter Zijlstra {
1382427b1820SPeter Zijlstra 	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
1383427b1820SPeter Zijlstra 			       _RET_IP_, ctx);
138401768b42SPeter Zijlstra }
138501768b42SPeter Zijlstra 
138601768b42SPeter Zijlstra static noinline int __sched
138701768b42SPeter Zijlstra __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
138801768b42SPeter Zijlstra 					    struct ww_acquire_ctx *ctx)
138901768b42SPeter Zijlstra {
1390427b1820SPeter Zijlstra 	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
1391427b1820SPeter Zijlstra 			       _RET_IP_, ctx);
139201768b42SPeter Zijlstra }
139301768b42SPeter Zijlstra 
139401768b42SPeter Zijlstra #endif
139501768b42SPeter Zijlstra 
139601768b42SPeter Zijlstra /**
139701768b42SPeter Zijlstra  * mutex_trylock - try to acquire the mutex, without waiting
139801768b42SPeter Zijlstra  * @lock: the mutex to be acquired
139901768b42SPeter Zijlstra  *
140001768b42SPeter Zijlstra  * Try to acquire the mutex atomically. Returns 1 if the mutex
140101768b42SPeter Zijlstra  * has been acquired successfully, and 0 on contention.
140201768b42SPeter Zijlstra  *
140301768b42SPeter Zijlstra  * NOTE: this function follows the spin_trylock() convention, so
140401768b42SPeter Zijlstra  * it is negated from the down_trylock() return values! Be careful
140501768b42SPeter Zijlstra  * about this when converting semaphore users to mutexes.
140601768b42SPeter Zijlstra  *
140701768b42SPeter Zijlstra  * This function must not be used in interrupt context. The
140801768b42SPeter Zijlstra  * mutex must be released by the same task that acquired it.
140901768b42SPeter Zijlstra  */
141001768b42SPeter Zijlstra int __sched mutex_trylock(struct mutex *lock)
141101768b42SPeter Zijlstra {
14126c11c6e3SSebastian Andrzej Siewior 	bool locked;
141301768b42SPeter Zijlstra 
14146c11c6e3SSebastian Andrzej Siewior #ifdef CONFIG_DEBUG_MUTEXES
14156c11c6e3SSebastian Andrzej Siewior 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
14166c11c6e3SSebastian Andrzej Siewior #endif
14176c11c6e3SSebastian Andrzej Siewior 
14186c11c6e3SSebastian Andrzej Siewior 	locked = __mutex_trylock(lock);
14193ca0ff57SPeter Zijlstra 	if (locked)
14203ca0ff57SPeter Zijlstra 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
142101768b42SPeter Zijlstra 
14223ca0ff57SPeter Zijlstra 	return locked;
142301768b42SPeter Zijlstra }
142401768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_trylock);
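
/*
 * Editorial sketch (not part of this file): per the spin_trylock()
 * convention, a non-zero return means the lock is now held; 'edev' is
 * hypothetical:
 *
 *	if (mutex_trylock(&edev->lock)) {
 *		...fast path under the lock...
 *		mutex_unlock(&edev->lock);
 *	} else {
 *		...defer the work or fall back...
 *	}
 */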
142501768b42SPeter Zijlstra 
142601768b42SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
142701768b42SPeter Zijlstra int __sched
1428c5470b22SNicolai Hähnle ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
142901768b42SPeter Zijlstra {
143001768b42SPeter Zijlstra 	might_sleep();
143101768b42SPeter Zijlstra 
14323ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(&lock->base)) {
1433ea9e0fb8SNicolai Hähnle 		if (ctx)
143401768b42SPeter Zijlstra 			ww_mutex_set_context_fastpath(lock, ctx);
14353ca0ff57SPeter Zijlstra 		return 0;
14363ca0ff57SPeter Zijlstra 	}
14373ca0ff57SPeter Zijlstra 
14383ca0ff57SPeter Zijlstra 	return __ww_mutex_lock_slowpath(lock, ctx);
143901768b42SPeter Zijlstra }
1440c5470b22SNicolai Hähnle EXPORT_SYMBOL(ww_mutex_lock);
144101768b42SPeter Zijlstra 
144201768b42SPeter Zijlstra int __sched
1443c5470b22SNicolai Hähnle ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
144401768b42SPeter Zijlstra {
144501768b42SPeter Zijlstra 	might_sleep();
144601768b42SPeter Zijlstra 
14473ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(&lock->base)) {
1448ea9e0fb8SNicolai Hähnle 		if (ctx)
144901768b42SPeter Zijlstra 			ww_mutex_set_context_fastpath(lock, ctx);
14503ca0ff57SPeter Zijlstra 		return 0;
14513ca0ff57SPeter Zijlstra 	}
14523ca0ff57SPeter Zijlstra 
14533ca0ff57SPeter Zijlstra 	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
145401768b42SPeter Zijlstra }
1455c5470b22SNicolai Hähnle EXPORT_SYMBOL(ww_mutex_lock_interruptible);
145601768b42SPeter Zijlstra 
145701768b42SPeter Zijlstra #endif
145801768b42SPeter Zijlstra 
145901768b42SPeter Zijlstra /**
146001768b42SPeter Zijlstra  * atomic_dec_and_mutex_lock - return holding the mutex if the count hits 0
146101768b42SPeter Zijlstra  * @cnt: the atomic counter to decrement
146201768b42SPeter Zijlstra  * @lock: the mutex to return holding if @cnt reaches 0
146301768b42SPeter Zijlstra  *
146401768b42SPeter Zijlstra  * Return: 1, holding @lock, if the decrement took @cnt to 0; 0 otherwise.
146501768b42SPeter Zijlstra  */
146601768b42SPeter Zijlstra int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
146701768b42SPeter Zijlstra {
146801768b42SPeter Zijlstra 	/* dec if we can't possibly hit 0 */
146901768b42SPeter Zijlstra 	if (atomic_add_unless(cnt, -1, 1))
147001768b42SPeter Zijlstra 		return 0;
147101768b42SPeter Zijlstra 	/* we might hit 0, so take the lock */
147201768b42SPeter Zijlstra 	mutex_lock(lock);
147301768b42SPeter Zijlstra 	if (!atomic_dec_and_test(cnt)) {
147401768b42SPeter Zijlstra 		/* when we actually did the dec, we didn't hit 0 */
147501768b42SPeter Zijlstra 		mutex_unlock(lock);
147601768b42SPeter Zijlstra 		return 0;
147701768b42SPeter Zijlstra 	}
147801768b42SPeter Zijlstra 	/* we hit 0, and we hold the lock */
147901768b42SPeter Zijlstra 	return 1;
148001768b42SPeter Zijlstra }
148101768b42SPeter Zijlstra EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
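
/*
 * Editorial sketch (not part of this file): the classic use is freeing
 * an object only when the last reference drops, with the teardown
 * serialized against lookups by the mutex; 'edev' and 'registry_lock'
 * are hypothetical:
 *
 *	if (atomic_dec_and_mutex_lock(&edev->refcnt, &registry_lock)) {
 *		...unlink edev, mutex_unlock(&registry_lock), then free edev...
 *	}
 */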
1482