xref: /openbmc/linux/kernel/locking/mutex.c (revision 174cd4b1e5fbd0d74c68cf3a74f5bd4923485512)
101768b42SPeter Zijlstra /*
267a6de49SPeter Zijlstra  * kernel/locking/mutex.c
301768b42SPeter Zijlstra  *
401768b42SPeter Zijlstra  * Mutexes: blocking mutual exclusion locks
501768b42SPeter Zijlstra  *
601768b42SPeter Zijlstra  * Started by Ingo Molnar:
701768b42SPeter Zijlstra  *
801768b42SPeter Zijlstra  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
901768b42SPeter Zijlstra  *
1001768b42SPeter Zijlstra  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
1101768b42SPeter Zijlstra  * David Howells for suggestions and improvements.
1201768b42SPeter Zijlstra  *
1301768b42SPeter Zijlstra  *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
1401768b42SPeter Zijlstra  *    from the -rt tree, where it was originally implemented for rtmutexes
1501768b42SPeter Zijlstra  *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
1601768b42SPeter Zijlstra  *    and Sven Dietrich.
1701768b42SPeter Zijlstra  *
18214e0aedSDavidlohr Bueso  * Also see Documentation/locking/mutex-design.txt.
1901768b42SPeter Zijlstra  */
2001768b42SPeter Zijlstra #include <linux/mutex.h>
2101768b42SPeter Zijlstra #include <linux/ww_mutex.h>
22*174cd4b1SIngo Molnar #include <linux/sched/signal.h>
2301768b42SPeter Zijlstra #include <linux/sched/rt.h>
2484f001e1SIngo Molnar #include <linux/sched/wake_q.h>
2501768b42SPeter Zijlstra #include <linux/export.h>
2601768b42SPeter Zijlstra #include <linux/spinlock.h>
2701768b42SPeter Zijlstra #include <linux/interrupt.h>
2801768b42SPeter Zijlstra #include <linux/debug_locks.h>
297a215f89SDavidlohr Bueso #include <linux/osq_lock.h>
3001768b42SPeter Zijlstra 
3101768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
3201768b42SPeter Zijlstra # include "mutex-debug.h"
3301768b42SPeter Zijlstra #else
3401768b42SPeter Zijlstra # include "mutex.h"
3501768b42SPeter Zijlstra #endif
3601768b42SPeter Zijlstra 
3701768b42SPeter Zijlstra void
3801768b42SPeter Zijlstra __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
3901768b42SPeter Zijlstra {
403ca0ff57SPeter Zijlstra 	atomic_long_set(&lock->owner, 0);
4101768b42SPeter Zijlstra 	spin_lock_init(&lock->wait_lock);
4201768b42SPeter Zijlstra 	INIT_LIST_HEAD(&lock->wait_list);
4301768b42SPeter Zijlstra #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
444d9d951eSJason Low 	osq_lock_init(&lock->osq);
4501768b42SPeter Zijlstra #endif
4601768b42SPeter Zijlstra 
4701768b42SPeter Zijlstra 	debug_mutex_init(lock, name, key);
4801768b42SPeter Zijlstra }
4901768b42SPeter Zijlstra EXPORT_SYMBOL(__mutex_init);
5001768b42SPeter Zijlstra 
513ca0ff57SPeter Zijlstra /*
523ca0ff57SPeter Zijlstra  * @owner: contains: 'struct task_struct *' to the current lock owner,
533ca0ff57SPeter Zijlstra  * NULL means not owned. Since task_struct pointers are aligned at
54e274795eSPeter Zijlstra  * at least L1_CACHE_BYTES, we have low bits to store extra state.
553ca0ff57SPeter Zijlstra  *
563ca0ff57SPeter Zijlstra  * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
579d659ae1SPeter Zijlstra  * Bit1 indicates unlock needs to hand the lock to the top-waiter
58e274795eSPeter Zijlstra  * Bit2 indicates handoff has been done and we're waiting for pickup.
593ca0ff57SPeter Zijlstra  */
603ca0ff57SPeter Zijlstra #define MUTEX_FLAG_WAITERS	0x01
619d659ae1SPeter Zijlstra #define MUTEX_FLAG_HANDOFF	0x02
62e274795eSPeter Zijlstra #define MUTEX_FLAG_PICKUP	0x04
633ca0ff57SPeter Zijlstra 
64e274795eSPeter Zijlstra #define MUTEX_FLAGS		0x07
653ca0ff57SPeter Zijlstra 
663ca0ff57SPeter Zijlstra static inline struct task_struct *__owner_task(unsigned long owner)
673ca0ff57SPeter Zijlstra {
683ca0ff57SPeter Zijlstra 	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
693ca0ff57SPeter Zijlstra }
703ca0ff57SPeter Zijlstra 
713ca0ff57SPeter Zijlstra static inline unsigned long __owner_flags(unsigned long owner)
723ca0ff57SPeter Zijlstra {
733ca0ff57SPeter Zijlstra 	return owner & MUTEX_FLAGS;
743ca0ff57SPeter Zijlstra }
753ca0ff57SPeter Zijlstra 
763ca0ff57SPeter Zijlstra /*
77e274795eSPeter Zijlstra  * Trylock variant that retuns the owning task on failure.
783ca0ff57SPeter Zijlstra  */
79e274795eSPeter Zijlstra static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
803ca0ff57SPeter Zijlstra {
813ca0ff57SPeter Zijlstra 	unsigned long owner, curr = (unsigned long)current;
823ca0ff57SPeter Zijlstra 
833ca0ff57SPeter Zijlstra 	owner = atomic_long_read(&lock->owner);
843ca0ff57SPeter Zijlstra 	for (;;) { /* must loop, can race against a flag */
859d659ae1SPeter Zijlstra 		unsigned long old, flags = __owner_flags(owner);
86e274795eSPeter Zijlstra 		unsigned long task = owner & ~MUTEX_FLAGS;
873ca0ff57SPeter Zijlstra 
88e274795eSPeter Zijlstra 		if (task) {
89e274795eSPeter Zijlstra 			if (likely(task != curr))
90e274795eSPeter Zijlstra 				break;
919d659ae1SPeter Zijlstra 
92e274795eSPeter Zijlstra 			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
93e274795eSPeter Zijlstra 				break;
94e274795eSPeter Zijlstra 
95e274795eSPeter Zijlstra 			flags &= ~MUTEX_FLAG_PICKUP;
96e274795eSPeter Zijlstra 		} else {
97e274795eSPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
98e274795eSPeter Zijlstra 			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
99e274795eSPeter Zijlstra #endif
1009d659ae1SPeter Zijlstra 		}
1013ca0ff57SPeter Zijlstra 
1029d659ae1SPeter Zijlstra 		/*
1039d659ae1SPeter Zijlstra 		 * We set the HANDOFF bit, we must make sure it doesn't live
1049d659ae1SPeter Zijlstra 		 * past the point where we acquire it. This would be possible
1059d659ae1SPeter Zijlstra 		 * if we (accidentally) set the bit on an unlocked mutex.
1069d659ae1SPeter Zijlstra 		 */
1079d659ae1SPeter Zijlstra 		flags &= ~MUTEX_FLAG_HANDOFF;
1089d659ae1SPeter Zijlstra 
1099d659ae1SPeter Zijlstra 		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
1103ca0ff57SPeter Zijlstra 		if (old == owner)
111e274795eSPeter Zijlstra 			return NULL;
1123ca0ff57SPeter Zijlstra 
1133ca0ff57SPeter Zijlstra 		owner = old;
1143ca0ff57SPeter Zijlstra 	}
115e274795eSPeter Zijlstra 
116e274795eSPeter Zijlstra 	return __owner_task(owner);
117e274795eSPeter Zijlstra }
118e274795eSPeter Zijlstra 
119e274795eSPeter Zijlstra /*
120e274795eSPeter Zijlstra  * Actual trylock that will work on any unlocked state.
121e274795eSPeter Zijlstra  */
122e274795eSPeter Zijlstra static inline bool __mutex_trylock(struct mutex *lock)
123e274795eSPeter Zijlstra {
124e274795eSPeter Zijlstra 	return !__mutex_trylock_or_owner(lock);
1253ca0ff57SPeter Zijlstra }
1263ca0ff57SPeter Zijlstra 
1273ca0ff57SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
1283ca0ff57SPeter Zijlstra /*
1293ca0ff57SPeter Zijlstra  * Lockdep annotations are contained to the slow paths for simplicity.
1303ca0ff57SPeter Zijlstra  * There is nothing that would stop spreading the lockdep annotations outwards
1313ca0ff57SPeter Zijlstra  * except more code.
1323ca0ff57SPeter Zijlstra  */
1333ca0ff57SPeter Zijlstra 
1343ca0ff57SPeter Zijlstra /*
1353ca0ff57SPeter Zijlstra  * Optimistic trylock that only works in the uncontended case. Make sure to
1363ca0ff57SPeter Zijlstra  * follow with a __mutex_trylock() before failing.
1373ca0ff57SPeter Zijlstra  */
1383ca0ff57SPeter Zijlstra static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
1393ca0ff57SPeter Zijlstra {
1403ca0ff57SPeter Zijlstra 	unsigned long curr = (unsigned long)current;
1413ca0ff57SPeter Zijlstra 
1423ca0ff57SPeter Zijlstra 	if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
1433ca0ff57SPeter Zijlstra 		return true;
1443ca0ff57SPeter Zijlstra 
1453ca0ff57SPeter Zijlstra 	return false;
1463ca0ff57SPeter Zijlstra }
1473ca0ff57SPeter Zijlstra 
1483ca0ff57SPeter Zijlstra static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
1493ca0ff57SPeter Zijlstra {
1503ca0ff57SPeter Zijlstra 	unsigned long curr = (unsigned long)current;
1513ca0ff57SPeter Zijlstra 
1523ca0ff57SPeter Zijlstra 	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
1533ca0ff57SPeter Zijlstra 		return true;
1543ca0ff57SPeter Zijlstra 
1553ca0ff57SPeter Zijlstra 	return false;
1563ca0ff57SPeter Zijlstra }
1573ca0ff57SPeter Zijlstra #endif
1583ca0ff57SPeter Zijlstra 
1593ca0ff57SPeter Zijlstra static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
1603ca0ff57SPeter Zijlstra {
1613ca0ff57SPeter Zijlstra 	atomic_long_or(flag, &lock->owner);
1623ca0ff57SPeter Zijlstra }
1633ca0ff57SPeter Zijlstra 
1643ca0ff57SPeter Zijlstra static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
1653ca0ff57SPeter Zijlstra {
1663ca0ff57SPeter Zijlstra 	atomic_long_andnot(flag, &lock->owner);
1673ca0ff57SPeter Zijlstra }
1683ca0ff57SPeter Zijlstra 
1699d659ae1SPeter Zijlstra static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
1709d659ae1SPeter Zijlstra {
1719d659ae1SPeter Zijlstra 	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
1729d659ae1SPeter Zijlstra }
1739d659ae1SPeter Zijlstra 
1749d659ae1SPeter Zijlstra /*
1759d659ae1SPeter Zijlstra  * Give up ownership to a specific task, when @task = NULL, this is equivalent
176e274795eSPeter Zijlstra  * to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves
177e274795eSPeter Zijlstra  * WAITERS. Provides RELEASE semantics like a regular unlock, the
178e274795eSPeter Zijlstra  * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
1799d659ae1SPeter Zijlstra  */
1809d659ae1SPeter Zijlstra static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
1819d659ae1SPeter Zijlstra {
1829d659ae1SPeter Zijlstra 	unsigned long owner = atomic_long_read(&lock->owner);
1839d659ae1SPeter Zijlstra 
1849d659ae1SPeter Zijlstra 	for (;;) {
1859d659ae1SPeter Zijlstra 		unsigned long old, new;
1869d659ae1SPeter Zijlstra 
1879d659ae1SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
1889d659ae1SPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
189e274795eSPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
1909d659ae1SPeter Zijlstra #endif
1919d659ae1SPeter Zijlstra 
1929d659ae1SPeter Zijlstra 		new = (owner & MUTEX_FLAG_WAITERS);
1939d659ae1SPeter Zijlstra 		new |= (unsigned long)task;
194e274795eSPeter Zijlstra 		if (task)
195e274795eSPeter Zijlstra 			new |= MUTEX_FLAG_PICKUP;
1969d659ae1SPeter Zijlstra 
1979d659ae1SPeter Zijlstra 		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
1989d659ae1SPeter Zijlstra 		if (old == owner)
1999d659ae1SPeter Zijlstra 			break;
2009d659ae1SPeter Zijlstra 
2019d659ae1SPeter Zijlstra 		owner = old;
2029d659ae1SPeter Zijlstra 	}
2039d659ae1SPeter Zijlstra }
2049d659ae1SPeter Zijlstra 
20501768b42SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
20601768b42SPeter Zijlstra /*
20701768b42SPeter Zijlstra  * We split the mutex lock/unlock logic into separate fastpath and
20801768b42SPeter Zijlstra  * slowpath functions, to reduce the register pressure on the fastpath.
20901768b42SPeter Zijlstra  * We also put the fastpath first in the kernel image, to make sure the
21001768b42SPeter Zijlstra  * branch is predicted by the CPU as default-untaken.
21101768b42SPeter Zijlstra  */
2123ca0ff57SPeter Zijlstra static void __sched __mutex_lock_slowpath(struct mutex *lock);
21301768b42SPeter Zijlstra 
21401768b42SPeter Zijlstra /**
21501768b42SPeter Zijlstra  * mutex_lock - acquire the mutex
21601768b42SPeter Zijlstra  * @lock: the mutex to be acquired
21701768b42SPeter Zijlstra  *
21801768b42SPeter Zijlstra  * Lock the mutex exclusively for this task. If the mutex is not
21901768b42SPeter Zijlstra  * available right now, it will sleep until it can get it.
22001768b42SPeter Zijlstra  *
22101768b42SPeter Zijlstra  * The mutex must later on be released by the same task that
22201768b42SPeter Zijlstra  * acquired it. Recursive locking is not allowed. The task
22301768b42SPeter Zijlstra  * may not exit without first unlocking the mutex. Also, kernel
224139b6fd2SSharon Dvir  * memory where the mutex resides must not be freed with
22501768b42SPeter Zijlstra  * the mutex still locked. The mutex must first be initialized
22601768b42SPeter Zijlstra  * (or statically defined) before it can be locked. memset()-ing
22701768b42SPeter Zijlstra  * the mutex to 0 is not allowed.
22801768b42SPeter Zijlstra  *
22901768b42SPeter Zijlstra  * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
23001768b42SPeter Zijlstra  *   checks that will enforce the restrictions and will also do
23101768b42SPeter Zijlstra  *   deadlock debugging. )
23201768b42SPeter Zijlstra  *
23301768b42SPeter Zijlstra  * This function is similar to (but not equivalent to) down().
23401768b42SPeter Zijlstra  */
23501768b42SPeter Zijlstra void __sched mutex_lock(struct mutex *lock)
23601768b42SPeter Zijlstra {
23701768b42SPeter Zijlstra 	might_sleep();
23801768b42SPeter Zijlstra 
2393ca0ff57SPeter Zijlstra 	if (!__mutex_trylock_fast(lock))
2403ca0ff57SPeter Zijlstra 		__mutex_lock_slowpath(lock);
2413ca0ff57SPeter Zijlstra }
24201768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_lock);
24301768b42SPeter Zijlstra #endif
24401768b42SPeter Zijlstra 
245427b1820SPeter Zijlstra static __always_inline void
246427b1820SPeter Zijlstra ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
24776916515SDavidlohr Bueso {
24876916515SDavidlohr Bueso #ifdef CONFIG_DEBUG_MUTEXES
24976916515SDavidlohr Bueso 	/*
25076916515SDavidlohr Bueso 	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
25176916515SDavidlohr Bueso 	 * but released with a normal mutex_unlock in this call.
25276916515SDavidlohr Bueso 	 *
25376916515SDavidlohr Bueso 	 * This should never happen, always use ww_mutex_unlock.
25476916515SDavidlohr Bueso 	 */
25576916515SDavidlohr Bueso 	DEBUG_LOCKS_WARN_ON(ww->ctx);
25676916515SDavidlohr Bueso 
25776916515SDavidlohr Bueso 	/*
25876916515SDavidlohr Bueso 	 * Not quite done after calling ww_acquire_done() ?
25976916515SDavidlohr Bueso 	 */
26076916515SDavidlohr Bueso 	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
26176916515SDavidlohr Bueso 
26276916515SDavidlohr Bueso 	if (ww_ctx->contending_lock) {
26376916515SDavidlohr Bueso 		/*
26476916515SDavidlohr Bueso 		 * After -EDEADLK you tried to
26576916515SDavidlohr Bueso 		 * acquire a different ww_mutex? Bad!
26676916515SDavidlohr Bueso 		 */
26776916515SDavidlohr Bueso 		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
26876916515SDavidlohr Bueso 
26976916515SDavidlohr Bueso 		/*
27076916515SDavidlohr Bueso 		 * You called ww_mutex_lock after receiving -EDEADLK,
27176916515SDavidlohr Bueso 		 * but 'forgot' to unlock everything else first?
27276916515SDavidlohr Bueso 		 */
27376916515SDavidlohr Bueso 		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
27476916515SDavidlohr Bueso 		ww_ctx->contending_lock = NULL;
27576916515SDavidlohr Bueso 	}
27676916515SDavidlohr Bueso 
27776916515SDavidlohr Bueso 	/*
27876916515SDavidlohr Bueso 	 * Naughty, using a different class will lead to undefined behavior!
27976916515SDavidlohr Bueso 	 */
28076916515SDavidlohr Bueso 	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
28176916515SDavidlohr Bueso #endif
28276916515SDavidlohr Bueso 	ww_ctx->acquired++;
28376916515SDavidlohr Bueso }
28476916515SDavidlohr Bueso 
2853822da3eSNicolai Hähnle static inline bool __sched
2863822da3eSNicolai Hähnle __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
2873822da3eSNicolai Hähnle {
2883822da3eSNicolai Hähnle 	return a->stamp - b->stamp <= LONG_MAX &&
2893822da3eSNicolai Hähnle 	       (a->stamp != b->stamp || a > b);
2903822da3eSNicolai Hähnle }
2913822da3eSNicolai Hähnle 
29276916515SDavidlohr Bueso /*
293659cf9f5SNicolai Hähnle  * Wake up any waiters that may have to back off when the lock is held by the
294659cf9f5SNicolai Hähnle  * given context.
295659cf9f5SNicolai Hähnle  *
296659cf9f5SNicolai Hähnle  * Due to the invariants on the wait list, this can only affect the first
297659cf9f5SNicolai Hähnle  * waiter with a context.
298659cf9f5SNicolai Hähnle  *
299659cf9f5SNicolai Hähnle  * The current task must not be on the wait list.
300659cf9f5SNicolai Hähnle  */
301659cf9f5SNicolai Hähnle static void __sched
302659cf9f5SNicolai Hähnle __ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
303659cf9f5SNicolai Hähnle {
304659cf9f5SNicolai Hähnle 	struct mutex_waiter *cur;
305659cf9f5SNicolai Hähnle 
306659cf9f5SNicolai Hähnle 	lockdep_assert_held(&lock->wait_lock);
307659cf9f5SNicolai Hähnle 
308659cf9f5SNicolai Hähnle 	list_for_each_entry(cur, &lock->wait_list, list) {
309659cf9f5SNicolai Hähnle 		if (!cur->ww_ctx)
310659cf9f5SNicolai Hähnle 			continue;
311659cf9f5SNicolai Hähnle 
312659cf9f5SNicolai Hähnle 		if (cur->ww_ctx->acquired > 0 &&
313659cf9f5SNicolai Hähnle 		    __ww_ctx_stamp_after(cur->ww_ctx, ww_ctx)) {
314659cf9f5SNicolai Hähnle 			debug_mutex_wake_waiter(lock, cur);
315659cf9f5SNicolai Hähnle 			wake_up_process(cur->task);
316659cf9f5SNicolai Hähnle 		}
317659cf9f5SNicolai Hähnle 
318659cf9f5SNicolai Hähnle 		break;
319659cf9f5SNicolai Hähnle 	}
320659cf9f5SNicolai Hähnle }
321659cf9f5SNicolai Hähnle 
32276916515SDavidlohr Bueso /*
3234bd19084SDavidlohr Bueso  * After acquiring lock with fastpath or when we lost out in contested
32476916515SDavidlohr Bueso  * slowpath, set ctx and wake up any waiters so they can recheck.
32576916515SDavidlohr Bueso  */
32676916515SDavidlohr Bueso static __always_inline void
327427b1820SPeter Zijlstra ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
32876916515SDavidlohr Bueso {
32976916515SDavidlohr Bueso 	ww_mutex_lock_acquired(lock, ctx);
33076916515SDavidlohr Bueso 
33176916515SDavidlohr Bueso 	lock->ctx = ctx;
33276916515SDavidlohr Bueso 
33376916515SDavidlohr Bueso 	/*
33476916515SDavidlohr Bueso 	 * The lock->ctx update should be visible on all cores before
33576916515SDavidlohr Bueso 	 * the atomic read is done, otherwise contended waiters might be
33676916515SDavidlohr Bueso 	 * missed. The contended waiters will either see ww_ctx == NULL
33776916515SDavidlohr Bueso 	 * and keep spinning, or it will acquire wait_lock, add itself
33876916515SDavidlohr Bueso 	 * to waiter list and sleep.
33976916515SDavidlohr Bueso 	 */
34076916515SDavidlohr Bueso 	smp_mb(); /* ^^^ */
34176916515SDavidlohr Bueso 
34276916515SDavidlohr Bueso 	/*
34376916515SDavidlohr Bueso 	 * Check if lock is contended, if not there is nobody to wake up
34476916515SDavidlohr Bueso 	 */
3453ca0ff57SPeter Zijlstra 	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
34676916515SDavidlohr Bueso 		return;
34776916515SDavidlohr Bueso 
34876916515SDavidlohr Bueso 	/*
34976916515SDavidlohr Bueso 	 * Uh oh, we raced in fastpath, wake up everyone in this case,
35076916515SDavidlohr Bueso 	 * so they can see the new lock->ctx.
35176916515SDavidlohr Bueso 	 */
352b9c16a0eSPeter Zijlstra 	spin_lock(&lock->base.wait_lock);
353659cf9f5SNicolai Hähnle 	__ww_mutex_wakeup_for_backoff(&lock->base, ctx);
354b9c16a0eSPeter Zijlstra 	spin_unlock(&lock->base.wait_lock);
35576916515SDavidlohr Bueso }
35676916515SDavidlohr Bueso 
3574bd19084SDavidlohr Bueso /*
358659cf9f5SNicolai Hähnle  * After acquiring lock in the slowpath set ctx.
359659cf9f5SNicolai Hähnle  *
360659cf9f5SNicolai Hähnle  * Unlike for the fast path, the caller ensures that waiters are woken up where
361659cf9f5SNicolai Hähnle  * necessary.
3624bd19084SDavidlohr Bueso  *
3634bd19084SDavidlohr Bueso  * Callers must hold the mutex wait_lock.
3644bd19084SDavidlohr Bueso  */
3654bd19084SDavidlohr Bueso static __always_inline void
366427b1820SPeter Zijlstra ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
3674bd19084SDavidlohr Bueso {
3684bd19084SDavidlohr Bueso 	ww_mutex_lock_acquired(lock, ctx);
3694bd19084SDavidlohr Bueso 	lock->ctx = ctx;
3704bd19084SDavidlohr Bueso }
37176916515SDavidlohr Bueso 
37201768b42SPeter Zijlstra #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
373c516df97SNicolai Hähnle 
374c516df97SNicolai Hähnle static inline
375c516df97SNicolai Hähnle bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
376c516df97SNicolai Hähnle 			    struct mutex_waiter *waiter)
377c516df97SNicolai Hähnle {
378c516df97SNicolai Hähnle 	struct ww_mutex *ww;
379c516df97SNicolai Hähnle 
380c516df97SNicolai Hähnle 	ww = container_of(lock, struct ww_mutex, base);
381c516df97SNicolai Hähnle 
38201768b42SPeter Zijlstra 	/*
383c516df97SNicolai Hähnle 	 * If ww->ctx is set the contents are undefined, only
384c516df97SNicolai Hähnle 	 * by acquiring wait_lock there is a guarantee that
385c516df97SNicolai Hähnle 	 * they are not invalid when reading.
386c516df97SNicolai Hähnle 	 *
387c516df97SNicolai Hähnle 	 * As such, when deadlock detection needs to be
388c516df97SNicolai Hähnle 	 * performed the optimistic spinning cannot be done.
389c516df97SNicolai Hähnle 	 *
390c516df97SNicolai Hähnle 	 * Check this in every inner iteration because we may
391c516df97SNicolai Hähnle 	 * be racing against another thread's ww_mutex_lock.
392c516df97SNicolai Hähnle 	 */
393c516df97SNicolai Hähnle 	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
394c516df97SNicolai Hähnle 		return false;
395c516df97SNicolai Hähnle 
396c516df97SNicolai Hähnle 	/*
397c516df97SNicolai Hähnle 	 * If we aren't on the wait list yet, cancel the spin
398c516df97SNicolai Hähnle 	 * if there are waiters. We want  to avoid stealing the
399c516df97SNicolai Hähnle 	 * lock from a waiter with an earlier stamp, since the
400c516df97SNicolai Hähnle 	 * other thread may already own a lock that we also
401c516df97SNicolai Hähnle 	 * need.
402c516df97SNicolai Hähnle 	 */
403c516df97SNicolai Hähnle 	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
404c516df97SNicolai Hähnle 		return false;
405c516df97SNicolai Hähnle 
406c516df97SNicolai Hähnle 	/*
407c516df97SNicolai Hähnle 	 * Similarly, stop spinning if we are no longer the
408c516df97SNicolai Hähnle 	 * first waiter.
409c516df97SNicolai Hähnle 	 */
410c516df97SNicolai Hähnle 	if (waiter && !__mutex_waiter_is_first(lock, waiter))
411c516df97SNicolai Hähnle 		return false;
412c516df97SNicolai Hähnle 
413c516df97SNicolai Hähnle 	return true;
414c516df97SNicolai Hähnle }
415c516df97SNicolai Hähnle 
41601768b42SPeter Zijlstra /*
41725f13b40SNicolai Hähnle  * Look out! "owner" is an entirely speculative pointer access and not
41825f13b40SNicolai Hähnle  * reliable.
41925f13b40SNicolai Hähnle  *
42025f13b40SNicolai Hähnle  * "noinline" so that this function shows up on perf profiles.
42101768b42SPeter Zijlstra  */
42201768b42SPeter Zijlstra static noinline
42325f13b40SNicolai Hähnle bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
424c516df97SNicolai Hähnle 			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
42501768b42SPeter Zijlstra {
42601ac33c1SJason Low 	bool ret = true;
427be1f7bf2SJason Low 
42801768b42SPeter Zijlstra 	rcu_read_lock();
4293ca0ff57SPeter Zijlstra 	while (__mutex_owner(lock) == owner) {
430be1f7bf2SJason Low 		/*
431be1f7bf2SJason Low 		 * Ensure we emit the owner->on_cpu, dereference _after_
43201ac33c1SJason Low 		 * checking lock->owner still matches owner. If that fails,
43301ac33c1SJason Low 		 * owner might point to freed memory. If it still matches,
434be1f7bf2SJason Low 		 * the rcu_read_lock() ensures the memory stays valid.
435be1f7bf2SJason Low 		 */
436be1f7bf2SJason Low 		barrier();
437be1f7bf2SJason Low 
43805ffc951SPan Xinhui 		/*
43905ffc951SPan Xinhui 		 * Use vcpu_is_preempted to detect lock holder preemption issue.
44005ffc951SPan Xinhui 		 */
44105ffc951SPan Xinhui 		if (!owner->on_cpu || need_resched() ||
44205ffc951SPan Xinhui 				vcpu_is_preempted(task_cpu(owner))) {
443be1f7bf2SJason Low 			ret = false;
444be1f7bf2SJason Low 			break;
445be1f7bf2SJason Low 		}
44601768b42SPeter Zijlstra 
447c516df97SNicolai Hähnle 		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
44825f13b40SNicolai Hähnle 			ret = false;
44925f13b40SNicolai Hähnle 			break;
45025f13b40SNicolai Hähnle 		}
45125f13b40SNicolai Hähnle 
452f2f09a4cSChristian Borntraeger 		cpu_relax();
45301768b42SPeter Zijlstra 	}
45401768b42SPeter Zijlstra 	rcu_read_unlock();
45501768b42SPeter Zijlstra 
456be1f7bf2SJason Low 	return ret;
45701768b42SPeter Zijlstra }
45801768b42SPeter Zijlstra 
45901768b42SPeter Zijlstra /*
46001768b42SPeter Zijlstra  * Initial check for entering the mutex spinning loop
46101768b42SPeter Zijlstra  */
46201768b42SPeter Zijlstra static inline int mutex_can_spin_on_owner(struct mutex *lock)
46301768b42SPeter Zijlstra {
46401768b42SPeter Zijlstra 	struct task_struct *owner;
46501768b42SPeter Zijlstra 	int retval = 1;
46601768b42SPeter Zijlstra 
46746af29e4SJason Low 	if (need_resched())
46846af29e4SJason Low 		return 0;
46946af29e4SJason Low 
47001768b42SPeter Zijlstra 	rcu_read_lock();
4713ca0ff57SPeter Zijlstra 	owner = __mutex_owner(lock);
47205ffc951SPan Xinhui 
47305ffc951SPan Xinhui 	/*
47405ffc951SPan Xinhui 	 * As lock holder preemption issue, we both skip spinning if task is not
47505ffc951SPan Xinhui 	 * on cpu or its cpu is preempted
47605ffc951SPan Xinhui 	 */
47701768b42SPeter Zijlstra 	if (owner)
47805ffc951SPan Xinhui 		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
47901768b42SPeter Zijlstra 	rcu_read_unlock();
48076916515SDavidlohr Bueso 
48176916515SDavidlohr Bueso 	/*
4823ca0ff57SPeter Zijlstra 	 * If lock->owner is not set, the mutex has been released. Return true
4833ca0ff57SPeter Zijlstra 	 * such that we'll trylock in the spin path, which is a faster option
4843ca0ff57SPeter Zijlstra 	 * than the blocking slow path.
48576916515SDavidlohr Bueso 	 */
4863ca0ff57SPeter Zijlstra 	return retval;
48776916515SDavidlohr Bueso }
48876916515SDavidlohr Bueso 
48976916515SDavidlohr Bueso /*
49076916515SDavidlohr Bueso  * Optimistic spinning.
49176916515SDavidlohr Bueso  *
49276916515SDavidlohr Bueso  * We try to spin for acquisition when we find that the lock owner
49376916515SDavidlohr Bueso  * is currently running on a (different) CPU and while we don't
49476916515SDavidlohr Bueso  * need to reschedule. The rationale is that if the lock owner is
49576916515SDavidlohr Bueso  * running, it is likely to release the lock soon.
49676916515SDavidlohr Bueso  *
49776916515SDavidlohr Bueso  * The mutex spinners are queued up using MCS lock so that only one
49876916515SDavidlohr Bueso  * spinner can compete for the mutex. However, if mutex spinning isn't
49976916515SDavidlohr Bueso  * going to happen, there is no point in going through the lock/unlock
50076916515SDavidlohr Bueso  * overhead.
50176916515SDavidlohr Bueso  *
50276916515SDavidlohr Bueso  * Returns true when the lock was taken, otherwise false, indicating
50376916515SDavidlohr Bueso  * that we need to jump to the slowpath and sleep.
504b341afb3SWaiman Long  *
505b341afb3SWaiman Long  * The waiter flag is set to true if the spinner is a waiter in the wait
506b341afb3SWaiman Long  * queue. The waiter-spinner will spin on the lock directly and concurrently
507b341afb3SWaiman Long  * with the spinner at the head of the OSQ, if present, until the owner is
508b341afb3SWaiman Long  * changed to itself.
50976916515SDavidlohr Bueso  */
510427b1820SPeter Zijlstra static __always_inline bool
511427b1820SPeter Zijlstra mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
512c516df97SNicolai Hähnle 		      const bool use_ww_ctx, struct mutex_waiter *waiter)
51376916515SDavidlohr Bueso {
514b341afb3SWaiman Long 	if (!waiter) {
515b341afb3SWaiman Long 		/*
516b341afb3SWaiman Long 		 * The purpose of the mutex_can_spin_on_owner() function is
517b341afb3SWaiman Long 		 * to eliminate the overhead of osq_lock() and osq_unlock()
518b341afb3SWaiman Long 		 * in case spinning isn't possible. As a waiter-spinner
519b341afb3SWaiman Long 		 * is not going to take OSQ lock anyway, there is no need
520b341afb3SWaiman Long 		 * to call mutex_can_spin_on_owner().
521b341afb3SWaiman Long 		 */
52276916515SDavidlohr Bueso 		if (!mutex_can_spin_on_owner(lock))
523b341afb3SWaiman Long 			goto fail;
52476916515SDavidlohr Bueso 
525e42f678aSDavidlohr Bueso 		/*
526e42f678aSDavidlohr Bueso 		 * In order to avoid a stampede of mutex spinners trying to
527e42f678aSDavidlohr Bueso 		 * acquire the mutex all at once, the spinners need to take a
528e42f678aSDavidlohr Bueso 		 * MCS (queued) lock first before spinning on the owner field.
529e42f678aSDavidlohr Bueso 		 */
53076916515SDavidlohr Bueso 		if (!osq_lock(&lock->osq))
531b341afb3SWaiman Long 			goto fail;
532b341afb3SWaiman Long 	}
53376916515SDavidlohr Bueso 
534b341afb3SWaiman Long 	for (;;) {
53576916515SDavidlohr Bueso 		struct task_struct *owner;
53676916515SDavidlohr Bueso 
537e274795eSPeter Zijlstra 		/* Try to acquire the mutex... */
538e274795eSPeter Zijlstra 		owner = __mutex_trylock_or_owner(lock);
539e274795eSPeter Zijlstra 		if (!owner)
540e274795eSPeter Zijlstra 			break;
54176916515SDavidlohr Bueso 
54276916515SDavidlohr Bueso 		/*
543e274795eSPeter Zijlstra 		 * There's an owner, wait for it to either
54476916515SDavidlohr Bueso 		 * release the lock or go to sleep.
54576916515SDavidlohr Bueso 		 */
546c516df97SNicolai Hähnle 		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
547b341afb3SWaiman Long 			goto fail_unlock;
54876916515SDavidlohr Bueso 
54976916515SDavidlohr Bueso 		/*
55076916515SDavidlohr Bueso 		 * The cpu_relax() call is a compiler barrier which forces
55176916515SDavidlohr Bueso 		 * everything in this loop to be re-loaded. We don't need
55276916515SDavidlohr Bueso 		 * memory barriers as we'll eventually observe the right
55376916515SDavidlohr Bueso 		 * values at the cost of a few extra spins.
55476916515SDavidlohr Bueso 		 */
555f2f09a4cSChristian Borntraeger 		cpu_relax();
55676916515SDavidlohr Bueso 	}
55776916515SDavidlohr Bueso 
558b341afb3SWaiman Long 	if (!waiter)
55976916515SDavidlohr Bueso 		osq_unlock(&lock->osq);
560b341afb3SWaiman Long 
561b341afb3SWaiman Long 	return true;
562b341afb3SWaiman Long 
563b341afb3SWaiman Long 
564b341afb3SWaiman Long fail_unlock:
565b341afb3SWaiman Long 	if (!waiter)
566b341afb3SWaiman Long 		osq_unlock(&lock->osq);
567b341afb3SWaiman Long 
568b341afb3SWaiman Long fail:
56976916515SDavidlohr Bueso 	/*
57076916515SDavidlohr Bueso 	 * If we fell out of the spin path because of need_resched(),
57176916515SDavidlohr Bueso 	 * reschedule now, before we try-lock the mutex. This avoids getting
57276916515SDavidlohr Bueso 	 * scheduled out right after we obtained the mutex.
57376916515SDavidlohr Bueso 	 */
5746f942a1fSPeter Zijlstra 	if (need_resched()) {
5756f942a1fSPeter Zijlstra 		/*
5766f942a1fSPeter Zijlstra 		 * We _should_ have TASK_RUNNING here, but just in case
5776f942a1fSPeter Zijlstra 		 * we do not, make it so, otherwise we might get stuck.
5786f942a1fSPeter Zijlstra 		 */
5796f942a1fSPeter Zijlstra 		__set_current_state(TASK_RUNNING);
58076916515SDavidlohr Bueso 		schedule_preempt_disabled();
5816f942a1fSPeter Zijlstra 	}
58276916515SDavidlohr Bueso 
58376916515SDavidlohr Bueso 	return false;
58476916515SDavidlohr Bueso }
58576916515SDavidlohr Bueso #else
586427b1820SPeter Zijlstra static __always_inline bool
587427b1820SPeter Zijlstra mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
588c516df97SNicolai Hähnle 		      const bool use_ww_ctx, struct mutex_waiter *waiter)
58976916515SDavidlohr Bueso {
59076916515SDavidlohr Bueso 	return false;
59176916515SDavidlohr Bueso }
59201768b42SPeter Zijlstra #endif
59301768b42SPeter Zijlstra 
5943ca0ff57SPeter Zijlstra static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
59501768b42SPeter Zijlstra 
59601768b42SPeter Zijlstra /**
59701768b42SPeter Zijlstra  * mutex_unlock - release the mutex
59801768b42SPeter Zijlstra  * @lock: the mutex to be released
59901768b42SPeter Zijlstra  *
60001768b42SPeter Zijlstra  * Unlock a mutex that has been locked by this task previously.
60101768b42SPeter Zijlstra  *
60201768b42SPeter Zijlstra  * This function must not be used in interrupt context. Unlocking
60301768b42SPeter Zijlstra  * of a not locked mutex is not allowed.
60401768b42SPeter Zijlstra  *
60501768b42SPeter Zijlstra  * This function is similar to (but not equivalent to) up().
60601768b42SPeter Zijlstra  */
60701768b42SPeter Zijlstra void __sched mutex_unlock(struct mutex *lock)
60801768b42SPeter Zijlstra {
6093ca0ff57SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
6103ca0ff57SPeter Zijlstra 	if (__mutex_unlock_fast(lock))
6113ca0ff57SPeter Zijlstra 		return;
61201768b42SPeter Zijlstra #endif
6133ca0ff57SPeter Zijlstra 	__mutex_unlock_slowpath(lock, _RET_IP_);
61401768b42SPeter Zijlstra }
61501768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_unlock);
61601768b42SPeter Zijlstra 
61701768b42SPeter Zijlstra /**
61801768b42SPeter Zijlstra  * ww_mutex_unlock - release the w/w mutex
61901768b42SPeter Zijlstra  * @lock: the mutex to be released
62001768b42SPeter Zijlstra  *
62101768b42SPeter Zijlstra  * Unlock a mutex that has been locked by this task previously with any of the
62201768b42SPeter Zijlstra  * ww_mutex_lock* functions (with or without an acquire context). It is
62301768b42SPeter Zijlstra  * forbidden to release the locks after releasing the acquire context.
62401768b42SPeter Zijlstra  *
62501768b42SPeter Zijlstra  * This function must not be used in interrupt context. Unlocking
62601768b42SPeter Zijlstra  * of a unlocked mutex is not allowed.
62701768b42SPeter Zijlstra  */
62801768b42SPeter Zijlstra void __sched ww_mutex_unlock(struct ww_mutex *lock)
62901768b42SPeter Zijlstra {
63001768b42SPeter Zijlstra 	/*
63101768b42SPeter Zijlstra 	 * The unlocking fastpath is the 0->1 transition from 'locked'
63201768b42SPeter Zijlstra 	 * into 'unlocked' state:
63301768b42SPeter Zijlstra 	 */
63401768b42SPeter Zijlstra 	if (lock->ctx) {
63501768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
63601768b42SPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
63701768b42SPeter Zijlstra #endif
63801768b42SPeter Zijlstra 		if (lock->ctx->acquired > 0)
63901768b42SPeter Zijlstra 			lock->ctx->acquired--;
64001768b42SPeter Zijlstra 		lock->ctx = NULL;
64101768b42SPeter Zijlstra 	}
64201768b42SPeter Zijlstra 
6433ca0ff57SPeter Zijlstra 	mutex_unlock(&lock->base);
64401768b42SPeter Zijlstra }
64501768b42SPeter Zijlstra EXPORT_SYMBOL(ww_mutex_unlock);
64601768b42SPeter Zijlstra 
64701768b42SPeter Zijlstra static inline int __sched
648200b1874SNicolai Hähnle __ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
649200b1874SNicolai Hähnle 			    struct ww_acquire_ctx *ctx)
65001768b42SPeter Zijlstra {
65101768b42SPeter Zijlstra 	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
6524d3199e4SDavidlohr Bueso 	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
653200b1874SNicolai Hähnle 	struct mutex_waiter *cur;
65401768b42SPeter Zijlstra 
655200b1874SNicolai Hähnle 	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
656200b1874SNicolai Hähnle 		goto deadlock;
657200b1874SNicolai Hähnle 
658200b1874SNicolai Hähnle 	/*
659200b1874SNicolai Hähnle 	 * If there is a waiter in front of us that has a context, then its
660200b1874SNicolai Hähnle 	 * stamp is earlier than ours and we must back off.
661200b1874SNicolai Hähnle 	 */
662200b1874SNicolai Hähnle 	cur = waiter;
663200b1874SNicolai Hähnle 	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
664200b1874SNicolai Hähnle 		if (cur->ww_ctx)
665200b1874SNicolai Hähnle 			goto deadlock;
666200b1874SNicolai Hähnle 	}
667200b1874SNicolai Hähnle 
66801768b42SPeter Zijlstra 	return 0;
66901768b42SPeter Zijlstra 
670200b1874SNicolai Hähnle deadlock:
67101768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
67201768b42SPeter Zijlstra 	DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
67301768b42SPeter Zijlstra 	ctx->contending_lock = ww;
67401768b42SPeter Zijlstra #endif
67501768b42SPeter Zijlstra 	return -EDEADLK;
67601768b42SPeter Zijlstra }
67701768b42SPeter Zijlstra 
6786baa5c60SNicolai Hähnle static inline int __sched
6796baa5c60SNicolai Hähnle __ww_mutex_add_waiter(struct mutex_waiter *waiter,
6806baa5c60SNicolai Hähnle 		      struct mutex *lock,
6816baa5c60SNicolai Hähnle 		      struct ww_acquire_ctx *ww_ctx)
6826baa5c60SNicolai Hähnle {
6836baa5c60SNicolai Hähnle 	struct mutex_waiter *cur;
6846baa5c60SNicolai Hähnle 	struct list_head *pos;
6856baa5c60SNicolai Hähnle 
6866baa5c60SNicolai Hähnle 	if (!ww_ctx) {
6876baa5c60SNicolai Hähnle 		list_add_tail(&waiter->list, &lock->wait_list);
6886baa5c60SNicolai Hähnle 		return 0;
6896baa5c60SNicolai Hähnle 	}
6906baa5c60SNicolai Hähnle 
6916baa5c60SNicolai Hähnle 	/*
6926baa5c60SNicolai Hähnle 	 * Add the waiter before the first waiter with a higher stamp.
6936baa5c60SNicolai Hähnle 	 * Waiters without a context are skipped to avoid starving
6946baa5c60SNicolai Hähnle 	 * them.
6956baa5c60SNicolai Hähnle 	 */
6966baa5c60SNicolai Hähnle 	pos = &lock->wait_list;
6976baa5c60SNicolai Hähnle 	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
6986baa5c60SNicolai Hähnle 		if (!cur->ww_ctx)
6996baa5c60SNicolai Hähnle 			continue;
7006baa5c60SNicolai Hähnle 
7016baa5c60SNicolai Hähnle 		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
7026baa5c60SNicolai Hähnle 			/* Back off immediately if necessary. */
7036baa5c60SNicolai Hähnle 			if (ww_ctx->acquired > 0) {
7046baa5c60SNicolai Hähnle #ifdef CONFIG_DEBUG_MUTEXES
7056baa5c60SNicolai Hähnle 				struct ww_mutex *ww;
7066baa5c60SNicolai Hähnle 
7076baa5c60SNicolai Hähnle 				ww = container_of(lock, struct ww_mutex, base);
7086baa5c60SNicolai Hähnle 				DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
7096baa5c60SNicolai Hähnle 				ww_ctx->contending_lock = ww;
7106baa5c60SNicolai Hähnle #endif
7116baa5c60SNicolai Hähnle 				return -EDEADLK;
7126baa5c60SNicolai Hähnle 			}
7136baa5c60SNicolai Hähnle 
7146baa5c60SNicolai Hähnle 			break;
7156baa5c60SNicolai Hähnle 		}
7166baa5c60SNicolai Hähnle 
7176baa5c60SNicolai Hähnle 		pos = &cur->list;
718200b1874SNicolai Hähnle 
719200b1874SNicolai Hähnle 		/*
720200b1874SNicolai Hähnle 		 * Wake up the waiter so that it gets a chance to back
721200b1874SNicolai Hähnle 		 * off.
722200b1874SNicolai Hähnle 		 */
723200b1874SNicolai Hähnle 		if (cur->ww_ctx->acquired > 0) {
724200b1874SNicolai Hähnle 			debug_mutex_wake_waiter(lock, cur);
725200b1874SNicolai Hähnle 			wake_up_process(cur->task);
726200b1874SNicolai Hähnle 		}
7276baa5c60SNicolai Hähnle 	}
7286baa5c60SNicolai Hähnle 
7296baa5c60SNicolai Hähnle 	list_add_tail(&waiter->list, pos);
73001768b42SPeter Zijlstra 	return 0;
73101768b42SPeter Zijlstra }
73201768b42SPeter Zijlstra 
73301768b42SPeter Zijlstra /*
73401768b42SPeter Zijlstra  * Lock a mutex (possibly interruptible), slowpath:
73501768b42SPeter Zijlstra  */
73601768b42SPeter Zijlstra static __always_inline int __sched
73701768b42SPeter Zijlstra __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73801768b42SPeter Zijlstra 		    struct lockdep_map *nest_lock, unsigned long ip,
73901768b42SPeter Zijlstra 		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
74001768b42SPeter Zijlstra {
74101768b42SPeter Zijlstra 	struct mutex_waiter waiter;
7429d659ae1SPeter Zijlstra 	bool first = false;
743a40ca565SWaiman Long 	struct ww_mutex *ww;
74401768b42SPeter Zijlstra 	int ret;
74501768b42SPeter Zijlstra 
746427b1820SPeter Zijlstra 	might_sleep();
747ea9e0fb8SNicolai Hähnle 
748a40ca565SWaiman Long 	ww = container_of(lock, struct ww_mutex, base);
749ea9e0fb8SNicolai Hähnle 	if (use_ww_ctx && ww_ctx) {
7500422e83dSChris Wilson 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
7510422e83dSChris Wilson 			return -EALREADY;
7520422e83dSChris Wilson 	}
7530422e83dSChris Wilson 
75401768b42SPeter Zijlstra 	preempt_disable();
75501768b42SPeter Zijlstra 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
75601768b42SPeter Zijlstra 
757e274795eSPeter Zijlstra 	if (__mutex_trylock(lock) ||
758c516df97SNicolai Hähnle 	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
75976916515SDavidlohr Bueso 		/* got the lock, yay! */
7603ca0ff57SPeter Zijlstra 		lock_acquired(&lock->dep_map, ip);
761ea9e0fb8SNicolai Hähnle 		if (use_ww_ctx && ww_ctx)
7623ca0ff57SPeter Zijlstra 			ww_mutex_set_context_fastpath(ww, ww_ctx);
76301768b42SPeter Zijlstra 		preempt_enable();
76401768b42SPeter Zijlstra 		return 0;
76501768b42SPeter Zijlstra 	}
76601768b42SPeter Zijlstra 
767b9c16a0eSPeter Zijlstra 	spin_lock(&lock->wait_lock);
7681e820c96SJason Low 	/*
7693ca0ff57SPeter Zijlstra 	 * After waiting to acquire the wait_lock, try again.
7701e820c96SJason Low 	 */
771659cf9f5SNicolai Hähnle 	if (__mutex_trylock(lock)) {
772659cf9f5SNicolai Hähnle 		if (use_ww_ctx && ww_ctx)
773659cf9f5SNicolai Hähnle 			__ww_mutex_wakeup_for_backoff(lock, ww_ctx);
774659cf9f5SNicolai Hähnle 
77501768b42SPeter Zijlstra 		goto skip_wait;
776659cf9f5SNicolai Hähnle 	}
77701768b42SPeter Zijlstra 
77801768b42SPeter Zijlstra 	debug_mutex_lock_common(lock, &waiter);
779d269a8b8SDavidlohr Bueso 	debug_mutex_add_waiter(lock, &waiter, current);
78001768b42SPeter Zijlstra 
7816baa5c60SNicolai Hähnle 	lock_contended(&lock->dep_map, ip);
7826baa5c60SNicolai Hähnle 
7836baa5c60SNicolai Hähnle 	if (!use_ww_ctx) {
78401768b42SPeter Zijlstra 		/* add waiting tasks to the end of the waitqueue (FIFO): */
78501768b42SPeter Zijlstra 		list_add_tail(&waiter.list, &lock->wait_list);
786977625a6SNicolai Hähnle 
787977625a6SNicolai Hähnle #ifdef CONFIG_DEBUG_MUTEXES
788977625a6SNicolai Hähnle 		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
789977625a6SNicolai Hähnle #endif
7906baa5c60SNicolai Hähnle 	} else {
7916baa5c60SNicolai Hähnle 		/* Add in stamp order, waking up waiters that must back off. */
7926baa5c60SNicolai Hähnle 		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
7936baa5c60SNicolai Hähnle 		if (ret)
7946baa5c60SNicolai Hähnle 			goto err_early_backoff;
7956baa5c60SNicolai Hähnle 
7966baa5c60SNicolai Hähnle 		waiter.ww_ctx = ww_ctx;
7976baa5c60SNicolai Hähnle 	}
7986baa5c60SNicolai Hähnle 
799d269a8b8SDavidlohr Bueso 	waiter.task = current;
80001768b42SPeter Zijlstra 
8019d659ae1SPeter Zijlstra 	if (__mutex_waiter_is_first(lock, &waiter))
8023ca0ff57SPeter Zijlstra 		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
8033ca0ff57SPeter Zijlstra 
804642fa448SDavidlohr Bueso 	set_current_state(state);
80501768b42SPeter Zijlstra 	for (;;) {
8065bbd7e64SPeter Zijlstra 		/*
8075bbd7e64SPeter Zijlstra 		 * Once we hold wait_lock, we're serialized against
8085bbd7e64SPeter Zijlstra 		 * mutex_unlock() handing the lock off to us, do a trylock
8095bbd7e64SPeter Zijlstra 		 * before testing the error conditions to make sure we pick up
8105bbd7e64SPeter Zijlstra 		 * the handoff.
8115bbd7e64SPeter Zijlstra 		 */
812e274795eSPeter Zijlstra 		if (__mutex_trylock(lock))
8135bbd7e64SPeter Zijlstra 			goto acquired;
81401768b42SPeter Zijlstra 
81501768b42SPeter Zijlstra 		/*
8165bbd7e64SPeter Zijlstra 		 * Check for signals and wound conditions while holding
8175bbd7e64SPeter Zijlstra 		 * wait_lock. This ensures the lock cancellation is ordered
8185bbd7e64SPeter Zijlstra 		 * against mutex_unlock() and wake-ups do not go missing.
81901768b42SPeter Zijlstra 		 */
820d269a8b8SDavidlohr Bueso 		if (unlikely(signal_pending_state(state, current))) {
82101768b42SPeter Zijlstra 			ret = -EINTR;
82201768b42SPeter Zijlstra 			goto err;
82301768b42SPeter Zijlstra 		}
82401768b42SPeter Zijlstra 
825ea9e0fb8SNicolai Hähnle 		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
826200b1874SNicolai Hähnle 			ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx);
82701768b42SPeter Zijlstra 			if (ret)
82801768b42SPeter Zijlstra 				goto err;
82901768b42SPeter Zijlstra 		}
83001768b42SPeter Zijlstra 
831b9c16a0eSPeter Zijlstra 		spin_unlock(&lock->wait_lock);
83201768b42SPeter Zijlstra 		schedule_preempt_disabled();
8339d659ae1SPeter Zijlstra 
8346baa5c60SNicolai Hähnle 		/*
8356baa5c60SNicolai Hähnle 		 * ww_mutex needs to always recheck its position since its waiter
8366baa5c60SNicolai Hähnle 		 * list is not FIFO ordered.
8376baa5c60SNicolai Hähnle 		 */
8386baa5c60SNicolai Hähnle 		if ((use_ww_ctx && ww_ctx) || !first) {
8396baa5c60SNicolai Hähnle 			first = __mutex_waiter_is_first(lock, &waiter);
8406baa5c60SNicolai Hähnle 			if (first)
8419d659ae1SPeter Zijlstra 				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
8429d659ae1SPeter Zijlstra 		}
8435bbd7e64SPeter Zijlstra 
844642fa448SDavidlohr Bueso 		set_current_state(state);
8455bbd7e64SPeter Zijlstra 		/*
8465bbd7e64SPeter Zijlstra 		 * Here we order against unlock; we must either see it change
8475bbd7e64SPeter Zijlstra 		 * state back to RUNNING and fall through the next schedule(),
8485bbd7e64SPeter Zijlstra 		 * or we must see its unlock and acquire.
8495bbd7e64SPeter Zijlstra 		 */
850e274795eSPeter Zijlstra 		if (__mutex_trylock(lock) ||
851c516df97SNicolai Hähnle 		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
8525bbd7e64SPeter Zijlstra 			break;
8535bbd7e64SPeter Zijlstra 
854b9c16a0eSPeter Zijlstra 		spin_lock(&lock->wait_lock);
85501768b42SPeter Zijlstra 	}
856b9c16a0eSPeter Zijlstra 	spin_lock(&lock->wait_lock);
8575bbd7e64SPeter Zijlstra acquired:
858642fa448SDavidlohr Bueso 	__set_current_state(TASK_RUNNING);
85951587bcfSDavidlohr Bueso 
860d269a8b8SDavidlohr Bueso 	mutex_remove_waiter(lock, &waiter, current);
86101768b42SPeter Zijlstra 	if (likely(list_empty(&lock->wait_list)))
8629d659ae1SPeter Zijlstra 		__mutex_clear_flag(lock, MUTEX_FLAGS);
8633ca0ff57SPeter Zijlstra 
86401768b42SPeter Zijlstra 	debug_mutex_free_waiter(&waiter);
86501768b42SPeter Zijlstra 
86601768b42SPeter Zijlstra skip_wait:
86701768b42SPeter Zijlstra 	/* got the lock - cleanup and rejoice! */
86801768b42SPeter Zijlstra 	lock_acquired(&lock->dep_map, ip);
86901768b42SPeter Zijlstra 
870ea9e0fb8SNicolai Hähnle 	if (use_ww_ctx && ww_ctx)
8714bd19084SDavidlohr Bueso 		ww_mutex_set_context_slowpath(ww, ww_ctx);
87201768b42SPeter Zijlstra 
873b9c16a0eSPeter Zijlstra 	spin_unlock(&lock->wait_lock);
87401768b42SPeter Zijlstra 	preempt_enable();
87501768b42SPeter Zijlstra 	return 0;
87601768b42SPeter Zijlstra 
87701768b42SPeter Zijlstra err:
878642fa448SDavidlohr Bueso 	__set_current_state(TASK_RUNNING);
879d269a8b8SDavidlohr Bueso 	mutex_remove_waiter(lock, &waiter, current);
8806baa5c60SNicolai Hähnle err_early_backoff:
881b9c16a0eSPeter Zijlstra 	spin_unlock(&lock->wait_lock);
88201768b42SPeter Zijlstra 	debug_mutex_free_waiter(&waiter);
88301768b42SPeter Zijlstra 	mutex_release(&lock->dep_map, 1, ip);
88401768b42SPeter Zijlstra 	preempt_enable();
88501768b42SPeter Zijlstra 	return ret;
88601768b42SPeter Zijlstra }
88701768b42SPeter Zijlstra 
888427b1820SPeter Zijlstra static int __sched
889427b1820SPeter Zijlstra __mutex_lock(struct mutex *lock, long state, unsigned int subclass,
890427b1820SPeter Zijlstra 	     struct lockdep_map *nest_lock, unsigned long ip)
891427b1820SPeter Zijlstra {
892427b1820SPeter Zijlstra 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
893427b1820SPeter Zijlstra }
894427b1820SPeter Zijlstra 
895427b1820SPeter Zijlstra static int __sched
896427b1820SPeter Zijlstra __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
897427b1820SPeter Zijlstra 		struct lockdep_map *nest_lock, unsigned long ip,
898427b1820SPeter Zijlstra 		struct ww_acquire_ctx *ww_ctx)
899427b1820SPeter Zijlstra {
900427b1820SPeter Zijlstra 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
901427b1820SPeter Zijlstra }
902427b1820SPeter Zijlstra 
90301768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_LOCK_ALLOC
90401768b42SPeter Zijlstra void __sched
90501768b42SPeter Zijlstra mutex_lock_nested(struct mutex *lock, unsigned int subclass)
90601768b42SPeter Zijlstra {
907427b1820SPeter Zijlstra 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
90801768b42SPeter Zijlstra }
90901768b42SPeter Zijlstra 
91001768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(mutex_lock_nested);
91101768b42SPeter Zijlstra 
91201768b42SPeter Zijlstra void __sched
91301768b42SPeter Zijlstra _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
91401768b42SPeter Zijlstra {
915427b1820SPeter Zijlstra 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
91601768b42SPeter Zijlstra }
91701768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
91801768b42SPeter Zijlstra 
91901768b42SPeter Zijlstra int __sched
92001768b42SPeter Zijlstra mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
92101768b42SPeter Zijlstra {
922427b1820SPeter Zijlstra 	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
92301768b42SPeter Zijlstra }
92401768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
92501768b42SPeter Zijlstra 
92601768b42SPeter Zijlstra int __sched
92701768b42SPeter Zijlstra mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
92801768b42SPeter Zijlstra {
929427b1820SPeter Zijlstra 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
93001768b42SPeter Zijlstra }
93101768b42SPeter Zijlstra EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
93201768b42SPeter Zijlstra 
9331460cb65STejun Heo void __sched
9341460cb65STejun Heo mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
9351460cb65STejun Heo {
9361460cb65STejun Heo 	int token;
9371460cb65STejun Heo 
9381460cb65STejun Heo 	might_sleep();
9391460cb65STejun Heo 
9401460cb65STejun Heo 	token = io_schedule_prepare();
9411460cb65STejun Heo 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
9421460cb65STejun Heo 			    subclass, NULL, _RET_IP_, NULL, 0);
9431460cb65STejun Heo 	io_schedule_finish(token);
9441460cb65STejun Heo }
9451460cb65STejun Heo EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
9461460cb65STejun Heo 
94701768b42SPeter Zijlstra static inline int
94801768b42SPeter Zijlstra ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
94901768b42SPeter Zijlstra {
95001768b42SPeter Zijlstra #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
95101768b42SPeter Zijlstra 	unsigned tmp;
95201768b42SPeter Zijlstra 
95301768b42SPeter Zijlstra 	if (ctx->deadlock_inject_countdown-- == 0) {
95401768b42SPeter Zijlstra 		tmp = ctx->deadlock_inject_interval;
95501768b42SPeter Zijlstra 		if (tmp > UINT_MAX/4)
95601768b42SPeter Zijlstra 			tmp = UINT_MAX;
95701768b42SPeter Zijlstra 		else
95801768b42SPeter Zijlstra 			tmp = tmp*2 + tmp + tmp/2;
95901768b42SPeter Zijlstra 
96001768b42SPeter Zijlstra 		ctx->deadlock_inject_interval = tmp;
96101768b42SPeter Zijlstra 		ctx->deadlock_inject_countdown = tmp;
96201768b42SPeter Zijlstra 		ctx->contending_lock = lock;
96301768b42SPeter Zijlstra 
96401768b42SPeter Zijlstra 		ww_mutex_unlock(lock);
96501768b42SPeter Zijlstra 
96601768b42SPeter Zijlstra 		return -EDEADLK;
96701768b42SPeter Zijlstra 	}
96801768b42SPeter Zijlstra #endif
96901768b42SPeter Zijlstra 
97001768b42SPeter Zijlstra 	return 0;
97101768b42SPeter Zijlstra }
97201768b42SPeter Zijlstra 
97301768b42SPeter Zijlstra int __sched
974c5470b22SNicolai Hähnle ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
97501768b42SPeter Zijlstra {
97601768b42SPeter Zijlstra 	int ret;
97701768b42SPeter Zijlstra 
97801768b42SPeter Zijlstra 	might_sleep();
979427b1820SPeter Zijlstra 	ret =  __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
980ea9e0fb8SNicolai Hähnle 			       0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
981427b1820SPeter Zijlstra 			       ctx);
982ea9e0fb8SNicolai Hähnle 	if (!ret && ctx && ctx->acquired > 1)
98301768b42SPeter Zijlstra 		return ww_mutex_deadlock_injection(lock, ctx);
98401768b42SPeter Zijlstra 
98501768b42SPeter Zijlstra 	return ret;
98601768b42SPeter Zijlstra }
987c5470b22SNicolai Hähnle EXPORT_SYMBOL_GPL(ww_mutex_lock);
98801768b42SPeter Zijlstra 
98901768b42SPeter Zijlstra int __sched
990c5470b22SNicolai Hähnle ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
99101768b42SPeter Zijlstra {
99201768b42SPeter Zijlstra 	int ret;
99301768b42SPeter Zijlstra 
99401768b42SPeter Zijlstra 	might_sleep();
995427b1820SPeter Zijlstra 	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
996ea9e0fb8SNicolai Hähnle 			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
997427b1820SPeter Zijlstra 			      ctx);
99801768b42SPeter Zijlstra 
999ea9e0fb8SNicolai Hähnle 	if (!ret && ctx && ctx->acquired > 1)
100001768b42SPeter Zijlstra 		return ww_mutex_deadlock_injection(lock, ctx);
100101768b42SPeter Zijlstra 
100201768b42SPeter Zijlstra 	return ret;
100301768b42SPeter Zijlstra }
1004c5470b22SNicolai Hähnle EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
100501768b42SPeter Zijlstra 
100601768b42SPeter Zijlstra #endif
100701768b42SPeter Zijlstra 
100801768b42SPeter Zijlstra /*
100901768b42SPeter Zijlstra  * Release the lock, slowpath:
101001768b42SPeter Zijlstra  */
10113ca0ff57SPeter Zijlstra static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
101201768b42SPeter Zijlstra {
10139d659ae1SPeter Zijlstra 	struct task_struct *next = NULL;
1014194a6b5bSWaiman Long 	DEFINE_WAKE_Q(wake_q);
1015b9c16a0eSPeter Zijlstra 	unsigned long owner;
101601768b42SPeter Zijlstra 
10173ca0ff57SPeter Zijlstra 	mutex_release(&lock->dep_map, 1, ip);
10183ca0ff57SPeter Zijlstra 
101901768b42SPeter Zijlstra 	/*
10209d659ae1SPeter Zijlstra 	 * Release the lock before (potentially) taking the spinlock such that
10219d659ae1SPeter Zijlstra 	 * other contenders can get on with things ASAP.
10229d659ae1SPeter Zijlstra 	 *
10239d659ae1SPeter Zijlstra 	 * Except when HANDOFF, in that case we must not clear the owner field,
10249d659ae1SPeter Zijlstra 	 * but instead set it to the top waiter.
102501768b42SPeter Zijlstra 	 */
10269d659ae1SPeter Zijlstra 	owner = atomic_long_read(&lock->owner);
10279d659ae1SPeter Zijlstra 	for (;;) {
10289d659ae1SPeter Zijlstra 		unsigned long old;
10299d659ae1SPeter Zijlstra 
10309d659ae1SPeter Zijlstra #ifdef CONFIG_DEBUG_MUTEXES
10319d659ae1SPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
1032e274795eSPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
10339d659ae1SPeter Zijlstra #endif
10349d659ae1SPeter Zijlstra 
10359d659ae1SPeter Zijlstra 		if (owner & MUTEX_FLAG_HANDOFF)
10369d659ae1SPeter Zijlstra 			break;
10379d659ae1SPeter Zijlstra 
10389d659ae1SPeter Zijlstra 		old = atomic_long_cmpxchg_release(&lock->owner, owner,
10399d659ae1SPeter Zijlstra 						  __owner_flags(owner));
10409d659ae1SPeter Zijlstra 		if (old == owner) {
10419d659ae1SPeter Zijlstra 			if (owner & MUTEX_FLAG_WAITERS)
10429d659ae1SPeter Zijlstra 				break;
10439d659ae1SPeter Zijlstra 
10443ca0ff57SPeter Zijlstra 			return;
10459d659ae1SPeter Zijlstra 		}
10469d659ae1SPeter Zijlstra 
10479d659ae1SPeter Zijlstra 		owner = old;
10489d659ae1SPeter Zijlstra 	}
104901768b42SPeter Zijlstra 
1050b9c16a0eSPeter Zijlstra 	spin_lock(&lock->wait_lock);
10511d8fe7dcSJason Low 	debug_mutex_unlock(lock);
105201768b42SPeter Zijlstra 	if (!list_empty(&lock->wait_list)) {
105301768b42SPeter Zijlstra 		/* get the first entry from the wait-list: */
105401768b42SPeter Zijlstra 		struct mutex_waiter *waiter =
10559d659ae1SPeter Zijlstra 			list_first_entry(&lock->wait_list,
105601768b42SPeter Zijlstra 					 struct mutex_waiter, list);
105701768b42SPeter Zijlstra 
10589d659ae1SPeter Zijlstra 		next = waiter->task;
10599d659ae1SPeter Zijlstra 
106001768b42SPeter Zijlstra 		debug_mutex_wake_waiter(lock, waiter);
10619d659ae1SPeter Zijlstra 		wake_q_add(&wake_q, next);
106201768b42SPeter Zijlstra 	}
106301768b42SPeter Zijlstra 
10649d659ae1SPeter Zijlstra 	if (owner & MUTEX_FLAG_HANDOFF)
10659d659ae1SPeter Zijlstra 		__mutex_handoff(lock, next);
10669d659ae1SPeter Zijlstra 
1067b9c16a0eSPeter Zijlstra 	spin_unlock(&lock->wait_lock);
10689d659ae1SPeter Zijlstra 
10691329ce6fSDavidlohr Bueso 	wake_up_q(&wake_q);
107001768b42SPeter Zijlstra }
107101768b42SPeter Zijlstra 
107201768b42SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
107301768b42SPeter Zijlstra /*
107401768b42SPeter Zijlstra  * Here come the less common (and hence less performance-critical) APIs:
107501768b42SPeter Zijlstra  * mutex_lock_interruptible() and mutex_trylock().
107601768b42SPeter Zijlstra  */
107701768b42SPeter Zijlstra static noinline int __sched
107801768b42SPeter Zijlstra __mutex_lock_killable_slowpath(struct mutex *lock);
107901768b42SPeter Zijlstra 
108001768b42SPeter Zijlstra static noinline int __sched
108101768b42SPeter Zijlstra __mutex_lock_interruptible_slowpath(struct mutex *lock);
108201768b42SPeter Zijlstra 
108301768b42SPeter Zijlstra /**
108401768b42SPeter Zijlstra  * mutex_lock_interruptible - acquire the mutex, interruptible
108501768b42SPeter Zijlstra  * @lock: the mutex to be acquired
108601768b42SPeter Zijlstra  *
108701768b42SPeter Zijlstra  * Lock the mutex like mutex_lock(), and return 0 if the mutex has
108801768b42SPeter Zijlstra  * been acquired or sleep until the mutex becomes available. If a
108901768b42SPeter Zijlstra  * signal arrives while waiting for the lock then this function
109001768b42SPeter Zijlstra  * returns -EINTR.
109101768b42SPeter Zijlstra  *
109201768b42SPeter Zijlstra  * This function is similar to (but not equivalent to) down_interruptible().
109301768b42SPeter Zijlstra  */
109401768b42SPeter Zijlstra int __sched mutex_lock_interruptible(struct mutex *lock)
109501768b42SPeter Zijlstra {
109601768b42SPeter Zijlstra 	might_sleep();
10973ca0ff57SPeter Zijlstra 
10983ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(lock))
109901768b42SPeter Zijlstra 		return 0;
11003ca0ff57SPeter Zijlstra 
110101768b42SPeter Zijlstra 	return __mutex_lock_interruptible_slowpath(lock);
110201768b42SPeter Zijlstra }
110301768b42SPeter Zijlstra 
110401768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_lock_interruptible);
110501768b42SPeter Zijlstra 
110601768b42SPeter Zijlstra int __sched mutex_lock_killable(struct mutex *lock)
110701768b42SPeter Zijlstra {
110801768b42SPeter Zijlstra 	might_sleep();
11093ca0ff57SPeter Zijlstra 
11103ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(lock))
111101768b42SPeter Zijlstra 		return 0;
11123ca0ff57SPeter Zijlstra 
111301768b42SPeter Zijlstra 	return __mutex_lock_killable_slowpath(lock);
111401768b42SPeter Zijlstra }
111501768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_lock_killable);
111601768b42SPeter Zijlstra 
11171460cb65STejun Heo void __sched mutex_lock_io(struct mutex *lock)
11181460cb65STejun Heo {
11191460cb65STejun Heo 	int token;
11201460cb65STejun Heo 
11211460cb65STejun Heo 	token = io_schedule_prepare();
11221460cb65STejun Heo 	mutex_lock(lock);
11231460cb65STejun Heo 	io_schedule_finish(token);
11241460cb65STejun Heo }
11251460cb65STejun Heo EXPORT_SYMBOL_GPL(mutex_lock_io);
11261460cb65STejun Heo 
11273ca0ff57SPeter Zijlstra static noinline void __sched
11283ca0ff57SPeter Zijlstra __mutex_lock_slowpath(struct mutex *lock)
112901768b42SPeter Zijlstra {
1130427b1820SPeter Zijlstra 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
113101768b42SPeter Zijlstra }
113201768b42SPeter Zijlstra 
113301768b42SPeter Zijlstra static noinline int __sched
113401768b42SPeter Zijlstra __mutex_lock_killable_slowpath(struct mutex *lock)
113501768b42SPeter Zijlstra {
1136427b1820SPeter Zijlstra 	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
113701768b42SPeter Zijlstra }
113801768b42SPeter Zijlstra 
113901768b42SPeter Zijlstra static noinline int __sched
114001768b42SPeter Zijlstra __mutex_lock_interruptible_slowpath(struct mutex *lock)
114101768b42SPeter Zijlstra {
1142427b1820SPeter Zijlstra 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
114301768b42SPeter Zijlstra }
114401768b42SPeter Zijlstra 
114501768b42SPeter Zijlstra static noinline int __sched
114601768b42SPeter Zijlstra __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
114701768b42SPeter Zijlstra {
1148427b1820SPeter Zijlstra 	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
1149427b1820SPeter Zijlstra 			       _RET_IP_, ctx);
115001768b42SPeter Zijlstra }
115101768b42SPeter Zijlstra 
115201768b42SPeter Zijlstra static noinline int __sched
115301768b42SPeter Zijlstra __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
115401768b42SPeter Zijlstra 					    struct ww_acquire_ctx *ctx)
115501768b42SPeter Zijlstra {
1156427b1820SPeter Zijlstra 	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
1157427b1820SPeter Zijlstra 			       _RET_IP_, ctx);
115801768b42SPeter Zijlstra }
115901768b42SPeter Zijlstra 
116001768b42SPeter Zijlstra #endif
116101768b42SPeter Zijlstra 
116201768b42SPeter Zijlstra /**
116301768b42SPeter Zijlstra  * mutex_trylock - try to acquire the mutex, without waiting
116401768b42SPeter Zijlstra  * @lock: the mutex to be acquired
116501768b42SPeter Zijlstra  *
116601768b42SPeter Zijlstra  * Try to acquire the mutex atomically. Returns 1 if the mutex
116701768b42SPeter Zijlstra  * has been acquired successfully, and 0 on contention.
116801768b42SPeter Zijlstra  *
116901768b42SPeter Zijlstra  * NOTE: this function follows the spin_trylock() convention, so
117001768b42SPeter Zijlstra  * it is negated from the down_trylock() return values! Be careful
117101768b42SPeter Zijlstra  * about this when converting semaphore users to mutexes.
117201768b42SPeter Zijlstra  *
117301768b42SPeter Zijlstra  * This function must not be used in interrupt context. The
117401768b42SPeter Zijlstra  * mutex must be released by the same task that acquired it.
117501768b42SPeter Zijlstra  */
117601768b42SPeter Zijlstra int __sched mutex_trylock(struct mutex *lock)
117701768b42SPeter Zijlstra {
1178e274795eSPeter Zijlstra 	bool locked = __mutex_trylock(lock);
117901768b42SPeter Zijlstra 
11803ca0ff57SPeter Zijlstra 	if (locked)
11813ca0ff57SPeter Zijlstra 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
118201768b42SPeter Zijlstra 
11833ca0ff57SPeter Zijlstra 	return locked;
118401768b42SPeter Zijlstra }
118501768b42SPeter Zijlstra EXPORT_SYMBOL(mutex_trylock);
118601768b42SPeter Zijlstra 
118701768b42SPeter Zijlstra #ifndef CONFIG_DEBUG_LOCK_ALLOC
118801768b42SPeter Zijlstra int __sched
1189c5470b22SNicolai Hähnle ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
119001768b42SPeter Zijlstra {
119101768b42SPeter Zijlstra 	might_sleep();
119201768b42SPeter Zijlstra 
11933ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(&lock->base)) {
1194ea9e0fb8SNicolai Hähnle 		if (ctx)
119501768b42SPeter Zijlstra 			ww_mutex_set_context_fastpath(lock, ctx);
11963ca0ff57SPeter Zijlstra 		return 0;
11973ca0ff57SPeter Zijlstra 	}
11983ca0ff57SPeter Zijlstra 
11993ca0ff57SPeter Zijlstra 	return __ww_mutex_lock_slowpath(lock, ctx);
120001768b42SPeter Zijlstra }
1201c5470b22SNicolai Hähnle EXPORT_SYMBOL(ww_mutex_lock);
120201768b42SPeter Zijlstra 
120301768b42SPeter Zijlstra int __sched
1204c5470b22SNicolai Hähnle ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
120501768b42SPeter Zijlstra {
120601768b42SPeter Zijlstra 	might_sleep();
120701768b42SPeter Zijlstra 
12083ca0ff57SPeter Zijlstra 	if (__mutex_trylock_fast(&lock->base)) {
1209ea9e0fb8SNicolai Hähnle 		if (ctx)
121001768b42SPeter Zijlstra 			ww_mutex_set_context_fastpath(lock, ctx);
12113ca0ff57SPeter Zijlstra 		return 0;
12123ca0ff57SPeter Zijlstra 	}
12133ca0ff57SPeter Zijlstra 
12143ca0ff57SPeter Zijlstra 	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
121501768b42SPeter Zijlstra }
1216c5470b22SNicolai Hähnle EXPORT_SYMBOL(ww_mutex_lock_interruptible);
121701768b42SPeter Zijlstra 
121801768b42SPeter Zijlstra #endif
121901768b42SPeter Zijlstra 
122001768b42SPeter Zijlstra /**
122101768b42SPeter Zijlstra  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
122201768b42SPeter Zijlstra  * @cnt: the atomic which we are to dec
122301768b42SPeter Zijlstra  * @lock: the mutex to return holding if we dec to 0
122401768b42SPeter Zijlstra  *
122501768b42SPeter Zijlstra  * return true and hold lock if we dec to 0, return false otherwise
122601768b42SPeter Zijlstra  */
122701768b42SPeter Zijlstra int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
122801768b42SPeter Zijlstra {
122901768b42SPeter Zijlstra 	/* dec if we can't possibly hit 0 */
123001768b42SPeter Zijlstra 	if (atomic_add_unless(cnt, -1, 1))
123101768b42SPeter Zijlstra 		return 0;
123201768b42SPeter Zijlstra 	/* we might hit 0, so take the lock */
123301768b42SPeter Zijlstra 	mutex_lock(lock);
123401768b42SPeter Zijlstra 	if (!atomic_dec_and_test(cnt)) {
123501768b42SPeter Zijlstra 		/* when we actually did the dec, we didn't hit 0 */
123601768b42SPeter Zijlstra 		mutex_unlock(lock);
123701768b42SPeter Zijlstra 		return 0;
123801768b42SPeter Zijlstra 	}
123901768b42SPeter Zijlstra 	/* we hit 0, and we hold the lock */
124001768b42SPeter Zijlstra 	return 1;
124101768b42SPeter Zijlstra }
124201768b42SPeter Zijlstra EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
1243