xref: /openbmc/linux/kernel/locking/mutex.c (revision 427b18207a87f6306bd53a74e03dbe17392b0045)
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

/*
 * @owner: contains a 'struct task_struct *' pointing to the current lock
 * owner; NULL means not owned. Since task_struct pointers are aligned to
 * at least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}
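
/*
 * Illustrative sketch (editorial, not kernel code): how an owner word
 * decomposes, assuming a hypothetical task_struct at the cache-line
 * aligned address 0xffff888012345640 that owns the lock while waiters
 * are queued:
 *
 *	owner = 0xffff888012345640 | MUTEX_FLAG_WAITERS;
 *	__owner_task(owner);	// (struct task_struct *)0xffff888012345640
 *	__owner_flags(owner);	// MUTEX_FLAG_WAITERS (0x01)
 *
 * The alignment guarantee is what keeps the low three bits free for flags.
 */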

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * If we set the HANDOFF bit, we must make sure it doesn't live
		 * past the point where we acquire it. This would be possible
		 * if we (accidentally) set the bit on an unlocked mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations
 * outwards except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock;
 * __mutex_trylock() provides the matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}
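
/*
 * Editorial sketch of the owner-word transition during a handoff, assuming
 * hypothetical tasks A (current owner) and B (top waiter):
 *
 *	before:	owner == A | MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF
 *	after:	owner == B | MUTEX_FLAG_WAITERS | MUTEX_FLAG_PICKUP
 *
 * B then clears PICKUP in __mutex_trylock_or_owner() when it notices it is
 * already the designated owner.
 */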

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
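
/*
 * Editorial usage sketch (not part of this file): the canonical lock/unlock
 * pattern around a hypothetical shared structure:
 *
 *	static DEFINE_MUTEX(cache_lock);
 *	static LIST_HEAD(cache_list);
 *
 *	void cache_add(struct list_head *entry)
 *	{
 *		mutex_lock(&cache_lock);
 *		list_add(entry, &cache_list);
 *		mutex_unlock(&cache_lock);
 *	}
 *
 * Sleeping is allowed while holding the mutex, which is what distinguishes
 * it from a spinlock for long or blocking critical sections.
 */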

static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done()?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	return a->stamp - b->stamp <= LONG_MAX &&
	       (a->stamp != b->stamp || a > b);
}
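
/*
 * Editorial note: the subtraction above compares stamps modulo ULONG_MAX+1,
 * so the ordering survives counter wraparound. A worked example with made-up
 * values:
 *
 *	a->stamp == 1, b->stamp == ULONG_MAX:
 *	a->stamp - b->stamp == 2 <= LONG_MAX, so a is "after" (younger than) b
 *	even though a's raw value is numerically smaller.
 *
 * The pointer comparison (a > b) merely breaks ties between equal stamps.
 */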

/*
 * Wake up any waiters that may have to back off when the lock is held by the
 * given context.
 *
 * Due to the invariants on the wait list, this can only affect the first
 * waiter with a context.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;

	lockdep_assert_held(&lock->wait_lock);

	list_for_each_entry(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (cur->ww_ctx->acquired > 0 &&
		    __ww_ctx_stamp_after(cur->ww_ctx, ww_ctx)) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}

		break;
	}
}

/*
 * After acquiring the lock with the fastpath, or when we lost out in contested
 * slowpath, set ctx and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	unsigned long flags;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire the wait_lock, add
	 * themselves to the wait list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended; if not, there is nobody to wake up.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	__ww_mutex_wakeup_for_backoff(&lock->base, ctx);
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * After acquiring lock in the slowpath set ctx.
 *
 * Unlike for the fast path, the caller ensures that waiters are woken up where
 * necessary.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted() to detect lock holder preemption.
		 */
		if (!owner->on_cpu || need_resched() ||
				vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * As with the lock holder preemption issue above, skip spinning if
	 * the owner task is not on a CPU or its CPU is preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, const bool waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take the OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set, the contents are undefined; only
			 * by acquiring wait_lock is there a guarantee that
			 * they are not invalid when read.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (READ_ONCE(ww->ctx))
				goto fail_unlock;
		}

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;


fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, const bool waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
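
/*
 * Editorial sketch of the wait/wound acquire pattern with -EDEADLK backoff.
 * All names here are hypothetical; ww_class is assumed to come from
 * DEFINE_WW_CLASS() elsewhere:
 *
 *	struct ww_acquire_ctx ctx;
 *	int err;
 *
 *	ww_acquire_init(&ctx, &ww_class);
 *	err = ww_mutex_lock(&obj_a->lock, &ctx);
 *	if (!err) {
 *		err = ww_mutex_lock(&obj_b->lock, &ctx);
 *		if (err == -EDEADLK) {
 *			ww_mutex_unlock(&obj_a->lock);
 *			ww_mutex_lock_slow(&obj_b->lock, &ctx);
 *			err = ww_mutex_lock(&obj_a->lock, &ctx);
 *		}
 *	}
 *	ww_acquire_done(&ctx);	// once all locks are held
 *	...
 *	ww_acquire_fini(&ctx);	// after the locks are released
 *
 * See Documentation/locking/ww-mutex-design.txt for the full protocol.
 */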

static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
			    struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct mutex_waiter *cur;

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		goto deadlock;

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must back off.
	 */
	cur = waiter;
	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
		if (cur->ww_ctx)
			goto deadlock;
	}

	return 0;

deadlock:
#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
	ctx->contending_lock = ww;
#endif
	return -EDEADLK;
}

static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
		      struct mutex *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos;

	if (!ww_ctx) {
		list_add_tail(&waiter->list, &lock->wait_list);
		return 0;
	}

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them.
	 */
	pos = &lock->wait_list;
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/* Back off immediately if necessary. */
			if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
				struct ww_mutex *ww;

				ww = container_of(lock, struct ww_mutex, base);
				DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
				ww_ctx->contending_lock = ww;
#endif
				return -EDEADLK;
			}

			break;
		}

		pos = &cur->list;

		/*
		 * Wake up the waiter so that it gets a chance to back
		 * off.
		 */
		if (cur->ww_ctx->acquired > 0) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	list_add_tail(&waiter->list, pos);
	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	unsigned long flags;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	might_sleep();

	ww = container_of(lock, struct ww_mutex, base);
	if (use_ww_ctx && ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx && ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (use_ww_ctx && ww_ctx)
			__ww_mutex_wakeup_for_backoff(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, current);

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		list_add_tail(&waiter.list, &lock->wait_list);
	} else {
		/* Add in stamp order, waking up waiters that must back off. */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_backoff;

		waiter.ww_ctx = ww_ctx;
	}

	waiter.task = current;

	if (__mutex_waiter_is_first(lock, &waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);

	set_current_state(state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us; do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and wound conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (unlikely(signal_pending_state(state, current))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();

		/*
		 * ww_mutex needs to always recheck its position since its
		 * waiter list is not FIFO ordered.
		 */
		if ((use_ww_ctx && ww_ctx) || !first) {
			first = __mutex_waiter_is_first(lock, &waiter);
			if (first)
				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)))
			break;

		spin_lock_mutex(&lock->wait_lock, flags);
	}
	spin_lock_mutex(&lock->wait_lock, flags);
acquired:
	__set_current_state(TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, current);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx && ww_ctx)
		ww_mutex_set_context_slowpath(ww, ww_ctx);

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, current);
err_early_backoff:
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
		struct lockdep_map *nest_lock, unsigned long ip,
		struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}
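
/*
 * Editorial note: the interval update above computes tmp*2 + tmp + tmp/2,
 * i.e. roughly 3.5x growth per injection (e.g. 4 -> 14 -> 49), clamped to
 * UINT_MAX, so the forced -EDEADLK backoffs become progressively rarer over
 * the lifetime of the acquire context.
 */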

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	unsigned long owner, flags;
	DEFINE_WAKE_Q(wake_q);

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF is set; in that case we must not clear the
	 * owner field, but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock_mutex(&lock->wait_lock, flags);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked = __mutex_trylock(lock);

	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
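
/*
 * Editorial usage sketch: mutex_trylock() in an opportunistic path where
 * blocking is undesirable (names are hypothetical):
 *
 *	if (mutex_trylock(&cache_lock)) {
 *		shrink_cache();
 *		mutex_unlock(&cache_lock);
 *	}
 *	// else: somebody else holds the lock; try again on the next pass
 *
 * Note the return convention is inverted relative to down_trylock(): 1 on
 * success, 0 on contention.
 */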

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return 1 and hold the lock if we decremented to 0; return 0 otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
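
/*
 * Editorial usage sketch: the refcounted-teardown pattern this helper
 * serves (all names hypothetical):
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcount, &registry_lock)) {
 *		// refcount hit zero; registry_lock is now held
 *		list_del(&obj->node);
 *		mutex_unlock(&registry_lock);
 *		kfree(obj);
 *	}
 *
 * The mutex is taken only on the final put, keeping the common path to a
 * single atomic operation.
 */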