/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include "mcs_spinlock.h"

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
/*
 * Must be 0 for the debug case so we do not do the unlock outside of the
 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
 * case.
 */
# undef __mutex_slowpath_needs_to_unlock
# define  __mutex_slowpath_needs_to_unlock()	0
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define	MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)
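
/*
 * For reference, lock->count encodes the lock state: 1 means unlocked,
 * 0 means locked with no waiters, and a negative value means locked with
 * (possible) waiters queued on ->wait_list.
 */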

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
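
/*
 * Illustrative usage sketch (not part of this file; the names below are
 * hypothetical): a mutex is either defined statically with DEFINE_MUTEX()
 * or initialized at runtime with mutex_init(), which expands to
 * __mutex_init() with a per-site lockdep class key, before first use.
 *
 *	static DEFINE_MUTEX(example_static_lock);
 *
 *	struct example_dev {
 *		struct mutex lock;
 *	};
 *
 *	static void example_dev_setup(struct example_dev *dev)
 *	{
 *		mutex_init(&dev->lock);
 *	}
 */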

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
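
/*
 * Illustrative usage sketch (not part of this file; names are hypothetical):
 * the task that takes the mutex is the task that releases it, and the
 * critical section is allowed to sleep.
 *
 *	static DEFINE_MUTEX(example_lock);
 *	static unsigned long example_count;
 *
 *	static void example_increment(void)
 *	{
 *		mutex_lock(&example_lock);
 *		example_count++;
 *		mutex_unlock(&example_lock);
 *	}
 */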
#endif

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners from acquiring the mutex
 * more or less simultaneously, the spinners need to acquire an MCS lock
 * first before spinning on the owner field.
 *
 */

/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
	 * lock->owner still matches owner. If that fails, owner might point
	 * to free()d memory; if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changed, which is a sign of heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = ACCESS_ONCE(lock->owner);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the owner may have just acquired the
	 * mutex and not set the owner field yet, or the mutex may have been
	 * released.
	 */
	return retval;
}
#endif

__visible __used noinline
void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);
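
/*
 * Illustrative ordering sketch (not part of this file; names are
 * hypothetical): a ww_mutex taken under an acquire context must be released
 * with ww_mutex_unlock() before the context itself is torn down.
 *
 *	ww_acquire_init(&ctx, &example_ww_class);
 *	ww_mutex_lock(&obj->lock, &ctx);
 *	... modify obj ...
 *	ww_mutex_unlock(&obj->lock);
 *	ww_acquire_fini(&ctx);
 */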

static inline int __sched
__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (unlikely(ctx == hold_ctx))
		return -EALREADY;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
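
/*
 * Illustrative note (not part of this file): the wrap-safe stamp comparison
 * above treats the context with the later (larger) stamp as the younger one.
 * For example, with hold_ctx->stamp == 5 and ctx->stamp == 7 the difference
 * is 2 <= LONG_MAX, so the younger ctx backs off with -EDEADLK; with
 * ctx->stamp == 3 the unsigned difference wraps above LONG_MAX and the
 * older ctx simply keeps waiting for the lock.
 */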

static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring the lock with the fastpath, or when we lost out in the
 * contested slowpath, set ctx and wake up any waiters so they can recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			       struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the wait list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using MCS lock so that only one
	 * spinner can compete for the mutex. However, if mutex spinning isn't
	 * going to happen, there is no point in going through the lock/unlock
	 * overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	if (!osq_lock(&lock->osq))
		goto slowpath;

	for (;;) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set, its contents are undefined; only
			 * by acquiring wait_lock is there a guarantee that
			 * they are valid when read.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (ACCESS_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			if (use_ww_ctx) {
				struct ww_mutex *ww;
				ww = container_of(lock, struct ww_mutex, base);

				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			osq_unlock(&lock->osq);
			preempt_enable();
			return 0;
		}

		/*
		 * When there's no owner, we might have been preempted between
		 * the owner acquiring the lock and setting the owner field. If
		 * we're an RT task, that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
	osq_unlock(&lock->osq);
slowpath:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched())
		schedule_preempt_disabled();
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	/* once more, can we acquire the lock? */
	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);
	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	mutex_set_owner(lock);

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		struct mutex_waiter *cur;

		/*
		 * This branch gets optimized out for the common case,
		 * and is only important for ww_mutex_lock.
		 */
		ww_mutex_lock_acquired(ww, ww_ctx);
		ww->ctx = ww_ctx;

		/*
		 * Give any possible sleeping processes the chance to wake up,
		 * so they can recheck if they have to back off.
		 */
		list_for_each_entry(cur, &lock->wait_list, list) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
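
/*
 * Illustrative sketch (not part of this file; names are hypothetical): when
 * two mutexes of the same lockdep class must be held at once, the second
 * acquisition is annotated with a subclass so lockdep does not report a
 * false self-deadlock. The caller is still responsible for a consistent
 * locking order, here by address.
 *
 *	static void example_lock_two(struct example_obj *a, struct example_obj *b)
 *	{
 *		if (a > b)
 *			swap(a, b);
 *		mutex_lock(&a->lock);
 *		mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	}
 */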

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret =  __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				   0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
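
/*
 * Illustrative sketch (not part of this file; names are hypothetical):
 * acquiring two ww_mutexes under one acquire context, backing off on
 * -EDEADLK by dropping what is held and sleeping on the contended lock
 * with ww_mutex_lock_slow(). A real caller would retry in a loop, since
 * the second attempt can again return -EDEADLK.
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &example_ww_class);
 *
 *	ret = ww_mutex_lock(&obj1->lock, &ctx);
 *	if (!ret) {
 *		ret = ww_mutex_lock(&obj2->lock, &ctx);
 *		if (ret == -EDEADLK) {
 *			ww_mutex_unlock(&obj1->lock);
 *			ww_mutex_lock_slow(&obj2->lock, &ctx);
 *			ret = ww_mutex_lock(&obj1->lock, &ctx);
 *		}
 *	}
 *	if (!ret) {
 *		ww_acquire_done(&ctx);
 *		... work on obj1 and obj2 ...
 *		ww_mutex_unlock(&obj2->lock);
 *		ww_mutex_unlock(&obj1->lock);
 *	}
 *	ww_acquire_fini(&ctx);
 */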

#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret =  __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
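
/*
 * Illustrative sketch (not part of this file; names are hypothetical):
 * callers of the interruptible variant must handle the lock attempt
 * failing with -EINTR, e.g. in an ioctl or other syscall path.
 *
 *	static int example_dev_op(struct example_dev *dev)
 *	{
 *		int ret;
 *
 *		ret = mutex_lock_interruptible(&dev->lock);
 *		if (ret)
 *			return ret;
 *		... operate on dev ...
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */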

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
					    struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
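
/*
 * Illustrative sketch (not part of this file; names are hypothetical):
 * mutex_trylock() suits opportunistic work that can simply be skipped when
 * the lock is contended; note the spin_trylock()-style return value.
 *
 *	if (mutex_trylock(&example_cache_lock)) {
 *		example_shrink_cache();
 *		mutex_unlock(&example_cache_lock);
 *	}
 */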

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
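
/*
 * Illustrative sketch (not part of this file; names are hypothetical):
 * dropping what may be the last reference to an object that lives on a
 * mutex-protected list.
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &example_list_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&example_list_lock);
 *		kfree(obj);
 *	}
 */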