/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef WW_RT

#define MUTEX		mutex
#define MUTEX_WAITER	mutex_waiter

static inline struct mutex_waiter *
__ww_waiter_first(struct mutex *lock)
{
	struct mutex_waiter *w;

	w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}

static inline struct mutex_waiter *
__ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
{
	w = list_next_entry(w, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}

static inline struct mutex_waiter *
__ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
{
	w = list_prev_entry(w, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}

static inline struct mutex_waiter *
__ww_waiter_last(struct mutex *lock)
{
	struct mutex_waiter *w;

	w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}

static inline void
__ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
{
	struct list_head *p = &lock->wait_list;
	if (pos)
		p = &pos->list;
	__mutex_add_waiter(lock, waiter, p);
}

static inline struct task_struct *
__ww_mutex_owner(struct mutex *lock)
{
	return __mutex_owner(lock);
}

static inline bool
__ww_mutex_has_waiters(struct mutex *lock)
{
	return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;
}

static inline void lock_wait_lock(struct mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);
}

static inline void unlock_wait_lock(struct mutex *lock)
{
	raw_spin_unlock(&lock->wait_lock);
}

static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
{
	lockdep_assert_held(&lock->wait_lock);
}

#else /* WW_RT */

#define MUTEX		rt_mutex
#define MUTEX_WAITER	rt_mutex_waiter

static inline struct rt_mutex_waiter *
__ww_waiter_first(struct rt_mutex *lock)
{
	struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);
	if (!n)
		return NULL;
	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w)
{
	struct rb_node *n = rb_next(&w->tree.entry);
	if (!n)
		return NULL;
	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w)
{
	struct rb_node *n = rb_prev(&w->tree.entry);
	if (!n)
		return NULL;
	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_last(struct rt_mutex *lock)
{
	struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);
	if (!n)
		return NULL;
	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}

static inline void
__ww_waiter_add(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct rt_mutex_waiter *pos)
{
	/* RT unconditionally adds the waiter first and then removes it on error */
}

static inline struct task_struct *
__ww_mutex_owner(struct rt_mutex *lock)
{
	return rt_mutex_owner(&lock->rtmutex);
}

static inline bool
__ww_mutex_has_waiters(struct rt_mutex *lock)
{
	return rt_mutex_has_waiters(&lock->rtmutex);
}

static inline void lock_wait_lock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->rtmutex.wait_lock);
}

static inline void unlock_wait_lock(struct rt_mutex *lock)
{
	raw_spin_unlock(&lock->rtmutex.wait_lock);
}

static inline void lockdep_assert_wait_lock_held(struct rt_mutex *lock)
{
	lockdep_assert_held(&lock->rtmutex.wait_lock);
}

#endif /* WW_RT */
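
/*
 * Everything above only resolves MUTEX/MUTEX_WAITER and the __ww_waiter_*(),
 * __ww_mutex_owner()/__ww_mutex_has_waiters() and wait_lock helpers for the
 * two backing lock types. A minimal sketch of the intended dual inclusion --
 * the includer shown here is purely illustrative, the real build wiring may
 * differ:
 *
 *	// regular mutex flavour (the including .c file leaves WW_RT undefined)
 *	#include "ww_mutex.h"
 *
 *	// rtmutex flavour (the including .c file defines WW_RT first)
 *	#define WW_RT 1
 *	#include "ww_mutex.h"
 */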

/*
 * Wait-Die:
 *   A newer transaction is killed when it requests a lock that is
 *   already held by an older transaction.
 *
 * Wound-Wait:
 *   A newer transaction is wounded when an older transaction requests
 *   a lock that the newer transaction is holding.
 */
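
/*
 * A minimal caller-side sketch of how both algorithms surface to users,
 * assuming two hypothetical objects that each embed a ww_mutex in ->lock and
 * share a hypothetical class demo_ww_class; -EDEADLK is the die/wounded
 * back-off path and a real caller would loop until all locks are held:
 *
 *	struct ww_acquire_ctx ctx;
 *	int err;
 *
 *	ww_acquire_init(&ctx, &demo_ww_class);
 *
 *	err = ww_mutex_lock(&obj_a->lock, &ctx);
 *	if (!err) {
 *		err = ww_mutex_lock(&obj_b->lock, &ctx);
 *		if (err == -EDEADLK) {
 *			ww_mutex_unlock(&obj_a->lock);		// back off first
 *			ww_mutex_lock_slow(&obj_b->lock, &ctx);	// wait for the winner
 *			err = ww_mutex_lock(&obj_a->lock, &ctx); // then reacquire
 *		}
 *	}
 *	if (!err)
 *		ww_acquire_done(&ctx);
 */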

/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef DEBUG_WW_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done()?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}

/*
 * Determine if @a is 'less' than @b. IOW, either @a is a lower priority task
 * or, when of equal priority, a younger transaction than @b.
 *
 * Depending on the algorithm, @a will either need to wait for @b, or die.
 */
static inline bool
__ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
/*
 * Can only do the RT prio for WW_RT, because task->prio isn't stable due to PI,
 * so the wait_list ordering will go wobbly. rt_mutex re-queues the waiter and
 * isn't affected by this.
 */
#ifdef WW_RT
	/* kernel prio; less is more */
	int a_prio = a->task->prio;
	int b_prio = b->task->prio;

	if (rt_prio(a_prio) || rt_prio(b_prio)) {

		if (a_prio > b_prio)
			return true;

		if (a_prio < b_prio)
			return false;

		/* equal static prio */

		if (dl_prio(a_prio)) {
			if (dl_time_before(b->task->dl.deadline,
					   a->task->dl.deadline))
				return true;

			if (dl_time_before(a->task->dl.deadline,
					   b->task->dl.deadline))
				return false;
		}

		/* equal prio */
	}
#endif

	/* FIFO order tie break -- bigger is younger */
	return (signed long)(a->stamp - b->stamp) > 0;
}
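
/*
 * A small worked example of the tie break above, assuming stamps are handed
 * out by a monotonically increasing counter that is allowed to wrap: with
 * a->stamp == 3 and b->stamp == ULONG_MAX the unsigned difference is 4, so
 * (signed long)(a->stamp - b->stamp) > 0 and @a is correctly treated as the
 * younger (later started) context even though its raw stamp is numerically
 * the smaller one.
 */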

/*
 * Wait-Die; wake a lesser waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool
__ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	if (waiter->ww_ctx->acquired > 0 && __ww_ctx_less(waiter->ww_ctx, ww_ctx)) {
#ifndef WW_RT
		debug_mutex_wake_waiter(lock, waiter);
#endif
		wake_up_process(waiter->task);
	}

	return true;
}

/*
 * Wound-Wait; wound a lesser @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with more important transactions
 * than the lock holder's. Even if multiple waiters may wound the lock holder,
 * it's sufficient that only one does.
 */
static bool __ww_mutex_wound(struct MUTEX *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __ww_mutex_owner(lock);

	lockdep_assert_wait_lock_held(lock);

	/*
	 * Possible through __ww_mutex_add_waiter() when we race with
	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
	 * through __ww_mutex_check_waiters().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have FLAG_WAITERS set and hold
	 * wait_lock.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_less(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state()
		 * inserts sufficient barriers to make sure @owner either sees
		 * it's wounded in __ww_mutex_check_kill() or has a
		 * wakeup pending to re-read the wounded state.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}

/*
 * We just acquired @lock under @ww_ctx, if there are more important contexts
 * waiting behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list;
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void
__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct MUTEX_WAITER *cur;

	lockdep_assert_wait_lock_held(lock);

	for (cur = __ww_waiter_first(lock); cur;
	     cur = __ww_waiter_next(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}

/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
	 *     MB		        MB
	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
	 *
	 * The memory barrier above pairs with the memory barrier in
	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
	 * and/or !empty list.
	 */
	if (likely(!__ww_mutex_has_waiters(&lock->base)))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	lock_wait_lock(&lock->base);
	__ww_mutex_check_waiters(&lock->base, ctx);
	unlock_wait_lock(&lock->base);
}

static __always_inline int
__ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef DEBUG_WW_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourself.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int
__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct MUTEX_WAITER *cur;

	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	if (hold_ctx && __ww_ctx_less(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must kill ourself.
	 */
	for (cur = __ww_waiter_prev(lock, waiter); cur;
	     cur = __ww_waiter_prev(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}

/*
 * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
 * first. Such that older contexts are preferred to acquire the lock over
 * younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourself immediately when possible (there are
 * older contexts already waiting) to avoid unnecessary waiting and for
 * Wound-Wait ensure we wound the owning context when it is younger.
 */
static inline int
__ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
		      struct MUTEX *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct MUTEX_WAITER *cur, *pos = NULL;
	bool is_wait_die;

	if (!ww_ctx) {
		__ww_waiter_add(lock, waiter, NULL);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them. Wait-Die waiters may die here. Wound-Wait waiters
	 * never die here, but they are sorted in stamp order and
	 * may wound the lock holder.
	 */
	for (cur = __ww_waiter_last(lock); cur;
	     cur = __ww_waiter_prev(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_less(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: if we find an older context waiting, there
			 * is no point in queueing behind it, as we'd have to
			 * die the moment it would acquire the lock.
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = cur;

		/* Wait-Die: ensure younger waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx);
	}

	__ww_waiter_add(lock, waiter, pos);

	/*
	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
	 * wound that such that we might proceed.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * See ww_mutex_set_context_fastpath(). Orders setting
		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
		 * such that either we or the fastpath will wound @ww->ctx.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
	}

	return 0;
}
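
/*
 * An illustrative example of the resulting order, with made-up stamps: for a
 * wait-list of 10, W, 20 (W being a waiter without a context), a new waiter
 * with stamp 15 scans backwards from the tail, records the stamp-20 waiter as
 * @pos (and, for Wait-Die, tells it to die), skips W, and stops at the older
 * stamp-10 waiter -- where a Wait-Die context with ctx->acquired > 0 would get
 * -EDEADLK instead -- before being inserted in front of @pos, giving
 * 10, W, 15, 20.
 */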

static inline void __ww_mutex_unlock(struct ww_mutex *lock)
{
	if (lock->ctx) {
#ifdef DEBUG_WW_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}
}
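
/*
 * Matching release-side sketch for the acquire example further up, again with
 * the hypothetical obj_a/obj_b: ww_mutex_unlock() ends up in
 * __ww_mutex_unlock() above to drop ->ctx and ->acquired, and the acquire
 * context is only torn down once every lock taken under it has been released:
 *
 *	ww_mutex_unlock(&obj_b->lock);
 *	ww_mutex_unlock(&obj_a->lock);
 *	ww_acquire_fini(&ctx);
 */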