/* SPDX-License-Identifier: GPL-2.0 */
/*
 * RT Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * This file contains the private data structure and API definitions.
 */

#ifndef __KERNEL_RTMUTEX_COMMON_H
#define __KERNEL_RTMUTEX_COMMON_H

#include <linux/debug_locks.h>
#include <linux/rtmutex.h>
#include <linux/sched/wake_q.h>

/*
 * This is a helper for the struct rt_mutex_waiter below. A waiter is queued
 * on two separate trees and needs its own copy of the sort keys in each,
 * because the two trees are serialized by different locks.
 *
 * @entry:		rbtree node to enqueue into the waiters tree
 * @prio:		Priority of the waiter
 * @deadline:		Deadline of the waiter if applicable
 *
 * See rt_waiter_node_less() and waiter_*_prio().
 */
struct rt_waiter_node {
	struct rb_node	entry;
	int		prio;
	u64		deadline;
};

/*
 * This is the control structure for tasks blocked on an rt_mutex,
 * which is allocated on the kernel stack of the blocked task.
 *
 * @tree:		node to enqueue into the mutex waiters tree
 * @pi_tree:		node to enqueue into the mutex owner waiters tree
 * @task:		task reference to the blocked task
 * @lock:		Pointer to the rt_mutex on which the waiter blocks
 * @wake_state:		Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT)
 * @ww_ctx:		WW context pointer
 *
 * @tree is ordered by @lock->wait_lock
 * @pi_tree is ordered by rt_mutex_owner(@lock)->pi_lock
 */
struct rt_mutex_waiter {
	struct rt_waiter_node	tree;
	struct rt_waiter_node	pi_tree;
	struct task_struct	*task;
	struct rt_mutex_base	*lock;
	unsigned int		wake_state;
	struct ww_acquire_ctx	*ww_ctx;
};
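
/*
 * Illustrative sketch of why there are two rt_waiter_node copies. This is
 * not the actual enqueue code in rtmutex.c; the enqueue helpers named below
 * are hypothetical:
 *
 *	raw_spin_lock(&lock->wait_lock);
 *	enqueue_lock_waiter(lock, &waiter->tree);	// keyed by tree.prio/deadline
 *	raw_spin_lock(&rt_mutex_owner(lock)->pi_lock);
 *	enqueue_owner_pi_waiter(rt_mutex_owner(lock), &waiter->pi_tree);
 *	raw_spin_unlock(&rt_mutex_owner(lock)->pi_lock);
 *	raw_spin_unlock(&lock->wait_lock);
 *
 * Each copy of the sort keys can thus be updated under the lock which
 * serializes its tree, without having to hold both locks.
 */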

/**
 * rt_wake_q_head - Wrapper around regular wake_q_head to support
 *		    "sleeping" spinlocks on RT
 * @head:		The regular wake_q_head for sleeping lock variants
 * @rtlock_task:	Task pointer for RT lock (spin/rwlock) wakeups
 */
struct rt_wake_q_head {
	struct wake_q_head	head;
	struct task_struct	*rtlock_task;
};

#define DEFINE_RT_WAKE_Q(name)						\
	struct rt_wake_q_head name = {					\
		.head		= WAKE_Q_HEAD_INITIALIZER(name.head),	\
		.rtlock_task	= NULL,					\
	}
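
/*
 * Typical usage sketch for an unlock path (simplified; not the exact futex
 * code). The wakeups are collected while lock->wait_lock is held and issued
 * by rt_mutex_postunlock() after it has been dropped:
 *
 *	DEFINE_RT_WAKE_Q(wqh);
 *
 *	raw_spin_lock_irq(&lock->wait_lock);
 *	postunlock = __rt_mutex_futex_unlock(lock, &wqh);
 *	raw_spin_unlock_irq(&lock->wait_lock);
 *
 *	if (postunlock)
 *		rt_mutex_postunlock(&wqh);
 */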

/*
 * PI-futex support (proxy locking functions, etc.):
 */
extern void rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
				       struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock);
extern int __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				       struct rt_mutex_waiter *waiter,
				       struct task_struct *task);
extern int rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				     struct rt_mutex_waiter *waiter,
				     struct task_struct *task);
extern int rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
				    struct hrtimer_sleeper *to,
				    struct rt_mutex_waiter *waiter);
extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
					struct rt_mutex_waiter *waiter);
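
/*
 * Rough outline of how a futex-style caller drives the proxy locking API
 * above (a sketch under simplified assumptions, not the exact futex code;
 * error handling and requeue details are omitted):
 *
 *	rt_mutex_init_waiter(&waiter);
 *
 *	ret = rt_mutex_start_proxy_lock(lock, &waiter, task);
 *	// 1: lock acquired for @task, 0: @task is now queued as a waiter, <0: error
 *	if (ret == 0) {
 *		ret = rt_mutex_wait_proxy_lock(lock, to, &waiter);
 *		if (ret && !rt_mutex_cleanup_proxy_lock(lock, &waiter))
 *			ret = 0;	// lost the wakeup race but owns the lock now
 *	}
 */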

extern int rt_mutex_futex_trylock(struct rt_mutex_base *l);
extern int __rt_mutex_futex_trylock(struct rt_mutex_base *l);

extern void rt_mutex_futex_unlock(struct rt_mutex_base *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				    struct rt_wake_q_head *wqh);

extern void rt_mutex_postunlock(struct rt_wake_q_head *wqh);

/*
 * Must be guarded because this header is included from rcu/tree_plugin.h
 * unconditionally.
 */
#ifdef CONFIG_RT_MUTEXES
static inline int rt_mutex_has_waiters(struct rt_mutex_base *lock)
{
	return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
}

/*
 * Lockless speculative check whether @waiter is still the top waiter on
 * @lock. This solely compares pointers and does not dereference the
 * leftmost entry, which might be about to vanish.
 */
static inline bool rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock,
						 struct rt_mutex_waiter *waiter)
{
	struct rb_node *leftmost = rb_first_cached(&lock->waiters);

	return rb_entry(leftmost, struct rt_mutex_waiter, tree.entry) == waiter;
}

static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock)
{
	struct rb_node *leftmost = rb_first_cached(&lock->waiters);
	struct rt_mutex_waiter *w = NULL;

	lockdep_assert_held(&lock->wait_lock);

	if (leftmost) {
		w = rb_entry(leftmost, struct rt_mutex_waiter, tree.entry);
		BUG_ON(w->lock != lock);
	}
	return w;
}

static inline int task_has_pi_waiters(struct task_struct *p)
{
	return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root);
}

static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
{
	lockdep_assert_held(&p->pi_lock);

	return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter,
			pi_tree.entry);
}

#define RT_MUTEX_HAS_WAITERS	1UL

static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);

	return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
}
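
/*
 * Note on the encoding: @lock->owner holds the owner task_struct pointer
 * with RT_MUTEX_HAS_WAITERS OR'ed into bit 0, which is always clear in a
 * task pointer. E.g. a hypothetical owner at 0xffff88801234c040 with
 * waiters queued is stored as ...c041; masking the bit off recovers the
 * task pointer, and a NULL result means the lock is unowned.
 */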

/*
 * Constants for rt mutex functions which have a selectable deadlock
 * detection.
 *
 * RT_MUTEX_MIN_CHAINWALK:	Stops the lock chain walk when there are
 *				no further PI adjustments to be made.
 *
 * RT_MUTEX_FULL_CHAINWALK:	Invoke deadlock detection with a full
 *				walk of the lock chain.
 */
enum rtmutex_chainwalk {
	RT_MUTEX_MIN_CHAINWALK,
	RT_MUTEX_FULL_CHAINWALK,
};
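
/*
 * How the selection is typically consumed (a sketch; the in-tree helper in
 * rtmutex.c may differ in detail): debug builds force full deadlock
 * detection whenever a waiter is involved, otherwise the caller's choice
 * decides whether the chain walk may stop early:
 *
 *	static inline bool
 *	rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
 *				      enum rtmutex_chainwalk chwalk)
 *	{
 *		if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
 *			return waiter != NULL;
 *		return chwalk == RT_MUTEX_FULL_CHAINWALK;
 *	}
 */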

static inline void __rt_mutex_base_init(struct rt_mutex_base *lock)
{
	raw_spin_lock_init(&lock->wait_lock);
	lock->waiters = RB_ROOT_CACHED;
	lock->owner = NULL;
}

/* Debug functions */
static inline void debug_rt_mutex_unlock(struct rt_mutex_base *lock)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
}

static inline void debug_rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
}

static inline void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		memset(waiter, 0x11, sizeof(*waiter));
}

static inline void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		memset(waiter, 0x22, sizeof(*waiter));
}

static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	debug_rt_mutex_init_waiter(waiter);
	RB_CLEAR_NODE(&waiter->pi_tree.entry);
	RB_CLEAR_NODE(&waiter->tree.entry);
	waiter->wake_state = TASK_NORMAL;
	waiter->task = NULL;
}

static inline void rt_mutex_init_rtlock_waiter(struct rt_mutex_waiter *waiter)
{
	rt_mutex_init_waiter(waiter);
	waiter->wake_state = TASK_RTLOCK_WAIT;
}
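
/*
 * The waiter lives on the stack of the blocking task, e.g. in a lock
 * slowpath (simplified sketch, not the exact in-tree slowpath):
 *
 *	struct rt_mutex_waiter waiter;
 *
 *	rt_mutex_init_waiter(&waiter);		// rt_mutex_init_rtlock_waiter() for RT spin/rwlocks
 *	// ... block; the waiter gets enqueued on the lock and owner trees ...
 *	debug_rt_mutex_free_waiter(&waiter);	// poison the stack copy when debugging
 */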

#else /* CONFIG_RT_MUTEXES */
/* Used in rcu/tree_plugin.h */
static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	return NULL;
}
#endif  /* !CONFIG_RT_MUTEXES */

#endif