xref: /openbmc/linux/kernel/futex/futex.h (revision 807ff7ed)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FUTEX_H
#define _FUTEX_H

#include <linux/futex.h>
#include <linux/rtmutex.h>
#include <linux/sched/wake_q.h>

#ifdef CONFIG_PREEMPT_RT
#include <linux/rcuwait.h>
#endif

#include <asm/futex.h>

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#ifdef CONFIG_MMU
# define FLAGS_SHARED		0x01
#else
/*
 * NOMMU does not have per-process address spaces. Let the compiler optimize
 * the code away.
 */
# define FLAGS_SHARED		0x00
#endif
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

#ifdef CONFIG_FAIL_FUTEX
extern bool should_fail_futex(bool fshared);
#else
static inline bool should_fail_futex(bool fshared)
{
	return false;
}
#endif

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	atomic_t waiters;
	spinlock_t lock;
	struct plist_head chain;
} ____cacheline_aligned_in_smp;

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex_base pi_mutex;

	struct task_struct *owner;
	refcount_t refcount;

	union futex_key key;
} __randomize_layout;

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 * @requeue_state:	State field for futex_requeue_pi()
 * @requeue_wait:	RCU wait for futex_requeue_pi() (RT only)
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See futex_unqueue_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
	atomic_t requeue_state;
#ifdef CONFIG_PREEMPT_RT
	struct rcuwait requeue_wait;
#endif
} __randomize_layout;
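
/*
 * Illustrative sketch, assuming the waiter still holds a pointer @q to its
 * own futex_q: the "woken" test spelled out by the comment above, as the
 * wait path can evaluate it after returning from schedule().
 *
 *	if (plist_node_empty(&q->list) || !q->lock_ptr)
 *		...the wake side already ran; there is nothing to unqueue...
 */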

extern const struct futex_q futex_q_init;

enum futex_access {
	FUTEX_READ,
	FUTEX_WRITE
};

extern int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
			 enum futex_access rw);

extern struct hrtimer_sleeper *
futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
		  int flags, u64 range_ns);
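
/*
 * Illustrative sketch of the usual pairing around futex_setup_timer(): a
 * NULL @time yields a NULL sleeper, so the teardown is conditional. The
 * variable names here are the caller's, not fixed by this interface.
 *
 *	to = futex_setup_timer(abs_time, &timeout, flags,
 *			       current->timer_slack_ns);
 *	...
 *	if (to) {
 *		hrtimer_cancel(&to->timer);
 *		destroy_hrtimer_on_stack(&to->timer);
 *	}
 */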

extern struct futex_hash_bucket *futex_hash(union futex_key *key);

/**
 * futex_match - Check whether two futex keys are equal
 * @key1:	Pointer to key1
 * @key2:	Pointer to key2
 *
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int futex_match(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}
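
/*
 * Illustrative sketch, assuming hb->lock is held: futex_match() is what
 * lets a waker walk a shared hash bucket and act only on waiters queued
 * on the same key.
 *
 *	plist_for_each_entry_safe(this, next, &hb->chain, list) {
 *		if (futex_match(&this->key, &key))
 *			...wake or requeue 'this'...
 *	}
 */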

extern int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
			    struct futex_q *q, struct futex_hash_bucket **hb);
extern void futex_wait_queue(struct futex_hash_bucket *hb, struct futex_q *q,
				   struct hrtimer_sleeper *timeout);
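
/*
 * Illustrative sketch of how the two helpers above pair up on the wait
 * side; 'to' is an optional hrtimer_sleeper and may be NULL.
 *
 *	struct futex_q q = futex_q_init;
 *	struct futex_hash_bucket *hb;
 *	int ret;
 *
 *	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
 *	if (ret)
 *		return ret;
 *
 *	futex_wait_queue(hb, &q, to);	(queues q, drops hb->lock, sleeps)
 */
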
extern void futex_wake_mark(struct wake_q_head *wake_q, struct futex_q *q);

extern int fault_in_user_writeable(u32 __user *uaddr);
extern int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval);
extern int futex_get_value_locked(u32 *dest, u32 __user *from);
extern struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key);

extern void __futex_unqueue(struct futex_q *q);
extern void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb);
extern int futex_unqueue(struct futex_q *q);

/**
 * futex_queue() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * futex_queue() is typically paired with exactly one call to futex_unqueue().  The
 * exceptions involve the PI-related operations, which may use futex_unqueue_pi()
 * or nothing if the unqueue is done as part of the wake process and the unqueue
 * state is implicit in the state of the woken task (see futex_wait_requeue_pi()
 * for an example).
 */
static inline void futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	__futex_queue(q, hb);
	spin_unlock(&hb->lock);
}
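
/*
 * Illustrative sketch of the pairing described above, error handling and
 * the actual blocking elided:
 *
 *	hb = futex_q_lock(q);		(hashes q->key, takes hb->lock)
 *	...recheck the user space value under hb->lock...
 *	futex_queue(q, hb);		(enqueues q, releases hb->lock)
 *
 *	...block until woken or interrupted...
 *
 *	if (!futex_unqueue(q))
 *		...q was already removed by the waker: normal wakeup...
 */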

extern void futex_unqueue_pi(struct futex_q *q);

extern void wait_for_owner_exiting(int ret, struct task_struct *exiting);

/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void futex_hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/*
	 * Full barrier (A), see the ordering comment above.
	 */
	smp_mb__after_atomic();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void futex_hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}

static inline int futex_hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	/*
	 * Full barrier (B), see the ordering comment above.
	 */
	smp_mb();
	return atomic_read(&hb->waiters);
#else
	return 1;
#endif
}
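
/*
 * Illustrative sketch, roughly the wake-side fast path: the counter above
 * lets a waker skip taking hb->lock entirely when no one is queued.
 *
 *	hb = futex_hash(&key);
 *
 *	if (!futex_hb_waiters_pending(hb))
 *		return 0;
 *
 *	spin_lock(&hb->lock);
 *	...walk hb->chain, wake waiters whose key matches...
 *	spin_unlock(&hb->lock);
 */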

extern struct futex_hash_bucket *futex_q_lock(struct futex_q *q);
extern void futex_q_unlock(struct futex_hash_bucket *hb);


extern int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task,
				struct task_struct **exiting,
				int set_waiters);

extern int refill_pi_state_cache(void);
extern void get_pi_state(struct futex_pi_state *pi_state);
extern void put_pi_state(struct futex_pi_state *pi_state);
extern int fixup_pi_owner(u32 __user *uaddr, struct futex_q *q, int locked);
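
/*
 * Illustrative sketch of the pi_state lifetime helpers above: the per-task
 * cache is refilled early, while it is still safe to sleep, and every
 * get_pi_state() is balanced by a put_pi_state() once the reference is no
 * longer needed.
 *
 *	if (refill_pi_state_cache())
 *		return -ENOMEM;
 *	...
 *	get_pi_state(pi_state);
 *	...hand pi_state to another context or keep using it...
 *	put_pi_state(pi_state);
 */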

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 > hb2)
		swap(hb1, hb2);

	spin_lock(&hb1->lock);
	if (hb1 != hb2)
		spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}
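
/*
 * Illustrative sketch, assuming two keys resolved via futex_hash(): the
 * helpers above keep the lock order stable for operations that span two
 * futexes, such as requeue or wake_op.
 *
 *	hb1 = futex_hash(&key1);
 *	hb2 = futex_hash(&key2);
 *
 *	double_lock_hb(hb1, hb2);
 *	...operate on both chains...
 *	double_unlock_hb(hb1, hb2);
 */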

/* syscalls */

extern int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, u32
				 val, ktime_t *abs_time, u32 bitset, u32 __user
				 *uaddr2);

extern int futex_requeue(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
			 u32 *cmpval, int requeue_pi);

extern int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
		      ktime_t *abs_time, u32 bitset);

/**
 * struct futex_vector - Auxiliary struct for futex_waitv()
 * @w: Userspace provided data
 * @q: Kernel side data
 *
 * Struct used to build an array with all the data needed for futex_waitv()
 */
struct futex_vector {
	struct futex_waitv w;
	struct futex_q q;
};
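
/*
 * Illustrative sketch of how the two halves of a futex_vector are filled
 * in before futex_wait_multiple(); 'uwaitv' stands for the user supplied
 * futex_waitv array and is not an identifier from this file.
 *
 *	for (i = 0; i < count; i++) {
 *		if (copy_from_user(&aux, &uwaitv[i], sizeof(aux)))
 *			return -EFAULT;
 *		vs[i].w = aux;		(userspace view of the wait)
 *		vs[i].q = futex_q_init;	(kernel side queue entry)
 *	}
 */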

extern int futex_wait_multiple(struct futex_vector *vs, unsigned int count,
			       struct hrtimer_sleeper *to);

extern int futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset);

extern int futex_wake_op(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_wake2, int op);

extern int futex_unlock_pi(u32 __user *uaddr, unsigned int flags);

extern int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int trylock);

#endif /* _FUTEX_H */