xref: /openbmc/linux/include/linux/wait.h (revision 9e3bd0f6)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_WAIT_H
3 #define _LINUX_WAIT_H
4 /*
5  * Linux wait queue related types and methods
6  */
7 #include <linux/list.h>
8 #include <linux/stddef.h>
9 #include <linux/spinlock.h>
10 
11 #include <asm/current.h>
12 #include <uapi/linux/wait.h>
13 
14 typedef struct wait_queue_entry wait_queue_entry_t;
15 
16 typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
17 int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
18 
/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE	0x01	/* wake-one semantics; see __add_wait_queue_exclusive() */
#define WQ_FLAG_WOKEN		0x02	/* NOTE(review): set by wait_woken()-style wakers; not referenced in this header */
#define WQ_FLAG_BOOKMARK	0x04	/* entry is a resume marker for long wakeup scans; cf. __wake_up_locked_key_bookmark() */
23 
/*
 * A single wait-queue entry structure:
 */
struct wait_queue_entry {
	unsigned int		flags;		/* WQ_FLAG_* bits above */
	void			*private;	/* opaque waiter data; the waiting task when set up by init_waitqueue_entry() */
	wait_queue_func_t	func;		/* wakeup callback, e.g. default_wake_function */
	struct list_head	entry;		/* linkage on wait_queue_head::head */
};
33 
struct wait_queue_head {
	spinlock_t		lock;		/* protects ->head; held by callers of the *_locked variants below */
	struct list_head	head;		/* list of wait_queue_entry::entry */
};
typedef struct wait_queue_head wait_queue_head_t;
39 
40 struct task_struct;
41 
42 /*
 * Macros for declaration and initialisation of the datatypes
44  */
45 
46 #define __WAITQUEUE_INITIALIZER(name, tsk) {					\
47 	.private	= tsk,							\
48 	.func		= default_wake_function,				\
49 	.entry		= { NULL, NULL } }
50 
51 #define DECLARE_WAITQUEUE(name, tsk)						\
52 	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)
53 
54 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
55 	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
56 	.head		= { &(name).head, &(name).head } }
57 
58 #define DECLARE_WAIT_QUEUE_HEAD(name) \
59 	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
60 
61 extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);
62 
63 #define init_waitqueue_head(wq_head)						\
64 	do {									\
65 		static struct lock_class_key __key;				\
66 										\
67 		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
68 	} while (0)
69 
70 #ifdef CONFIG_LOCKDEP
71 # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
72 	({ init_waitqueue_head(&name); name; })
73 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
74 	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
75 #else
76 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
77 #endif
78 
79 static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
80 {
81 	wq_entry->flags		= 0;
82 	wq_entry->private	= p;
83 	wq_entry->func		= default_wake_function;
84 }
85 
86 static inline void
87 init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
88 {
89 	wq_entry->flags		= 0;
90 	wq_entry->private	= NULL;
91 	wq_entry->func		= func;
92 }
93 
94 /**
95  * waitqueue_active -- locklessly test for waiters on the queue
96  * @wq_head: the waitqueue to test for waiters
97  *
98  * returns true if the wait list is not empty
99  *
100  * NOTE: this function is lockless and requires care, incorrect usage _will_
101  * lead to sporadic and non-obvious failure.
102  *
103  * Use either while holding wait_queue_head::lock or when used for wakeups
104  * with an extra smp_mb() like::
105  *
106  *      CPU0 - waker                    CPU1 - waiter
107  *
108  *                                      for (;;) {
109  *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
110  *      smp_mb();                         // smp_mb() from set_current_state()
111  *      if (waitqueue_active(wq_head))         if (@cond)
112  *        wake_up(wq_head);                      break;
113  *                                        schedule();
114  *                                      }
115  *                                      finish_wait(&wq_head, &wait);
116  *
117  * Because without the explicit smp_mb() it's possible for the
118  * waitqueue_active() load to get hoisted over the @cond store such that we'll
119  * observe an empty wait list while the waiter might not observe @cond.
120  *
121  * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
122  * which (when the lock is uncontended) are of roughly equal cost.
123  */
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
	/* Lockless check -- see the ordering requirements documented above. */
	return !list_empty(&wq_head->head);
}
128 
/**
 * wq_has_single_sleeper - check if there is only one sleeper
 * @wq_head: wait queue head
 *
 * Returns true if @wq_head has exactly one sleeper on the list.
 *
 * Please refer to the comment for waitqueue_active: this is lockless
 * and carries the same ordering requirements.
 */
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
	return list_is_singular(&wq_head->head);
}
141 
142 /**
143  * wq_has_sleeper - check if there are any waiting processes
144  * @wq_head: wait queue head
145  *
146  * Returns true if wq_head has waiting processes
147  *
148  * Please refer to the comment for waitqueue_active.
149  */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side, e.g. the smp_mb() implied by
	 * set_current_state() (see the waitqueue_active() comment).
	 */
	smp_mb();
	return waitqueue_active(wq_head);
}
162 
163 extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
164 extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
165 extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
166 
/* Add @wq_entry at the head of @wq_head; caller must hold wq_head->lock. */
static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add(&wq_entry->entry, &wq_head->head);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq_head, wq_entry);
}

/* Add @wq_entry at the tail of @wq_head; caller must hold wq_head->lock. */
static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add_tail(&wq_entry->entry, &wq_head->head);
}

/*
 * Tail insertion with WQ_FLAG_EXCLUSIVE set: wake-one waiters queued
 * at the tail, behind any non-exclusive waiters at the head.
 */
static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_entry_tail(wq_head, wq_entry);
}

/* Unlink @wq_entry from its queue; caller must hold wq_head->lock. */
static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_del(&wq_entry->entry);
}
199 
200 void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
201 void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
202 void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
203 		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
204 void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
205 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
206 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
207 
208 #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
209 #define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
210 #define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
211 #define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
212 #define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)
213 
214 #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
215 #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
216 #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
217 #define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
218 
219 /*
220  * Wakeup macros to be used to report events to the targets.
221  */
222 #define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
223 #define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
224 #define wake_up_poll(x, m)							\
225 	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
226 #define wake_up_locked_poll(x, m)						\
227 	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
228 #define wake_up_interruptible_poll(x, m)					\
229 	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
230 #define wake_up_interruptible_sync_poll(x, m)					\
231 	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m))
232 
/*
 * Evaluate @condition for the *_timeout wait loops.  Relies on a
 * __ret variable in the enclosing scope holding the remaining jiffies:
 * when the condition turns true exactly as the timeout hits zero,
 * __ret is forced to 1 so callers can still distinguish "condition
 * met" (>= 1) from "timed out" (0).  Evaluates to true when the wait
 * should stop, i.e. condition met or time expired.
 */
#define ___wait_cond_timeout(condition)						\
({										\
	bool __cond = (condition);						\
	if (__cond && !__ret)							\
		__ret = 1;							\
	__cond || !__ret;							\
})
240 
/*
 * True when a sleep in @state can be cut short by a signal.  A
 * non-constant @state must conservatively be treated as interruptible;
 * constant states resolve at compile time via __builtin_constant_p().
 */
#define ___wait_is_interruptible(state)						\
	(!__builtin_constant_p(state) ||					\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)		\

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
246 
247 /*
248  * The below macro ___wait_event() has an explicit shadow of the __ret
249  * variable when used from the wait_event_*() macros.
250  *
251  * This is so that both can use the ___wait_cond_timeout() construct
252  * to wrap the condition.
253  *
254  * The type inconsistency of the wait_event_*() __ret variable is also
255  * on purpose; we use long where we can return timeout values and int
256  * otherwise.
257  */
258 
/*
 * Core wait loop shared by all wait_event*() flavours.
 *
 * @wq_head:   the waitqueue to wait on
 * @condition: expression re-checked after every wakeup
 * @state:     task state to sleep in (TASK_UNINTERRUPTIBLE, ...)
 * @exclusive: non-zero to queue the entry with WQ_FLAG_EXCLUSIVE
 * @ret:       initial value of the shadowing __ret (timeout or 0)
 * @cmd:       statement(s) performing the actual sleep, e.g. schedule()
 *
 * Evaluates to __ret: @ret as possibly updated by @cmd, or, when a
 * signal ends an interruptible sleep, the non-zero value returned by
 * prepare_to_wait_event().
 */
#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
({										\
	__label__ __out;							\
	struct wait_queue_entry __wq_entry;					\
	long __ret = ret;	/* explicit shadow */				\
										\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {								\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
										\
		if (condition)							\
			break;							\
										\
		if (___wait_is_interruptible(state) && __int) {			\
			__ret = __int;						\
			goto __out;						\
		}								\
										\
		cmd;								\
	}									\
	finish_wait(&wq_head, &__wq_entry);					\
__out:	__ret;									\
})
282 
283 #define __wait_event(wq_head, condition)					\
284 	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
285 			    schedule())
286 
287 /**
288  * wait_event - sleep until a condition gets true
289  * @wq_head: the waitqueue to wait on
290  * @condition: a C expression for the event to wait for
291  *
292  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
293  * @condition evaluates to true. The @condition is checked each time
294  * the waitqueue @wq_head is woken up.
295  *
296  * wake_up() has to be called after changing any variable that could
297  * change the result of the wait condition.
298  */
299 #define wait_event(wq_head, condition)						\
300 do {										\
301 	might_sleep();								\
302 	if (condition)								\
303 		break;								\
304 	__wait_event(wq_head, condition);					\
305 } while (0)
306 
307 #define __io_wait_event(wq_head, condition)					\
308 	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
309 			    io_schedule())
310 
311 /*
312  * io_wait_event() -- like wait_event() but with io_schedule()
313  */
314 #define io_wait_event(wq_head, condition)					\
315 do {										\
316 	might_sleep();								\
317 	if (condition)								\
318 		break;								\
319 	__io_wait_event(wq_head, condition);					\
320 } while (0)
321 
322 #define __wait_event_freezable(wq_head, condition)				\
323 	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
324 			    freezable_schedule())
325 
326 /**
327  * wait_event_freezable - sleep (or freeze) until a condition gets true
328  * @wq_head: the waitqueue to wait on
329  * @condition: a C expression for the event to wait for
330  *
331  * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
332  * to system load) until the @condition evaluates to true. The
333  * @condition is checked each time the waitqueue @wq_head is woken up.
334  *
335  * wake_up() has to be called after changing any variable that could
336  * change the result of the wait condition.
337  */
338 #define wait_event_freezable(wq_head, condition)				\
339 ({										\
340 	int __ret = 0;								\
341 	might_sleep();								\
342 	if (!(condition))							\
343 		__ret = __wait_event_freezable(wq_head, condition);		\
344 	__ret;									\
345 })
346 
347 #define __wait_event_timeout(wq_head, condition, timeout)			\
348 	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
349 		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
350 		      __ret = schedule_timeout(__ret))
351 
352 /**
353  * wait_event_timeout - sleep until a condition gets true or a timeout elapses
354  * @wq_head: the waitqueue to wait on
355  * @condition: a C expression for the event to wait for
356  * @timeout: timeout, in jiffies
357  *
358  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
359  * @condition evaluates to true. The @condition is checked each time
360  * the waitqueue @wq_head is woken up.
361  *
362  * wake_up() has to be called after changing any variable that could
363  * change the result of the wait condition.
364  *
365  * Returns:
366  * 0 if the @condition evaluated to %false after the @timeout elapsed,
367  * 1 if the @condition evaluated to %true after the @timeout elapsed,
368  * or the remaining jiffies (at least 1) if the @condition evaluated
369  * to %true before the @timeout elapsed.
370  */
371 #define wait_event_timeout(wq_head, condition, timeout)				\
372 ({										\
373 	long __ret = timeout;							\
374 	might_sleep();								\
375 	if (!___wait_cond_timeout(condition))					\
376 		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
377 	__ret;									\
378 })
379 
380 #define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
381 	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
382 		      TASK_INTERRUPTIBLE, 0, timeout,				\
383 		      __ret = freezable_schedule_timeout(__ret))
384 
385 /*
386  * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
387  * increasing load and is freezable.
388  */
389 #define wait_event_freezable_timeout(wq_head, condition, timeout)		\
390 ({										\
391 	long __ret = timeout;							\
392 	might_sleep();								\
393 	if (!___wait_cond_timeout(condition))					\
394 		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
395 	__ret;									\
396 })
397 
398 #define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
399 	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
400 			    cmd1; schedule(); cmd2)
401 /*
402  * Just like wait_event_cmd(), except it sets exclusive flag
403  */
404 #define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
405 do {										\
406 	if (condition)								\
407 		break;								\
408 	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
409 } while (0)
410 
411 #define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
412 	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
413 			    cmd1; schedule(); cmd2)
414 
415 /**
416  * wait_event_cmd - sleep until a condition gets true
417  * @wq_head: the waitqueue to wait on
418  * @condition: a C expression for the event to wait for
419  * @cmd1: the command will be executed before sleep
420  * @cmd2: the command will be executed after sleep
421  *
422  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
423  * @condition evaluates to true. The @condition is checked each time
424  * the waitqueue @wq_head is woken up.
425  *
426  * wake_up() has to be called after changing any variable that could
427  * change the result of the wait condition.
428  */
429 #define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
430 do {										\
431 	if (condition)								\
432 		break;								\
433 	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
434 } while (0)
435 
436 #define __wait_event_interruptible(wq_head, condition)				\
437 	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
438 		      schedule())
439 
440 /**
441  * wait_event_interruptible - sleep until a condition gets true
442  * @wq_head: the waitqueue to wait on
443  * @condition: a C expression for the event to wait for
444  *
445  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
446  * @condition evaluates to true or a signal is received.
447  * The @condition is checked each time the waitqueue @wq_head is woken up.
448  *
449  * wake_up() has to be called after changing any variable that could
450  * change the result of the wait condition.
451  *
452  * The function will return -ERESTARTSYS if it was interrupted by a
453  * signal and 0 if @condition evaluated to true.
454  */
455 #define wait_event_interruptible(wq_head, condition)				\
456 ({										\
457 	int __ret = 0;								\
458 	might_sleep();								\
459 	if (!(condition))							\
460 		__ret = __wait_event_interruptible(wq_head, condition);		\
461 	__ret;									\
462 })
463 
464 #define __wait_event_interruptible_timeout(wq_head, condition, timeout)		\
465 	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
466 		      TASK_INTERRUPTIBLE, 0, timeout,				\
467 		      __ret = schedule_timeout(__ret))
468 
469 /**
470  * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
471  * @wq_head: the waitqueue to wait on
472  * @condition: a C expression for the event to wait for
473  * @timeout: timeout, in jiffies
474  *
475  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
476  * @condition evaluates to true or a signal is received.
477  * The @condition is checked each time the waitqueue @wq_head is woken up.
478  *
479  * wake_up() has to be called after changing any variable that could
480  * change the result of the wait condition.
481  *
482  * Returns:
483  * 0 if the @condition evaluated to %false after the @timeout elapsed,
484  * 1 if the @condition evaluated to %true after the @timeout elapsed,
485  * the remaining jiffies (at least 1) if the @condition evaluated
486  * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
487  * interrupted by a signal.
488  */
489 #define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
490 ({										\
491 	long __ret = timeout;							\
492 	might_sleep();								\
493 	if (!___wait_cond_timeout(condition))					\
494 		__ret = __wait_event_interruptible_timeout(wq_head,		\
495 						condition, timeout);		\
496 	__ret;									\
497 })
498 
/*
 * Sleep in @state until @condition is true, a signal arrives (when
 * @state is interruptible), or the hrtimer fires.  A @timeout of
 * KTIME_MAX means wait forever.  Evaluates to 0 on normal completion,
 * -ETIME once the timer has fired (observed as __t.task going NULL),
 * or the error from ___wait_event() when a signal ended the sleep.
 */
#define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
({										\
	int __ret = 0;								\
	struct hrtimer_sleeper __t;						\
										\
	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,			\
				      HRTIMER_MODE_REL);			\
	if ((timeout) != KTIME_MAX)						\
		hrtimer_start_range_ns(&__t.timer, timeout,			\
				       current->timer_slack_ns,			\
				       HRTIMER_MODE_REL);			\
										\
	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
		if (!__t.task) {						\
			__ret = -ETIME;						\
			break;							\
		}								\
		schedule());							\
										\
	hrtimer_cancel(&__t.timer);						\
	destroy_hrtimer_on_stack(&__t.timer);					\
	__ret;									\
})
522 
523 /**
524  * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
525  * @wq_head: the waitqueue to wait on
526  * @condition: a C expression for the event to wait for
527  * @timeout: timeout, as a ktime_t
528  *
529  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.
531  * The @condition is checked each time the waitqueue @wq_head is woken up.
532  *
533  * wake_up() has to be called after changing any variable that could
534  * change the result of the wait condition.
535  *
536  * The function returns 0 if @condition became true, or -ETIME if the timeout
537  * elapsed.
538  */
539 #define wait_event_hrtimeout(wq_head, condition, timeout)			\
540 ({										\
541 	int __ret = 0;								\
542 	might_sleep();								\
543 	if (!(condition))							\
544 		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
545 					       TASK_UNINTERRUPTIBLE);		\
546 	__ret;									\
547 })
548 
549 /**
550  * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
551  * @wq: the waitqueue to wait on
552  * @condition: a C expression for the event to wait for
553  * @timeout: timeout, as a ktime_t
554  *
555  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
556  * @condition evaluates to true or a signal is received.
557  * The @condition is checked each time the waitqueue @wq is woken up.
558  *
559  * wake_up() has to be called after changing any variable that could
560  * change the result of the wait condition.
561  *
562  * The function returns 0 if @condition became true, -ERESTARTSYS if it was
563  * interrupted by a signal, or -ETIME if the timeout elapsed.
564  */
565 #define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
566 ({										\
567 	long __ret = 0;								\
568 	might_sleep();								\
569 	if (!(condition))							\
570 		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
571 					       TASK_INTERRUPTIBLE);		\
572 	__ret;									\
573 })
574 
575 #define __wait_event_interruptible_exclusive(wq, condition)			\
576 	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
577 		      schedule())
578 
579 #define wait_event_interruptible_exclusive(wq, condition)			\
580 ({										\
581 	int __ret = 0;								\
582 	might_sleep();								\
583 	if (!(condition))							\
584 		__ret = __wait_event_interruptible_exclusive(wq, condition);	\
585 	__ret;									\
586 })
587 
588 #define __wait_event_killable_exclusive(wq, condition)				\
589 	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,			\
590 		      schedule())
591 
592 #define wait_event_killable_exclusive(wq, condition)				\
593 ({										\
594 	int __ret = 0;								\
595 	might_sleep();								\
596 	if (!(condition))							\
597 		__ret = __wait_event_killable_exclusive(wq, condition);		\
598 	__ret;									\
599 })
600 
601 
602 #define __wait_event_freezable_exclusive(wq, condition)				\
603 	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
604 			freezable_schedule())
605 
606 #define wait_event_freezable_exclusive(wq, condition)				\
607 ({										\
608 	int __ret = 0;								\
609 	might_sleep();								\
610 	if (!(condition))							\
611 		__ret = __wait_event_freezable_exclusive(wq, condition);	\
612 	__ret;									\
613 })
614 
615 /**
616  * wait_event_idle - wait for a condition without contributing to system load
617  * @wq_head: the waitqueue to wait on
618  * @condition: a C expression for the event to wait for
619  *
620  * The process is put to sleep (TASK_IDLE) until the
621  * @condition evaluates to true.
622  * The @condition is checked each time the waitqueue @wq_head is woken up.
623  *
624  * wake_up() has to be called after changing any variable that could
625  * change the result of the wait condition.
626  *
627  */
628 #define wait_event_idle(wq_head, condition)					\
629 do {										\
630 	might_sleep();								\
631 	if (!(condition))							\
632 		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule());	\
633 } while (0)
634 
635 /**
 * wait_event_idle_exclusive - wait for a condition without contributing to system load
637  * @wq_head: the waitqueue to wait on
638  * @condition: a C expression for the event to wait for
639  *
640  * The process is put to sleep (TASK_IDLE) until the
641  * @condition evaluates to true.
642  * The @condition is checked each time the waitqueue @wq_head is woken up.
643  *
644  * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
645  * set thus if other processes wait on the same list, when this
646  * process is woken further processes are not considered.
647  *
648  * wake_up() has to be called after changing any variable that could
649  * change the result of the wait condition.
650  *
651  */
652 #define wait_event_idle_exclusive(wq_head, condition)				\
653 do {										\
654 	might_sleep();								\
655 	if (!(condition))							\
656 		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule());	\
657 } while (0)
658 
659 #define __wait_event_idle_timeout(wq_head, condition, timeout)			\
660 	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
661 		      TASK_IDLE, 0, timeout,					\
662 		      __ret = schedule_timeout(__ret))
663 
664 /**
665  * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
666  * @wq_head: the waitqueue to wait on
667  * @condition: a C expression for the event to wait for
668  * @timeout: timeout, in jiffies
669  *
670  * The process is put to sleep (TASK_IDLE) until the
671  * @condition evaluates to true. The @condition is checked each time
672  * the waitqueue @wq_head is woken up.
673  *
674  * wake_up() has to be called after changing any variable that could
675  * change the result of the wait condition.
676  *
677  * Returns:
678  * 0 if the @condition evaluated to %false after the @timeout elapsed,
679  * 1 if the @condition evaluated to %true after the @timeout elapsed,
680  * or the remaining jiffies (at least 1) if the @condition evaluated
681  * to %true before the @timeout elapsed.
682  */
683 #define wait_event_idle_timeout(wq_head, condition, timeout)			\
684 ({										\
685 	long __ret = timeout;							\
686 	might_sleep();								\
687 	if (!___wait_cond_timeout(condition))					\
688 		__ret = __wait_event_idle_timeout(wq_head, condition, timeout);	\
689 	__ret;									\
690 })
691 
692 #define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
693 	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
694 		      TASK_IDLE, 1, timeout,					\
695 		      __ret = schedule_timeout(__ret))
696 
697 /**
698  * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
699  * @wq_head: the waitqueue to wait on
700  * @condition: a C expression for the event to wait for
701  * @timeout: timeout, in jiffies
702  *
703  * The process is put to sleep (TASK_IDLE) until the
704  * @condition evaluates to true. The @condition is checked each time
705  * the waitqueue @wq_head is woken up.
706  *
707  * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
708  * set thus if other processes wait on the same list, when this
709  * process is woken further processes are not considered.
710  *
711  * wake_up() has to be called after changing any variable that could
712  * change the result of the wait condition.
713  *
714  * Returns:
715  * 0 if the @condition evaluated to %false after the @timeout elapsed,
716  * 1 if the @condition evaluated to %true after the @timeout elapsed,
717  * or the remaining jiffies (at least 1) if the @condition evaluated
718  * to %true before the @timeout elapsed.
719  */
720 #define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)		\
721 ({										\
722 	long __ret = timeout;							\
723 	might_sleep();								\
724 	if (!___wait_cond_timeout(condition))					\
725 		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
726 	__ret;									\
727 })
728 
729 extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
730 extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
731 
/*
 * Common body of the wait_event_interruptible*locked* macros: loop
 * calling @fn (do_wait_intr or do_wait_intr_irq, which release and
 * re-take wq.lock around the sleep) until @condition is true or @fn
 * reports a pending signal.  Entered, and left, with wq.lock held --
 * see the kerneldoc on the wrapper macros below.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)		\
({										\
	int __ret;								\
	DEFINE_WAIT(__wait);							\
	if (exclusive)								\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;				\
	do {									\
		__ret = fn(&(wq), &__wait);					\
		if (__ret)							\
			break;							\
	} while (!(condition));							\
	__remove_wait_queue(&(wq), &__wait);					\
	__set_current_state(TASK_RUNNING);					\
	__ret;									\
})
747 
748 
749 /**
750  * wait_event_interruptible_locked - sleep until a condition gets true
751  * @wq: the waitqueue to wait on
752  * @condition: a C expression for the event to wait for
753  *
754  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
755  * @condition evaluates to true or a signal is received.
756  * The @condition is checked each time the waitqueue @wq is woken up.
757  *
758  * It must be called with wq.lock being held.  This spinlock is
759  * unlocked while sleeping but @condition testing is done while lock
760  * is held and when this macro exits the lock is held.
761  *
762  * The lock is locked/unlocked using spin_lock()/spin_unlock()
763  * functions which must match the way they are locked/unlocked outside
764  * of this macro.
765  *
766  * wake_up_locked() has to be called after changing any variable that could
767  * change the result of the wait condition.
768  *
769  * The function will return -ERESTARTSYS if it was interrupted by a
770  * signal and 0 if @condition evaluated to true.
771  */
772 #define wait_event_interruptible_locked(wq, condition)				\
773 	((condition)								\
774 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
775 
776 /**
777  * wait_event_interruptible_locked_irq - sleep until a condition gets true
778  * @wq: the waitqueue to wait on
779  * @condition: a C expression for the event to wait for
780  *
781  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
782  * @condition evaluates to true or a signal is received.
783  * The @condition is checked each time the waitqueue @wq is woken up.
784  *
785  * It must be called with wq.lock being held.  This spinlock is
786  * unlocked while sleeping but @condition testing is done while lock
787  * is held and when this macro exits the lock is held.
788  *
789  * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
790  * functions which must match the way they are locked/unlocked outside
791  * of this macro.
792  *
793  * wake_up_locked() has to be called after changing any variable that could
794  * change the result of the wait condition.
795  *
796  * The function will return -ERESTARTSYS if it was interrupted by a
797  * signal and 0 if @condition evaluated to true.
798  */
799 #define wait_event_interruptible_locked_irq(wq, condition)			\
800 	((condition)								\
801 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
802 
803 /**
804  * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
805  * @wq: the waitqueue to wait on
806  * @condition: a C expression for the event to wait for
807  *
808  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
809  * @condition evaluates to true or a signal is received.
810  * The @condition is checked each time the waitqueue @wq is woken up.
811  *
812  * It must be called with wq.lock being held.  This spinlock is
813  * unlocked while sleeping but @condition testing is done while lock
814  * is held and when this macro exits the lock is held.
815  *
816  * The lock is locked/unlocked using spin_lock()/spin_unlock()
817  * functions which must match the way they are locked/unlocked outside
818  * of this macro.
819  *
820  * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 * set, thus if other processes wait on the same list, when this
 * process is woken further processes are not considered.
823  *
824  * wake_up_locked() has to be called after changing any variable that could
825  * change the result of the wait condition.
826  *
827  * The function will return -ERESTARTSYS if it was interrupted by a
828  * signal and 0 if @condition evaluated to true.
829  */
830 #define wait_event_interruptible_exclusive_locked(wq, condition)		\
831 	((condition)								\
832 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
833 
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 * set, so a wake-up that reaches this process stops there and does not
 * also wake the (exclusive) waiters queued after it.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
864 
865 
/* Core of wait_event_killable(): sleep in TASK_KILLABLE until @condition. */
#define __wait_event_killable(wq, condition)					\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
868 
/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received
 * (TASK_KILLABLE sleeps are interrupted only by fatal signals).
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * May sleep; must not be called from atomic context (checked by the
 * might_sleep() annotation below).
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * fatal signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq_head, condition)					\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable(wq_head, condition);		\
	__ret;									\
})
892 
/*
 * Core of wait_event_killable_timeout(): TASK_KILLABLE sleep bounded by
 * schedule_timeout(); __ret carries the remaining jiffies across wakeups.
 */
#define __wait_event_killable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_KILLABLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))
897 
/**
 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * May sleep; must not be called from atomic context (checked by the
 * might_sleep() annotation below).
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a kill signal.
 *
 * Only kill signals interrupt this process.
 */
#define wait_event_killable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_killable_timeout(wq_head,			\
						condition, timeout);		\
	__ret;									\
})
929 
930 
/*
 * Core of the wait_event_lock_irq*() family: uninterruptible sleep that
 * drops @lock with spin_unlock_irq() (running @cmd outside the critical
 * section) before each schedule() and re-takes it before @condition is
 * rechecked.  The (void) cast discards ___wait_event()'s result, which
 * callers of the uninterruptible variants do not use.
 */
#define __wait_event_lock_irq(wq_head, condition, lock, cmd)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);				\
			    cmd;						\
			    schedule();						\
			    spin_lock_irq(&lock))
937 
/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true; the
 *			     condition is checked under the lock, which
 *			     the caller must already hold.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * Sleeps in TASK_UNINTERRUPTIBLE until @condition becomes true;
 * @condition is re-evaluated every time the waitqueue @wq_head is
 * woken up.
 *
 * Callers must invoke wake_up() after changing any variable that could
 * affect the result of the wait condition.
 *
 * Must be entered with @lock held; the lock is dropped around @cmd and
 * the sleep, and is held again on return.
 */
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)			\
do {										\
	if (!(condition))							\
		__wait_event_lock_irq(wq_head, condition, lock, cmd);		\
} while (0)
967 
/**
 * wait_event_lock_irq - sleep until a condition gets true; the
 *			 condition is checked under the lock, which
 *			 the caller must already hold.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * Sleeps in TASK_UNINTERRUPTIBLE until @condition becomes true;
 * @condition is re-evaluated every time the waitqueue @wq_head is
 * woken up.
 *
 * Callers must invoke wake_up() after changing any variable that could
 * affect the result of the wait condition.
 *
 * Must be entered with @lock held; the lock is dropped around the sleep
 * and is held again on return.
 */
#define wait_event_lock_irq(wq_head, condition, lock)				\
do {										\
	if (!(condition))							\
		__wait_event_lock_irq(wq_head, condition, lock, );		\
} while (0)
994 
995 
/*
 * Core of the wait_event_interruptible_lock_irq*() family: interruptible
 * sleep that drops @lock with spin_unlock_irq() (running @cmd outside the
 * critical section) before each schedule() and re-takes it before
 * @condition is rechecked.  Evaluates to ___wait_event()'s result
 * (0 or -ERESTARTSYS; see the wrappers below).
 */
#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd)	\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);					\
		      cmd;							\
		      schedule();						\
		      spin_lock_irq(&lock))
1002 
/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * Sleeps in TASK_INTERRUPTIBLE until @condition becomes true or a signal
 * arrives; @condition is re-evaluated every time the waitqueue @wq_head
 * is woken up.
 *
 * Callers must invoke wake_up() after changing any variable that could
 * affect the result of the wait condition.
 *
 * Must be entered with @lock held; the lock is dropped around @cmd and
 * the sleep, and is held again on return.
 *
 * Evaluates to -ERESTARTSYS if interrupted by a signal, or 0 once
 * @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd)	\
({										\
	int __ret = (condition)							\
		? 0								\
		: __wait_event_interruptible_lock_irq(wq_head,			\
						condition, lock, cmd);		\
	__ret;									\
})
1036 
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * Sleeps in TASK_INTERRUPTIBLE until @condition becomes true or a signal
 * arrives; @condition is re-evaluated every time the waitqueue @wq_head
 * is woken up.
 *
 * Callers must invoke wake_up() after changing any variable that could
 * affect the result of the wait condition.
 *
 * Must be entered with @lock held; the lock is dropped around the sleep
 * and is held again on return.
 *
 * Evaluates to -ERESTARTSYS if interrupted by a signal, or 0 once
 * @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq_head, condition, lock)		\
({										\
	int __ret = (condition)							\
		? 0								\
		: __wait_event_interruptible_lock_irq(wq_head,			\
						condition, lock,);		\
	__ret;									\
})
1067 
/*
 * Core of the *_lock_irq_timeout() wrappers below: sleep in @state until
 * ___wait_cond_timeout(@condition) holds or the timeout expires, dropping
 * @lock with spin_unlock_irq() around each schedule_timeout() and re-taking
 * it before @condition is rechecked.  __ret (declared by ___wait_event())
 * carries the remaining jiffies across wakeups.
 *
 * The stray semicolon that used to terminate this definition is gone: it
 * expanded to an empty statement at every call site and, unlike the sibling
 * __wait_event_lock_irq()/__wait_event_interruptible_lock_irq() helpers,
 * made the macro unusable in expression context.
 */
#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      state, 0, timeout,					\
		      spin_unlock_irq(&lock);					\
		      __ret = schedule_timeout(__ret);				\
		      spin_lock_irq(&lock))
1074 
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated to
 * %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,	\
						  timeout)			\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_INTERRUPTIBLE);			\
	__ret;									\
})
1109 
/**
 * wait_event_lock_irq_timeout - sleep until a condition gets true or a
 *		timeout elapses. The condition is checked under the lock.
 *		This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the @timeout elapses. The @condition
 * is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)		\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_UNINTERRUPTIBLE);			\
	__ret;									\
})
1119 
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
/* Queue @wq_entry on @wq_head and set the current task's state to @state. */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
/* As prepare_to_wait(), but queues @wq_entry with WQ_FLAG_EXCLUSIVE set. */
void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
/* As prepare_to_wait(); returns nonzero (-ERESTARTSYS-style) on pending signal — TODO confirm at definition. */
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
/* Undo prepare_to_wait*(): restore TASK_RUNNING and dequeue @wq_entry if still queued. */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
/* Sleep up to @timeout jiffies until woken via woken_wake_function() (WQ_FLAG_WOKEN protocol). */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
/* wait_queue_func_t wake callback pairing with wait_woken(): sets WQ_FLAG_WOKEN. */
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
/* wait_queue_func_t wake callback that removes the entry from the queue on wakeup (used by DEFINE_WAIT()). */
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1130 
/*
 * Define an on-stack wait queue entry for the current task with a custom
 * wake function; flags is zero and the list head starts self-linked
 * (i.e. not yet queued on any waitqueue).
 */
#define DEFINE_WAIT_FUNC(name, function)					\
	struct wait_queue_entry name = {					\
		.private	= current,					\
		.func		= function,					\
		.entry		= LIST_HEAD_INIT((name).entry),			\
	}
1137 
/* DEFINE_WAIT_FUNC() with the default autoremove_wake_function() callback. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
1139 
/*
 * Runtime (re)initialization of an already-declared wait_queue_entry for
 * the current task — the dynamic counterpart of DEFINE_WAIT(); also
 * clears the flags word.
 */
#define init_wait(wait)								\
	do {									\
		(wait)->private = current;					\
		(wait)->func = autoremove_wake_function;			\
		INIT_LIST_HEAD(&(wait)->entry);					\
		(wait)->flags = 0;						\
	} while (0)
1147 
1148 #endif /* _LINUX_WAIT_H */
1149