/* xref: /openbmc/linux/include/linux/wait.h (revision f0702555) */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

/* __wait_queue::flags */
#define WQ_FLAG_EXCLUSIVE	0x01
#define WQ_FLAG_WOKEN		0x02

struct __wait_queue {
	unsigned int		flags;
	void			*private;
	wait_queue_func_t	func;
	struct list_head	task_list;
};

struct wait_bit_key {
	void			*flags;
	int			bit_nr;
#define WAIT_ATOMIC_T_BIT_NR	-1
	unsigned long		timeout;
};

struct wait_bit_queue {
	struct wait_bit_key	key;
	wait_queue_t		wait;
};

struct __wait_queue_head {
	spinlock_t		lock;
	struct list_head	task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags	= 0;
	q->private	= p;
	q->func		= default_wake_function;
}

static inline void
init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
{
	q->flags	= 0;
	q->private	= NULL;
	q->func		= func;
}
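
/*
 * Illustrative sketch (not part of this header): using the init
 * helpers above to build a wait entry by hand.  my_wq is a
 * hypothetical waitqueue head; a custom wake callback would be
 * attached with init_waitqueue_func_entry() instead.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *
 *	wait_queue_t entry;
 *	init_waitqueue_entry(&entry, current);	// wake via default_wake_function
 *	add_wait_queue(&my_wq, &entry);
 *	...
 *	remove_wait_queue(&my_wq, &entry);
 */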

/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @q: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head_t::lock or when used for wakeups
 * with an extra smp_mb() like:
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq))         if (@cond)
 *        wake_up(wq);                      break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq: wait queue head
 *
 * Returns true if wq has waiting processes
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(wait_queue_head_t *wq)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return waitqueue_active(wq);
}
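
/*
 * A minimal waker-side sketch (my_wq and my_cond are hypothetical):
 * publish the condition first, then use wq_has_sleeper() to elide the
 * wakeup when nobody is waiting.  The smp_mb() in wq_has_sleeper()
 * pairs with the barrier in the waiter's set_current_state().
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool my_cond;
 *
 *	my_cond = true;			// store the condition
 *	if (wq_has_sleeper(&my_wq))	// implies smp_mb()
 *		wake_up(&my_wq);
 */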

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
	list_del(&old->task_list);
}

typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})

#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)	\

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	wait_queue_t __wait;						\
	long __ret = ret;	/* explicit shadow */			\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	if (exclusive)							\
		__wait.flags = WQ_FLAG_EXCLUSIVE;			\
	else								\
		__wait.flags = 0;					\
									\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			if (exclusive) {				\
				abort_exclusive_wait(&wq, &__wait,	\
						     state, NULL);	\
				goto __out;				\
			}						\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq, &__wait);					\
__out:	__ret;								\
})

#define __wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
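
/*
 * A minimal sketch of the usual wait_event()/wake_up() pairing
 * (my_wq, data_ready and process_data() are hypothetical):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool data_ready;
 *
 *	// consumer
 *	wait_event(my_wq, data_ready);
 *	process_data();
 *
 *	// producer
 *	data_ready = true;	// update the condition first ...
 *	wake_up(&my_wq);	// ... then wake the waiters
 */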

#define __io_wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__io_wait_event(wq, condition);					\
} while (0)

#define __wait_event_freezable(wq, condition)				\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
			    schedule(); try_to_freeze())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable(wq, condition);		\
	__ret;								\
})

#define __wait_event_timeout(wq, condition, timeout)			\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})
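
/*
 * A minimal sketch of interpreting the wait_event_timeout() return
 * value (my_wq and data_ready are hypothetical):
 *
 *	long ret = wait_event_timeout(my_wq, data_ready, HZ);
 *	if (!ret)
 *		return -ETIMEDOUT;	// still false after one second
 *	// ret is 1, or the number of jiffies left before the timeout
 */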

#define __wait_event_freezable_timeout(wq, condition, timeout)		\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq, condition, timeout)		\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_freezable_timeout(wq, condition, timeout);	\
	__ret;								\
})

#define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets the exclusive flag
 */
#define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_exclusive_cmd(wq, condition, cmd1, cmd2);		\
} while (0)

#define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleep
 * @cmd2: the command to be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
} while (0)

#define __wait_event_interruptible(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible(wq, condition);	\
	__ret;								\
})
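
/*
 * A minimal sketch for a syscall-style path where a signal must be
 * able to abort the wait (my_wq and data_ready are hypothetical):
 *
 *	int err = wait_event_interruptible(my_wq, data_ready);
 *	if (err)
 *		return err;	// -ERESTARTSYS: a signal arrived first
 *	// data_ready was true here, under the usual wakeup rules
 */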

#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})

#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout).tv64 != KTIME_MAX)				\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	__ret = ___wait_event(wq, condition, state, 0, 0,		\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule());						\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	__ret;								\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})

#define __wait_event_interruptible_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret;								\
})


#define __wait_event_freezable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
			schedule(); try_to_freeze())

#define wait_event_freezable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable_exclusive(wq, condition);\
	__ret;								\
})


#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})


/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while the
 * lock is held, and the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
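
/*
 * A minimal sketch of the _locked variant, with the waitqueue's own
 * spinlock serialising the condition (my_wq, data_ready and
 * consume_data() are hypothetical):
 *
 *	spin_lock(&my_wq.lock);
 *	err = wait_event_interruptible_locked(my_wq, data_ready);
 *	if (!err)
 *		consume_data();		// still under my_wq.lock
 *	spin_unlock(&my_wq.lock);
 *
 *	// waker side, under the same lock:
 *	spin_lock(&my_wq.lock);
 *	data_ready = true;
 *	wake_up_locked(&my_wq);
 *	spin_unlock(&my_wq.lock);
 */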

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while the
 * lock is held, and the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while the
 * lock is held, and the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if this process is woken up, further processes waiting on the
 * list are not considered for wakeup.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while the
 * lock is held, and the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if this process is woken up, further processes waiting on the
 * list are not considered for wakeup.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))


#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * fatal signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable(wq, condition);		\
	__ret;								\
})


#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, );			\
} while (0)
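
/*
 * A minimal sketch of wait_event_lock_irq(), in the spirit of its
 * users in drivers/md (my_lock, my_wq and pending_count are
 * hypothetical; my_lock protects pending_count):
 *
 *	spin_lock_irq(&my_lock);
 *	wait_event_lock_irq(my_wq, pending_count == 0, my_lock);
 *	// my_lock is held again here and the condition is true
 *	spin_unlock_irq(&my_lock);
 */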


#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock, cmd);	\
	__ret;								\
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock,);	\
	__ret;								\
})

#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
						    lock, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      spin_unlock_irq(&lock);				\
		      __ret = schedule_timeout(__ret);			\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies (at least 1)
 * if the @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
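
/*
 * A minimal open-coded wait loop using DEFINE_WAIT(); this is, in
 * spirit, what the wait_event*() macros expand to (my_wq and
 * data_ready are hypothetical):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (data_ready)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */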

#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)


extern int bit_wait(struct wait_bit_key *, int);
extern int bit_wait_io(struct wait_bit_key *, int);
extern int bit_wait_timeout(struct wait_bit_key *, int);
extern int bit_wait_io_timeout(struct wait_bit_key *, int);

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit,
				       bit_wait,
				       mode);
}
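
/*
 * A minimal sketch of the bit-wait API around a flags word
 * (MY_FLAG_BUSY and my_flags are hypothetical):
 *
 *	static unsigned long my_flags;
 *
 *	// waiter: sleep until MY_FLAG_BUSY is cleared
 *	wait_on_bit(&my_flags, MY_FLAG_BUSY, TASK_UNINTERRUPTIBLE);
 *
 *	// owner: clear the bit, then wake the hashed waitqueue
 *	clear_bit(MY_FLAG_BUSY, &my_flags);
 *	smp_mb__after_atomic();	// order the clear before the wakeup
 *	wake_up_bit(&my_flags, MY_FLAG_BUSY);
 */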

/**
 * wait_on_bit_io - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared.  This is similar to wait_on_bit(), but calls
 * io_schedule() instead of schedule() for the actual waiting.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit,
				       bit_wait_io,
				       mode);
}

/**
 * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 * @timeout: timeout, in jiffies
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared. This is similar to wait_on_bit(), except also takes a
 * timeout parameter.
 *
 * Returned value will be zero if the bit was cleared before the
 * @timeout elapsed, or non-zero if the @timeout elapsed or process
 * received a signal and the mode permitted wakeup on that signal.
 */
static inline int
wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
		    unsigned long timeout)
{
	might_sleep();
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_timeout(word, bit,
					       bit_wait_timeout,
					       mode, timeout);
}

/**
 * wait_on_bit_action - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared, and allow the waiting action to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
		   unsigned mode)
{
	might_sleep();
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set a bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set.  Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
}
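
/*
 * A minimal bit-lock sketch built on wait_on_bit_lock()
 * (MY_FLAG_LOCK and my_flags are hypothetical):
 *
 *	if (wait_on_bit_lock(&my_flags, MY_FLAG_LOCK, TASK_KILLABLE))
 *		return -EINTR;		// fatal signal, bit not taken
 *	// MY_FLAG_LOCK is now set and owned by us
 *	...
 *	clear_bit_unlock(MY_FLAG_LOCK, &my_flags);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&my_flags, MY_FLAG_LOCK);
 */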

/**
 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to atomically set it.  This is similar
 * to wait_on_bit(), but calls io_schedule() instead of schedule()
 * for the actual waiting.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set.  Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
}

/**
 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to set it, and allow the waiting action
 * to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set.  Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
			unsigned mode)
{
	might_sleep();
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0.  We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
	might_sleep();
	if (atomic_read(val) == 0)
		return 0;
	return out_of_line_wait_on_atomic_t(val, action, mode);
}
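
/*
 * A minimal refcount-style sketch of wait_on_atomic_t() (my_users and
 * my_wait_atomic() are hypothetical; the action simply sleeps):
 *
 *	static atomic_t my_users;
 *
 *	static int my_wait_atomic(atomic_t *p)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	// waiter: block until the last user drops my_users to 0
 *	wait_on_atomic_t(&my_users, my_wait_atomic, TASK_UNINTERRUPTIBLE);
 *
 *	// releaser: drop a reference and wake the hashed waitqueue
 *	if (atomic_dec_and_test(&my_users))
 *		wake_up_atomic_t(&my_users);
 */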

#endif /* _LINUX_WAIT_H */