xref: /openbmc/linux/kernel/sched/completion.c (revision 239480ab)
1 /*
2  * Generic wait-for-completion handler;
3  *
4  * It differs from semaphores in that their default case is the opposite,
5  * wait_for_completion default blocks whereas semaphore default non-block. The
6  * interface also makes it easy to 'complete' multiple waiting threads,
7  * something which isn't entirely natural for semaphores.
8  *
9  * But more importantly, the primitive documents the usage. Semaphores would
10  * typically be used for exclusion which gives rise to priority inversion.
11  * Waiting for completion is a typically sync point, but not an exclusion point.
12  */
13 
14 #include <linux/sched/signal.h>
15 #include <linux/sched/debug.h>
16 #include <linux/completion.h>
17 
/**
 * complete: - signals a single thread waiting on this completion
 * @x:  holds the state of this particular completion
 *
 * This will wake up a single thread waiting on this completion. Threads will be
 * awakened in the same order in which they were queued.
 *
 * See also complete_all(), wait_for_completion() and related routines.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void complete(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	/*
	 * ->done == UINT_MAX is the "completed forever" marker set by
	 * complete_all(); don't increment past it, both to preserve that
	 * marker and to avoid wrapping the counter back to zero.
	 */
	if (x->done != UINT_MAX)
		x->done++;
	/* nr_exclusive == 1: wake at most one (exclusive) waiter, FIFO. */
	__wake_up_locked(&x->wait, TASK_NORMAL, 1);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);
41 
/**
 * complete_all: - signals all threads waiting on this completion
 * @x:  holds the state of this particular completion
 *
 * This will wake up all threads waiting on this particular completion event.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void complete_all(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	/*
	 * UINT_MAX is the "completed forever" marker: waiters see ->done
	 * set but never decrement it, so all current and future waits
	 * succeed immediately.
	 */
	x->done = UINT_MAX;
	/* nr_exclusive == 0: wake every waiter on the queue. */
	__wake_up_locked(&x->wait, TASK_NORMAL, 0);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);
61 
/*
 * Core wait loop. Called with ->wait.lock held and IRQs disabled; returns
 * with the lock still held.
 *
 * @x:       completion to wait on
 * @action:  blocking primitive, e.g. schedule_timeout() or
 *           io_schedule_timeout()
 * @timeout: jiffies to wait, or MAX_SCHEDULE_TIMEOUT
 * @state:   task state to sleep in (TASK_UNINTERRUPTIBLE, etc.)
 *
 * Return: -ERESTARTSYS if woken by a signal (for interruptible/killable
 * @state), 0 if the timeout expired without completion, otherwise the
 * remaining jiffies (at least 1) after one completion has been consumed.
 */
static inline long __sched
do_wait_for_common(struct completion *x,
		   long (*action)(long), long timeout, int state)
{
	if (!x->done) {
		DECLARE_WAITQUEUE(wait, current);

		/* Exclusive waiter: complete() wakes exactly one of us. */
		__add_wait_queue_tail_exclusive(&x->wait, &wait);
		do {
			if (signal_pending_state(state, current)) {
				timeout = -ERESTARTSYS;
				break;
			}
			__set_current_state(state);
			/*
			 * Drop the lock while blocked so complete() can take
			 * it; recheck ->done after reacquiring.
			 */
			spin_unlock_irq(&x->wait.lock);
			timeout = action(timeout);
			spin_lock_irq(&x->wait.lock);
		} while (!x->done && timeout);
		__remove_wait_queue(&x->wait, &wait);
		/* Signal or timeout without completion: report it as-is. */
		if (!x->done)
			return timeout;
	}
	/* Consume one completion, unless complete_all() made it sticky. */
	if (x->done != UINT_MAX)
		x->done--;
	/* timeout may have hit 0 in the same tick ->done was set. */
	return timeout ?: 1;
}
88 
/*
 * Common entry for all wait_for_completion*() variants: take ->wait.lock
 * with IRQs disabled and run the wait loop. @action selects the blocking
 * primitive (plain vs. IO-accounted) and @state the sleeping task state.
 */
static inline long __sched
__wait_for_common(struct completion *x,
		  long (*action)(long), long timeout, int state)
{
	might_sleep();

	spin_lock_irq(&x->wait.lock);
	timeout = do_wait_for_common(x, action, timeout, state);
	spin_unlock_irq(&x->wait.lock);
	return timeout;
}
100 
/* Wait helper for the non-IO-accounted wait_for_completion*() variants. */
static long __sched
wait_for_common(struct completion *x, long timeout, int state)
{
	return __wait_for_common(x, schedule_timeout, timeout, state);
}
106 
/* Wait helper that accounts the sleeping task as waiting for IO. */
static long __sched
wait_for_common_io(struct completion *x, long timeout, int state)
{
	return __wait_for_common(x, io_schedule_timeout, timeout, state);
}
112 
/**
 * wait_for_completion: - waits for completion of a task
 * @x:  holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout.
 *
 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
 * and interrupt capability. Also see complete().
 */
void __sched wait_for_completion(struct completion *x)
{
	/* Uninterruptible, unbounded sleep until complete()/complete_all(). */
	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion);
128 
/**
 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible.
 *
 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
 * till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
	/* TASK_UNINTERRUPTIBLE: the wait loop cannot return -ERESTARTSYS. */
	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_timeout);
147 
/**
 * wait_for_completion_io: - waits for completion of a task
 * @x:  holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout. The caller is accounted as waiting
 * for IO (which traditionally means blkio only).
 */
void __sched wait_for_completion_io(struct completion *x)
{
	/* As wait_for_completion(), but accounted as IO wait. */
	wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io);
161 
/**
 * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible. The caller is accounted as waiting for IO (which traditionally
 * means blkio only).
 *
 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
 * till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
{
	/* As wait_for_completion_timeout(), but accounted as IO wait. */
	return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io_timeout);
181 
182 /**
183  * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
184  * @x:  holds the state of this particular completion
185  *
186  * This waits for completion of a specific task to be signaled. It is
187  * interruptible.
188  *
189  * Return: -ERESTARTSYS if interrupted, 0 if completed.
190  */
191 int __sched wait_for_completion_interruptible(struct completion *x)
192 {
193 	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
194 	if (t == -ERESTARTSYS)
195 		return t;
196 	return 0;
197 }
198 EXPORT_SYMBOL(wait_for_completion_interruptible);
199 
/**
 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
 * or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
					  unsigned long timeout)
{
	/* Any signal wakes the sleeper (TASK_INTERRUPTIBLE). */
	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
218 
219 /**
220  * wait_for_completion_killable: - waits for completion of a task (killable)
221  * @x:  holds the state of this particular completion
222  *
223  * This waits to be signaled for completion of a specific task. It can be
224  * interrupted by a kill signal.
225  *
226  * Return: -ERESTARTSYS if interrupted, 0 if completed.
227  */
228 int __sched wait_for_completion_killable(struct completion *x)
229 {
230 	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
231 	if (t == -ERESTARTSYS)
232 		return t;
233 	return 0;
234 }
235 EXPORT_SYMBOL(wait_for_completion_killable);
236 
/**
 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be
 * signaled or for a specified timeout to expire. It can be
 * interrupted by a kill signal. The timeout is in jiffies.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
 * or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_killable_timeout(struct completion *x,
				     unsigned long timeout)
{
	/* Only fatal signals wake the sleeper (TASK_KILLABLE). */
	return wait_for_common(x, timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL(wait_for_completion_killable_timeout);
256 
/**
 *	try_wait_for_completion - try to decrement a completion without blocking
 *	@x:	completion structure
 *
 *	Return: 0 if a decrement cannot be done without blocking
 *		 1 if a decrement succeeded.
 *
 *	If a completion is being used as a counting completion,
 *	attempt to decrement the counter without blocking. This
 *	enables us to avoid waiting if the resource the completion
 *	is protecting is not available.
 */
bool try_wait_for_completion(struct completion *x)
{
	unsigned long flags;
	int ret = 1;

	/*
	 * Since x->done will need to be locked only
	 * in the non-blocking case, we check x->done
	 * first without taking the lock so we can
	 * return early in the blocking case.
	 */
	if (!READ_ONCE(x->done))
		return 0;

	spin_lock_irqsave(&x->wait.lock, flags);
	if (!x->done)
		/* Lost a race since the lockless check: would block. */
		ret = 0;
	else if (x->done != UINT_MAX)
		/* Consume one completion; complete_all()'s marker is sticky. */
		x->done--;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
292 
293 /**
294  *	completion_done - Test to see if a completion has any waiters
295  *	@x:	completion structure
296  *
297  *	Return: 0 if there are waiters (wait_for_completion() in progress)
298  *		 1 if there are no waiters.
299  *
300  */
301 bool completion_done(struct completion *x)
302 {
303 	if (!READ_ONCE(x->done))
304 		return false;
305 
306 	/*
307 	 * If ->done, we need to wait for complete() to release ->wait.lock
308 	 * otherwise we can end up freeing the completion before complete()
309 	 * is done referencing it.
310 	 *
311 	 * The RMB pairs with complete()'s RELEASE of ->wait.lock and orders
312 	 * the loads of ->done and ->wait.lock such that we cannot observe
313 	 * the lock before complete() acquires it while observing the ->done
314 	 * after it's acquired the lock.
315 	 */
316 	smp_rmb();
317 	spin_unlock_wait(&x->wait.lock);
318 	return true;
319 }
320 EXPORT_SYMBOL(completion_done);
321