xref: /openbmc/linux/kernel/sched/completion.c (revision 711aab1d)
/*
 * Generic wait-for-completion handler.
 *
 * It differs from semaphores in that the default case is the opposite:
 * wait_for_completion() blocks by default, whereas a semaphore by default
 * does not block. The interface also makes it easy to 'complete' multiple
 * waiting threads, something which isn't entirely natural for semaphores.
 *
 * But more importantly, the primitive documents the usage. Semaphores would
 * typically be used for exclusion, which gives rise to priority inversion.
 * Waiting for a completion is typically a synchronization point, not an
 * exclusion point.
 */

#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/completion.h>

/**
 * complete: - signals a single thread waiting on this completion
 * @x:  holds the state of this particular completion
 *
 * This will wake up a single thread waiting on this completion. Threads will be
 * awakened in the same order in which they were queued.
 *
 * See also complete_all(), wait_for_completion() and related routines.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void complete(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);

	/*
	 * Perform commit of crossrelease here.
	 */
	complete_release_commit(x);

	if (x->done != UINT_MAX)
		x->done++;
	__wake_up_locked(&x->wait, TASK_NORMAL, 1);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);

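/*
 * Illustrative sketch (not part of this file): the canonical pairing of
 * complete() with wait_for_completion(). One context blocks until another
 * context signals that the event has happened. The completion variable and
 * function names below are hypothetical.
 */
static DECLARE_COMPLETION(example_setup_done);

/* waiter side: sleeps until example_setup_done is completed */
static void example_wait_for_setup(void)
{
	wait_for_completion(&example_setup_done);
}

/* signalling side: wakes exactly one waiter queued on the completion */
static void example_setup_finished(void)
{
	complete(&example_setup_done);
}
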
/**
 * complete_all: - signals all threads waiting on this completion
 * @x:  holds the state of this particular completion
 *
 * This will wake up all threads waiting on this particular completion event.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 *
 * Since complete_all() sets the completion of @x permanently to done
 * to allow multiple waiters to finish, a call to reinit_completion()
 * must be used on @x if @x is to be used again. The code must make
 * sure that all waiters have woken and finished before reinitializing
 * @x. Also note that the function completion_done() cannot be used
 * to know if there are still waiters after complete_all() has been called.
 */
void complete_all(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done = UINT_MAX;
	__wake_up_locked(&x->wait, TASK_NORMAL, 0);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);

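/*
 * Illustrative sketch (not part of this file): releasing several waiters at
 * once with complete_all(), then re-arming the completion with
 * reinit_completion() as the kernel-doc above requires. All names below are
 * hypothetical, and the caller must ensure every waiter has woken and
 * finished before the reinit.
 */
static DECLARE_COMPLETION(example_phase_done);

static void example_wait_for_phase(void)
{
	wait_for_completion(&example_phase_done);	/* many threads may block here */
}

static void example_finish_phase(void)
{
	complete_all(&example_phase_done);	/* wake every waiter */
}

static void example_restart_phase(void)
{
	/* only after all waiters are known to have woken and finished */
	reinit_completion(&example_phase_done);
}
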
/*
 * Core of all the wait_for_completion*() variants.
 *
 * Called with x->wait.lock held and interrupts disabled; sleeps via @action
 * (schedule_timeout() or io_schedule_timeout()) until the completion is done,
 * the timeout expires or a signal allowed by @state is pending. On success it
 * consumes one 'done' count, unless complete_all() pinned it at UINT_MAX.
 */
static inline long __sched
do_wait_for_common(struct completion *x,
		   long (*action)(long), long timeout, int state)
{
	if (!x->done) {
		DECLARE_WAITQUEUE(wait, current);

		/* queue exclusively, so complete() wakes only one waiter */
		__add_wait_queue_entry_tail_exclusive(&x->wait, &wait);
		do {
			if (signal_pending_state(state, current)) {
				timeout = -ERESTARTSYS;
				break;
			}
			__set_current_state(state);
			spin_unlock_irq(&x->wait.lock);
			timeout = action(timeout);
			spin_lock_irq(&x->wait.lock);
		} while (!x->done && timeout);
		__remove_wait_queue(&x->wait, &wait);
		if (!x->done)
			return timeout;	/* timed out or interrupted */
	}
	if (x->done != UINT_MAX)
		x->done--;
	return timeout ?: 1;	/* success: never return 0 */
}

static inline long __sched
__wait_for_common(struct completion *x,
		  long (*action)(long), long timeout, int state)
{
	might_sleep();

	complete_acquire(x);

	spin_lock_irq(&x->wait.lock);
	timeout = do_wait_for_common(x, action, timeout, state);
	spin_unlock_irq(&x->wait.lock);

	complete_release(x);

	return timeout;
}

static long __sched
wait_for_common(struct completion *x, long timeout, int state)
{
	return __wait_for_common(x, schedule_timeout, timeout, state);
}

static long __sched
wait_for_common_io(struct completion *x, long timeout, int state)
{
	return __wait_for_common(x, io_schedule_timeout, timeout, state);
}

/**
 * wait_for_completion: - waits for completion of a task
 * @x:  holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout.
 *
 * See also similar routines (e.g. wait_for_completion_timeout()) with timeout
 * and interrupt capability. Also see complete().
 */
void __sched wait_for_completion(struct completion *x)
{
	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion);

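/*
 * Illustrative sketch (not part of this file): waiting with an on-stack
 * completion. DECLARE_COMPLETION_ONSTACK() is the variant to use when the
 * completion lives on the caller's stack. example_start_async_work() is a
 * hypothetical helper that arranges for complete(done) to be called once the
 * work finishes.
 */
extern void example_start_async_work(struct completion *done);

static void example_do_work_and_wait(void)
{
	DECLARE_COMPLETION_ONSTACK(done);

	example_start_async_work(&done);	/* completer gets a pointer to 'done' */
	wait_for_completion(&done);		/* safe: we do not return while it is in use */
}
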
/**
 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible.
 *
 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
 * till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_timeout);

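/*
 * Illustrative sketch (not part of this file): checking the return value of
 * wait_for_completion_timeout(). A zero return means the timeout expired; a
 * positive return is the number of jiffies remaining (at least 1). The
 * completion name and timeout are hypothetical; msecs_to_jiffies() comes from
 * <linux/jiffies.h>.
 */
static DECLARE_COMPLETION(example_reply_ready);

static int example_wait_for_reply(void)
{
	unsigned long left;

	left = wait_for_completion_timeout(&example_reply_ready,
					   msecs_to_jiffies(1000));
	if (!left)
		return -ETIMEDOUT;	/* no reply within one second */

	return 0;			/* completed with 'left' jiffies to spare */
}
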
/**
 * wait_for_completion_io: - waits for completion of a task
 * @x:  holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout. The caller is accounted as waiting
 * for IO (which traditionally means blkio only).
 */
void __sched wait_for_completion_io(struct completion *x)
{
	wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io);

/**
 * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible. The caller is accounted as waiting for IO (which traditionally
 * means blkio only).
 *
 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
 * till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
{
	return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io_timeout);

/**
 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
 * @x:  holds the state of this particular completion
 *
 * This waits for completion of a specific task to be signaled. It is
 * interruptible.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if completed.
 */
int __sched wait_for_completion_interruptible(struct completion *x)
{
	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
	if (t == -ERESTARTSYS)
		return t;
	return 0;
}
EXPORT_SYMBOL(wait_for_completion_interruptible);

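/*
 * Illustrative sketch (not part of this file): a typical interruptible wait
 * in syscall context. -ERESTARTSYS is propagated so signal handling can
 * restart or abort the call. The completion name and function are
 * hypothetical.
 */
static DECLARE_COMPLETION(example_data_ready);

static int example_wait_for_data(void)
{
	int ret;

	ret = wait_for_completion_interruptible(&example_data_ready);
	if (ret)
		return ret;	/* -ERESTARTSYS: a signal arrived first */

	return 0;		/* completion was signalled */
}
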
/**
 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
 * or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
					  unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);

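/*
 * Illustrative sketch (not part of this file): distinguishing the three
 * possible outcomes of wait_for_completion_interruptible_timeout(). The
 * completion name and timeout value are hypothetical.
 */
static DECLARE_COMPLETION(example_response_done);

static int example_wait_for_response(void)
{
	long ret;

	ret = wait_for_completion_interruptible_timeout(&example_response_done,
							msecs_to_jiffies(500));
	if (ret < 0)
		return ret;		/* -ERESTARTSYS: interrupted by a signal */
	if (ret == 0)
		return -ETIMEDOUT;	/* timed out */

	return 0;			/* completed, 'ret' jiffies were left */
}
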
/**
 * wait_for_completion_killable: - waits for completion of a task (killable)
 * @x:  holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It can be
 * interrupted by a kill signal.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if completed.
 */
int __sched wait_for_completion_killable(struct completion *x)
{
	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
	if (t == -ERESTARTSYS)
		return t;
	return 0;
}
EXPORT_SYMBOL(wait_for_completion_killable);

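/*
 * Illustrative sketch (not part of this file): a killable wait reacts only to
 * fatal signals, a common middle ground when an uninterruptible sleep could
 * hang a task forever but reacting to every signal would be too fragile.
 * Names below are hypothetical.
 */
static DECLARE_COMPLETION(example_mount_done);

static int example_wait_for_mount(void)
{
	int ret;

	ret = wait_for_completion_killable(&example_mount_done);
	if (ret)
		return ret;	/* only a fatal signal gets us here */

	return 0;
}
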
/**
 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be
 * signaled or for a specified timeout to expire. It can be
 * interrupted by a kill signal. The timeout is in jiffies.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
 * or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_killable_timeout(struct completion *x,
				     unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL(wait_for_completion_killable_timeout);

/**
 *	try_wait_for_completion - try to decrement a completion without blocking
 *	@x:	completion structure
 *
 *	Return: 0 if a decrement cannot be done without blocking
 *		 1 if a decrement succeeded.
 *
 *	If a completion is being used as a counting completion,
 *	attempt to decrement the counter without blocking. This
 *	enables us to avoid waiting if the resource the completion
 *	is protecting is not available.
 */
bool try_wait_for_completion(struct completion *x)
{
	unsigned long flags;
	int ret = 1;

	/*
	 * Since x->done will need to be locked only
	 * in the non-blocking case, we check x->done
	 * first without taking the lock so we can
	 * return early in the blocking case.
	 */
	if (!READ_ONCE(x->done))
		return 0;

	spin_lock_irqsave(&x->wait.lock, flags);
	if (!x->done)
		ret = 0;
	else if (x->done != UINT_MAX)
		x->done--;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);

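/*
 * Illustrative sketch (not part of this file): using try_wait_for_completion()
 * to consume a 'done' count opportunistically instead of blocking, for example
 * on a fast path that can fall back to a slow path. Names are hypothetical.
 */
static DECLARE_COMPLETION(example_slot_free);

static bool example_try_grab_slot(void)
{
	if (try_wait_for_completion(&example_slot_free))
		return true;	/* a completed count was consumed, slot is ours */

	return false;		/* would have had to block; take the slow path */
}
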
/**
 *	completion_done - Test to see if a completion has any waiters
 *	@x:	completion structure
 *
 *	Return: 0 if there are waiters (wait_for_completion() in progress)
 *		 1 if there are no waiters.
 *
 *	Note, this will always return true if complete_all() was called on @x.
 */
bool completion_done(struct completion *x)
{
	unsigned long flags;

	if (!READ_ONCE(x->done))
		return false;

	/*
	 * If ->done, we need to wait for complete() to release ->wait.lock
	 * otherwise we can end up freeing the completion before complete()
	 * is done referencing it.
	 */
	spin_lock_irqsave(&x->wait.lock, flags);
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return true;
}
EXPORT_SYMBOL(completion_done);
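
/*
 * Illustrative sketch (not part of this file): completion_done() checks,
 * without blocking and without consuming a count, whether a completion has
 * already been signalled, i.e. whether a wait_for_completion() would return
 * immediately. Names are hypothetical; as noted in the complete_all()
 * kernel-doc, the result is not meaningful after complete_all().
 */
static DECLARE_COMPLETION(example_io_done);

static bool example_io_already_finished(void)
{
	/* true if example_io_done was completed and not yet consumed by a waiter */
	return completion_done(&example_io_done);
}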