// SPDX-License-Identifier: GPL-2.0
/*
 * Generic wait-for-completion handler;
 *
 * It differs from semaphores in that the default case is the opposite:
 * wait_for_completion() blocks by default, whereas a semaphore's default
 * is not to block. The interface also makes it easy to 'complete' multiple
 * waiting threads, something which isn't entirely natural for semaphores.
 *
 * But more importantly, the primitive documents the usage. Semaphores would
 * typically be used for exclusion, which gives rise to priority inversion.
 * Waiting for completion is typically a synchronization point, not an
 * exclusion point.
 */

#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/completion.h>

/**
 * complete: - signals a single thread waiting on this completion
 * @x:  holds the state of this particular completion
 *
 * This will wake up a single thread waiting on this completion. Threads will be
 * awakened in the same order in which they were queued.
 *
 * See also complete_all(), wait_for_completion() and related routines.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void complete(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);

	/*
	 * Perform commit of crossrelease here.
	 */
	complete_release_commit(x);

	if (x->done != UINT_MAX)
		x->done++;
	__wake_up_locked(&x->wait, TASK_NORMAL, 1);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);
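
/*
 * Usage sketch (illustrative, not part of this file): an interrupt
 * handler signals a single waiter blocked in wait_for_completion().
 * The names my_done and my_irq_handler are hypothetical.
 *
 *	static DECLARE_COMPLETION(my_done);
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev)
 *	{
 *		complete(&my_done);
 *		return IRQ_HANDLED;
 *	}
 *
 *	In process context, elsewhere:
 *
 *	wait_for_completion(&my_done);
 */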

/**
 * complete_all: - signals all threads waiting on this completion
 * @x:  holds the state of this particular completion
 *
 * This will wake up all threads waiting on this particular completion event.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 *
 * Since complete_all() sets the completion of @x permanently to done
 * to allow multiple waiters to finish, a call to reinit_completion()
 * must be used on @x if @x is to be used again. The code must make
 * sure that all waiters have woken and finished before reinitializing
 * @x. Also note that the function completion_done() cannot be used
 * to know if there are still waiters after complete_all() has been called.
 */
void complete_all(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done = UINT_MAX;
	__wake_up_locked(&x->wait, TASK_NORMAL, 0);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);
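
/*
 * Usage sketch (illustrative, not part of this file): release every
 * waiter at once, then reinitialize before the completion is reused.
 * setup_done is a hypothetical name.
 *
 *	static DECLARE_COMPLETION(setup_done);
 *
 *	complete_all(&setup_done);
 *
 *	Only once all waiters are known to have woken and finished:
 *
 *	reinit_completion(&setup_done);
 */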

static inline long __sched
do_wait_for_common(struct completion *x,
		   long (*action)(long), long timeout, int state)
{
	if (!x->done) {
		DECLARE_WAITQUEUE(wait, current);

		__add_wait_queue_entry_tail_exclusive(&x->wait, &wait);
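		/*
		 * Sleep with ->wait.lock dropped around action(); after each
		 * wakeup, re-check ->done and the remaining timeout. A signal
		 * pending for @state aborts the wait with -ERESTARTSYS.
		 */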
		do {
			if (signal_pending_state(state, current)) {
				timeout = -ERESTARTSYS;
				break;
			}
			__set_current_state(state);
			spin_unlock_irq(&x->wait.lock);
			timeout = action(timeout);
			spin_lock_irq(&x->wait.lock);
		} while (!x->done && timeout);
		__remove_wait_queue(&x->wait, &wait);
		if (!x->done)
			return timeout;
	}
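	/*
	 * Consume one completion, unless complete_all() made ->done sticky
	 * at UINT_MAX. Return at least 1 so callers can tell a completed
	 * wait apart from an expired timeout of 0.
	 */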
	if (x->done != UINT_MAX)
		x->done--;
	return timeout ?: 1;
}

static inline long __sched
__wait_for_common(struct completion *x,
		  long (*action)(long), long timeout, int state)
{
	might_sleep();

	complete_acquire(x);

	spin_lock_irq(&x->wait.lock);
	timeout = do_wait_for_common(x, action, timeout, state);
	spin_unlock_irq(&x->wait.lock);

	complete_release(x);

	return timeout;
}

static long __sched
wait_for_common(struct completion *x, long timeout, int state)
{
	return __wait_for_common(x, schedule_timeout, timeout, state);
}

static long __sched
wait_for_common_io(struct completion *x, long timeout, int state)
{
	return __wait_for_common(x, io_schedule_timeout, timeout, state);
}

/**
 * wait_for_completion: - waits for completion of a task
 * @x:  holds the state of this particular completion
 *
 * This waits for completion of a specific task to be signaled. It is NOT
 * interruptible and there is no timeout.
 *
 * See also similar routines (e.g. wait_for_completion_timeout()) with timeout
 * and interrupt capability. Also see complete().
 */
void __sched wait_for_completion(struct completion *x)
{
	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion);

/**
 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible.
 *
 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
 * till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_timeout);
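
/*
 * Usage sketch (illustrative, not part of this file): a zero return
 * means the wait timed out; a positive return is the number of jiffies
 * that were left. my_done is a hypothetical completion.
 *
 *	unsigned long left;
 *
 *	left = wait_for_completion_timeout(&my_done, msecs_to_jiffies(100));
 *	if (!left)
 *		return -ETIMEDOUT;
 */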

/**
 * wait_for_completion_io: - waits for completion of a task
 * @x:  holds the state of this particular completion
 *
 * This waits for completion of a specific task to be signaled. It is NOT
 * interruptible and there is no timeout. The caller is accounted as waiting
 * for IO (which traditionally means blkio only).
 */
void __sched wait_for_completion_io(struct completion *x)
{
	wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io);

/**
 * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible. The caller is accounted as waiting for IO (which traditionally
 * means blkio only).
 *
 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
 * till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
{
	return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io_timeout);

/**
 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
 * @x:  holds the state of this particular completion
 *
 * This waits for completion of a specific task to be signaled. It is
 * interruptible.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if completed.
 */
int __sched wait_for_completion_interruptible(struct completion *x)
{
	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);

	if (t == -ERESTARTSYS)
		return t;
	return 0;
}
EXPORT_SYMBOL(wait_for_completion_interruptible);

/**
 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
 * or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
					  unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
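
/*
 * Usage sketch (illustrative, not part of this file): the long return
 * value folds three outcomes together, so callers check sign and zero.
 * my_done is a hypothetical completion.
 *
 *	long ret;
 *
 *	ret = wait_for_completion_interruptible_timeout(&my_done, HZ);
 *	if (ret < 0)
 *		return ret;		(interrupted: -ERESTARTSYS)
 *	if (ret == 0)
 *		return -ETIMEDOUT;	(timed out)
 *	(positive: completed, with ret jiffies left)
 */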

/**
 * wait_for_completion_killable: - waits for completion of a task (killable)
 * @x:  holds the state of this particular completion
 *
 * This waits for completion of a specific task to be signaled. It can be
 * interrupted by a kill signal.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if completed.
 */
int __sched wait_for_completion_killable(struct completion *x)
{
	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);

	if (t == -ERESTARTSYS)
		return t;
	return 0;
}
EXPORT_SYMBOL(wait_for_completion_killable);

/**
 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be
 * signaled or for a specified timeout to expire. It can be
 * interrupted by a kill signal. The timeout is in jiffies.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
 * or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_killable_timeout(struct completion *x,
				     unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL(wait_for_completion_killable_timeout);

/**
 * try_wait_for_completion - try to decrement a completion without blocking
 * @x:	completion structure
 *
 * Return: 0 if a decrement cannot be done without blocking
 *	   1 if a decrement succeeded.
 *
 * If a completion is being used as a counting completion,
 * attempt to decrement the counter without blocking. This
 * enables us to avoid waiting if the resource the completion
 * is protecting is not available.
 */
bool try_wait_for_completion(struct completion *x)
{
	unsigned long flags;
	int ret = 1;

	/*
	 * Since x->done only needs to be locked in the non-blocking
	 * case (when we actually decrement it), check x->done first
	 * without taking the lock so we can return early when a wait
	 * would block.
	 */
	if (!READ_ONCE(x->done))
		return 0;

	spin_lock_irqsave(&x->wait.lock, flags);
	if (!x->done)
		ret = 0;
	else if (x->done != UINT_MAX)
		x->done--;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
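
/*
 * Usage sketch (illustrative, not part of this file): consume a count
 * opportunistically and fall back to a blocking wait. my_done is a
 * hypothetical completion.
 *
 *	if (!try_wait_for_completion(&my_done))
 *		wait_for_completion(&my_done);
 */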

/**
 * completion_done - Test to see if a completion has any waiters
 * @x:	completion structure
 *
 * Return: 0 if there are waiters (wait_for_completion() in progress)
 *	   1 if there are no waiters.
 *
 * Note, this will always return true if complete_all() was called on @x.
 */
bool completion_done(struct completion *x)
{
	unsigned long flags;

	if (!READ_ONCE(x->done))
		return false;

	/*
	 * If ->done, we need to wait for complete() to release ->wait.lock
	 * otherwise we can end up freeing the completion before complete()
	 * is done referencing it.
	 */
	spin_lock_irqsave(&x->wait.lock, flags);
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return true;
}
EXPORT_SYMBOL(completion_done);
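
/*
 * Usage sketch (illustrative, not part of this file): poll whether a
 * wait_for_completion() on my_done would block. Per the note above,
 * this check is not meaningful after complete_all().
 *
 *	if (!completion_done(&my_done))
 *		pr_debug("my_done not yet signaled\n");
 */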