xref: /openbmc/linux/kernel/sched/wait.c (revision efe84d40)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include "sched.h"

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	lockdep_assert_held(&wq_head->lock);

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}

	return nr_exclusive;
}

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	do {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
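
/*
 * Illustrative sketch (not part of this file): a typical waker makes the
 * condition true first and then calls one of the wake_up*() wrappers from
 * <linux/wait.h>, which funnel into __wake_up(). "my_wq_head" and
 * "my_condition" are hypothetical names.
 *
 *	my_condition = true;
 *	wake_up(&my_wq_head);
 *
 * wake_up() passes TASK_NORMAL and nr_exclusive == 1, i.e. it wakes every
 * non-exclusive waiter plus at most one exclusive waiter.
 */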

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
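
/*
 * Illustrative sketch (not part of this file): the _locked variants are for
 * callers that already hold wq_head->lock, typically because other state is
 * protected by the same lock. The names below are hypothetical.
 *
 *	spin_lock_irqsave(&my_wq_head.lock, flags);
 *	my_state_protected_by_wq_lock = true;
 *	__wake_up_locked_key(&my_wq_head, TASK_NORMAL, my_key);
 *	spin_unlock_irqrestore(&my_wq_head.lock, flags);
 */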

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			void *key)
{
	if (unlikely(!wq_head))
		return;

	__wake_up_common_lock(wq_head, mode, 1, WF_SYNC, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
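
/*
 * Illustrative sketch (not part of this file): wrappers such as
 * wake_up_interruptible_sync_poll() resolve to this function, with the poll
 * event mask passed as @key so that keyed waiters (e.g. epoll entries) can
 * filter on it. "my_wq_head" and "my_data_ready" are hypothetical names.
 *
 *	my_data_ready = true;
 *	wake_up_interruptible_sync_poll(&my_wq_head, EPOLLIN | EPOLLRDNORM);
 */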

/**
 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
			       unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
{
	__wake_up_sync_key(wq_head, mode, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP: either any
 * wake-function that tests for the wait-queue being active is
 * guaranteed to see the waitqueue addition, _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops it from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
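
/*
 * Illustrative sketch (not part of this file): the canonical open-coded wait
 * loop built on prepare_to_wait()/finish_wait(). "my_wq_head" and
 * "my_condition" are hypothetical names.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq_head, &wait, TASK_UNINTERRUPTIBLE);
 *		if (my_condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq_head, &wait);
 *
 * Because the task state is set only after the entry is queued (see the
 * ordering note above), a concurrent waker either sees the queued entry and
 * wakes us, or has already made my_condition true before our test.
 */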

void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * Exclusive waiter must not fail if it was selected by wakeup;
		 * it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up; we cannot miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that a set-condition + wakeup after
		 * this point can't see us; it should wake up another
		 * exclusive waiter instead, since we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
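
/*
 * Illustrative sketch (not part of this file): roughly what the
 * wait_event_interruptible() family of macros in <linux/wait.h> does around
 * this helper; the real macro handles more cases. "my_wq_head" and
 * "my_condition" are hypothetical names.
 *
 *	struct wait_queue_entry wq_entry;
 *	long err = 0;
 *
 *	init_wait_entry(&wq_entry, 0);
 *	for (;;) {
 *		err = prepare_to_wait_event(&my_wq_head, &wq_entry,
 *					    TASK_INTERRUPTIBLE);
 *		if (my_condition)
 *			break;
 *		if (err)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq_head, &wq_entry);
 *
 * A non-zero err here is -ERESTARTSYS from the signal_pending_state() check
 * above; prepare_to_wait_event() has already removed the entry in that case.
 */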

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
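
/*
 * Illustrative sketch (not part of this file): a rough outline of how a
 * caller drives do_wait_intr() with wq.lock held around both the condition
 * test and the sleep (the wait_event_interruptible_locked() style). The
 * names below are hypothetical.
 *
 *	DEFINE_WAIT(wait);
 *	int err = 0;
 *
 *	spin_lock(&my_wq.lock);
 *	while (!my_condition) {
 *		err = do_wait_intr(&my_wq, &wait);
 *		if (err)
 *			break;
 *	}
 *	list_del_init(&wait.entry);
 *	__set_current_state(TASK_RUNNING);
 *	spin_unlock(&my_wq.lock);
 */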

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPUs that we haven't seen yet (and that might
	 *    still change the stack area).
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init_careful(&wq_entry->entry);

	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()			// in woken_wake_function()
 *
 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A				try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
 *         schedule()				   if (p->state & mode)
 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B				condition = true;
 * }						smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/* Pairs with the smp_store_mb() in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);