// SPDX-License-Identifier: GPL-2.0
/*
 * <linux/swait.h> (simple wait queues) implementation:
 */

void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
			     struct lock_class_key *key)
{
	raw_spin_lock_init(&q->lock);
	lockdep_set_class_and_name(&q->lock, key, name);
	INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_swait_queue_head);
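
/*
 * Usage sketch (illustrative, not part of this file): queue heads are
 * normally set up via the helpers in <linux/swait.h>, which funnel into
 * __init_swait_queue_head() above. Statically:
 *
 *	static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
 *
 * or, for a dynamically allocated head 'wq' (hypothetical name):
 *
 *	init_swait_queue_head(wq);
 */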

/*
 * The thing about the wake_up_state() return value; I think we can ignore it.
 *
 * If for some reason it would return 0, that means the previously waiting
 * task is already running, so it will observe condition true (or has already).
 */
void swake_up_locked(struct swait_queue_head *q, int wake_flags)
{
	struct swait_queue *curr;

	if (list_empty(&q->task_list))
		return;

	curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
	try_to_wake_up(curr->task, TASK_NORMAL, wake_flags);
	list_del_init(&curr->task_list);
}
EXPORT_SYMBOL(swake_up_locked);
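
/*
 * Calling-context sketch (illustrative): swake_up_locked() requires the
 * caller to already hold q->lock, i.e. roughly:
 *
 *	raw_spin_lock_irqsave(&q->lock, flags);
 *	... make the wakeup condition true ...
 *	swake_up_locked(q, 0);
 *	raw_spin_unlock_irqrestore(&q->lock, flags);
 *
 * which is exactly the pattern swake_up_one() below wraps.
 */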

/*
 * Wake up all waiters. This is an interface which is solely exposed for
 * completions and not for general usage.
 *
 * It is intentionally different from swake_up_all() to allow usage from
 * hard interrupt context and interrupt disabled regions.
 */
void swake_up_all_locked(struct swait_queue_head *q)
{
	while (!list_empty(&q->task_list))
		swake_up_locked(q, 0);
}
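
/*
 * Illustrative caller (a simplified sketch of how complete_all() in
 * kernel/sched/completion.c uses this, not a verbatim copy): the
 * completion's wait.lock is the swait queue lock, so the whole wakeup
 * runs with IRQs disabled under that lock:
 *
 *	raw_spin_lock_irqsave(&x->wait.lock, flags);
 *	x->done = UINT_MAX;
 *	swake_up_all_locked(&x->wait);
 *	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
 */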

void swake_up_one(struct swait_queue_head *q)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	swake_up_locked(q, 0);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(swake_up_one);
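
/*
 * Waker-side sketch (hypothetical 'done' flag and 'wq' queue): make the
 * condition true, then wake one exclusive waiter; q->lock, taken inside
 * swake_up_one(), orders the store against waiters already queued:
 *
 *	WRITE_ONCE(done, true);
 *	swake_up_one(&wq);
 */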

/*
 * Does not allow usage from IRQ disabled, since we must be able to
 * release IRQs to guarantee bounded hold time.
 */
void swake_up_all(struct swait_queue_head *q)
{
	struct swait_queue *curr;
	LIST_HEAD(tmp);

	raw_spin_lock_irq(&q->lock);
	/* Snapshot the current waiters onto a private list ... */
	list_splice_init(&q->task_list, &tmp);
	while (!list_empty(&tmp)) {
		curr = list_first_entry(&tmp, typeof(*curr), task_list);

		wake_up_state(curr->task, TASK_NORMAL);
		list_del_init(&curr->task_list);

		if (list_empty(&tmp))
			break;

		/*
		 * ... so that the lock (and thus IRQs) can be dropped
		 * between wakeups, bounding the hold time per waiter.
		 */
		raw_spin_unlock_irq(&q->lock);
		raw_spin_lock_irq(&q->lock);
	}
	raw_spin_unlock_irq(&q->lock);
}
EXPORT_SYMBOL(swake_up_all);

void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	wait->task = current;
	/* Idempotent: don't re-queue an entry that is already on the list. */
	if (list_empty(&wait->task_list))
		list_add_tail(&wait->task_list, &q->task_list);
}

void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	__prepare_to_swait(q, wait);
	set_current_state(state);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_swait_exclusive);
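
/*
 * Waiter-side sketch (illustrative, with a hypothetical 'done' flag): the
 * usual pattern pairs prepare_to_swait_exclusive() with finish_swait(),
 * re-checking the condition after every wakeup:
 *
 *	DECLARE_SWAITQUEUE(wait);
 *
 *	for (;;) {
 *		prepare_to_swait_exclusive(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (READ_ONCE(done))
 *			break;
 *		schedule();
 *	}
 *	finish_swait(&wq, &wait);
 *
 * This is essentially what the swait_event*() macros in <linux/swait.h>
 * expand to, modulo signal handling.
 */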

long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	unsigned long flags;
	long ret = 0;

	raw_spin_lock_irqsave(&q->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * See prepare_to_wait_event(). TL;DR, subsequent swake_up_one()
		 * must not see us.
		 */
		list_del_init(&wait->task_list);
		ret = -ERESTARTSYS;
	} else {
		__prepare_to_swait(q, wait);
		set_current_state(state);
	}
	raw_spin_unlock_irqrestore(&q->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_swait_event);
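
/*
 * Caller sketch (a simplified rendering of the ___swait_event() loop in
 * <linux/swait.h>, not a verbatim copy): a non-zero return here means a
 * signal is pending and the wait bails out before sleeping; the prepare
 * call has already dequeued the entry, so finish_swait() can be skipped:
 *
 *	for (;;) {
 *		long ret = prepare_to_swait_event(&wq, &wait, TASK_INTERRUPTIBLE);
 *
 *		if (condition)
 *			break;
 *		if (ret)
 *			return ret;
 *		schedule();
 *	}
 *	finish_swait(&wq, &wait);
 */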

void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
}

void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);

	/*
	 * list_empty_careful() lets us skip taking the lock entirely when
	 * a waker has already dequeued us via list_del_init().
	 */
	if (!list_empty_careful(&wait->task_list)) {
		raw_spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		raw_spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_swait);
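
/*
 * End-to-end sketch (illustrative only; 'wq' and 'done' are hypothetical):
 *
 *	Waiter (process context):
 *		swait_event_interruptible_exclusive(wq, READ_ONCE(done));
 *
 *	Waker (process or IRQ context):
 *		WRITE_ONCE(done, true);
 *		swake_up_one(&wq);
 */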