// SPDX-License-Identifier: GPL-2.0
/*
 * <linux/swait.h> (simple wait queues) implementation:
 */
#include "sched.h"

void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
			     struct lock_class_key *key)
{
	raw_spin_lock_init(&q->lock);
	lockdep_set_class_and_name(&q->lock, key, name);
	INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_swait_queue_head);

/*
 * The thing about the wake_up_state() return value; I think we can ignore it.
 *
 * If for some reason it would return 0, that means the previously waiting
 * task is already running, so it will observe condition true (or has already).
 */
void swake_up_locked(struct swait_queue_head *q)
{
	struct swait_queue *curr;

	if (list_empty(&q->task_list))
		return;

	curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
	wake_up_process(curr->task);
	list_del_init(&curr->task_list);
}
EXPORT_SYMBOL(swake_up_locked);

/*
 * Wake up all waiters. This is an interface which is solely exposed for
 * completions and not for general usage.
 *
 * It is intentionally different from swake_up_all() to allow usage from
 * hard interrupt context and interrupt disabled regions.
 */
void swake_up_all_locked(struct swait_queue_head *q)
{
	while (!list_empty(&q->task_list))
		swake_up_locked(q);
}

void swake_up_one(struct swait_queue_head *q)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	swake_up_locked(q);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(swake_up_one);

/*
 * Does not allow usage from IRQ disabled, since we must be able to
 * release IRQs to guarantee bounded hold time.
 */
void swake_up_all(struct swait_queue_head *q)
{
	struct swait_queue *curr;
	LIST_HEAD(tmp);

	raw_spin_lock_irq(&q->lock);
	list_splice_init(&q->task_list, &tmp);
	while (!list_empty(&tmp)) {
		curr = list_first_entry(&tmp, typeof(*curr), task_list);

		wake_up_state(curr->task, TASK_NORMAL);
		list_del_init(&curr->task_list);

		if (list_empty(&tmp))
			break;

		raw_spin_unlock_irq(&q->lock);
		raw_spin_lock_irq(&q->lock);
	}
	raw_spin_unlock_irq(&q->lock);
}
EXPORT_SYMBOL(swake_up_all);

void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	wait->task = current;
	if (list_empty(&wait->task_list))
		list_add_tail(&wait->task_list, &q->task_list);
}

void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	__prepare_to_swait(q, wait);
	set_current_state(state);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_swait_exclusive);

long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	unsigned long flags;
	long ret = 0;

	raw_spin_lock_irqsave(&q->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * See prepare_to_wait_event(). TL;DR, subsequent swake_up_one()
		 * must not see us.
		 */
		list_del_init(&wait->task_list);
		ret = -ERESTARTSYS;
	} else {
		__prepare_to_swait(q, wait);
		set_current_state(state);
	}
	raw_spin_unlock_irqrestore(&q->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_swait_event);

void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
}

void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);

	if (!list_empty_careful(&wait->task_list)) {
		raw_spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		raw_spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_swait);
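
/*
 * Usage sketch (illustrative only, not part of this file): callers normally
 * go through the swait_event*() macros in <linux/swait.h>, which drive the
 * helpers above roughly like the loop below. The identifiers wq, wait and
 * cond are placeholders assumed for this example.
 *
 *	struct swait_queue wait;
 *	long ret = 0;
 *
 *	INIT_LIST_HEAD(&wait.task_list);
 *	for (;;) {
 *		ret = prepare_to_swait_event(&wq, &wait, TASK_INTERRUPTIBLE);
 *		if (cond)
 *			break;
 *		if (ret)
 *			break;	(-ERESTARTSYS: already removed from the queue)
 *		schedule();
 *	}
 *	finish_swait(&wq, &wait);
 *
 * The waker side sets cond and then calls swake_up_one(&wq), or one of the
 * *_locked variants while already holding wq.lock.
 */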