// SPDX-License-Identifier: GPL-2.0
/*
 * <linux/swait.h> (simple wait queues) implementation:
 */
#include "sched.h"

void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
			     struct lock_class_key *key)
{
	raw_spin_lock_init(&q->lock);
	lockdep_set_class_and_name(&q->lock, key, name);
	INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_swait_queue_head);

/*
 * The thing about the wake_up_state() return value: we can ignore it.
 *
 * If for some reason it would return 0, that means the previously waiting
 * task is already running, so it will observe condition true (or has already).
 */
void swake_up_locked(struct swait_queue_head *q)
{
	struct swait_queue *curr;

	if (list_empty(&q->task_list))
		return;

	curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
	wake_up_process(curr->task);
	list_del_init(&curr->task_list);
}
EXPORT_SYMBOL(swake_up_locked);

void swake_up_one(struct swait_queue_head *q)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	swake_up_locked(q);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(swake_up_one);

/*
 * Does not allow usage from IRQ disabled, since we must be able to
 * release IRQs to guarantee bounded hold time.
 */
void swake_up_all(struct swait_queue_head *q)
{
	struct swait_queue *curr;
	LIST_HEAD(tmp);

	raw_spin_lock_irq(&q->lock);
	list_splice_init(&q->task_list, &tmp);
	while (!list_empty(&tmp)) {
		curr = list_first_entry(&tmp, typeof(*curr), task_list);

		wake_up_state(curr->task, TASK_NORMAL);
		list_del_init(&curr->task_list);

		if (list_empty(&tmp))
			break;

		/* Drop the lock between wakeups to bound hold time. */
		raw_spin_unlock_irq(&q->lock);
		raw_spin_lock_irq(&q->lock);
	}
	raw_spin_unlock_irq(&q->lock);
}
EXPORT_SYMBOL(swake_up_all);

/* Must be called with q->lock held. */
static void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	wait->task = current;
	if (list_empty(&wait->task_list))
		list_add_tail(&wait->task_list, &q->task_list);
}

void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	__prepare_to_swait(q, wait);
	set_current_state(state);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_swait_exclusive);

long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	unsigned long flags;
	long ret = 0;

	raw_spin_lock_irqsave(&q->lock, flags);
	if (unlikely(signal_pending_state(state, current))) {
		/*
		 * See prepare_to_wait_event(). TL;DR, a subsequent
		 * swake_up_one() must not see us.
		 */
		list_del_init(&wait->task_list);
		ret = -ERESTARTSYS;
	} else {
		__prepare_to_swait(q, wait);
		set_current_state(state);
	}
	raw_spin_unlock_irqrestore(&q->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_swait_event);
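
/*
 * A minimal waiter-side sketch (not part of the original file): how the
 * primitives above compose into the open-coded wait loop that the
 * swait_event_interruptible() macro in <linux/swait.h> generates. The
 * names my_queue, my_flag and my_wait_for_flag() are hypothetical, and
 * the block is compiled out.
 */
#if 0
DECLARE_SWAIT_QUEUE_HEAD(my_queue);
static bool my_flag;

static long my_wait_for_flag(void)
{
	DECLARE_SWAITQUEUE(wait);
	long ret;

	for (;;) {
		/* Queue ourselves and set the task state under q->lock. */
		ret = prepare_to_swait_event(&my_queue, &wait, TASK_INTERRUPTIBLE);
		if (my_flag)
			break;
		if (ret) {
			/* -ERESTARTSYS: signal pending; we were already dequeued. */
			return ret;
		}
		schedule();
	}
	finish_swait(&my_queue, &wait);
	return 0;
}
#endif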
void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
}

void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);

	/*
	 * Lockless check first; only take q->lock if a wakeup may not
	 * have removed us from the queue yet.
	 */
	if (!list_empty_careful(&wait->task_list)) {
		raw_spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		raw_spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_swait);
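
/*
 * A minimal waker-side sketch (not part of the original file), pairing
 * with the hypothetical waiter sketch above; my_queue and my_flag are
 * the same hypothetical names, and the block is compiled out. The
 * condition is published before swake_up_one(); q->lock, taken both
 * here and in prepare_to_swait_event(), orders the store against the
 * waiter's check, so the waiter either observes my_flag set or is on
 * the queue and gets woken.
 */
#if 0
static void my_set_flag(void)
{
	my_flag = true;			/* publish the condition first */
	swake_up_one(&my_queue);	/* then wake one (exclusive) waiter */
}
#endif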