// SPDX-License-Identifier: GPL-2.0
/*
 * <linux/swait.h> (simple wait queues) implementation:
 */
#include "sched.h"

void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
			     struct lock_class_key *key)
{
	raw_spin_lock_init(&q->lock);
	lockdep_set_class_and_name(&q->lock, key, name);
	INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_swait_queue_head);

/*
 * About the wake_up_state() return value: we can ignore it.
 *
 * If it returns 0, the previously waiting task is already running, so it
 * will observe the condition as true (or already has).
 */
void swake_up_locked(struct swait_queue_head *q)
{
	struct swait_queue *curr;

	if (list_empty(&q->task_list))
		return;

	curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
	wake_up_process(curr->task);
	list_del_init(&curr->task_list);
}
EXPORT_SYMBOL(swake_up_locked);

void swake_up(struct swait_queue_head *q)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	swake_up_locked(q);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(swake_up);

/*
 * Does not allow usage from IRQ-disabled context, since we must be able to
 * release IRQs to guarantee a bounded hold time.
 */
void swake_up_all(struct swait_queue_head *q)
{
	struct swait_queue *curr;
	LIST_HEAD(tmp);

	raw_spin_lock_irq(&q->lock);
	list_splice_init(&q->task_list, &tmp);
	while (!list_empty(&tmp)) {
		curr = list_first_entry(&tmp, typeof(*curr), task_list);

		wake_up_state(curr->task, TASK_NORMAL);
		list_del_init(&curr->task_list);

		if (list_empty(&tmp))
			break;

		/* Drop the lock between waiters to bound IRQ-off time. */
		raw_spin_unlock_irq(&q->lock);
		raw_spin_lock_irq(&q->lock);
	}
	raw_spin_unlock_irq(&q->lock);
}
EXPORT_SYMBOL(swake_up_all);

void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	wait->task = current;
	if (list_empty(&wait->task_list))
		list_add(&wait->task_list, &q->task_list);
}

void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	__prepare_to_swait(q, wait);
	set_current_state(state);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_swait);

long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	if (signal_pending_state(state, current))
		return -ERESTARTSYS;

	prepare_to_swait(q, wait, state);

	return 0;
}
EXPORT_SYMBOL(prepare_to_swait_event);

void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
}

void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);

	/* Lockless check; take the lock only if we are still enqueued. */
	if (!list_empty_careful(&wait->task_list)) {
		raw_spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		raw_spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_swait);
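
/*
 * Usage sketch (illustrative only, not part of this file): how the API above
 * is typically paired by a waiter and a waker. The identifiers demo_head,
 * demo_flag, demo_wait() and demo_wake() are hypothetical; the macros
 * DECLARE_SWAIT_QUEUE_HEAD() and DECLARE_SWAITQUEUE() come from
 * <linux/swait.h>.
 *
 *	static DECLARE_SWAIT_QUEUE_HEAD(demo_head);
 *	static bool demo_flag;
 *
 *	static void demo_wait(void)
 *	{
 *		DECLARE_SWAITQUEUE(wait);
 *
 *		// Open-coded wait loop: enqueue and set the task state,
 *		// re-check the condition, sleep, repeat until it holds.
 *		for (;;) {
 *			prepare_to_swait(&demo_head, &wait, TASK_UNINTERRUPTIBLE);
 *			if (READ_ONCE(demo_flag))
 *				break;
 *			schedule();
 *		}
 *		finish_swait(&demo_head, &wait);
 *	}
 *
 *	static void demo_wake(void)
 *	{
 *		// Publish the condition before waking the first waiter.
 *		WRITE_ONCE(demo_flag, true);
 *		swake_up(&demo_head);
 *	}
 */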