/*
 * Simple waitqueues ("swait"): a minimal waitqueue built from a raw
 * spinlock plus a plain list of waiting tasks.  swake_up_locked() wakes
 * at most one waiter; swake_up_all() wakes every queued waiter while
 * periodically releasing the lock to bound hold time.
 */
#include <linux/sched/signal.h>
#include <linux/swait.h>

/*
 * Initialize @q: raw lock, empty waiter list, and a lockdep class/name
 * for the lock so lock-debugging reports identify this queue.
 */
void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
			     struct lock_class_key *key)
{
	raw_spin_lock_init(&q->lock);
	lockdep_set_class_and_name(&q->lock, key, name);
	INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_swait_queue_head);

/*
 * The thing about the wake_up_state() return value; I think we can ignore it.
 *
 * If for some reason it would return 0, that means the previously waiting
 * task is already running, so it will observe condition true (or has already).
 */
/*
 * Wake at most one waiter (the head of the list).  Caller must hold
 * q->lock.  The entry is unlinked after the wakeup, so the woken task's
 * finish_swait() fast path can see an empty ->task_list.
 */
void swake_up_locked(struct swait_queue_head *q)
{
	struct swait_queue *curr;

	if (list_empty(&q->task_list))
		return;

	curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
	wake_up_process(curr->task);
	list_del_init(&curr->task_list);
}
EXPORT_SYMBOL(swake_up_locked);

/*
 * Wake one waiter; IRQ-safe wrapper around swake_up_locked().
 */
void swake_up(struct swait_queue_head *q)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	swake_up_locked(q);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(swake_up);

/*
 * Does not allow usage from IRQ disabled, since we must be able to
 * release IRQs to guarantee bounded hold time.
 */
void swake_up_all(struct swait_queue_head *q)
{
	struct swait_queue *curr;
	LIST_HEAD(tmp);

	raw_spin_lock_irq(&q->lock);
	/*
	 * Move all waiters onto a private list so new arrivals on
	 * q->task_list are not woken by this call.  Entries on @tmp are
	 * still reachable through each waiter's wait->task_list, so a
	 * task woken by some other path can still unlink itself via
	 * finish_swait() under q->lock — hence all list operations below
	 * happen with the lock held.
	 */
	list_splice_init(&q->task_list, &tmp);
	while (!list_empty(&tmp)) {
		curr = list_first_entry(&tmp, typeof(*curr), task_list);

		/* Wake first, then unlink (see swake_up_locked()). */
		wake_up_state(curr->task, TASK_NORMAL);
		list_del_init(&curr->task_list);

		if (list_empty(&tmp))
			break;

		/*
		 * Drop and retake the lock between waiters to bound lock
		 * hold time; @tmp may shrink concurrently while the lock
		 * is released (waiters removing themselves), which is why
		 * the loop re-checks emptiness under the lock each pass.
		 */
		raw_spin_unlock_irq(&q->lock);
		raw_spin_lock_irq(&q->lock);
	}
	raw_spin_unlock_irq(&q->lock);
}
EXPORT_SYMBOL(swake_up_all);

/*
 * Queue @wait for @current on @q.  Caller must hold q->lock.  The
 * list_empty() check makes repeated calls with an already-queued entry
 * a no-op, so wait loops can re-prepare safely.
 */
void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	wait->task = current;
	if (list_empty(&wait->task_list))
		list_add(&wait->task_list, &q->task_list);
}

/*
 * Queue @wait and move @current into @state.  The task state is changed
 * while still holding q->lock, before the lock is released, so a waker
 * that takes the lock afterwards observes both the queued entry and the
 * sleeping state together.
 */
void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	__prepare_to_swait(q, wait);
	set_current_state(state);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_swait);

/*
 * prepare_to_swait() variant for swait_event()-style loops: bail out
 * with -ERESTARTSYS instead of queueing when a signal is pending for
 * @state, otherwise queue and return 0.
 *
 * NOTE(review): on the signal path @wait is not dequeued here — if it is
 * still queued from a previous loop iteration, a concurrent wakeup could
 * be consumed by this exiting waiter.  Later upstream kernels perform
 * the signal check and a list_del_init() under q->lock for exactly this
 * reason — worth confirming against the callers of this code.
 */
long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	if (signal_pending_state(state, current))
		return -ERESTARTSYS;

	prepare_to_swait(q, wait, state);

	return 0;
}
EXPORT_SYMBOL(prepare_to_swait_event);

/*
 * Undo a prepare: back to TASK_RUNNING and unlink @wait if still queued.
 * Caller must hold q->lock.
 */
void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
}

/*
 * Lock-free fast path variant of __finish_swait(): if a waker already
 * unlinked us (wakers do list_del_init() under q->lock), skip taking the
 * lock entirely.  list_empty_careful() is used because the check runs
 * without holding q->lock.
 */
void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);

	if (!list_empty_careful(&wait->task_list)) {
		raw_spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		raw_spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_swait);