// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

/*
 * TWA_SIGNAL signaling - use TIF_NOTIFY_SIGNAL, if available, as it's faster
 * than TIF_SIGPENDING because there's no dependency on ->sighand. The latter
 * is shared for threads, and can cause contention on sighand->lock. Even for
 * the non-threaded case TIF_NOTIFY_SIGNAL is more efficient, as no locking
 * or IRQ disabling is involved for notification (or running) purposes.
 */
static void task_work_notify_signal(struct task_struct *task)
{
#if defined(TIF_NOTIFY_SIGNAL)
	set_notify_signal(task);
#else
	unsigned long flags;

	/*
	 * Only grab the sighand lock if we don't already have some
	 * task_work pending. This pairs with the smp_store_mb()
	 * in get_signal(), see comment there.
	 */
	if (!(READ_ONCE(task->jobctl) & JOBCTL_TASK_WORK) &&
	    lock_task_sighand(task, &flags)) {
		task->jobctl |= JOBCTL_TASK_WORK;
		signal_wake_up(task, 0);
		unlock_task_sighand(task, &flags);
	}
#endif
}

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the targeted task
 *
 * Queue @work for task_work_run() below and notify the @task if @notify
 * is @TWA_RESUME or @TWA_SIGNAL. @TWA_SIGNAL works like signals, in that
 * it will interrupt the targeted task and run the task_work. @TWA_RESUME
 * work is run only when the task exits the kernel and returns to user mode,
 * or before entering guest mode. Fails if the @task is exiting/exited and
 * thus it can't process this @work. Otherwise @work->func() will be called
 * when the @task goes through one of the aforementioned transitions, or
 * exits.
 *
 * If the targeted task is exiting, then an error is returned and the work
 * item is not queued. It's up to the caller to arrange for an alternative
 * mechanism in that case.
 *
 * Note: there is no ordering guarantee on works queued here. The task_work
 * list is LIFO.
 *
 * RETURNS:
 * 0 on success, -ESRCH if @task is already exiting.
 */
int task_work_add(struct task_struct *task, struct callback_head *work,
		  enum task_work_notify_mode notify)
{
	struct callback_head *head;

	/* Lockless LIFO push: retry until the list head is swapped in. */
	do {
		head = READ_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	switch (notify) {
	case TWA_NONE:
		break;
	case TWA_RESUME:
		set_notify_resume(task);
		break;
	case TWA_SIGNAL:
		task_work_notify_signal(task);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}
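/*
 * Usage sketch (illustrative only, not part of this file): queue a
 * callback that runs in the context of @task the next time it returns
 * to user mode. struct my_deferred, my_deferred_fn() and
 * queue_my_deferred() are hypothetical names; the sketch also assumes
 * <linux/slab.h> for kmalloc()/kfree() and that the caller holds a
 * reference on @task.
 */
#if 0
struct my_deferred {
	struct callback_head cb;	/* must stay live until cb runs */
	int payload;
};

static void my_deferred_fn(struct callback_head *cb)
{
	struct my_deferred *d = container_of(cb, struct my_deferred, cb);

	/* Runs in the targeted task's context, not the queuer's. */
	pr_info("deferred payload %d\n", d->payload);
	kfree(d);
}

static int queue_my_deferred(struct task_struct *task, int payload)
{
	struct my_deferred *d = kmalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;
	d->payload = payload;
	init_task_work(&d->cb, my_deferred_fn);
	if (task_work_add(task, &d->cb, TWA_RESUME)) {
		kfree(d);	/* @task is exiting, work was not queued */
		return -ESRCH;
	}
	return 0;
}
#endif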
/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	if (likely(!task->task_works))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work; then we will find it again.
	 * Or we raced with task_work_run() and *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = READ_ONCE(*pprev))) {
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}

/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			head = NULL;
			work = READ_ONCE(task->task_works);
			if (!work) {
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
		} while (cmpxchg(&task->task_works, work, head) != work);

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It cannot remove
		 * the first entry == work; cmpxchg(task_works) must fail.
		 * But it can remove another entry from the ->next list.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);

		/* Run the detached batch, most recently added entry first. */
		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
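/*
 * Cancellation sketch (illustrative only, hypothetical names, continuing
 * the queue_my_deferred() example above): task_work_cancel() returns the
 * removed callback_head on success, so the caller reclaims the embedding
 * object. NULL means the work was not found - it already ran, is about
 * to run, or was never queued - and my_deferred_fn() does the cleanup.
 */
#if 0
static void cancel_my_deferred(struct task_struct *task)
{
	struct callback_head *cb = task_work_cancel(task, my_deferred_fn);

	if (cb)
		kfree(container_of(cb, struct my_deferred, cb));
}
#endif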