// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the task - TWA_RESUME (run on the next return to
 *          user mode), TWA_SIGNAL (also wake the task up, like a pending
 *          signal); any other value queues the work without notification
 *
 * Queue @work for task_work_run() below and notify the @task if @notify.
 * Fails if the @task is exiting/exited and thus it can't process this @work.
 * Otherwise @work->func() will be called when the @task returns from kernel
 * mode or exits.
 *
 * This is like a signal handler which runs in kernel mode, but with
 * TWA_RESUME it does not try to wake the @task up; TWA_SIGNAL wakes the
 * task like a pending signal does.
 *
 * Note: there is no ordering guarantee on works queued here.
 *
 * RETURNS:
 * 0 on success, or -ESRCH if the @task is already exiting and can no
 * longer run the work.
 */
int
task_work_add(struct task_struct *task, struct callback_head *work, int notify)
{
	struct callback_head *head;
	unsigned long flags;

	do {
		head = READ_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	switch (notify) {
	case TWA_RESUME:
		set_notify_resume(task);
		break;
	case TWA_SIGNAL:
		/*
		 * Only grab the sighand lock if we don't already have some
		 * task_work pending. This pairs with the smp_store_mb()
		 * in get_signal(), see comment there.
		 */
		if (!(READ_ONCE(task->jobctl) & JOBCTL_TASK_WORK) &&
		    lock_task_sighand(task, &flags)) {
			task->jobctl |= JOBCTL_TASK_WORK;
			signal_wake_up(task, 0);
			unlock_task_sighand(task, &flags);
		}
		break;
	}

	return 0;
}

/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from the queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	if (likely(!task->task_works))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the new
	 * entry before this work, in which case we will find it again,
	 * or we raced with task_work_run() and *pprev is NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = READ_ONCE(*pprev))) {
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}
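
/*
 * Usage sketch for the API above. The names example_done, example_func and
 * example_queue are hypothetical, illustration-only; they show the expected
 * calling pattern, not an existing in-tree user.
 */
static bool example_done;

static void example_func(struct callback_head *work)
{
	/* Runs in the target task's context, from task_work_run(). */
	WRITE_ONCE(example_done, true);
}

static int __maybe_unused example_queue(struct task_struct *task)
{
	static struct callback_head example_work;

	/*
	 * A callback_head must not be queued again before it has run;
	 * callers usually embed it in the object the callback operates on.
	 */
	init_task_work(&example_work, example_func);

	/* TWA_RESUME: run on the next return to user mode, no wakeup. */
	return task_work_add(task, &example_work, TWA_RESUME);
}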

/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			head = NULL;
			work = READ_ONCE(task->task_works);
			if (!work) {
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
		} while (cmpxchg(&task->task_works, work, head) != work);

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It cannot remove
		 * the first entry == work, its cmpxchg(task_works) must
		 * fail. But it can remove another entry from the ->next
		 * list.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
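
/*
 * Cancellation sketch; example_cancel is a hypothetical, illustration-only
 * helper. It shows the ownership rule around task_work_cancel(): a non-NULL
 * return means the last queued work with a matching ->func was dequeued
 * before it ran and the caller owns it again; NULL means nothing matching is
 * pending (it already ran, is running, or was never queued), so any cleanup
 * must happen in the callback itself.
 */
static bool __maybe_unused example_cancel(struct task_struct *task,
					  task_work_func_t func)
{
	return task_work_cancel(task, func) != NULL;
}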