xref: /openbmc/linux/kernel/task_work.c (revision d10cd7bf574ead01fae140ce117a11bcdacbe6a8)
// SPDX-License-Identifier: GPL-2.0
#include <linux/irq_work.h>
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/resume_user_mode.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

/*
 * TWA_NMI_CURRENT cannot notify the task directly from NMI context, so it
 * queues this hard irq_work, which sets TIF_NOTIFY_RESUME on the current
 * task once the NMI has unwound.
 */
static void task_work_set_notify_irq(struct irq_work *entry)
{
	test_and_set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
}

static DEFINE_PER_CPU(struct irq_work, irq_work_NMI_resume) =
	IRQ_WORK_INIT_HARD(task_work_set_notify_irq);

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the targeted task
 *
 * Queue @work for task_work_run() below and notify the @task if @notify
 * is @TWA_RESUME, @TWA_SIGNAL, @TWA_SIGNAL_NO_IPI or @TWA_NMI_CURRENT.
 *
 * @TWA_SIGNAL works like signals, in that it will interrupt the targeted
 * task and run the task_work, regardless of whether the task is currently
 * running in the kernel or userspace.
 * @TWA_SIGNAL_NO_IPI works like @TWA_SIGNAL, except it doesn't send a
 * reschedule IPI to force the targeted task to reschedule and run task_work.
 * This can be advantageous if there's no strict requirement that the
 * task_work be run as soon as possible, just whenever the task enters the
 * kernel anyway.
 * @TWA_RESUME work is run only when the task exits the kernel and returns to
 * user mode, or before entering guest mode.
 * @TWA_NMI_CURRENT works like @TWA_RESUME, except it can only be used for the
 * current @task and if the current context is NMI.
 *
 * Fails if the @task is exiting/exited and thus can't process this @work:
 * the work item is not queued, and it's up to the caller to arrange for an
 * alternative mechanism in that case. Otherwise @work->func() will be called
 * when the @task goes through one of the aforementioned transitions, or
 * exits.
 *
 * Note: there is no ordering guarantee on works queued here. The task_work
 * list is LIFO.
 *
 * RETURNS:
 * 0 on success, or -ESRCH if the @task is exiting/exited.
 */
int task_work_add(struct task_struct *task, struct callback_head *work,
		  enum task_work_notify_mode notify)
{
	struct callback_head *head;
	int flags = notify & TWA_FLAGS;

	notify &= ~TWA_FLAGS;
	if (notify == TWA_NMI_CURRENT) {
		if (WARN_ON_ONCE(task != current))
			return -EINVAL;
	} else {
		/*
		 * Record the work call stack in order to print it in KASAN
		 * reports.
		 *
		 * Note that stack allocation can fail if TWAF_NO_ALLOC flag
		 * is set and new page is needed to expand the stack buffer.
		 */
		if (flags & TWAF_NO_ALLOC)
			kasan_record_aux_stack_noalloc(work);
		else
			kasan_record_aux_stack(work);
	}

	head = READ_ONCE(task->task_works);
	do {
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (!try_cmpxchg(&task->task_works, &head, work));

	switch (notify) {
	case TWA_NONE:
		break;
	case TWA_RESUME:
		set_notify_resume(task);
		break;
	case TWA_SIGNAL:
		set_notify_signal(task);
		break;
	case TWA_SIGNAL_NO_IPI:
		__set_notify_signal(task);
		break;
	case TWA_NMI_CURRENT:
		irq_work_queue(this_cpu_ptr(&irq_work_NMI_resume));
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}

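/*
 * Illustrative sketch, not part of the original file: a typical caller
 * embeds the callback_head in its own object, initializes it with
 * init_task_work() and queues it with task_work_add().  All "my_*" names
 * are hypothetical, and kmalloc()/kfree() would additionally require
 * <linux/slab.h>.
 */
#if 0	/* example only */
struct my_deferred {
	struct callback_head twork;
	int payload;
};

static void my_deferred_func(struct callback_head *cb)
{
	/* Runs in the context of the task the work was queued on. */
	struct my_deferred *d = container_of(cb, struct my_deferred, twork);

	pr_info("deferred payload %d\n", d->payload);
	kfree(d);
}

static int my_queue_deferred(struct task_struct *task, int payload)
{
	struct my_deferred *d = kmalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;
	d->payload = payload;
	init_task_work(&d->twork, my_deferred_func);
	/* A non-zero return means @task is exiting; the callback will never run. */
	if (task_work_add(task, &d->twork, TWA_SIGNAL)) {
		kfree(d);
		return -ESRCH;
	}
	return 0;
}
#endif
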
/**
 * task_work_cancel_match - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @match: match function to call
 * @data: data to be passed in to match function
 *
 * Find the last queued pending work for which @match returns true and
 * remove it from the queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel_match(struct task_struct *task,
		       bool (*match)(struct callback_head *, void *data),
		       void *data)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	if (likely(!task_work_pending(task)))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added a new entry
	 * before this work, in which case we will find it again, or we
	 * raced with task_work_run() and *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	work = READ_ONCE(*pprev);
	while (work) {
		if (!match(work, data)) {
			pprev = &work->next;
			work = READ_ONCE(*pprev);
		} else if (try_cmpxchg(pprev, &work, work->next))
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}

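/*
 * Illustrative sketch, not part of the original file: @match lets callers
 * cancel by an arbitrary criterion.  Here a hypothetical object carrying
 * an owner pointer next to its callback_head is matched against @data.
 */
#if 0	/* example only */
struct my_owned_work {
	struct callback_head twork;
	void *owner;
};

static bool my_owner_match(struct callback_head *cb, void *data)
{
	return container_of(cb, struct my_owned_work, twork)->owner == data;
}

/* Dequeue the last queued work belonging to @owner, or return NULL. */
static struct my_owned_work *my_cancel_owned(struct task_struct *task, void *owner)
{
	struct callback_head *cb;

	cb = task_work_cancel_match(task, my_owner_match, owner);
	return cb ? container_of(cb, struct my_owned_work, twork) : NULL;
}
#endif
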
static bool task_work_func_match(struct callback_head *cb, void *data)
{
	return cb->func == data;
}

/**
 * task_work_cancel_func - cancel a pending work matching a function added by task_work_add()
 * @task: the task which should execute the func's work
 * @func: identifies the func to match with a work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from the queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel_func(struct task_struct *task, task_work_func_t func)
{
	return task_work_cancel_match(task, task_work_func_match, func);
}

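/*
 * Illustrative fragment, not part of the original file: cancelling by
 * function pointer, reusing the hypothetical my_deferred_func() from the
 * first sketch.
 */
#if 0	/* example only */
	struct callback_head *cb;

	/* Dequeues the most recently queued work that would run my_deferred_func(). */
	cb = task_work_cancel_func(task, my_deferred_func);
	if (cb)
		kfree(container_of(cb, struct my_deferred, twork));
#endif
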
static bool task_work_match(struct callback_head *cb, void *data)
{
	return cb == data;
}

/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @cb: the callback to remove if queued
 *
 * Remove a callback from a task's queue if queued.
 *
 * RETURNS:
 * True if the callback was queued and got cancelled, false otherwise.
 */
bool task_work_cancel(struct task_struct *task, struct callback_head *cb)
{
	struct callback_head *ret;

	ret = task_work_cancel_match(task, task_work_match, cb);

	return ret == cb;
}

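/*
 * Illustrative sketch, not part of the original file: the usual teardown
 * pattern around task_work_cancel().  Only free the object if the cancel
 * actually dequeued the callback; otherwise the callback may already be
 * running or about to run, and it remains responsible for the object.
 */
#if 0	/* example only */
static void my_teardown(struct task_struct *task, struct my_deferred *d)
{
	if (task_work_cancel(task, &d->twork))
		kfree(d);	/* dequeued: my_deferred_func() will never run */
	/* else: the queued callback still owns and frees @d */
}
#endif
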
/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		work = READ_ONCE(task->task_works);
		do {
			head = NULL;
			if (!work) {
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
		} while (!try_cmpxchg(&task->task_works, &work, head));

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel_match(): it cannot remove
		 * the first entry == work, since its cmpxchg(task_works) must
		 * fail. But it can remove another entry from the ->next list.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
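
/*
 * Context, not part of the original file: the core kernel invokes
 * task_work_run() from the exit path and from the return-to-user path.
 * Simplified from include/linux/task_work.h and
 * include/linux/resume_user_mode.h of the same tree:
 */
#if 0	/* simplified call sites */
static inline void exit_task_work(struct task_struct *task)
{
	task_work_run();
}

/* ...and in resume_user_mode_work(), on the way back to userspace: */
	if (unlikely(task_work_pending(current)))
		task_work_run();
#endif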
239