// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/resume_user_mode.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the targeted task
 *
 * Queue @work for task_work_run() below and notify the @task if @notify
 * is @TWA_RESUME, @TWA_SIGNAL, or @TWA_SIGNAL_NO_IPI.
 *
 * @TWA_SIGNAL works like signals, in that it will interrupt the targeted
 * task and run the task_work, regardless of whether the task is currently
 * running in the kernel or in userspace.
 * @TWA_SIGNAL_NO_IPI works like @TWA_SIGNAL, except it doesn't send a
 * reschedule IPI to force the targeted task to reschedule and run task_work.
 * This can be advantageous if there's no strict requirement that the
 * task_work be run as soon as possible, just whenever the task enters the
 * kernel anyway.
 * @TWA_RESUME work is run only when the task exits the kernel and returns to
 * user mode, or before entering guest mode.
 *
 * Fails if the @task is exiting/exited and thus can't process this @work;
 * in that case the work is not queued, and it's up to the caller to arrange
 * for an alternative mechanism. Otherwise @work->func() will be called when
 * the @task goes through one of the aforementioned transitions, or exits.
 *
 * Note: there is no ordering guarantee on works queued here. The task_work
 * list is LIFO.
 *
 * RETURNS:
 * 0 on success, or -ESRCH if the @task is exiting/exited.
 */
int task_work_add(struct task_struct *task, struct callback_head *work,
		  enum task_work_notify_mode notify)
{
	struct callback_head *head;

	/* record the work call stack in order to print it in KASAN reports */
	kasan_record_aux_stack(work);

	head = READ_ONCE(task->task_works);
	do {
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (!try_cmpxchg(&task->task_works, &head, work));

	switch (notify) {
	case TWA_NONE:
		break;
	case TWA_RESUME:
		set_notify_resume(task);
		break;
	case TWA_SIGNAL:
		set_notify_signal(task);
		break;
	case TWA_SIGNAL_NO_IPI:
		__set_notify_signal(task);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}

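/*
 * Illustrative usage sketch (the example_* names below are hypothetical,
 * for exposition only): a caller typically embeds the callback_head in a
 * structure of its own and recovers it in the callback via container_of().
 */
struct example_work {
	struct callback_head cb;	/* queued via task_work_add() */
	int payload;			/* caller-private data */
};

/* Runs in the context of the targeted task, not the queueing one. */
static void __maybe_unused example_func(struct callback_head *cb)
{
	struct example_work *ew = container_of(cb, struct example_work, cb);

	pr_info("task_work payload=%d\n", ew->payload);
}

static int __maybe_unused example_queue(struct task_struct *task,
					struct example_work *ew, int payload)
{
	ew->payload = payload;
	init_task_work(&ew->cb, example_func);

	/*
	 * -ESRCH means @task has already run its final task_work_run();
	 * the caller still owns @ew and must fall back to another path.
	 */
	return task_work_add(task, &ew->cb, TWA_RESUME);
}
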
/**
 * task_work_cancel_match - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @match: match function to call
 * @data: data to be passed to the @match function
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel_match(struct task_struct *task,
		       bool (*match)(struct callback_head *, void *data),
		       void *data)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	if (likely(!task_work_pending(task)))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added a new
	 * entry before this work, in which case we will find it
	 * again, or we raced with task_work_run() and *pprev is
	 * now NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	work = READ_ONCE(*pprev);
	while (work) {
		if (!match(work, data)) {
			pprev = &work->next;
			work = READ_ONCE(*pprev);
		} else if (try_cmpxchg(pprev, &work, work->next))
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}

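/*
 * Illustrative sketch (hypothetical names): the @match predicate makes it
 * possible to cancel by any criterion, e.g. an exact work pointer rather
 * than the callback function used by task_work_cancel() below.
 */
static bool __maybe_unused example_match_ptr(struct callback_head *cb,
					     void *data)
{
	return cb == data;
}

static __maybe_unused struct callback_head *
example_cancel_exact(struct task_struct *task, struct callback_head *cb)
{
	return task_work_cancel_match(task, example_match_ptr, cb);
}
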
static bool task_work_func_match(struct callback_head *cb, void *data)
{
	return cb->func == data;
}

/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from the queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	return task_work_cancel_match(task, task_work_func_match, func);
}

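/*
 * Illustrative sketch (continuing the hypothetical example_* names from
 * above): on success the dequeued work is handed back to the caller; a
 * NULL return means the work was never queued, or task_work_run() has
 * already claimed it and the callback will run (or has run).
 */
static void __maybe_unused example_cancel(struct task_struct *task)
{
	struct callback_head *cb = task_work_cancel(task, example_func);

	if (cb) {
		struct example_work *ew =
			container_of(cb, struct example_work, cb);

		/* Dequeued before it ran; safe to reuse or free @ew. */
		ew->payload = 0;
	}
}
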
/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		work = READ_ONCE(task->task_works);
		do {
			head = NULL;
			if (!work) {
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
		} while (!try_cmpxchg(&task->task_works, &work, head));

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(): it cannot remove
		 * the first entry == work, since cmpxchg(task_works) must
		 * fail. But it can remove another entry from the ->next
		 * list.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
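
/*
 * Illustrative sketch (hypothetical, reusing example_* from above): the
 * core kernel reaches task_work_run() from the exit path (exit_task_work())
 * and on return to user mode (resume_user_mode_work()). The pending list
 * is LIFO, so the most recently queued entry runs first.
 */
static void __maybe_unused example_lifo(struct task_struct *task)
{
	static struct example_work first = { .payload = 1 };
	static struct example_work second = { .payload = 2 };

	init_task_work(&first.cb, example_func);
	init_task_work(&second.cb, example_func);

	/* Error handling elided for brevity; see example_queue() above. */
	task_work_add(task, &first.cb, TWA_NONE);
	task_work_add(task, &second.cb, TWA_RESUME);

	/* @task's next task_work_run() prints payload=2, then payload=1. */
}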