#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>

int
task_work_add(struct task_struct *task, struct task_work *twork, bool notify)
{
	unsigned long flags;
	int err = -ESRCH;

#ifndef TIF_NOTIFY_RESUME
	if (notify)
		return -ENOTSUPP;
#endif
	/*
	 * We must not insert the new work if the task has already passed
	 * exit_task_work(). We rely on do_exit()->raw_spin_unlock_wait()
	 * and check PF_EXITING under pi_lock.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	if (likely(!(task->flags & PF_EXITING))) {
		hlist_add_head(&twork->hlist, &task->task_works);
		err = 0;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */
	if (likely(!err) && notify)
		set_notify_resume(task);
	return err;
}

struct task_work *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	unsigned long flags;
	struct task_work *twork;
	struct hlist_node *pos;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	hlist_for_each_entry(twork, pos, &task->task_works, hlist) {
		if (twork->func == func) {
			hlist_del(&twork->hlist);
			goto found;
		}
	}
	twork = NULL;
found:
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return twork;
}

void task_work_run(void)
{
	struct task_struct *task = current;
	struct hlist_head task_works;
	struct hlist_node *pos;

	raw_spin_lock_irq(&task->pi_lock);
	hlist_move_list(&task->task_works, &task_works);
	raw_spin_unlock_irq(&task->pi_lock);

	if (unlikely(hlist_empty(&task_works)))
		return;
	/*
	 * We use hlist to save space in task_struct, but we want FIFO
	 * ordering. Find the last entry (the list should be short), then
	 * process them in reverse order.
	 */
	for (pos = task_works.first; pos->next; pos = pos->next)
		;

	for (;;) {
		struct hlist_node **pprev = pos->pprev;
		struct task_work *twork = container_of(pos, struct task_work,
							hlist);
		twork->func(twork);

		if (pprev == &task_works.first)
			break;
		pos = container_of(pprev, struct hlist_node, next);
	}
}
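
/*
 * Illustrative usage sketch, not part of the original file: how a caller
 * might queue deferred work against a task with the API above. The names
 * my_work, my_work_func, queue_my_work and cancel_my_work are hypothetical;
 * the ->func member and the return conventions are taken from the code
 * above, and the callback is assumed to own (and free) its container.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/task_work.h>

struct my_work {
	struct task_work twork;		/* embedded; the callback gets &twork */
	int payload;
};

static void my_work_func(struct task_work *twork)
{
	struct my_work *mw = container_of(twork, struct my_work, twork);

	/* Runs in the target task's own context, from task_work_run(). */
	pr_info("my_work: payload=%d\n", mw->payload);
	kfree(mw);
}

static int queue_my_work(struct task_struct *task, int payload)
{
	struct my_work *mw;
	int err;

	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return -ENOMEM;
	mw->payload = payload;
	mw->twork.func = my_work_func;

	/* notify=true sets TIF_NOTIFY_RESUME so the callback runs soon. */
	err = task_work_add(task, &mw->twork, true);
	if (err)	/* -ESRCH: task is exiting; -ENOTSUPP: no TIF_NOTIFY_RESUME */
		kfree(mw);
	return err;
}

static void cancel_my_work(struct task_struct *task)
{
	struct task_work *twork = task_work_cancel(task, my_work_func);

	if (twork)	/* dequeued before it ran; ownership returns to us */
		kfree(container_of(twork, struct my_work, twork));
}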