/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
	int rt;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
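
/*
 * Illustrative sketch (not part of the original file; my_fn and my_wq are
 * hypothetical): with CONFIG_DEBUG_OBJECTS_WORK, a work item living on the
 * stack should be set up with INIT_WORK_ON_STACK() and torn down with
 * destroy_work_on_stack() before the frame goes away, so the object
 * tracking above stays consistent:
 *
 *	struct work_struct work;
 *
 *	INIT_WORK_ON_STACK(&work, my_fn);
 *	queue_work(my_wq, &work);
 *	flush_work(&work);
 *	destroy_work_on_stack(&work);
 */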

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_wq_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_wq_single_threaded(wq)
		? cpu_singlethread_map : cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_wq_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	trace_workqueue_insertion(cwq->thread, work);

	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	debug_work_activate(work);
	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
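
/*
 * Usage sketch (not part of the original file; my_dev, my_wq and
 * my_work_fn are hypothetical): a driver typically embeds a work_struct
 * in its private data, initializes it once with INIT_WORK(), and queues
 * it from atomic context; the callback then runs in process context:
 *
 *	struct my_dev {
 *		struct work_struct work;
 *	};
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *		...	(may sleep here)
 *	}
 *
 *	INIT_WORK(&dev->work, my_work_fn);
 *	queue_work(my_wq, &dev->work);
 */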

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
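
/*
 * Usage sketch (hypothetical names): a delayed_work can rearm itself from
 * its own callback to implement periodic polling:
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  dwork.work);
 *		...
 *		queue_delayed_work(my_wq, &dev->dwork, HZ);
 *	}
 *
 *	INIT_DELAYED_WORK(&dev->dwork, my_poll_fn);
 *	queue_delayed_work(my_wq, &dev->dwork, HZ);
 */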

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct
		 * from inside the function that is called from it,
		 * this we need to take into account for lockdep too.
		 * To avoid bogus "held lock freed" warnings as well
		 * as problems when looking into work->lockdep_map,
		 * make a copy and use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif
		trace_workqueue_execution(cwq->thread, work);
		debug_work_deactivate(work);
		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_map_acquire(&cwq->wq->lockdep_map);
		lock_map_acquire(&lockdep_map);
		f(work);
		lock_map_release(&lockdep_map);
		lock_map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	/*
	 * debugobject calls are safe here even with cwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active = 0;
	struct wq_barrier barr;

	WARN_ON(cwq->thread == current);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
		insert_wq_barrier(cwq, &barr, &cwq->worklist);
		active = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (active) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself. Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu(cpu, cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
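
/*
 * Usage sketch (hypothetical names): a driver shutdown path first makes
 * sure no new work can be queued, then flushes the queue before freeing
 * the state the work items touch:
 *
 *	dev->stopping = true;
 *	flush_workqueue(my_wq);
 *	kfree(dev);
 */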

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
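
/*
 * Usage sketch (hypothetical names): waiting for one specific item is
 * cheaper than flushing the whole queue; the caller has already arranged
 * that the item cannot be requeued:
 *
 *	dev->shutting_down = true;
 *	flush_work(&dev->work);
 */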

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const struct cpumask *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu(cpu, cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
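
/*
 * Usage sketch (hypothetical names): driver teardown normally uses the
 * _sync cancel variants so the callbacks are guaranteed not to run after
 * the device structure is freed:
 *
 *	cancel_delayed_work_sync(&dev->poll_dwork);
 *	cancel_work_sync(&dev->reset_work);
 *	kfree(dev);
 */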

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * flush_delayed_work - block until a delayed_work's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		struct cpu_workqueue_struct *cwq;
		cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu());
		__queue_work(cwq, &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
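
/*
 * Usage sketch (hypothetical names): small, infrequent callbacks can use
 * the shared keventd queue instead of creating a private workqueue:
 *
 *	INIT_WORK(&dev->led_work, my_led_fn);
 *	schedule_work(&dev->led_work);
 *
 *	INIT_DELAYED_WORK(&dev->retry_dwork, my_retry_fn);
 *	schedule_delayed_work(&dev->retry_dwork, msecs_to_jiffies(100));
 */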

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on
	 * itself.  Can just call directly because the work queue is
	 * already bound.  This also is faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}
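
/*
 * Usage sketch (my_drain_fn and drain_local_cache() are hypothetical):
 * run a function on every online CPU and wait until all instances have
 * finished, e.g. to drain per-cpu caches:
 *
 *	static void my_drain_fn(struct work_struct *unused)
 *	{
 *		drain_local_cache();
 *	}
 *
 *	err = schedule_on_each_cpu(my_drain_fn);
 */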

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
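
/*
 * Usage sketch (hypothetical names): a release routine that may be called
 * from either interrupt or process context defers itself only when it has
 * to; the execute_work storage must live as long as the object:
 *
 *	static void my_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj,
 *						  ew.work);
 *		kfree(obj);
 *	}
 *
 *	execute_in_process_context(my_release, &obj->ew);
 */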

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);
	if (cwq->wq->rt)
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
	cwq->thread = p;

	trace_workqueue_creation(cwq->thread, cpu);

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						int rt,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	wq->rt = rt;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		/*
		 * We must place this wq on list even if the code below fails.
		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
		 * destroy_workqueue() takes the lock, in that case we leak
		 * cwq[cpu]->thread.
		 */
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		/*
		 * We must initialize cwqs for each possible cpu even if we
		 * are going to call destroy_workqueue() finally. Otherwise
		 * cpu_up() can hit the uninitialized cwq once we drop the
		 * lock.
		 */
		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
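
/*
 * Usage sketch: callers do not invoke __create_workqueue_key() directly;
 * they use the create_workqueue()/create_singlethread_workqueue() macros
 * from <linux/workqueue.h>, which supply the lockdep key ("my_driver" and
 * my_wq below are hypothetical):
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = create_singlethread_workqueue("my_driver");
 *	if (!my_wq)
 *		return -ENOMEM;
 */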

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	trace_workqueue_destruction(cwq->thread);
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu(cpu, cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int ret = NOTIFY_OK;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpumask_set_cpu(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			ret = NOTIFY_BAD;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
			/* fall through */
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpumask_clear_cpu(cpu, cpu_populated_map);
	}

	return ret;
}

#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;
	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */
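
/*
 * Usage sketch (my_read_fn and req are hypothetical): run a function that
 * must execute on a particular CPU, e.g. to read a CPU-local hardware
 * resource, without migrating the caller:
 *
 *	static long my_read_fn(void *arg)
 *	{
 *		... runs in a kernel thread bound to the target cpu ...
 *	}
 *
 *	ret = work_on_cpu(cpu, my_read_fn, &req);
 */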

void __init init_workqueues(void)
{
	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

	cpumask_copy(cpu_populated_map, cpu_online_mask);
	singlethread_cpu = cpumask_first(cpu_possible_mask);
	cpu_singlethread_map = cpumask_of(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}