11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * linux/kernel/workqueue.c 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Generic mechanism for defining kernel helper threads for running 51da177e4SLinus Torvalds * arbitrary tasks in process context. 61da177e4SLinus Torvalds * 71da177e4SLinus Torvalds * Started by Ingo Molnar, Copyright (C) 2002 81da177e4SLinus Torvalds * 91da177e4SLinus Torvalds * Derived from the taskqueue/keventd code by: 101da177e4SLinus Torvalds * 111da177e4SLinus Torvalds * David Woodhouse <dwmw2@infradead.org> 12e1f8e874SFrancois Cami * Andrew Morton 131da177e4SLinus Torvalds * Kai Petzke <wpp@marie.physik.tu-berlin.de> 141da177e4SLinus Torvalds * Theodore Ts'o <tytso@mit.edu> 1589ada679SChristoph Lameter * 16cde53535SChristoph Lameter * Made to use alloc_percpu by Christoph Lameter. 171da177e4SLinus Torvalds */ 181da177e4SLinus Torvalds 191da177e4SLinus Torvalds #include <linux/module.h> 201da177e4SLinus Torvalds #include <linux/kernel.h> 211da177e4SLinus Torvalds #include <linux/sched.h> 221da177e4SLinus Torvalds #include <linux/init.h> 231da177e4SLinus Torvalds #include <linux/signal.h> 241da177e4SLinus Torvalds #include <linux/completion.h> 251da177e4SLinus Torvalds #include <linux/workqueue.h> 261da177e4SLinus Torvalds #include <linux/slab.h> 271da177e4SLinus Torvalds #include <linux/cpu.h> 281da177e4SLinus Torvalds #include <linux/notifier.h> 291da177e4SLinus Torvalds #include <linux/kthread.h> 301fa44ecaSJames Bottomley #include <linux/hardirq.h> 3146934023SChristoph Lameter #include <linux/mempolicy.h> 32341a5958SRafael J. Wysocki #include <linux/freezer.h> 33d5abe669SPeter Zijlstra #include <linux/kallsyms.h> 34d5abe669SPeter Zijlstra #include <linux/debug_locks.h> 354e6045f1SJohannes Berg #include <linux/lockdep.h> 36fb39125fSZhaolei #define CREATE_TRACE_POINTS 37fb39125fSZhaolei #include <trace/events/workqueue.h> 381da177e4SLinus Torvalds 391da177e4SLinus Torvalds /* 404690c4abSTejun Heo * Structure fields follow one of the following exclusion rules. 414690c4abSTejun Heo * 424690c4abSTejun Heo * I: Set during initialization and read-only afterwards. 434690c4abSTejun Heo * 444690c4abSTejun Heo * L: cwq->lock protected. Access with cwq->lock held. 454690c4abSTejun Heo * 464690c4abSTejun Heo * W: workqueue_lock protected. 474690c4abSTejun Heo */ 484690c4abSTejun Heo 494690c4abSTejun Heo /* 50f756d5e2SNathan Lynch * The per-CPU workqueue (if single thread, we always use the first 51f756d5e2SNathan Lynch * possible cpu). 521da177e4SLinus Torvalds */ 531da177e4SLinus Torvalds struct cpu_workqueue_struct { 541da177e4SLinus Torvalds 551da177e4SLinus Torvalds spinlock_t lock; 561da177e4SLinus Torvalds 571da177e4SLinus Torvalds struct list_head worklist; 581da177e4SLinus Torvalds wait_queue_head_t more_work; 593af24433SOleg Nesterov struct work_struct *current_work; 601da177e4SLinus Torvalds 614690c4abSTejun Heo struct workqueue_struct *wq; /* I: the owning workqueue */ 6236c8b586SIngo Molnar struct task_struct *thread; 631da177e4SLinus Torvalds } ____cacheline_aligned; 641da177e4SLinus Torvalds 651da177e4SLinus Torvalds /* 661da177e4SLinus Torvalds * The externally visible workqueue abstraction is an array of 671da177e4SLinus Torvalds * per-CPU workqueues: 681da177e4SLinus Torvalds */ 691da177e4SLinus Torvalds struct workqueue_struct { 704690c4abSTejun Heo struct cpu_workqueue_struct *cpu_wq; /* I: cwq's */ 714690c4abSTejun Heo struct list_head list; /* W: list of all workqueues */ 724690c4abSTejun Heo const char *name; /* I: workqueue name */ 73cce1a165SOleg Nesterov int singlethread; 74319c2a98SOleg Nesterov int freezeable; /* Freeze threads during suspend */ 754e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP 764e6045f1SJohannes Berg struct lockdep_map lockdep_map; 774e6045f1SJohannes Berg #endif 781da177e4SLinus Torvalds }; 791da177e4SLinus Torvalds 80dc186ad7SThomas Gleixner #ifdef CONFIG_DEBUG_OBJECTS_WORK 81dc186ad7SThomas Gleixner 82dc186ad7SThomas Gleixner static struct debug_obj_descr work_debug_descr; 83dc186ad7SThomas Gleixner 84dc186ad7SThomas Gleixner /* 85dc186ad7SThomas Gleixner * fixup_init is called when: 86dc186ad7SThomas Gleixner * - an active object is initialized 87dc186ad7SThomas Gleixner */ 88dc186ad7SThomas Gleixner static int work_fixup_init(void *addr, enum debug_obj_state state) 89dc186ad7SThomas Gleixner { 90dc186ad7SThomas Gleixner struct work_struct *work = addr; 91dc186ad7SThomas Gleixner 92dc186ad7SThomas Gleixner switch (state) { 93dc186ad7SThomas Gleixner case ODEBUG_STATE_ACTIVE: 94dc186ad7SThomas Gleixner cancel_work_sync(work); 95dc186ad7SThomas Gleixner debug_object_init(work, &work_debug_descr); 96dc186ad7SThomas Gleixner return 1; 97dc186ad7SThomas Gleixner default: 98dc186ad7SThomas Gleixner return 0; 99dc186ad7SThomas Gleixner } 100dc186ad7SThomas Gleixner } 101dc186ad7SThomas Gleixner 102dc186ad7SThomas Gleixner /* 103dc186ad7SThomas Gleixner * fixup_activate is called when: 104dc186ad7SThomas Gleixner * - an active object is activated 105dc186ad7SThomas Gleixner * - an unknown object is activated (might be a statically initialized object) 106dc186ad7SThomas Gleixner */ 107dc186ad7SThomas Gleixner static int work_fixup_activate(void *addr, enum debug_obj_state state) 108dc186ad7SThomas Gleixner { 109dc186ad7SThomas Gleixner struct work_struct *work = addr; 110dc186ad7SThomas Gleixner 111dc186ad7SThomas Gleixner switch (state) { 112dc186ad7SThomas Gleixner 113dc186ad7SThomas Gleixner case ODEBUG_STATE_NOTAVAILABLE: 114dc186ad7SThomas Gleixner /* 115dc186ad7SThomas Gleixner * This is not really a fixup. The work struct was 116dc186ad7SThomas Gleixner * statically initialized. We just make sure that it 117dc186ad7SThomas Gleixner * is tracked in the object tracker. 118dc186ad7SThomas Gleixner */ 119dc186ad7SThomas Gleixner if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) { 120dc186ad7SThomas Gleixner debug_object_init(work, &work_debug_descr); 121dc186ad7SThomas Gleixner debug_object_activate(work, &work_debug_descr); 122dc186ad7SThomas Gleixner return 0; 123dc186ad7SThomas Gleixner } 124dc186ad7SThomas Gleixner WARN_ON_ONCE(1); 125dc186ad7SThomas Gleixner return 0; 126dc186ad7SThomas Gleixner 127dc186ad7SThomas Gleixner case ODEBUG_STATE_ACTIVE: 128dc186ad7SThomas Gleixner WARN_ON(1); 129dc186ad7SThomas Gleixner 130dc186ad7SThomas Gleixner default: 131dc186ad7SThomas Gleixner return 0; 132dc186ad7SThomas Gleixner } 133dc186ad7SThomas Gleixner } 134dc186ad7SThomas Gleixner 135dc186ad7SThomas Gleixner /* 136dc186ad7SThomas Gleixner * fixup_free is called when: 137dc186ad7SThomas Gleixner * - an active object is freed 138dc186ad7SThomas Gleixner */ 139dc186ad7SThomas Gleixner static int work_fixup_free(void *addr, enum debug_obj_state state) 140dc186ad7SThomas Gleixner { 141dc186ad7SThomas Gleixner struct work_struct *work = addr; 142dc186ad7SThomas Gleixner 143dc186ad7SThomas Gleixner switch (state) { 144dc186ad7SThomas Gleixner case ODEBUG_STATE_ACTIVE: 145dc186ad7SThomas Gleixner cancel_work_sync(work); 146dc186ad7SThomas Gleixner debug_object_free(work, &work_debug_descr); 147dc186ad7SThomas Gleixner return 1; 148dc186ad7SThomas Gleixner default: 149dc186ad7SThomas Gleixner return 0; 150dc186ad7SThomas Gleixner } 151dc186ad7SThomas Gleixner } 152dc186ad7SThomas Gleixner 153dc186ad7SThomas Gleixner static struct debug_obj_descr work_debug_descr = { 154dc186ad7SThomas Gleixner .name = "work_struct", 155dc186ad7SThomas Gleixner .fixup_init = work_fixup_init, 156dc186ad7SThomas Gleixner .fixup_activate = work_fixup_activate, 157dc186ad7SThomas Gleixner .fixup_free = work_fixup_free, 158dc186ad7SThomas Gleixner }; 159dc186ad7SThomas Gleixner 160dc186ad7SThomas Gleixner static inline void debug_work_activate(struct work_struct *work) 161dc186ad7SThomas Gleixner { 162dc186ad7SThomas Gleixner debug_object_activate(work, &work_debug_descr); 163dc186ad7SThomas Gleixner } 164dc186ad7SThomas Gleixner 165dc186ad7SThomas Gleixner static inline void debug_work_deactivate(struct work_struct *work) 166dc186ad7SThomas Gleixner { 167dc186ad7SThomas Gleixner debug_object_deactivate(work, &work_debug_descr); 168dc186ad7SThomas Gleixner } 169dc186ad7SThomas Gleixner 170dc186ad7SThomas Gleixner void __init_work(struct work_struct *work, int onstack) 171dc186ad7SThomas Gleixner { 172dc186ad7SThomas Gleixner if (onstack) 173dc186ad7SThomas Gleixner debug_object_init_on_stack(work, &work_debug_descr); 174dc186ad7SThomas Gleixner else 175dc186ad7SThomas Gleixner debug_object_init(work, &work_debug_descr); 176dc186ad7SThomas Gleixner } 177dc186ad7SThomas Gleixner EXPORT_SYMBOL_GPL(__init_work); 178dc186ad7SThomas Gleixner 179dc186ad7SThomas Gleixner void destroy_work_on_stack(struct work_struct *work) 180dc186ad7SThomas Gleixner { 181dc186ad7SThomas Gleixner debug_object_free(work, &work_debug_descr); 182dc186ad7SThomas Gleixner } 183dc186ad7SThomas Gleixner EXPORT_SYMBOL_GPL(destroy_work_on_stack); 184dc186ad7SThomas Gleixner 185dc186ad7SThomas Gleixner #else 186dc186ad7SThomas Gleixner static inline void debug_work_activate(struct work_struct *work) { } 187dc186ad7SThomas Gleixner static inline void debug_work_deactivate(struct work_struct *work) { } 188dc186ad7SThomas Gleixner #endif 189dc186ad7SThomas Gleixner 19095402b38SGautham R Shenoy /* Serializes the accesses to the list of workqueues. */ 19195402b38SGautham R Shenoy static DEFINE_SPINLOCK(workqueue_lock); 1921da177e4SLinus Torvalds static LIST_HEAD(workqueues); 1931da177e4SLinus Torvalds 1943af24433SOleg Nesterov static int singlethread_cpu __read_mostly; 195e7577c50SRusty Russell static const struct cpumask *cpu_singlethread_map __read_mostly; 19614441960SOleg Nesterov /* 19714441960SOleg Nesterov * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD 19814441960SOleg Nesterov * flushes cwq->worklist. This means that flush_workqueue/wait_on_work 19914441960SOleg Nesterov * which comes in between can't use for_each_online_cpu(). We could 20014441960SOleg Nesterov * use cpu_possible_map, the cpumask below is more a documentation 20114441960SOleg Nesterov * than optimization. 20214441960SOleg Nesterov */ 203e7577c50SRusty Russell static cpumask_var_t cpu_populated_map __read_mostly; 204f756d5e2SNathan Lynch 2051da177e4SLinus Torvalds /* If it's single threaded, it isn't in the list of workqueues. */ 2066cc88bc4SDavid Howells static inline int is_wq_single_threaded(struct workqueue_struct *wq) 2071da177e4SLinus Torvalds { 208cce1a165SOleg Nesterov return wq->singlethread; 2091da177e4SLinus Torvalds } 2101da177e4SLinus Torvalds 211e7577c50SRusty Russell static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq) 212b1f4ec17SOleg Nesterov { 2136cc88bc4SDavid Howells return is_wq_single_threaded(wq) 214e7577c50SRusty Russell ? cpu_singlethread_map : cpu_populated_map; 215b1f4ec17SOleg Nesterov } 216b1f4ec17SOleg Nesterov 2174690c4abSTejun Heo static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, 2184690c4abSTejun Heo struct workqueue_struct *wq) 219a848e3b6SOleg Nesterov { 2206cc88bc4SDavid Howells if (unlikely(is_wq_single_threaded(wq))) 221a848e3b6SOleg Nesterov cpu = singlethread_cpu; 222a848e3b6SOleg Nesterov return per_cpu_ptr(wq->cpu_wq, cpu); 223a848e3b6SOleg Nesterov } 224a848e3b6SOleg Nesterov 2254594bf15SDavid Howells /* 2264594bf15SDavid Howells * Set the workqueue on which a work item is to be run 2274594bf15SDavid Howells * - Must *only* be called if the pending flag is set 2284594bf15SDavid Howells */ 229ed7c0feeSOleg Nesterov static inline void set_wq_data(struct work_struct *work, 2304690c4abSTejun Heo struct cpu_workqueue_struct *cwq, 2314690c4abSTejun Heo unsigned long extra_flags) 232365970a1SDavid Howells { 2334594bf15SDavid Howells BUG_ON(!work_pending(work)); 2344594bf15SDavid Howells 2354690c4abSTejun Heo atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) | 2364690c4abSTejun Heo (1UL << WORK_STRUCT_PENDING) | extra_flags); 237365970a1SDavid Howells } 238365970a1SDavid Howells 2394d707b9fSOleg Nesterov /* 2404d707b9fSOleg Nesterov * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued. 2414d707b9fSOleg Nesterov */ 2424d707b9fSOleg Nesterov static inline void clear_wq_data(struct work_struct *work) 2434d707b9fSOleg Nesterov { 2444690c4abSTejun Heo atomic_long_set(&work->data, work_static(work)); 2454d707b9fSOleg Nesterov } 2464d707b9fSOleg Nesterov 247ed7c0feeSOleg Nesterov static inline 248ed7c0feeSOleg Nesterov struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) 249365970a1SDavid Howells { 250a08727baSLinus Torvalds return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK); 251365970a1SDavid Howells } 252365970a1SDavid Howells 2534690c4abSTejun Heo /** 2544690c4abSTejun Heo * insert_work - insert a work into cwq 2554690c4abSTejun Heo * @cwq: cwq @work belongs to 2564690c4abSTejun Heo * @work: work to insert 2574690c4abSTejun Heo * @head: insertion point 2584690c4abSTejun Heo * @extra_flags: extra WORK_STRUCT_* flags to set 2594690c4abSTejun Heo * 2604690c4abSTejun Heo * Insert @work into @cwq after @head. 2614690c4abSTejun Heo * 2624690c4abSTejun Heo * CONTEXT: 2634690c4abSTejun Heo * spin_lock_irq(cwq->lock). 2644690c4abSTejun Heo */ 265b89deed3SOleg Nesterov static void insert_work(struct cpu_workqueue_struct *cwq, 2664690c4abSTejun Heo struct work_struct *work, struct list_head *head, 2674690c4abSTejun Heo unsigned int extra_flags) 268b89deed3SOleg Nesterov { 269e1d8aa9fSFrederic Weisbecker trace_workqueue_insertion(cwq->thread, work); 270e1d8aa9fSFrederic Weisbecker 2714690c4abSTejun Heo /* we own @work, set data and link */ 2724690c4abSTejun Heo set_wq_data(work, cwq, extra_flags); 2734690c4abSTejun Heo 2746e84d644SOleg Nesterov /* 2756e84d644SOleg Nesterov * Ensure that we get the right work->data if we see the 2766e84d644SOleg Nesterov * result of list_add() below, see try_to_grab_pending(). 2776e84d644SOleg Nesterov */ 2786e84d644SOleg Nesterov smp_wmb(); 2794690c4abSTejun Heo 2801a4d9b0aSOleg Nesterov list_add_tail(&work->entry, head); 281b89deed3SOleg Nesterov wake_up(&cwq->more_work); 282b89deed3SOleg Nesterov } 283b89deed3SOleg Nesterov 2844690c4abSTejun Heo static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, 2851da177e4SLinus Torvalds struct work_struct *work) 2861da177e4SLinus Torvalds { 2874690c4abSTejun Heo struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 2881da177e4SLinus Torvalds unsigned long flags; 2891da177e4SLinus Torvalds 290dc186ad7SThomas Gleixner debug_work_activate(work); 2911da177e4SLinus Torvalds spin_lock_irqsave(&cwq->lock, flags); 2924690c4abSTejun Heo BUG_ON(!list_empty(&work->entry)); 2934690c4abSTejun Heo insert_work(cwq, work, &cwq->worklist, 0); 2941da177e4SLinus Torvalds spin_unlock_irqrestore(&cwq->lock, flags); 2951da177e4SLinus Torvalds } 2961da177e4SLinus Torvalds 2970fcb78c2SRolf Eike Beer /** 2980fcb78c2SRolf Eike Beer * queue_work - queue work on a workqueue 2990fcb78c2SRolf Eike Beer * @wq: workqueue to use 3000fcb78c2SRolf Eike Beer * @work: work to queue 3010fcb78c2SRolf Eike Beer * 302057647fcSAlan Stern * Returns 0 if @work was already on a queue, non-zero otherwise. 3031da177e4SLinus Torvalds * 30400dfcaf7SOleg Nesterov * We queue the work to the CPU on which it was submitted, but if the CPU dies 30500dfcaf7SOleg Nesterov * it can be processed by another CPU. 3061da177e4SLinus Torvalds */ 3077ad5b3a5SHarvey Harrison int queue_work(struct workqueue_struct *wq, struct work_struct *work) 3081da177e4SLinus Torvalds { 309ef1ca236SOleg Nesterov int ret; 3101da177e4SLinus Torvalds 311ef1ca236SOleg Nesterov ret = queue_work_on(get_cpu(), wq, work); 312a848e3b6SOleg Nesterov put_cpu(); 313ef1ca236SOleg Nesterov 3141da177e4SLinus Torvalds return ret; 3151da177e4SLinus Torvalds } 316ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_work); 3171da177e4SLinus Torvalds 318c1a220e7SZhang Rui /** 319c1a220e7SZhang Rui * queue_work_on - queue work on specific cpu 320c1a220e7SZhang Rui * @cpu: CPU number to execute work on 321c1a220e7SZhang Rui * @wq: workqueue to use 322c1a220e7SZhang Rui * @work: work to queue 323c1a220e7SZhang Rui * 324c1a220e7SZhang Rui * Returns 0 if @work was already on a queue, non-zero otherwise. 325c1a220e7SZhang Rui * 326c1a220e7SZhang Rui * We queue the work to a specific CPU, the caller must ensure it 327c1a220e7SZhang Rui * can't go away. 328c1a220e7SZhang Rui */ 329c1a220e7SZhang Rui int 330c1a220e7SZhang Rui queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) 331c1a220e7SZhang Rui { 332c1a220e7SZhang Rui int ret = 0; 333c1a220e7SZhang Rui 334c1a220e7SZhang Rui if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { 3354690c4abSTejun Heo __queue_work(cpu, wq, work); 336c1a220e7SZhang Rui ret = 1; 337c1a220e7SZhang Rui } 338c1a220e7SZhang Rui return ret; 339c1a220e7SZhang Rui } 340c1a220e7SZhang Rui EXPORT_SYMBOL_GPL(queue_work_on); 341c1a220e7SZhang Rui 3426d141c3fSLi Zefan static void delayed_work_timer_fn(unsigned long __data) 3431da177e4SLinus Torvalds { 34452bad64dSDavid Howells struct delayed_work *dwork = (struct delayed_work *)__data; 345ed7c0feeSOleg Nesterov struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work); 3461da177e4SLinus Torvalds 3474690c4abSTejun Heo __queue_work(smp_processor_id(), cwq->wq, &dwork->work); 3481da177e4SLinus Torvalds } 3491da177e4SLinus Torvalds 3500fcb78c2SRolf Eike Beer /** 3510fcb78c2SRolf Eike Beer * queue_delayed_work - queue work on a workqueue after delay 3520fcb78c2SRolf Eike Beer * @wq: workqueue to use 353af9997e4SRandy Dunlap * @dwork: delayable work to queue 3540fcb78c2SRolf Eike Beer * @delay: number of jiffies to wait before queueing 3550fcb78c2SRolf Eike Beer * 356057647fcSAlan Stern * Returns 0 if @work was already on a queue, non-zero otherwise. 3570fcb78c2SRolf Eike Beer */ 3587ad5b3a5SHarvey Harrison int queue_delayed_work(struct workqueue_struct *wq, 35952bad64dSDavid Howells struct delayed_work *dwork, unsigned long delay) 3601da177e4SLinus Torvalds { 36152bad64dSDavid Howells if (delay == 0) 36263bc0362SOleg Nesterov return queue_work(wq, &dwork->work); 3631da177e4SLinus Torvalds 36463bc0362SOleg Nesterov return queue_delayed_work_on(-1, wq, dwork, delay); 3651da177e4SLinus Torvalds } 366ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_delayed_work); 3671da177e4SLinus Torvalds 3680fcb78c2SRolf Eike Beer /** 3690fcb78c2SRolf Eike Beer * queue_delayed_work_on - queue work on specific CPU after delay 3700fcb78c2SRolf Eike Beer * @cpu: CPU number to execute work on 3710fcb78c2SRolf Eike Beer * @wq: workqueue to use 372af9997e4SRandy Dunlap * @dwork: work to queue 3730fcb78c2SRolf Eike Beer * @delay: number of jiffies to wait before queueing 3740fcb78c2SRolf Eike Beer * 375057647fcSAlan Stern * Returns 0 if @work was already on a queue, non-zero otherwise. 3760fcb78c2SRolf Eike Beer */ 3777a6bc1cdSVenkatesh Pallipadi int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 37852bad64dSDavid Howells struct delayed_work *dwork, unsigned long delay) 3797a6bc1cdSVenkatesh Pallipadi { 3807a6bc1cdSVenkatesh Pallipadi int ret = 0; 38152bad64dSDavid Howells struct timer_list *timer = &dwork->timer; 38252bad64dSDavid Howells struct work_struct *work = &dwork->work; 3837a6bc1cdSVenkatesh Pallipadi 384a08727baSLinus Torvalds if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { 3857a6bc1cdSVenkatesh Pallipadi BUG_ON(timer_pending(timer)); 3867a6bc1cdSVenkatesh Pallipadi BUG_ON(!list_empty(&work->entry)); 3877a6bc1cdSVenkatesh Pallipadi 3888a3e77ccSAndrew Liu timer_stats_timer_set_start_info(&dwork->timer); 3898a3e77ccSAndrew Liu 390ed7c0feeSOleg Nesterov /* This stores cwq for the moment, for the timer_fn */ 3914690c4abSTejun Heo set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0); 3927a6bc1cdSVenkatesh Pallipadi timer->expires = jiffies + delay; 39352bad64dSDavid Howells timer->data = (unsigned long)dwork; 3947a6bc1cdSVenkatesh Pallipadi timer->function = delayed_work_timer_fn; 39563bc0362SOleg Nesterov 39663bc0362SOleg Nesterov if (unlikely(cpu >= 0)) 3977a6bc1cdSVenkatesh Pallipadi add_timer_on(timer, cpu); 39863bc0362SOleg Nesterov else 39963bc0362SOleg Nesterov add_timer(timer); 4007a6bc1cdSVenkatesh Pallipadi ret = 1; 4017a6bc1cdSVenkatesh Pallipadi } 4027a6bc1cdSVenkatesh Pallipadi return ret; 4037a6bc1cdSVenkatesh Pallipadi } 404ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_delayed_work_on); 4051da177e4SLinus Torvalds 406858119e1SArjan van de Ven static void run_workqueue(struct cpu_workqueue_struct *cwq) 4071da177e4SLinus Torvalds { 408f293ea92SOleg Nesterov spin_lock_irq(&cwq->lock); 4091da177e4SLinus Torvalds while (!list_empty(&cwq->worklist)) { 4101da177e4SLinus Torvalds struct work_struct *work = list_entry(cwq->worklist.next, 4111da177e4SLinus Torvalds struct work_struct, entry); 4126bb49e59SDavid Howells work_func_t f = work->func; 4134e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP 4144e6045f1SJohannes Berg /* 4154e6045f1SJohannes Berg * It is permissible to free the struct work_struct 4164e6045f1SJohannes Berg * from inside the function that is called from it, 4174e6045f1SJohannes Berg * this we need to take into account for lockdep too. 4184e6045f1SJohannes Berg * To avoid bogus "held lock freed" warnings as well 4194e6045f1SJohannes Berg * as problems when looking into work->lockdep_map, 4204e6045f1SJohannes Berg * make a copy and use that here. 4214e6045f1SJohannes Berg */ 4224e6045f1SJohannes Berg struct lockdep_map lockdep_map = work->lockdep_map; 4234e6045f1SJohannes Berg #endif 424e1d8aa9fSFrederic Weisbecker trace_workqueue_execution(cwq->thread, work); 425dc186ad7SThomas Gleixner debug_work_deactivate(work); 426b89deed3SOleg Nesterov cwq->current_work = work; 4271da177e4SLinus Torvalds list_del_init(cwq->worklist.next); 428f293ea92SOleg Nesterov spin_unlock_irq(&cwq->lock); 4291da177e4SLinus Torvalds 430365970a1SDavid Howells BUG_ON(get_wq_data(work) != cwq); 43123b2e599SOleg Nesterov work_clear_pending(work); 4323295f0efSIngo Molnar lock_map_acquire(&cwq->wq->lockdep_map); 4333295f0efSIngo Molnar lock_map_acquire(&lockdep_map); 43465f27f38SDavid Howells f(work); 4353295f0efSIngo Molnar lock_map_release(&lockdep_map); 4363295f0efSIngo Molnar lock_map_release(&cwq->wq->lockdep_map); 4371da177e4SLinus Torvalds 438d5abe669SPeter Zijlstra if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 439d5abe669SPeter Zijlstra printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " 440d5abe669SPeter Zijlstra "%s/0x%08x/%d\n", 441d5abe669SPeter Zijlstra current->comm, preempt_count(), 442ba25f9dcSPavel Emelyanov task_pid_nr(current)); 443d5abe669SPeter Zijlstra printk(KERN_ERR " last function: "); 444d5abe669SPeter Zijlstra print_symbol("%s\n", (unsigned long)f); 445d5abe669SPeter Zijlstra debug_show_held_locks(current); 446d5abe669SPeter Zijlstra dump_stack(); 447d5abe669SPeter Zijlstra } 448d5abe669SPeter Zijlstra 449f293ea92SOleg Nesterov spin_lock_irq(&cwq->lock); 450b89deed3SOleg Nesterov cwq->current_work = NULL; 4511da177e4SLinus Torvalds } 452f293ea92SOleg Nesterov spin_unlock_irq(&cwq->lock); 4531da177e4SLinus Torvalds } 4541da177e4SLinus Torvalds 4554690c4abSTejun Heo /** 4564690c4abSTejun Heo * worker_thread - the worker thread function 4574690c4abSTejun Heo * @__cwq: cwq to serve 4584690c4abSTejun Heo * 4594690c4abSTejun Heo * The cwq worker thread function. 4604690c4abSTejun Heo */ 4611da177e4SLinus Torvalds static int worker_thread(void *__cwq) 4621da177e4SLinus Torvalds { 4631da177e4SLinus Torvalds struct cpu_workqueue_struct *cwq = __cwq; 4643af24433SOleg Nesterov DEFINE_WAIT(wait); 4651da177e4SLinus Torvalds 46683144186SRafael J. Wysocki if (cwq->wq->freezeable) 46783144186SRafael J. Wysocki set_freezable(); 4681da177e4SLinus Torvalds 4693af24433SOleg Nesterov for (;;) { 4703af24433SOleg Nesterov prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE); 47114441960SOleg Nesterov if (!freezing(current) && 47214441960SOleg Nesterov !kthread_should_stop() && 47314441960SOleg Nesterov list_empty(&cwq->worklist)) 4741da177e4SLinus Torvalds schedule(); 4753af24433SOleg Nesterov finish_wait(&cwq->more_work, &wait); 4761da177e4SLinus Torvalds 47785f4186aSOleg Nesterov try_to_freeze(); 47885f4186aSOleg Nesterov 47914441960SOleg Nesterov if (kthread_should_stop()) 4803af24433SOleg Nesterov break; 4813af24433SOleg Nesterov 4821da177e4SLinus Torvalds run_workqueue(cwq); 4831da177e4SLinus Torvalds } 4843af24433SOleg Nesterov 4851da177e4SLinus Torvalds return 0; 4861da177e4SLinus Torvalds } 4871da177e4SLinus Torvalds 488fc2e4d70SOleg Nesterov struct wq_barrier { 489fc2e4d70SOleg Nesterov struct work_struct work; 490fc2e4d70SOleg Nesterov struct completion done; 491fc2e4d70SOleg Nesterov }; 492fc2e4d70SOleg Nesterov 493fc2e4d70SOleg Nesterov static void wq_barrier_func(struct work_struct *work) 494fc2e4d70SOleg Nesterov { 495fc2e4d70SOleg Nesterov struct wq_barrier *barr = container_of(work, struct wq_barrier, work); 496fc2e4d70SOleg Nesterov complete(&barr->done); 497fc2e4d70SOleg Nesterov } 498fc2e4d70SOleg Nesterov 4994690c4abSTejun Heo /** 5004690c4abSTejun Heo * insert_wq_barrier - insert a barrier work 5014690c4abSTejun Heo * @cwq: cwq to insert barrier into 5024690c4abSTejun Heo * @barr: wq_barrier to insert 5034690c4abSTejun Heo * @head: insertion point 5044690c4abSTejun Heo * 5054690c4abSTejun Heo * Insert barrier @barr into @cwq before @head. 5064690c4abSTejun Heo * 5074690c4abSTejun Heo * CONTEXT: 5084690c4abSTejun Heo * spin_lock_irq(cwq->lock). 5094690c4abSTejun Heo */ 51083c22520SOleg Nesterov static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, 5111a4d9b0aSOleg Nesterov struct wq_barrier *barr, struct list_head *head) 512fc2e4d70SOleg Nesterov { 513dc186ad7SThomas Gleixner /* 514dc186ad7SThomas Gleixner * debugobject calls are safe here even with cwq->lock locked 515dc186ad7SThomas Gleixner * as we know for sure that this will not trigger any of the 516dc186ad7SThomas Gleixner * checks and call back into the fixup functions where we 517dc186ad7SThomas Gleixner * might deadlock. 518dc186ad7SThomas Gleixner */ 519dc186ad7SThomas Gleixner INIT_WORK_ON_STACK(&barr->work, wq_barrier_func); 520fc2e4d70SOleg Nesterov __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work)); 521fc2e4d70SOleg Nesterov init_completion(&barr->done); 52283c22520SOleg Nesterov 523dc186ad7SThomas Gleixner debug_work_activate(&barr->work); 5244690c4abSTejun Heo insert_work(cwq, &barr->work, head, 0); 525fc2e4d70SOleg Nesterov } 526fc2e4d70SOleg Nesterov 52714441960SOleg Nesterov static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) 5281da177e4SLinus Torvalds { 5292355b70fSLai Jiangshan int active = 0; 530fc2e4d70SOleg Nesterov struct wq_barrier barr; 5311da177e4SLinus Torvalds 5322355b70fSLai Jiangshan WARN_ON(cwq->thread == current); 5332355b70fSLai Jiangshan 53483c22520SOleg Nesterov spin_lock_irq(&cwq->lock); 53583c22520SOleg Nesterov if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) { 5361a4d9b0aSOleg Nesterov insert_wq_barrier(cwq, &barr, &cwq->worklist); 53783c22520SOleg Nesterov active = 1; 53883c22520SOleg Nesterov } 53983c22520SOleg Nesterov spin_unlock_irq(&cwq->lock); 5401da177e4SLinus Torvalds 541dc186ad7SThomas Gleixner if (active) { 542fc2e4d70SOleg Nesterov wait_for_completion(&barr.done); 543dc186ad7SThomas Gleixner destroy_work_on_stack(&barr.work); 544dc186ad7SThomas Gleixner } 54514441960SOleg Nesterov 54614441960SOleg Nesterov return active; 54783c22520SOleg Nesterov } 5481da177e4SLinus Torvalds 5490fcb78c2SRolf Eike Beer /** 5501da177e4SLinus Torvalds * flush_workqueue - ensure that any scheduled work has run to completion. 5510fcb78c2SRolf Eike Beer * @wq: workqueue to flush 5521da177e4SLinus Torvalds * 5531da177e4SLinus Torvalds * Forces execution of the workqueue and blocks until its completion. 5541da177e4SLinus Torvalds * This is typically used in driver shutdown handlers. 5551da177e4SLinus Torvalds * 556fc2e4d70SOleg Nesterov * We sleep until all works which were queued on entry have been handled, 557fc2e4d70SOleg Nesterov * but we are not livelocked by new incoming ones. 5581da177e4SLinus Torvalds */ 5597ad5b3a5SHarvey Harrison void flush_workqueue(struct workqueue_struct *wq) 5601da177e4SLinus Torvalds { 561e7577c50SRusty Russell const struct cpumask *cpu_map = wq_cpu_map(wq); 562cce1a165SOleg Nesterov int cpu; 563b1f4ec17SOleg Nesterov 564f293ea92SOleg Nesterov might_sleep(); 5653295f0efSIngo Molnar lock_map_acquire(&wq->lockdep_map); 5663295f0efSIngo Molnar lock_map_release(&wq->lockdep_map); 567aa85ea5bSRusty Russell for_each_cpu(cpu, cpu_map) 56889ada679SChristoph Lameter flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); 5691da177e4SLinus Torvalds } 570ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(flush_workqueue); 5711da177e4SLinus Torvalds 572db700897SOleg Nesterov /** 573db700897SOleg Nesterov * flush_work - block until a work_struct's callback has terminated 574db700897SOleg Nesterov * @work: the work which is to be flushed 575db700897SOleg Nesterov * 576a67da70dSOleg Nesterov * Returns false if @work has already terminated. 577a67da70dSOleg Nesterov * 578db700897SOleg Nesterov * It is expected that, prior to calling flush_work(), the caller has 579db700897SOleg Nesterov * arranged for the work to not be requeued, otherwise it doesn't make 580db700897SOleg Nesterov * sense to use this function. 581db700897SOleg Nesterov */ 582db700897SOleg Nesterov int flush_work(struct work_struct *work) 583db700897SOleg Nesterov { 584db700897SOleg Nesterov struct cpu_workqueue_struct *cwq; 585db700897SOleg Nesterov struct list_head *prev; 586db700897SOleg Nesterov struct wq_barrier barr; 587db700897SOleg Nesterov 588db700897SOleg Nesterov might_sleep(); 589db700897SOleg Nesterov cwq = get_wq_data(work); 590db700897SOleg Nesterov if (!cwq) 591db700897SOleg Nesterov return 0; 592db700897SOleg Nesterov 5933295f0efSIngo Molnar lock_map_acquire(&cwq->wq->lockdep_map); 5943295f0efSIngo Molnar lock_map_release(&cwq->wq->lockdep_map); 595a67da70dSOleg Nesterov 596db700897SOleg Nesterov spin_lock_irq(&cwq->lock); 597db700897SOleg Nesterov if (!list_empty(&work->entry)) { 598db700897SOleg Nesterov /* 599db700897SOleg Nesterov * See the comment near try_to_grab_pending()->smp_rmb(). 600db700897SOleg Nesterov * If it was re-queued under us we are not going to wait. 601db700897SOleg Nesterov */ 602db700897SOleg Nesterov smp_rmb(); 603db700897SOleg Nesterov if (unlikely(cwq != get_wq_data(work))) 6044690c4abSTejun Heo goto already_gone; 605db700897SOleg Nesterov prev = &work->entry; 606db700897SOleg Nesterov } else { 607db700897SOleg Nesterov if (cwq->current_work != work) 6084690c4abSTejun Heo goto already_gone; 609db700897SOleg Nesterov prev = &cwq->worklist; 610db700897SOleg Nesterov } 611db700897SOleg Nesterov insert_wq_barrier(cwq, &barr, prev->next); 612db700897SOleg Nesterov 6134690c4abSTejun Heo spin_unlock_irq(&cwq->lock); 614db700897SOleg Nesterov wait_for_completion(&barr.done); 615dc186ad7SThomas Gleixner destroy_work_on_stack(&barr.work); 616db700897SOleg Nesterov return 1; 6174690c4abSTejun Heo already_gone: 6184690c4abSTejun Heo spin_unlock_irq(&cwq->lock); 6194690c4abSTejun Heo return 0; 620db700897SOleg Nesterov } 621db700897SOleg Nesterov EXPORT_SYMBOL_GPL(flush_work); 622db700897SOleg Nesterov 6236e84d644SOleg Nesterov /* 6241f1f642eSOleg Nesterov * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit, 6256e84d644SOleg Nesterov * so this work can't be re-armed in any way. 6266e84d644SOleg Nesterov */ 6276e84d644SOleg Nesterov static int try_to_grab_pending(struct work_struct *work) 6286e84d644SOleg Nesterov { 6296e84d644SOleg Nesterov struct cpu_workqueue_struct *cwq; 6301f1f642eSOleg Nesterov int ret = -1; 6316e84d644SOleg Nesterov 6326e84d644SOleg Nesterov if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) 6331f1f642eSOleg Nesterov return 0; 6346e84d644SOleg Nesterov 6356e84d644SOleg Nesterov /* 6366e84d644SOleg Nesterov * The queueing is in progress, or it is already queued. Try to 6376e84d644SOleg Nesterov * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 6386e84d644SOleg Nesterov */ 6396e84d644SOleg Nesterov 6406e84d644SOleg Nesterov cwq = get_wq_data(work); 6416e84d644SOleg Nesterov if (!cwq) 6426e84d644SOleg Nesterov return ret; 6436e84d644SOleg Nesterov 6446e84d644SOleg Nesterov spin_lock_irq(&cwq->lock); 6456e84d644SOleg Nesterov if (!list_empty(&work->entry)) { 6466e84d644SOleg Nesterov /* 6476e84d644SOleg Nesterov * This work is queued, but perhaps we locked the wrong cwq. 6486e84d644SOleg Nesterov * In that case we must see the new value after rmb(), see 6496e84d644SOleg Nesterov * insert_work()->wmb(). 6506e84d644SOleg Nesterov */ 6516e84d644SOleg Nesterov smp_rmb(); 6526e84d644SOleg Nesterov if (cwq == get_wq_data(work)) { 653dc186ad7SThomas Gleixner debug_work_deactivate(work); 6546e84d644SOleg Nesterov list_del_init(&work->entry); 6556e84d644SOleg Nesterov ret = 1; 6566e84d644SOleg Nesterov } 6576e84d644SOleg Nesterov } 6586e84d644SOleg Nesterov spin_unlock_irq(&cwq->lock); 6596e84d644SOleg Nesterov 6606e84d644SOleg Nesterov return ret; 6616e84d644SOleg Nesterov } 6626e84d644SOleg Nesterov 6636e84d644SOleg Nesterov static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq, 664b89deed3SOleg Nesterov struct work_struct *work) 665b89deed3SOleg Nesterov { 666b89deed3SOleg Nesterov struct wq_barrier barr; 667b89deed3SOleg Nesterov int running = 0; 668b89deed3SOleg Nesterov 669b89deed3SOleg Nesterov spin_lock_irq(&cwq->lock); 670b89deed3SOleg Nesterov if (unlikely(cwq->current_work == work)) { 6711a4d9b0aSOleg Nesterov insert_wq_barrier(cwq, &barr, cwq->worklist.next); 672b89deed3SOleg Nesterov running = 1; 673b89deed3SOleg Nesterov } 674b89deed3SOleg Nesterov spin_unlock_irq(&cwq->lock); 675b89deed3SOleg Nesterov 676dc186ad7SThomas Gleixner if (unlikely(running)) { 677b89deed3SOleg Nesterov wait_for_completion(&barr.done); 678dc186ad7SThomas Gleixner destroy_work_on_stack(&barr.work); 679dc186ad7SThomas Gleixner } 680b89deed3SOleg Nesterov } 681b89deed3SOleg Nesterov 6826e84d644SOleg Nesterov static void wait_on_work(struct work_struct *work) 683b89deed3SOleg Nesterov { 684b89deed3SOleg Nesterov struct cpu_workqueue_struct *cwq; 68528e53bddSOleg Nesterov struct workqueue_struct *wq; 686e7577c50SRusty Russell const struct cpumask *cpu_map; 687b1f4ec17SOleg Nesterov int cpu; 688b89deed3SOleg Nesterov 689f293ea92SOleg Nesterov might_sleep(); 690f293ea92SOleg Nesterov 6913295f0efSIngo Molnar lock_map_acquire(&work->lockdep_map); 6923295f0efSIngo Molnar lock_map_release(&work->lockdep_map); 6934e6045f1SJohannes Berg 694b89deed3SOleg Nesterov cwq = get_wq_data(work); 695b89deed3SOleg Nesterov if (!cwq) 6963af24433SOleg Nesterov return; 697b89deed3SOleg Nesterov 69828e53bddSOleg Nesterov wq = cwq->wq; 69928e53bddSOleg Nesterov cpu_map = wq_cpu_map(wq); 70028e53bddSOleg Nesterov 701aa85ea5bSRusty Russell for_each_cpu(cpu, cpu_map) 7024690c4abSTejun Heo wait_on_cpu_work(get_cwq(cpu, wq), work); 7036e84d644SOleg Nesterov } 7046e84d644SOleg Nesterov 7051f1f642eSOleg Nesterov static int __cancel_work_timer(struct work_struct *work, 7061f1f642eSOleg Nesterov struct timer_list* timer) 7071f1f642eSOleg Nesterov { 7081f1f642eSOleg Nesterov int ret; 7091f1f642eSOleg Nesterov 7101f1f642eSOleg Nesterov do { 7111f1f642eSOleg Nesterov ret = (timer && likely(del_timer(timer))); 7121f1f642eSOleg Nesterov if (!ret) 7131f1f642eSOleg Nesterov ret = try_to_grab_pending(work); 7141f1f642eSOleg Nesterov wait_on_work(work); 7151f1f642eSOleg Nesterov } while (unlikely(ret < 0)); 7161f1f642eSOleg Nesterov 7174d707b9fSOleg Nesterov clear_wq_data(work); 7181f1f642eSOleg Nesterov return ret; 7191f1f642eSOleg Nesterov } 7201f1f642eSOleg Nesterov 7216e84d644SOleg Nesterov /** 7226e84d644SOleg Nesterov * cancel_work_sync - block until a work_struct's callback has terminated 7236e84d644SOleg Nesterov * @work: the work which is to be flushed 7246e84d644SOleg Nesterov * 7251f1f642eSOleg Nesterov * Returns true if @work was pending. 7261f1f642eSOleg Nesterov * 7276e84d644SOleg Nesterov * cancel_work_sync() will cancel the work if it is queued. If the work's 7286e84d644SOleg Nesterov * callback appears to be running, cancel_work_sync() will block until it 7296e84d644SOleg Nesterov * has completed. 7306e84d644SOleg Nesterov * 7316e84d644SOleg Nesterov * It is possible to use this function if the work re-queues itself. It can 7326e84d644SOleg Nesterov * cancel the work even if it migrates to another workqueue, however in that 7336e84d644SOleg Nesterov * case it only guarantees that work->func() has completed on the last queued 7346e84d644SOleg Nesterov * workqueue. 7356e84d644SOleg Nesterov * 7366e84d644SOleg Nesterov * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not 7376e84d644SOleg Nesterov * pending, otherwise it goes into a busy-wait loop until the timer expires. 7386e84d644SOleg Nesterov * 7396e84d644SOleg Nesterov * The caller must ensure that workqueue_struct on which this work was last 7406e84d644SOleg Nesterov * queued can't be destroyed before this function returns. 7416e84d644SOleg Nesterov */ 7421f1f642eSOleg Nesterov int cancel_work_sync(struct work_struct *work) 7436e84d644SOleg Nesterov { 7441f1f642eSOleg Nesterov return __cancel_work_timer(work, NULL); 745b89deed3SOleg Nesterov } 74628e53bddSOleg Nesterov EXPORT_SYMBOL_GPL(cancel_work_sync); 747b89deed3SOleg Nesterov 7486e84d644SOleg Nesterov /** 749f5a421a4SOleg Nesterov * cancel_delayed_work_sync - reliably kill off a delayed work. 7506e84d644SOleg Nesterov * @dwork: the delayed work struct 7516e84d644SOleg Nesterov * 7521f1f642eSOleg Nesterov * Returns true if @dwork was pending. 7531f1f642eSOleg Nesterov * 7546e84d644SOleg Nesterov * It is possible to use this function if @dwork rearms itself via queue_work() 7556e84d644SOleg Nesterov * or queue_delayed_work(). See also the comment for cancel_work_sync(). 7566e84d644SOleg Nesterov */ 7571f1f642eSOleg Nesterov int cancel_delayed_work_sync(struct delayed_work *dwork) 7586e84d644SOleg Nesterov { 7591f1f642eSOleg Nesterov return __cancel_work_timer(&dwork->work, &dwork->timer); 7606e84d644SOleg Nesterov } 761f5a421a4SOleg Nesterov EXPORT_SYMBOL(cancel_delayed_work_sync); 7621da177e4SLinus Torvalds 7636e84d644SOleg Nesterov static struct workqueue_struct *keventd_wq __read_mostly; 7641da177e4SLinus Torvalds 7650fcb78c2SRolf Eike Beer /** 7660fcb78c2SRolf Eike Beer * schedule_work - put work task in global workqueue 7670fcb78c2SRolf Eike Beer * @work: job to be done 7680fcb78c2SRolf Eike Beer * 7695b0f437dSBart Van Assche * Returns zero if @work was already on the kernel-global workqueue and 7705b0f437dSBart Van Assche * non-zero otherwise. 7715b0f437dSBart Van Assche * 7725b0f437dSBart Van Assche * This puts a job in the kernel-global workqueue if it was not already 7735b0f437dSBart Van Assche * queued and leaves it in the same position on the kernel-global 7745b0f437dSBart Van Assche * workqueue otherwise. 7750fcb78c2SRolf Eike Beer */ 7767ad5b3a5SHarvey Harrison int schedule_work(struct work_struct *work) 7771da177e4SLinus Torvalds { 7781da177e4SLinus Torvalds return queue_work(keventd_wq, work); 7791da177e4SLinus Torvalds } 780ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_work); 7811da177e4SLinus Torvalds 782c1a220e7SZhang Rui /* 783c1a220e7SZhang Rui * schedule_work_on - put work task on a specific cpu 784c1a220e7SZhang Rui * @cpu: cpu to put the work task on 785c1a220e7SZhang Rui * @work: job to be done 786c1a220e7SZhang Rui * 787c1a220e7SZhang Rui * This puts a job on a specific cpu 788c1a220e7SZhang Rui */ 789c1a220e7SZhang Rui int schedule_work_on(int cpu, struct work_struct *work) 790c1a220e7SZhang Rui { 791c1a220e7SZhang Rui return queue_work_on(cpu, keventd_wq, work); 792c1a220e7SZhang Rui } 793c1a220e7SZhang Rui EXPORT_SYMBOL(schedule_work_on); 794c1a220e7SZhang Rui 7950fcb78c2SRolf Eike Beer /** 7960fcb78c2SRolf Eike Beer * schedule_delayed_work - put work task in global workqueue after delay 79752bad64dSDavid Howells * @dwork: job to be done 79852bad64dSDavid Howells * @delay: number of jiffies to wait or 0 for immediate execution 7990fcb78c2SRolf Eike Beer * 8000fcb78c2SRolf Eike Beer * After waiting for a given time this puts a job in the kernel-global 8010fcb78c2SRolf Eike Beer * workqueue. 8020fcb78c2SRolf Eike Beer */ 8037ad5b3a5SHarvey Harrison int schedule_delayed_work(struct delayed_work *dwork, 80482f67cd9SIngo Molnar unsigned long delay) 8051da177e4SLinus Torvalds { 80652bad64dSDavid Howells return queue_delayed_work(keventd_wq, dwork, delay); 8071da177e4SLinus Torvalds } 808ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work); 8091da177e4SLinus Torvalds 8100fcb78c2SRolf Eike Beer /** 8118c53e463SLinus Torvalds * flush_delayed_work - block until a dwork_struct's callback has terminated 8128c53e463SLinus Torvalds * @dwork: the delayed work which is to be flushed 8138c53e463SLinus Torvalds * 8148c53e463SLinus Torvalds * Any timeout is cancelled, and any pending work is run immediately. 8158c53e463SLinus Torvalds */ 8168c53e463SLinus Torvalds void flush_delayed_work(struct delayed_work *dwork) 8178c53e463SLinus Torvalds { 8188c53e463SLinus Torvalds if (del_timer_sync(&dwork->timer)) { 8194690c4abSTejun Heo __queue_work(get_cpu(), get_wq_data(&dwork->work)->wq, 8204690c4abSTejun Heo &dwork->work); 8218c53e463SLinus Torvalds put_cpu(); 8228c53e463SLinus Torvalds } 8238c53e463SLinus Torvalds flush_work(&dwork->work); 8248c53e463SLinus Torvalds } 8258c53e463SLinus Torvalds EXPORT_SYMBOL(flush_delayed_work); 8268c53e463SLinus Torvalds 8278c53e463SLinus Torvalds /** 8280fcb78c2SRolf Eike Beer * schedule_delayed_work_on - queue work in global workqueue on CPU after delay 8290fcb78c2SRolf Eike Beer * @cpu: cpu to use 83052bad64dSDavid Howells * @dwork: job to be done 8310fcb78c2SRolf Eike Beer * @delay: number of jiffies to wait 8320fcb78c2SRolf Eike Beer * 8330fcb78c2SRolf Eike Beer * After waiting for a given time this puts a job in the kernel-global 8340fcb78c2SRolf Eike Beer * workqueue on the specified CPU. 8350fcb78c2SRolf Eike Beer */ 8361da177e4SLinus Torvalds int schedule_delayed_work_on(int cpu, 83752bad64dSDavid Howells struct delayed_work *dwork, unsigned long delay) 8381da177e4SLinus Torvalds { 83952bad64dSDavid Howells return queue_delayed_work_on(cpu, keventd_wq, dwork, delay); 8401da177e4SLinus Torvalds } 841ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work_on); 8421da177e4SLinus Torvalds 843b6136773SAndrew Morton /** 844b6136773SAndrew Morton * schedule_on_each_cpu - call a function on each online CPU from keventd 845b6136773SAndrew Morton * @func: the function to call 846b6136773SAndrew Morton * 847b6136773SAndrew Morton * Returns zero on success. 848b6136773SAndrew Morton * Returns -ve errno on failure. 849b6136773SAndrew Morton * 850b6136773SAndrew Morton * schedule_on_each_cpu() is very slow. 851b6136773SAndrew Morton */ 85265f27f38SDavid Howells int schedule_on_each_cpu(work_func_t func) 85315316ba8SChristoph Lameter { 85415316ba8SChristoph Lameter int cpu; 85565a64464SAndi Kleen int orig = -1; 856b6136773SAndrew Morton struct work_struct *works; 85715316ba8SChristoph Lameter 858b6136773SAndrew Morton works = alloc_percpu(struct work_struct); 859b6136773SAndrew Morton if (!works) 86015316ba8SChristoph Lameter return -ENOMEM; 861b6136773SAndrew Morton 86295402b38SGautham R Shenoy get_online_cpus(); 86393981800STejun Heo 86493981800STejun Heo /* 86593981800STejun Heo * When running in keventd don't schedule a work item on 86693981800STejun Heo * itself. Can just call directly because the work queue is 86793981800STejun Heo * already bound. This also is faster. 86893981800STejun Heo */ 86993981800STejun Heo if (current_is_keventd()) 87093981800STejun Heo orig = raw_smp_processor_id(); 87193981800STejun Heo 87215316ba8SChristoph Lameter for_each_online_cpu(cpu) { 8739bfb1839SIngo Molnar struct work_struct *work = per_cpu_ptr(works, cpu); 8749bfb1839SIngo Molnar 8759bfb1839SIngo Molnar INIT_WORK(work, func); 87693981800STejun Heo if (cpu != orig) 8778de6d308SOleg Nesterov schedule_work_on(cpu, work); 87815316ba8SChristoph Lameter } 87993981800STejun Heo if (orig >= 0) 88093981800STejun Heo func(per_cpu_ptr(works, orig)); 88193981800STejun Heo 88293981800STejun Heo for_each_online_cpu(cpu) 8838616a89aSOleg Nesterov flush_work(per_cpu_ptr(works, cpu)); 88493981800STejun Heo 88595402b38SGautham R Shenoy put_online_cpus(); 886b6136773SAndrew Morton free_percpu(works); 88715316ba8SChristoph Lameter return 0; 88815316ba8SChristoph Lameter } 88915316ba8SChristoph Lameter 890eef6a7d5SAlan Stern /** 891eef6a7d5SAlan Stern * flush_scheduled_work - ensure that any scheduled work has run to completion. 892eef6a7d5SAlan Stern * 893eef6a7d5SAlan Stern * Forces execution of the kernel-global workqueue and blocks until its 894eef6a7d5SAlan Stern * completion. 895eef6a7d5SAlan Stern * 896eef6a7d5SAlan Stern * Think twice before calling this function! It's very easy to get into 897eef6a7d5SAlan Stern * trouble if you don't take great care. Either of the following situations 898eef6a7d5SAlan Stern * will lead to deadlock: 899eef6a7d5SAlan Stern * 900eef6a7d5SAlan Stern * One of the work items currently on the workqueue needs to acquire 901eef6a7d5SAlan Stern * a lock held by your code or its caller. 902eef6a7d5SAlan Stern * 903eef6a7d5SAlan Stern * Your code is running in the context of a work routine. 904eef6a7d5SAlan Stern * 905eef6a7d5SAlan Stern * They will be detected by lockdep when they occur, but the first might not 906eef6a7d5SAlan Stern * occur very often. It depends on what work items are on the workqueue and 907eef6a7d5SAlan Stern * what locks they need, which you have no control over. 908eef6a7d5SAlan Stern * 909eef6a7d5SAlan Stern * In most situations flushing the entire workqueue is overkill; you merely 910eef6a7d5SAlan Stern * need to know that a particular work item isn't queued and isn't running. 911eef6a7d5SAlan Stern * In such cases you should use cancel_delayed_work_sync() or 912eef6a7d5SAlan Stern * cancel_work_sync() instead. 913eef6a7d5SAlan Stern */ 9141da177e4SLinus Torvalds void flush_scheduled_work(void) 9151da177e4SLinus Torvalds { 9161da177e4SLinus Torvalds flush_workqueue(keventd_wq); 9171da177e4SLinus Torvalds } 918ae90dd5dSDave Jones EXPORT_SYMBOL(flush_scheduled_work); 9191da177e4SLinus Torvalds 9201da177e4SLinus Torvalds /** 9211fa44ecaSJames Bottomley * execute_in_process_context - reliably execute the routine with user context 9221fa44ecaSJames Bottomley * @fn: the function to execute 9231fa44ecaSJames Bottomley * @ew: guaranteed storage for the execute work structure (must 9241fa44ecaSJames Bottomley * be available when the work executes) 9251fa44ecaSJames Bottomley * 9261fa44ecaSJames Bottomley * Executes the function immediately if process context is available, 9271fa44ecaSJames Bottomley * otherwise schedules the function for delayed execution. 9281fa44ecaSJames Bottomley * 9291fa44ecaSJames Bottomley * Returns: 0 - function was executed 9301fa44ecaSJames Bottomley * 1 - function was scheduled for execution 9311fa44ecaSJames Bottomley */ 93265f27f38SDavid Howells int execute_in_process_context(work_func_t fn, struct execute_work *ew) 9331fa44ecaSJames Bottomley { 9341fa44ecaSJames Bottomley if (!in_interrupt()) { 93565f27f38SDavid Howells fn(&ew->work); 9361fa44ecaSJames Bottomley return 0; 9371fa44ecaSJames Bottomley } 9381fa44ecaSJames Bottomley 93965f27f38SDavid Howells INIT_WORK(&ew->work, fn); 9401fa44ecaSJames Bottomley schedule_work(&ew->work); 9411fa44ecaSJames Bottomley 9421fa44ecaSJames Bottomley return 1; 9431fa44ecaSJames Bottomley } 9441fa44ecaSJames Bottomley EXPORT_SYMBOL_GPL(execute_in_process_context); 9451fa44ecaSJames Bottomley 9461da177e4SLinus Torvalds int keventd_up(void) 9471da177e4SLinus Torvalds { 9481da177e4SLinus Torvalds return keventd_wq != NULL; 9491da177e4SLinus Torvalds } 9501da177e4SLinus Torvalds 9511da177e4SLinus Torvalds int current_is_keventd(void) 9521da177e4SLinus Torvalds { 9531da177e4SLinus Torvalds struct cpu_workqueue_struct *cwq; 954d243769dSHugh Dickins int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */ 9551da177e4SLinus Torvalds int ret = 0; 9561da177e4SLinus Torvalds 9571da177e4SLinus Torvalds BUG_ON(!keventd_wq); 9581da177e4SLinus Torvalds 95989ada679SChristoph Lameter cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu); 9601da177e4SLinus Torvalds if (current == cwq->thread) 9611da177e4SLinus Torvalds ret = 1; 9621da177e4SLinus Torvalds 9631da177e4SLinus Torvalds return ret; 9641da177e4SLinus Torvalds 9651da177e4SLinus Torvalds } 9661da177e4SLinus Torvalds 9673af24433SOleg Nesterov static struct cpu_workqueue_struct * 9683af24433SOleg Nesterov init_cpu_workqueue(struct workqueue_struct *wq, int cpu) 9691da177e4SLinus Torvalds { 97089ada679SChristoph Lameter struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu); 9713af24433SOleg Nesterov 9723af24433SOleg Nesterov cwq->wq = wq; 9733af24433SOleg Nesterov spin_lock_init(&cwq->lock); 9743af24433SOleg Nesterov INIT_LIST_HEAD(&cwq->worklist); 9753af24433SOleg Nesterov init_waitqueue_head(&cwq->more_work); 9763af24433SOleg Nesterov 9773af24433SOleg Nesterov return cwq; 9783af24433SOleg Nesterov } 9793af24433SOleg Nesterov 9803af24433SOleg Nesterov static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) 9813af24433SOleg Nesterov { 9823af24433SOleg Nesterov struct workqueue_struct *wq = cwq->wq; 9836cc88bc4SDavid Howells const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d"; 9843af24433SOleg Nesterov struct task_struct *p; 9853af24433SOleg Nesterov 9863af24433SOleg Nesterov p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu); 9873af24433SOleg Nesterov /* 9883af24433SOleg Nesterov * Nobody can add the work_struct to this cwq, 9893af24433SOleg Nesterov * if (caller is __create_workqueue) 9903af24433SOleg Nesterov * nobody should see this wq 9913af24433SOleg Nesterov * else // caller is CPU_UP_PREPARE 9923af24433SOleg Nesterov * cpu is not on cpu_online_map 9933af24433SOleg Nesterov * so we can abort safely. 9943af24433SOleg Nesterov */ 9953af24433SOleg Nesterov if (IS_ERR(p)) 9963af24433SOleg Nesterov return PTR_ERR(p); 9973af24433SOleg Nesterov cwq->thread = p; 9983af24433SOleg Nesterov 999e1d8aa9fSFrederic Weisbecker trace_workqueue_creation(cwq->thread, cpu); 1000e1d8aa9fSFrederic Weisbecker 10013af24433SOleg Nesterov return 0; 10023af24433SOleg Nesterov } 10033af24433SOleg Nesterov 100406ba38a9SOleg Nesterov static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) 100506ba38a9SOleg Nesterov { 100606ba38a9SOleg Nesterov struct task_struct *p = cwq->thread; 100706ba38a9SOleg Nesterov 100806ba38a9SOleg Nesterov if (p != NULL) { 100906ba38a9SOleg Nesterov if (cpu >= 0) 101006ba38a9SOleg Nesterov kthread_bind(p, cpu); 101106ba38a9SOleg Nesterov wake_up_process(p); 101206ba38a9SOleg Nesterov } 101306ba38a9SOleg Nesterov } 101406ba38a9SOleg Nesterov 10154e6045f1SJohannes Berg struct workqueue_struct *__create_workqueue_key(const char *name, 10164e6045f1SJohannes Berg int singlethread, 10174e6045f1SJohannes Berg int freezeable, 1018eb13ba87SJohannes Berg struct lock_class_key *key, 1019eb13ba87SJohannes Berg const char *lock_name) 10203af24433SOleg Nesterov { 10213af24433SOleg Nesterov struct workqueue_struct *wq; 10223af24433SOleg Nesterov struct cpu_workqueue_struct *cwq; 10233af24433SOleg Nesterov int err = 0, cpu; 10243af24433SOleg Nesterov 10253af24433SOleg Nesterov wq = kzalloc(sizeof(*wq), GFP_KERNEL); 10263af24433SOleg Nesterov if (!wq) 10274690c4abSTejun Heo goto err; 10283af24433SOleg Nesterov 10293af24433SOleg Nesterov wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct); 10304690c4abSTejun Heo if (!wq->cpu_wq) 10314690c4abSTejun Heo goto err; 10323af24433SOleg Nesterov 10333af24433SOleg Nesterov wq->name = name; 1034eb13ba87SJohannes Berg lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); 1035cce1a165SOleg Nesterov wq->singlethread = singlethread; 10363af24433SOleg Nesterov wq->freezeable = freezeable; 1037cce1a165SOleg Nesterov INIT_LIST_HEAD(&wq->list); 10383af24433SOleg Nesterov 10393af24433SOleg Nesterov if (singlethread) { 10403af24433SOleg Nesterov cwq = init_cpu_workqueue(wq, singlethread_cpu); 10413af24433SOleg Nesterov err = create_workqueue_thread(cwq, singlethread_cpu); 104206ba38a9SOleg Nesterov start_workqueue_thread(cwq, -1); 10433af24433SOleg Nesterov } else { 10443da1c84cSOleg Nesterov cpu_maps_update_begin(); 10456af8bf3dSOleg Nesterov /* 10466af8bf3dSOleg Nesterov * We must place this wq on list even if the code below fails. 10476af8bf3dSOleg Nesterov * cpu_down(cpu) can remove cpu from cpu_populated_map before 10486af8bf3dSOleg Nesterov * destroy_workqueue() takes the lock, in that case we leak 10496af8bf3dSOleg Nesterov * cwq[cpu]->thread. 10506af8bf3dSOleg Nesterov */ 105195402b38SGautham R Shenoy spin_lock(&workqueue_lock); 10523af24433SOleg Nesterov list_add(&wq->list, &workqueues); 105395402b38SGautham R Shenoy spin_unlock(&workqueue_lock); 10546af8bf3dSOleg Nesterov /* 10556af8bf3dSOleg Nesterov * We must initialize cwqs for each possible cpu even if we 10566af8bf3dSOleg Nesterov * are going to call destroy_workqueue() finally. Otherwise 10576af8bf3dSOleg Nesterov * cpu_up() can hit the uninitialized cwq once we drop the 10586af8bf3dSOleg Nesterov * lock. 10596af8bf3dSOleg Nesterov */ 10603af24433SOleg Nesterov for_each_possible_cpu(cpu) { 10613af24433SOleg Nesterov cwq = init_cpu_workqueue(wq, cpu); 10623af24433SOleg Nesterov if (err || !cpu_online(cpu)) 10633af24433SOleg Nesterov continue; 10643af24433SOleg Nesterov err = create_workqueue_thread(cwq, cpu); 106506ba38a9SOleg Nesterov start_workqueue_thread(cwq, cpu); 10663af24433SOleg Nesterov } 10673da1c84cSOleg Nesterov cpu_maps_update_done(); 10683af24433SOleg Nesterov } 10693af24433SOleg Nesterov 10703af24433SOleg Nesterov if (err) { 10713af24433SOleg Nesterov destroy_workqueue(wq); 10723af24433SOleg Nesterov wq = NULL; 10733af24433SOleg Nesterov } 10743af24433SOleg Nesterov return wq; 10754690c4abSTejun Heo err: 10764690c4abSTejun Heo if (wq) { 10774690c4abSTejun Heo free_percpu(wq->cpu_wq); 10784690c4abSTejun Heo kfree(wq); 10794690c4abSTejun Heo } 10804690c4abSTejun Heo return NULL; 10813af24433SOleg Nesterov } 10824e6045f1SJohannes Berg EXPORT_SYMBOL_GPL(__create_workqueue_key); 10833af24433SOleg Nesterov 10841e35eaa2SOleg Nesterov static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) 10853af24433SOleg Nesterov { 10863af24433SOleg Nesterov /* 10873da1c84cSOleg Nesterov * Our caller is either destroy_workqueue() or CPU_POST_DEAD, 10883da1c84cSOleg Nesterov * cpu_add_remove_lock protects cwq->thread. 10893af24433SOleg Nesterov */ 109014441960SOleg Nesterov if (cwq->thread == NULL) 109114441960SOleg Nesterov return; 109214441960SOleg Nesterov 10933295f0efSIngo Molnar lock_map_acquire(&cwq->wq->lockdep_map); 10943295f0efSIngo Molnar lock_map_release(&cwq->wq->lockdep_map); 10954e6045f1SJohannes Berg 109613c22168SOleg Nesterov flush_cpu_workqueue(cwq); 109714441960SOleg Nesterov /* 10983da1c84cSOleg Nesterov * If the caller is CPU_POST_DEAD and cwq->worklist was not empty, 109913c22168SOleg Nesterov * a concurrent flush_workqueue() can insert a barrier after us. 110013c22168SOleg Nesterov * However, in that case run_workqueue() won't return and check 110113c22168SOleg Nesterov * kthread_should_stop() until it flushes all work_struct's. 110214441960SOleg Nesterov * When ->worklist becomes empty it is safe to exit because no 110314441960SOleg Nesterov * more work_structs can be queued on this cwq: flush_workqueue 110414441960SOleg Nesterov * checks list_empty(), and a "normal" queue_work() can't use 110514441960SOleg Nesterov * a dead CPU. 110614441960SOleg Nesterov */ 1107e1d8aa9fSFrederic Weisbecker trace_workqueue_destruction(cwq->thread); 110814441960SOleg Nesterov kthread_stop(cwq->thread); 110914441960SOleg Nesterov cwq->thread = NULL; 11101da177e4SLinus Torvalds } 11111da177e4SLinus Torvalds 11123af24433SOleg Nesterov /** 11133af24433SOleg Nesterov * destroy_workqueue - safely terminate a workqueue 11143af24433SOleg Nesterov * @wq: target workqueue 11153af24433SOleg Nesterov * 11163af24433SOleg Nesterov * Safely destroy a workqueue. All work currently pending will be done first. 11173af24433SOleg Nesterov */ 11183af24433SOleg Nesterov void destroy_workqueue(struct workqueue_struct *wq) 11193af24433SOleg Nesterov { 1120e7577c50SRusty Russell const struct cpumask *cpu_map = wq_cpu_map(wq); 11213af24433SOleg Nesterov int cpu; 11223af24433SOleg Nesterov 11233da1c84cSOleg Nesterov cpu_maps_update_begin(); 112495402b38SGautham R Shenoy spin_lock(&workqueue_lock); 11253af24433SOleg Nesterov list_del(&wq->list); 112695402b38SGautham R Shenoy spin_unlock(&workqueue_lock); 11273af24433SOleg Nesterov 1128aa85ea5bSRusty Russell for_each_cpu(cpu, cpu_map) 11291e35eaa2SOleg Nesterov cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); 11303da1c84cSOleg Nesterov cpu_maps_update_done(); 11313af24433SOleg Nesterov 11323af24433SOleg Nesterov free_percpu(wq->cpu_wq); 11333af24433SOleg Nesterov kfree(wq); 11343af24433SOleg Nesterov } 11353af24433SOleg Nesterov EXPORT_SYMBOL_GPL(destroy_workqueue); 11363af24433SOleg Nesterov 11379c7b216dSChandra Seetharaman static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, 11381da177e4SLinus Torvalds unsigned long action, 11391da177e4SLinus Torvalds void *hcpu) 11401da177e4SLinus Torvalds { 11413af24433SOleg Nesterov unsigned int cpu = (unsigned long)hcpu; 11423af24433SOleg Nesterov struct cpu_workqueue_struct *cwq; 11431da177e4SLinus Torvalds struct workqueue_struct *wq; 114480b5184cSAkinobu Mita int err = 0; 11451da177e4SLinus Torvalds 11468bb78442SRafael J. Wysocki action &= ~CPU_TASKS_FROZEN; 11478bb78442SRafael J. Wysocki 11481da177e4SLinus Torvalds switch (action) { 11493af24433SOleg Nesterov case CPU_UP_PREPARE: 1150e7577c50SRusty Russell cpumask_set_cpu(cpu, cpu_populated_map); 11513af24433SOleg Nesterov } 11528448502cSOleg Nesterov undo: 11531da177e4SLinus Torvalds list_for_each_entry(wq, &workqueues, list) { 11543af24433SOleg Nesterov cwq = per_cpu_ptr(wq->cpu_wq, cpu); 11553af24433SOleg Nesterov 11563af24433SOleg Nesterov switch (action) { 11573af24433SOleg Nesterov case CPU_UP_PREPARE: 115880b5184cSAkinobu Mita err = create_workqueue_thread(cwq, cpu); 115980b5184cSAkinobu Mita if (!err) 11601da177e4SLinus Torvalds break; 116195402b38SGautham R Shenoy printk(KERN_ERR "workqueue [%s] for %i failed\n", 116295402b38SGautham R Shenoy wq->name, cpu); 11638448502cSOleg Nesterov action = CPU_UP_CANCELED; 116480b5184cSAkinobu Mita err = -ENOMEM; 11658448502cSOleg Nesterov goto undo; 11661da177e4SLinus Torvalds 11671da177e4SLinus Torvalds case CPU_ONLINE: 116806ba38a9SOleg Nesterov start_workqueue_thread(cwq, cpu); 11691da177e4SLinus Torvalds break; 11701da177e4SLinus Torvalds 11711da177e4SLinus Torvalds case CPU_UP_CANCELED: 117206ba38a9SOleg Nesterov start_workqueue_thread(cwq, -1); 11733da1c84cSOleg Nesterov case CPU_POST_DEAD: 11741e35eaa2SOleg Nesterov cleanup_workqueue_thread(cwq); 11751da177e4SLinus Torvalds break; 11761da177e4SLinus Torvalds } 11773af24433SOleg Nesterov } 11781da177e4SLinus Torvalds 117900dfcaf7SOleg Nesterov switch (action) { 118000dfcaf7SOleg Nesterov case CPU_UP_CANCELED: 11813da1c84cSOleg Nesterov case CPU_POST_DEAD: 1182e7577c50SRusty Russell cpumask_clear_cpu(cpu, cpu_populated_map); 118300dfcaf7SOleg Nesterov } 118400dfcaf7SOleg Nesterov 118580b5184cSAkinobu Mita return notifier_from_errno(err); 11861da177e4SLinus Torvalds } 11871da177e4SLinus Torvalds 11882d3854a3SRusty Russell #ifdef CONFIG_SMP 11898ccad40dSRusty Russell 11902d3854a3SRusty Russell struct work_for_cpu { 11916b44003eSAndrew Morton struct completion completion; 11922d3854a3SRusty Russell long (*fn)(void *); 11932d3854a3SRusty Russell void *arg; 11942d3854a3SRusty Russell long ret; 11952d3854a3SRusty Russell }; 11962d3854a3SRusty Russell 11976b44003eSAndrew Morton static int do_work_for_cpu(void *_wfc) 11982d3854a3SRusty Russell { 11996b44003eSAndrew Morton struct work_for_cpu *wfc = _wfc; 12002d3854a3SRusty Russell wfc->ret = wfc->fn(wfc->arg); 12016b44003eSAndrew Morton complete(&wfc->completion); 12026b44003eSAndrew Morton return 0; 12032d3854a3SRusty Russell } 12042d3854a3SRusty Russell 12052d3854a3SRusty Russell /** 12062d3854a3SRusty Russell * work_on_cpu - run a function in user context on a particular cpu 12072d3854a3SRusty Russell * @cpu: the cpu to run on 12082d3854a3SRusty Russell * @fn: the function to run 12092d3854a3SRusty Russell * @arg: the function arg 12102d3854a3SRusty Russell * 121131ad9081SRusty Russell * This will return the value @fn returns. 121231ad9081SRusty Russell * It is up to the caller to ensure that the cpu doesn't go offline. 12136b44003eSAndrew Morton * The caller must not hold any locks which would prevent @fn from completing. 12142d3854a3SRusty Russell */ 12152d3854a3SRusty Russell long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) 12162d3854a3SRusty Russell { 12176b44003eSAndrew Morton struct task_struct *sub_thread; 12186b44003eSAndrew Morton struct work_for_cpu wfc = { 12196b44003eSAndrew Morton .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion), 12206b44003eSAndrew Morton .fn = fn, 12216b44003eSAndrew Morton .arg = arg, 12226b44003eSAndrew Morton }; 12232d3854a3SRusty Russell 12246b44003eSAndrew Morton sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu"); 12256b44003eSAndrew Morton if (IS_ERR(sub_thread)) 12266b44003eSAndrew Morton return PTR_ERR(sub_thread); 12276b44003eSAndrew Morton kthread_bind(sub_thread, cpu); 12286b44003eSAndrew Morton wake_up_process(sub_thread); 12296b44003eSAndrew Morton wait_for_completion(&wfc.completion); 12302d3854a3SRusty Russell return wfc.ret; 12312d3854a3SRusty Russell } 12322d3854a3SRusty Russell EXPORT_SYMBOL_GPL(work_on_cpu); 12332d3854a3SRusty Russell #endif /* CONFIG_SMP */ 12342d3854a3SRusty Russell 1235c12920d1SOleg Nesterov void __init init_workqueues(void) 12361da177e4SLinus Torvalds { 1237e7577c50SRusty Russell alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL); 1238e7577c50SRusty Russell 1239e7577c50SRusty Russell cpumask_copy(cpu_populated_map, cpu_online_mask); 1240e7577c50SRusty Russell singlethread_cpu = cpumask_first(cpu_possible_mask); 1241e7577c50SRusty Russell cpu_singlethread_map = cpumask_of(singlethread_cpu); 12421da177e4SLinus Torvalds hotcpu_notifier(workqueue_cpu_callback, 0); 12431da177e4SLinus Torvalds keventd_wq = create_workqueue("events"); 12441da177e4SLinus Torvalds BUG_ON(!keventd_wq); 12451da177e4SLinus Torvalds } 1246