/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        struct list_head worklist;
        wait_queue_head_t more_work;

        struct workqueue_struct *wq;
        struct task_struct *thread;
        struct work_struct *current_work;

        int run_depth;          /* Detect run_workqueue() recursion depth */

        int freezeable;         /* Freeze the thread during suspend */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        const char *name;
        struct list_head list;  /* Empty if single thread */
};
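/*
 * Layout sketch (added commentary, not from the original source): a
 * workqueue_struct owns one cpu_workqueue_struct per possible CPU via
 * alloc_percpu(), and each per-CPU entry owns its own worklist, lock and
 * worker thread:
 *
 *      workqueue_struct
 *        ->cpu_wq --> [cpu0] cpu_workqueue_struct { lock, worklist, thread }
 *                     [cpu1] cpu_workqueue_struct { lock, worklist, thread }
 *                     ...
 *
 * Single-threaded workqueues still allocate the whole per-CPU array but
 * only ever use the entry for singlethread_cpu.
 */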
/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return list_empty(&wq->list);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work, void *wq)
{
        unsigned long new;

        BUG_ON(!work_pending(work));

        new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
        atomic_long_set(&work->data, new);
}

static inline void *get_wq_data(struct work_struct *work)
{
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
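/*
 * Illustration of the encoding above (added commentary): work->data packs
 * the cpu_workqueue_struct pointer and the flag bits into a single word.
 * Because the cwq is cacheline-aligned, the low bits of its address are
 * guaranteed to be zero and are free to carry flags, roughly:
 *
 *      bits 31..2: cwq (or wq) pointer    bits 1..0: flags
 *                                         bit 0 == WORK_STRUCT_PENDING
 *
 * get_wq_data() masks the flags off with WORK_STRUCT_WQ_DATA_MASK, and
 * set_wq_data() must OR the existing flag bits back in.
 */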
static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
{
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        /*
         * We need to re-validate the work info after we've gotten
         * the cpu_workqueue lock. We can run the work now iff:
         *
         *  - the wq_data still matches the cpu_workqueue_struct
         *  - AND the work is still marked pending
         *  - AND the work is still on a list (which will be this
         *    workqueue_struct list)
         *
         * All these conditions are important, because we
         * need to protect against the work being run right
         * now on another CPU (all but the last one might be
         * true if it's currently running and has not been
         * released yet, for example).
         */
        if (get_wq_data(work) == cwq
            && work_pending(work)
            && !list_empty(&work->entry)) {
                work_func_t f = work->func;
                cwq->current_work = work;
                list_del_init(&work->entry);
                spin_unlock_irqrestore(&cwq->lock, flags);

                if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
                        work_release(work);
                f(work);

                spin_lock_irqsave(&cwq->lock, flags);
                cwq->current_work = NULL;
                ret = 1;
        }
        spin_unlock_irqrestore(&cwq->lock, flags);
        return ret;
}

/**
 * run_scheduled_work - run scheduled work synchronously
 * @work: work to run
 *
 * This checks if the work was pending, and runs it
 * synchronously if so. It returns a boolean to indicate
 * whether it had any scheduled work to run or not.
 *
 * NOTE! This _only_ works for normal work_structs. You
 * CANNOT use this for delayed work, because the wq data
 * for delayed work will not point properly to the per-
 * CPU workqueue struct, but will change!
 */
int fastcall run_scheduled_work(struct work_struct *work)
{
        for (;;) {
                struct cpu_workqueue_struct *cwq;

                if (!work_pending(work))
                        return 0;
                if (list_empty(&work->entry))
                        return 0;
                /* NOTE! This depends intimately on __queue_work! */
                cwq = get_wq_data(work);
                if (!cwq)
                        return 0;
                if (__run_work(cwq, work))
                        return 1;
        }
}
EXPORT_SYMBOL(run_scheduled_work);
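/*
 * Minimal usage sketch (illustrative only; my_work is a hypothetical work
 * item): a caller that wants previously scheduled work executed right now,
 * in its own context, rather than waiting for the worker thread:
 *
 *      schedule_work(&my_work);
 *      ...
 *      if (!run_scheduled_work(&my_work))
 *              ;       // it already ran, or was never pending
 *
 * As the NOTE above says, this must never be used with delayed_work.
 */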
static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, int tail)
{
        set_wq_data(work, cwq);
        if (tail)
                list_add_tail(&work->entry, &cwq->worklist);
        else
                list_add(&work->entry, &cwq->worklist);
        wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        insert_work(cwq, work, 1);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0, cpu = get_cpu();

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = singlethread_cpu;
                BUG_ON(!list_empty(&work->entry));
                __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
                ret = 1;
        }
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
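/*
 * Usage sketch (illustrative; my_wq and my_handler are hypothetical):
 *
 *      static void my_handler(struct work_struct *work)
 *      {
 *              // runs in process context on one of my_wq's threads
 *      }
 *      static DECLARE_WORK(my_work, my_handler);
 *      ...
 *      my_wq = create_workqueue("my_wq");
 *      queue_work(my_wq, &my_work);    // returns 0 if already queued
 */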
void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct workqueue_struct *wq = get_wq_data(&dwork->work);
        int cpu = smp_processor_id();

        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;

        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        timer_stats_timer_set_start_info(timer);
        if (delay == 0)
                return queue_work(wq, work);

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                set_wq_data(work, wq);
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
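/*
 * Illustrative rearming pattern (hypothetical names, not from the original
 * file): a poll routine that requeues itself once a second -- exactly the
 * shape that cancel_rearming_delayed_work() further down is meant to kill
 * off safely:
 *
 *      static void my_poll(struct work_struct *work)
 *      {
 *              struct delayed_work *dwork =
 *                      container_of(work, struct delayed_work, work);
 *              // ... do the periodic work ...
 *              queue_delayed_work(my_wq, dwork, HZ);
 *      }
 *      static DECLARE_DELAYED_WORK(my_dwork, my_poll);
 */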
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                set_wq_data(work, wq);
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer_on(timer, cpu);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        unsigned long flags;

        /*
         * Keep taking off work from the queue until
         * done.
         */
        spin_lock_irqsave(&cwq->lock, flags);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __FUNCTION__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;

                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irqrestore(&cwq->lock, flags);

                BUG_ON(get_wq_data(work) != cwq);
                if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
                        work_release(work);
                f(work);

                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
                                        current->pid);
                        printk(KERN_ERR "    last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
                        dump_stack();
                }

                spin_lock_irqsave(&cwq->lock, flags);
                cwq->current_work = NULL;
        }
        cwq->run_depth--;
        spin_unlock_irqrestore(&cwq->lock, flags);
}
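/*
 * Note (added commentary): the in_atomic()/lockdep_depth() check above
 * catches handlers that return with a lock held or preemption disabled,
 * e.g. a buggy
 *
 *      static void bad_handler(struct work_struct *work)
 *      {
 *              spin_lock(&some_lock);  // hypothetical -- never unlocked
 *      }
 *
 * Without the check, the imbalance would silently poison every work item
 * this thread runs afterwards.
 */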
static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DECLARE_WAITQUEUE(wait, current);
        struct k_sigaction sa;
        sigset_t blocked;

        if (!cwq->freezeable)
                current->flags |= PF_NOFREEZE;

        set_user_nice(current, -5);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /*
         * We inherited MPOL_INTERLEAVE from the booting kernel.
         * Set MPOL_DEFAULT to ensure node-local allocations.
         */
        numa_default_policy();

        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                if (cwq->freezeable)
                        try_to_freeze();

                add_wait_queue(&cwq->more_work, &wait);
                if (list_empty(&cwq->worklist))
                        schedule();
                else
                        __set_current_state(TASK_RUNNING);
                remove_wait_queue(&cwq->more_work, &wait);

                if (!list_empty(&cwq->worklist))
                        run_workqueue(cwq);
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

struct wq_barrier {
        struct work_struct work;
        struct completion done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
        complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                              struct wq_barrier *barr, int tail)
{
        INIT_WORK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

        init_completion(&barr->done);

        insert_work(cwq, &barr->work, tail);
}
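/*
 * How the barrier works (added commentary): flushing inserts a dummy
 * wq_barrier work item and sleeps on barr->done; when the worker thread
 * reaches it, wq_barrier_func() fires complete() and the flusher wakes up.
 * Rough sequence, assuming a busy queue:
 *
 *      worklist:  [ w1 ] -> [ w2 ] -> [ barrier ]
 *      flusher:   wait_for_completion(&barr.done)
 *      worker:    runs w1, w2, then the barrier -> complete()
 */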
static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                preempt_enable();
                /*
                 * We can still touch *cwq here because we are keventd, and
                 * hot-unplug will be waiting for us to exit.
                 */
                run_workqueue(cwq);
                preempt_disable();
        } else {
                struct wq_barrier barr;
                int active = 0;

                spin_lock_irq(&cwq->lock);
                if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                        insert_wq_barrier(cwq, &barr, 1);
                        active = 1;
                }
                spin_unlock_irq(&cwq->lock);

                if (active) {
                        preempt_enable();
                        wait_for_completion(&barr.done);
                        preempt_disable();
                }
        }
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
        preempt_disable();              /* CPU hotplug */
        if (is_single_threaded(wq)) {
                /* Always use first cpu's area. */
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
        } else {
                int cpu;

                for_each_online_cpu(cpu)
                        flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(flush_workqueue);

static void wait_on_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        struct wq_barrier barr;
        int running = 0;

        spin_lock_irq(&cwq->lock);
        if (unlikely(cwq->current_work == work)) {
                insert_wq_barrier(cwq, &barr, 0);
                running = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (unlikely(running)) {
                mutex_unlock(&workqueue_mutex);
                wait_for_completion(&barr.done);
                mutex_lock(&workqueue_mutex);
        }
}
/**
 * flush_work - block until a work_struct's callback has terminated
 * @wq: the workqueue on which the work is queued
 * @work: the work which is to be flushed
 *
 * flush_work() will attempt to cancel the work if it is queued.  If the work's
 * callback appears to be running, flush_work() will block until it has
 * completed.
 *
 * flush_work() is designed to be used when the caller is tearing down data
 * structures which the callback function operates upon.  It is expected that,
 * prior to calling flush_work(), the caller has arranged for the work to not
 * be requeued.
 */
void flush_work(struct workqueue_struct *wq, struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;

        mutex_lock(&workqueue_mutex);
        cwq = get_wq_data(work);
        /* Was it ever queued ? */
        if (!cwq)
                goto out;

        /*
         * This work can't be re-queued, and the lock above protects us
         * from take_over_work(), no need to re-check that get_wq_data()
         * is still the same when we take cwq->lock.
         */
        spin_lock_irq(&cwq->lock);
        list_del_init(&work->entry);
        work_release(work);
        spin_unlock_irq(&cwq->lock);

        if (is_single_threaded(wq)) {
                /* Always use first cpu's area. */
                wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
        } else {
                int cpu;

                for_each_online_cpu(cpu)
                        wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
        }
out:
        mutex_unlock(&workqueue_mutex);
}
EXPORT_SYMBOL_GPL(flush_work);
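/*
 * Teardown sketch (illustrative, hypothetical names): stop whatever can
 * requeue the work first, then flush, then free:
 *
 *      del_timer_sync(&dev->poll_timer);       // nothing re-arms the work
 *      flush_work(my_wq, &dev->work);          // cancel or wait for callback
 *      kfree(dev);                             // now safe
 */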
static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
                                                   int cpu, int freezeable)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct task_struct *p;

        spin_lock_init(&cwq->lock);
        cwq->wq = wq;
        cwq->thread = NULL;
        cwq->freezeable = freezeable;
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);

        if (is_single_threaded(wq))
                p = kthread_create(worker_thread, cwq, "%s", wq->name);
        else
                p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
        if (IS_ERR(p))
                return NULL;
        cwq->thread = p;
        return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread, int freezeable)
{
        int cpu, destroy = 0;
        struct workqueue_struct *wq;
        struct task_struct *p;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        mutex_lock(&workqueue_mutex);
        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
                p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
                if (!p)
                        destroy = 1;
                else
                        wake_up_process(p);
        } else {
                list_add(&wq->list, &workqueues);
                for_each_online_cpu(cpu) {
                        p = create_workqueue_thread(wq, cpu, freezeable);
                        if (p) {
                                kthread_bind(p, cpu);
                                wake_up_process(p);
                        } else
                                destroy = 1;
                }
        }
        mutex_unlock(&workqueue_mutex);

        /*
         * Was there any error during startup? If yes then clean up:
         */
        if (destroy) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
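/*
 * Lifecycle sketch (added commentary): __create_workqueue() is normally
 * reached through the wrapper macros in workqueue.h, which just fill in the
 * singlethread/freezeable flags, e.g.:
 *
 *      wq = create_workqueue("mydrv");                 // one thread per CPU
 *      wq = create_singlethread_workqueue("mydrv");    // one thread total
 *      ...
 *      flush_workqueue(wq);
 *      destroy_workqueue(wq);
 *
 * "mydrv" is a hypothetical name used for illustration.
 */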
static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq;
        unsigned long flags;
        struct task_struct *p;

        cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        spin_lock_irqsave(&cwq->lock, flags);
        p = cwq->thread;
        cwq->thread = NULL;
        spin_unlock_irqrestore(&cwq->lock, flags);
        if (p)
                kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        int cpu;

        flush_workqueue(wq);

        /* We don't need the distraction of CPUs appearing and vanishing. */
        mutex_lock(&workqueue_mutex);
        if (is_single_threaded(wq))
                cleanup_workqueue_thread(wq, singlethread_cpu);
        else {
                for_each_online_cpu(cpu)
                        cleanup_workqueue_thread(wq, cpu);
                list_del(&wq->list);
        }
        mutex_unlock(&workqueue_mutex);
        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
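/*
 * Typical use (illustrative, hypothetical names): defer work from an
 * interrupt handler into process context via the shared "events" queue:
 *
 *      static irqreturn_t my_irq(int irq, void *dev_id)
 *      {
 *              struct my_dev *dev = dev_id;
 *              schedule_work(&dev->work);      // runs later in keventd
 *              return IRQ_HANDLED;
 *      }
 */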
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
                                   unsigned long delay)
{
        timer_stats_timer_set_start_info(&dwork->timer);
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        preempt_disable();              /* CPU hotplug */
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
        }
        preempt_enable();
        flush_workqueue(keventd_wq);
        free_percpu(works);
        return 0;
}
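/*
 * Illustrative call (hypothetical function name): run a callback once on
 * every online CPU and wait for all of them to finish:
 *
 *      static void drain_local_cache(struct work_struct *unused)
 *      {
 *              // runs on each CPU, in that CPU's keventd thread
 *      }
 *      ...
 *      if (schedule_on_each_cpu(drain_local_cache))
 *              return -ENOMEM;
 */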
void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

void flush_work_keventd(struct work_struct *work)
{
        flush_work(keventd_wq, work);
}
EXPORT_SYMBOL(flush_work_keventd);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
 * @wq:    the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                       struct delayed_work *dwork)
{
        while (!cancel_delayed_work(dwork))
                flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
        cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:  the function to execute
 * @ew:  guaranteed storage for the execute work structure (must
 *       be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
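/*
 * Usage sketch (illustrative): a path that may run in either interrupt or
 * process context, e.g. a release routine, can use this to get process
 * context "eventually" without caring which case applies:
 *
 *      // dev->ew must stay valid until the callback runs (return value 1)
 *      execute_in_process_context(my_release_fn, &dev->ew);  // hypothetical
 */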
int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;
}

/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct list_head list;
        struct work_struct *work;

        spin_lock_irq(&cwq->lock);
        list_replace_init(&cwq->worklist, &list);

        while (!list_empty(&list)) {
                printk("Taking work for %s\n", wq->name);
                work = list_entry(list.next, struct work_struct, entry);
                list_del(&work->entry);
                __queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
        }
        spin_unlock_irq(&cwq->lock);
}
/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action,
                                            void *hcpu)
{
        unsigned int hotcpu = (unsigned long)hcpu;
        struct workqueue_struct *wq;

        switch (action) {
        case CPU_UP_PREPARE:
                mutex_lock(&workqueue_mutex);
                /* Create a new workqueue thread for it. */
                list_for_each_entry(wq, &workqueues, list) {
                        if (!create_workqueue_thread(wq, hotcpu, 0)) {
                                printk("workqueue for %i failed\n", hotcpu);
                                return NOTIFY_BAD;
                        }
                }
                break;

        case CPU_ONLINE:
                /* Kick off worker threads. */
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq;

                        cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
                        kthread_bind(cwq->thread, hotcpu);
                        wake_up_process(cwq->thread);
                }
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_UP_CANCELED:
                list_for_each_entry(wq, &workqueues, list) {
                        if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
                                continue;
                        /* Unbind so it can run. */
                        kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
                                     any_online_cpu(cpu_online_map));
                        cleanup_workqueue_thread(wq, hotcpu);
                }
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_DOWN_PREPARE:
                mutex_lock(&workqueue_mutex);
                break;

        case CPU_DOWN_FAILED:
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_DEAD:
                list_for_each_entry(wq, &workqueues, list)
                        cleanup_workqueue_thread(wq, hotcpu);
                list_for_each_entry(wq, &workqueues, list)
                        take_over_work(wq, hotcpu);
                mutex_unlock(&workqueue_mutex);
                break;
        }

        return NOTIFY_OK;
}

void init_workqueues(void)
{
        singlethread_cpu = first_cpu(cpu_possible_map);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}