/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	long remove_sequence;	/* Least-recently added (next to run) */
	long insert_sequence;	/* Next to add */

	struct list_head worklist;
	wait_queue_head_t more_work;
	wait_queue_head_t work_done;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list;	/* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

static inline void set_wq_data(struct work_struct *work, void *wq)
{
	unsigned long new, old, res;

	/* assume the pending flag is already set and that the task has already
	 * been queued on this workqueue */
	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
	res = work->management;
	if (res != new) {
		do {
			old = res;
			new = (unsigned long) wq;
			new |= (old & WORK_STRUCT_FLAG_MASK);
			res = cmpxchg(&work->management, old, new);
		} while (res != old);
	}
}

static inline void *get_wq_data(struct work_struct *work)
{
	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
}
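
/*
 * Illustration (not part of the original file): work->management packs a
 * pointer and the flag bits into one word, which is why set_wq_data() can
 * update both with a single cmpxchg().  Assuming the queue pointer is
 * aligned enough that its low WORK_STRUCT_FLAG_MASK bits are always zero,
 * those bits are free for flags; with a made-up example address:
 *
 *	management = 0xc12ab000 | (1UL << WORK_STRUCT_PENDING);
 *	get_wq_data(work)	recovers 0xc12ab000 via
 *				management & WORK_STRUCT_WQ_DATA_MASK
 */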
/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	set_wq_data(work, cwq);
	list_add_tail(&work->entry, &cwq->worklist);
	cwq->insert_sequence++;
	wake_up(&cwq->more_work);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
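
/*
 * Usage sketch (not part of the original file; all names below are
 * hypothetical).  A typical caller defines a handler and a work item,
 * then queues it; the handler later runs in process context on one of
 * the workqueue's per-CPU threads:
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		...
 *	}
 *	static DECLARE_WORK(my_work, my_handler);
 *	static struct workqueue_struct *my_wq;
 *
 *	my_wq = create_workqueue("mywq");
 *	if (my_wq)
 *		queue_work(my_wq, &my_work);
 */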
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct workqueue_struct *wq = get_wq_data(&dwork->work);
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
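
/*
 * Usage sketch (hypothetical names, not part of the original file).  A
 * delayed work item carries its own timer, so the caller only picks the
 * delay in jiffies:
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_handler);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);
 *
 * or, to start the timer on a particular CPU so that the work is queued
 * there when it fires:
 *
 *	queue_delayed_work_on(0, my_wq, &my_dwork, HZ);
 */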
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking off work from the queue until
	 * done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(get_wq_data(work) != cwq);
		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
			work_release(work);
		f(work);

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}
static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		DEFINE_WAIT(wait);
		long sequence_needed;

		spin_lock_irq(&cwq->lock);
		sequence_needed = cwq->insert_sequence;

		while (sequence_needed - cwq->remove_sequence > 0) {
			prepare_to_wait(&cwq->work_done, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&cwq->lock);
			schedule();
			spin_lock_irq(&cwq->lock);
		}
		finish_wait(&cwq->work_done, &wait);
		spin_unlock_irq(&cwq->lock);
	}
}
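
/*
 * Worked example (not from the original file): suppose at flush time
 * insert_sequence is 7 and remove_sequence is 4, i.e. works 4, 5 and 6
 * are still queued or running.  flush_cpu_workqueue() samples
 * sequence_needed = 7 and sleeps on work_done until remove_sequence
 * reaches 7.  Works queued after the sample push insert_sequence to 8,
 * 9, ... but do not move the goalpost, which is what prevents the
 * livelock described at the top of this file.
 */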
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each per-CPU workqueue's current insert_sequence
 * number and will sleep until its remove_sequence is greater than or equal to
 * that.  This means that we sleep until all works which were queued on entry
 * have been handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	might_sleep();

	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		int cpu;

		mutex_lock(&workqueue_mutex);
		for_each_online_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
		mutex_unlock(&workqueue_mutex);
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);
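
/*
 * Usage sketch (hypothetical names): the typical driver-teardown pattern
 * is to stop queueing new work, then flush before freeing anything the
 * handlers might touch:
 *
 *	my_dev->shutting_down = 1;	(stop submitting new work first)
 *	flush_workqueue(my_wq);
 *	kfree(my_dev->buffer);		(now safe: no handler can be running)
 */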
static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
						   int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct task_struct *p;

	spin_lock_init(&cwq->lock);
	cwq->wq = wq;
	cwq->thread = NULL;
	cwq->insert_sequence = 0;
	cwq->remove_sequence = 0;
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
	init_waitqueue_head(&cwq->work_done);

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	mutex_lock(&workqueue_mutex);
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		p = create_workqueue_thread(wq, singlethread_cpu);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		list_add(&wq->list, &workqueues);
		for_each_online_cpu(cpu) {
			p = create_workqueue_thread(wq, cpu);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	mutex_unlock(&workqueue_mutex);

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	mutex_lock(&workqueue_mutex);
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, singlethread_cpu);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		list_del(&wq->list);
	}
	mutex_unlock(&workqueue_mutex);
	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;
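
/*
 * Usage sketch (hypothetical names): callers normally reach
 * __create_workqueue() through the wrappers in linux/workqueue.h, pairing
 * creation with destroy_workqueue() on the teardown path:
 *
 *	my_wq = create_workqueue("mywq");		(one thread per CPU)
 *	my_st_wq = create_singlethread_workqueue("myst");  (a single thread)
 *	...
 *	destroy_workqueue(my_wq);	(flushes pending work, stops threads)
 */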
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
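
/*
 * Usage sketch (hypothetical names): the schedule_*() helpers are the
 * common case for code that does not want a private workqueue; they all
 * funnel into keventd_wq, the "events/N" threads created below in
 * init_workqueues():
 *
 *	schedule_work(&my_work);		   (run soon, on keventd)
 *	schedule_delayed_work(&my_dwork, HZ / 2);  (run in ~half a second)
 */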
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	mutex_lock(&workqueue_mutex);
	for_each_online_cpu(cpu) {
		INIT_WORK(per_cpu_ptr(works, cpu), func);
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
				per_cpu_ptr(works, cpu));
	}
	mutex_unlock(&workqueue_mutex);
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 * work whose handler rearms the delayed work.
 * @wq: the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 * work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
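
/*
 * Usage sketch (hypothetical names): a self-rearming handler is why a
 * single cancel_delayed_work() can lose the race, and why the helpers
 * above loop:
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		...
 *		schedule_delayed_work(&my_dwork, HZ);	(rearms itself)
 *	}
 *
 * On teardown, cancel_rearming_delayed_work(&my_dwork) keeps trying the
 * cancel and flushing until the timer is dead and the handler can no
 * longer requeue.
 */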
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
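
/*
 * Usage sketch (hypothetical names): handy for release paths that may be
 * entered from either interrupt or process context.  The execute_work
 * storage must outlive the call, since the deferred case uses it; one
 * way to guarantee that is to embed it in the object being released:
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *		...
 *	};
 *
 *	execute_in_process_context(my_release, &obj->ew);
 *		(runs my_release(&obj->ew.work) now, or later via keventd)
 */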
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct list_head list;
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next, struct work_struct, entry);
		list_del(&work->entry);
		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
	}
	spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
					    unsigned long action,
					    void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&workqueue_mutex);
		/* Create a new workqueue thread for it. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq;

			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
			kthread_bind(cwq->thread, hotcpu);
			wake_up_process(cwq->thread);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
				continue;
			/* Unbind so it can run. */
			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
				     any_online_cpu(cpu_online_map));
			cleanup_workqueue_thread(wq, hotcpu);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DOWN_PREPARE:
		mutex_lock(&workqueue_mutex);
		break;

	case CPU_DOWN_FAILED:
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		mutex_unlock(&workqueue_mutex);
		break;
	}

	return NOTIFY_OK;
}
#endif

void init_workqueues(void)
{
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}