xref: /openbmc/linux/kernel/workqueue.c (revision 52bad64d)
/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	long remove_sequence;	/* Least-recently added (next to run) */
	long insert_sequence;	/* Next to add */

	struct list_head worklist;
	wait_queue_head_t more_work;
	wait_queue_head_t work_done;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list;	/* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	work->wq_data = cwq;
	list_add_tail(&work->entry, &cwq->worklist);
	cwq->insert_sequence++;
	wake_up(&cwq->more_work);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(0, &work->pending)) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
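
/*
 * Illustrative usage sketch.  The handler, data and workqueue names below
 * are invented, and DECLARE_WORK() is assumed to take the three-argument
 * func/data form that matches the work_struct layout used here:
 *
 *	static void frob_handler(void *data)
 *	{
 *		struct frob_device *dev = data;
 *		...
 *	}
 *	static DECLARE_WORK(frob_work, frob_handler, &frob_dev);
 *
 *	queue_work(frob_wq, &frob_work);	// from irq or process context
 *
 * A second queue_work() on the same work_struct before the handler has run
 * simply returns 0, because the pending bit is still set.
 */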

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct workqueue_struct *wq = dwork->work.wq_data;
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		work->wq_data = wq;
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
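
/*
 * Illustrative sketch of deferred queueing.  The names are invented, and
 * DECLARE_DELAYED_WORK() is assumed to be the delayed_work counterpart of
 * DECLARE_WORK() in this version of <linux/workqueue.h>:
 *
 *	static void poll_handler(void *data)
 *	{
 *		...
 *	}
 *	static DECLARE_DELAYED_WORK(poll_work, poll_handler, NULL);
 *
 *	queue_delayed_work(poll_wq, &poll_work, msecs_to_jiffies(100));
 *
 * The pending bit is set immediately, but the work is only added to a
 * per-CPU worklist when the timer fires in delayed_work_timer_fn().
 */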

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		work->wq_data = wq;
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking off work from the queue until
	 * done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		void (*f) (void *) = work->func;
		void *data = work->data;

		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(work->wq_data != cwq);
		clear_bit(0, &work->pending);
		f(data);

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node-local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		DEFINE_WAIT(wait);
		long sequence_needed;

		spin_lock_irq(&cwq->lock);
		sequence_needed = cwq->insert_sequence;

		while (sequence_needed - cwq->remove_sequence > 0) {
			prepare_to_wait(&cwq->work_done, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&cwq->lock);
			schedule();
			spin_lock_irq(&cwq->lock);
		}
		finish_wait(&cwq->work_done, &wait);
		spin_unlock_irq(&cwq->lock);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each workqueue's current insert_sequence number and
 * will sleep until the head sequence is greater than or equal to that.  This
 * means that we sleep until all works which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	might_sleep();

	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		int cpu;

		mutex_lock(&workqueue_mutex);
		for_each_online_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
		mutex_unlock(&workqueue_mutex);
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);
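
/*
 * Typical shutdown-path sketch (the driver, queue and helper names are
 * invented):
 *
 *	static void frob_remove(struct frob_device *dev)
 *	{
 *		dev->dying = 1;			// stop queueing new work
 *		flush_workqueue(frob_wq);	// wait for queued handlers
 *		free_frob_resources(dev);
 *	}
 *
 * Note that this only waits for work queued before the call; a handler
 * that rearms itself needs cancel_rearming_delayed_workqueue() instead.
 */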

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
						   int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct task_struct *p;

	spin_lock_init(&cwq->lock);
	cwq->wq = wq;
	cwq->thread = NULL;
	cwq->insert_sequence = 0;
	cwq->remove_sequence = 0;
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
	init_waitqueue_head(&cwq->work_done);

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	mutex_lock(&workqueue_mutex);
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		p = create_workqueue_thread(wq, singlethread_cpu);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		list_add(&wq->list, &workqueues);
		for_each_online_cpu(cpu) {
			p = create_workqueue_thread(wq, cpu);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	mutex_unlock(&workqueue_mutex);

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
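
/*
 * Callers normally reach this through the create_workqueue() and
 * create_singlethread_workqueue() wrappers in <linux/workqueue.h>.
 * A rough lifecycle sketch (the queue and work names are invented):
 *
 *	frob_wq = create_singlethread_workqueue("frobd");
 *	if (!frob_wq)
 *		return -ENOMEM;
 *	...
 *	queue_work(frob_wq, &frob_work);
 *	...
 *	flush_workqueue(frob_wq);
 *	destroy_workqueue(frob_wq);
 */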

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	mutex_lock(&workqueue_mutex);
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, singlethread_cpu);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		list_del(&wq->list);
	}
	mutex_unlock(&workqueue_mutex);
	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
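
/*
 * Sketch of deferring work to the shared keventd queue (the handler and
 * work names are invented):
 *
 *	static void update_stats(void *data)
 *	{
 *		...
 *	}
 *	static DECLARE_WORK(stats_work, update_stats, NULL);
 *
 *	schedule_work(&stats_work);	// e.g. from an interrupt handler
 *
 * The handler runs on the "events/N" thread of the submitting CPU, so it
 * should not block for long or it will delay every other keventd user.
 */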

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 * @info: a pointer to pass to func()
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(void (*func)(void *info), void *info)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	mutex_lock(&workqueue_mutex);
	for_each_online_cpu(cpu) {
		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
				per_cpu_ptr(works, cpu));
	}
	mutex_unlock(&workqueue_mutex);
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
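
/*
 * Sketch of a caller (the function name is invented):
 *
 *	static void drain_local_caches(void *unused)
 *	{
 *		// runs once on each online CPU, in keventd context
 *	}
 *
 *	err = schedule_on_each_cpu(drain_local_caches, NULL);
 *
 * The call returns only after every CPU's invocation has completed,
 * because it finishes with flush_workqueue(keventd_wq).
 */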

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *			work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *			work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
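
/*
 * Sketch of the self-rearming pattern these helpers exist for (the
 * handler and delayed_work names are invented):
 *
 *	static void poll_handler(void *data)
 *	{
 *		do_poll();
 *		schedule_delayed_work(&poll_dwork, HZ);	// rearm
 *	}
 *
 * A bare cancel_delayed_work() can race with the rearm in the handler, so
 * teardown uses cancel_rearming_delayed_work(&poll_dwork), which keeps
 * cancelling and flushing until the work is really gone.
 */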

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @data:	data to pass to the function
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(void (*fn)(void *data), void *data,
			       struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(data);
		return 0;
	}

	INIT_WORK(&ew->work, fn, data);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
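
/*
 * Sketch of a caller that may be invoked from either interrupt or process
 * context (the function and structure names are invented):
 *
 *	static void frob_release(void *data)
 *	{
 *		struct frob_device *dev = data;
 *		kfree(dev);
 *	}
 *
 *	execute_in_process_context(frob_release, dev, &dev->ew);
 *
 * The execute_work storage must stay valid until the handler runs; keeping
 * it inside the object that the handler itself frees achieves that.
 */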

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}

#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct list_head list;
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next,struct work_struct,entry);
		list_del(&work->entry);
		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
	}
	spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&workqueue_mutex);
		/* Create a new workqueue thread for it. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq;

			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
			kthread_bind(cwq->thread, hotcpu);
			wake_up_process(cwq->thread);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
				continue;
			/* Unbind so it can run. */
			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
				     any_online_cpu(cpu_online_map));
			cleanup_workqueue_thread(wq, hotcpu);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DOWN_PREPARE:
		mutex_lock(&workqueue_mutex);
		break;

	case CPU_DOWN_FAILED:
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		mutex_unlock(&workqueue_mutex);
		break;
	}

	return NOTIFY_OK;
}
#endif

void init_workqueues(void)
{
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}
