/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes the CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that a flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map instead; the cpumask below is more documentation
 * than optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
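
/*
 * Illustrative sketch (not part of the original file): work->data packs
 * the cpu_workqueue_struct pointer and the flag bits into a single word.
 * WORK_STRUCT_FLAG_MASK covers the low-order flag bits; the remaining
 * bits hold the cwq pointer, which is why the alignment of the cwq
 * allocation matters. Unpacking looks roughly like this:
 *
 *	unsigned long data = atomic_long_read(&work->data);
 *	struct cpu_workqueue_struct *cwq =
 *		(void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *	int pending = test_bit(WORK_STRUCT_PENDING, work_data_bits(work));
 */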

static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
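
/*
 * Illustrative usage sketch (not part of the original file; my_wq,
 * my_work and my_work_func are hypothetical names):
 *
 *	static struct workqueue_struct *my_wq;
 *
 *	static void my_work_func(struct work_struct *work)
 *	{
 *		pr_info("running in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_func);
 *
 *	my_wq = create_workqueue("my_wq");
 *	if (my_wq)
 *		queue_work(my_wq, &my_work);
 *
 * A second queue_work() before my_work_func() starts returns 0 and is a
 * no-op, because the WORK_STRUCT_PENDING bit is still set.
 */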

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU; the caller must ensure that
 * the CPU can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
232 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
233 			struct delayed_work *dwork, unsigned long delay)
234 {
235 	int ret = 0;
236 	struct timer_list *timer = &dwork->timer;
237 	struct work_struct *work = &dwork->work;
238 
239 	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
240 		BUG_ON(timer_pending(timer));
241 		BUG_ON(!list_empty(&work->entry));
242 
243 		timer_stats_timer_set_start_info(&dwork->timer);
244 
245 		/* This stores cwq for the moment, for the timer_fn */
246 		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
247 		timer->expires = jiffies + delay;
248 		timer->data = (unsigned long)dwork;
249 		timer->function = delayed_work_timer_fn;
250 
251 		if (unlikely(cpu >= 0))
252 			add_timer_on(timer, cpu);
253 		else
254 			add_timer(timer);
255 		ret = 1;
256 	}
257 	return ret;
258 }
259 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
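
/*
 * Illustrative sketch (not part of the original file; my_wq, my_dwork
 * and my_dwork_func are hypothetical names): arm a delayed work item
 * roughly one second from now.
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_func);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);	any CPU
 *	queue_delayed_work_on(0, my_wq, &my_dwork, HZ);	timer pinned to CPU 0
 *
 * Both return 0 if my_dwork was already pending, in which case the
 * timer keeps its original expiry.
 */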

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk(KERN_ERR "%s: recursion depth exceeded: %d\n",
			__func__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct
		 * from inside the function that is called from it,
		 * and we need to take that into account for lockdep too.
		 * To avoid bogus "held lock freed" warnings as well
		 * as problems when looking into work->lockdep_map,
		 * make a copy and use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_map_acquire(&cwq->wq->lockdep_map);
		lock_map_acquire(&lockdep_map);
		f(work);
		lock_map_release(&lockdep_map);
		lock_map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, head);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		active = 0;
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, &cwq->worklist);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all work items which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu_mask_nr(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
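
/*
 * Illustrative sketch (not part of the original file; my_wq and
 * my_disable_irqs are hypothetical names): typical shutdown ordering,
 * stop producers first, then drain.
 *
 *	my_disable_irqs(dev);		no new queue_work() after this
 *	flush_workqueue(my_wq);		wait for everything already queued
 *
 * Note the lock_map_acquire/release pair above: flushing a workqueue
 * while holding a lock that one of its work items also takes is a
 * deadlock, and lockdep will report it.
 */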

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
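
/*
 * Illustrative sketch (not part of the original file; hypothetical
 * names): flush_work() waits for one specific work item, on the one CPU
 * where it was queued, rather than draining the whole queue.
 *
 *	my_stop_requeueing(dev);	ensure my_work won't be re-armed
 *	if (flush_work(&dev->my_work))
 *		pr_debug("my_work was pending and has now finished\n");
 */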

/*
 * Upon a successful return (>= 0), the caller "owns" the
 * WORK_STRUCT_PENDING bit, so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
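
/*
 * Illustrative module-exit sketch (not part of the original file;
 * hypothetical names): cancel the timer and any queued or running
 * callback before tearing the queue down.
 *
 *	static void my_exit(void)
 *	{
 *		cancel_delayed_work_sync(&my_dwork);
 *		cancel_work_sync(&my_work);
 *		destroy_workqueue(my_wq);
 *	}
 *
 * Once these calls return, neither callback can be running, even if it
 * used to re-arm itself.
 */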

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
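
/*
 * Illustrative sketch (not part of the original file; hypothetical
 * names): the classic pattern of deferring work from an interrupt
 * handler to the shared keventd ("events") queue.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		schedule_work(&my_work);	cheap and IRQ-safe
 *		return IRQ_HANDLED;
 *	}
 *
 * The callback later runs in process context on an events/N thread.
 * Long-running or sleeping callbacks should use a private workqueue so
 * they do not delay other users of the global queue.
 */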

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns a negative errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));
	put_online_cpus();
	free_percpu(works);
	return 0;
}
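
/*
 * Illustrative sketch (not part of the original file; my_percpu_func is
 * a hypothetical name): run a function once on every online CPU and
 * wait for all of them to finish.
 *
 *	static void my_percpu_func(struct work_struct *unused)
 *	{
 *		pr_info("hello from CPU %d\n", smp_processor_id());
 *	}
 *
 *	int err = schedule_on_each_cpu(my_percpu_func);
 *
 * This sleeps, allocates per-cpu memory and serializes behind every
 * item already queued on the events threads, hence the "very slow"
 * warning above.
 */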

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with process context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
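
/*
 * Illustrative sketch (not part of the original file; my_dev,
 * release_ew and my_release_fn are hypothetical names): @ew must stay
 * valid until the callback has run, so it is typically embedded in a
 * long-lived object rather than placed on the stack.
 *
 *	struct my_dev {
 *		struct execute_work release_ew;
 *	};
 *
 *	if (execute_in_process_context(my_release_fn, &dev->release_ew))
 *		pr_debug("release was deferred to keventd\n");
 */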

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		/*
		 * We must place this wq on the list even if the code below
		 * fails. cpu_down(cpu) can remove cpu from cpu_populated_map
		 * before destroy_workqueue() takes the lock, in which case we
		 * leak cwq[cpu]->thread.
		 */
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		/*
		 * We must initialize cwqs for each possible cpu even if we
		 * are going to call destroy_workqueue() finally. Otherwise
		 * cpu_up() can hit the uninitialized cwq once we drop the
		 * lock.
		 */
		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
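
/*
 * Illustrative sketch (not part of the original file): callers normally
 * reach __create_workqueue_key() through the wrappers in workqueue.h,
 * e.g. (hypothetical names):
 *
 *	wq = create_workqueue("my_wq");			one thread per CPU
 *	wq = create_singlethread_workqueue("my_st");	one thread total
 *	wq = create_freezeable_workqueue("my_fr");	frozen during suspend
 *
 * All of them return NULL on failure; pair each with a matching
 * destroy_workqueue() call.
 */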

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int ret = NOTIFY_OK;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			ret = NOTIFY_BAD;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
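			/* fall through: clean up the half-created thread too */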
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpu_clear(cpu, cpu_populated_map);
	}

	return ret;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}
977