xref: /openbmc/linux/kernel/workqueue.c (revision b5266ea6)
1 /*
2  * kernel/workqueue.c - generic async execution with shared worker pool
3  *
4  * Copyright (C) 2002		Ingo Molnar
5  *
6  *   Derived from the taskqueue/keventd code by:
7  *     David Woodhouse <dwmw2@infradead.org>
8  *     Andrew Morton
9  *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
10  *     Theodore Ts'o <tytso@mit.edu>
11  *
12  * Made to use alloc_percpu by Christoph Lameter.
13  *
14  * Copyright (C) 2010		SUSE Linux Products GmbH
15  * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
16  *
17  * This is the generic async execution mechanism.  Work items are
18  * executed in process context.  The worker pool is shared and
19  * automatically managed.  There is one worker pool for each CPU and
20  * one extra for works which are better served by workers which are
21  * not bound to any specific CPU.
22  *
23  * Please read Documentation/workqueue.txt for details.
24  */
25 
26 #include <linux/export.h>
27 #include <linux/kernel.h>
28 #include <linux/sched.h>
29 #include <linux/init.h>
30 #include <linux/signal.h>
31 #include <linux/completion.h>
32 #include <linux/workqueue.h>
33 #include <linux/slab.h>
34 #include <linux/cpu.h>
35 #include <linux/notifier.h>
36 #include <linux/kthread.h>
37 #include <linux/hardirq.h>
38 #include <linux/mempolicy.h>
39 #include <linux/freezer.h>
40 #include <linux/kallsyms.h>
41 #include <linux/debug_locks.h>
42 #include <linux/lockdep.h>
43 #include <linux/idr.h>
44 
45 #include "workqueue_sched.h"
46 
47 enum {
48 	/* global_cwq flags */
49 	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
50 	GCWQ_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
51 	GCWQ_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
52 	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */
53 	GCWQ_HIGHPRI_PENDING	= 1 << 4,	/* highpri works on queue */
54 
55 	/* worker flags */
56 	WORKER_STARTED		= 1 << 0,	/* started */
57 	WORKER_DIE		= 1 << 1,	/* die die die */
58 	WORKER_IDLE		= 1 << 2,	/* is idle */
59 	WORKER_PREP		= 1 << 3,	/* preparing to run works */
60 	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
61 	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
62 	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
63 	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
64 
65 	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
66 				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
67 
68 	/* gcwq->trustee_state */
69 	TRUSTEE_START		= 0,		/* start */
70 	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
71 	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
72 	TRUSTEE_RELEASE		= 3,		/* release workers */
73 	TRUSTEE_DONE		= 4,		/* trustee is done */
74 
75 	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
76 	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
77 	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,
78 
79 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
80 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
81 
82 	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
83 						/* call for help after 10ms
84 						   (min two ticks) */
85 	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
86 	CREATE_COOLDOWN		= HZ,		/* time to breathe after a failure */
87 	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
88 
89 	/*
90 	 * Rescue workers are used only in emergencies and are shared by
91 	 * all cpus.  Give them nice level -20.
92 	 */
93 	RESCUER_NICE_LEVEL	= -20,
94 };
95 
96 /*
97  * Structure fields follow one of the following exclusion rules.
98  *
99  * I: Modifiable by initialization/destruction paths and read-only for
100  *    everyone else.
101  *
102  * P: Preemption protected.  Disabling preemption is enough and should
103  *    only be modified and accessed from the local cpu.
104  *
105  * L: gcwq->lock protected.  Access with gcwq->lock held.
106  *
107  * X: During normal operation, modification requires gcwq->lock and
108  *    should be done only from local cpu.  Either disabling preemption
109  *    on local cpu or grabbing gcwq->lock is enough for read access.
110  *    If GCWQ_DISASSOCIATED is set, it's identical to L.
111  *
112  * F: wq->flush_mutex protected.
113  *
114  * W: workqueue_lock protected.
115  */
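/*
 * For illustration only (a sketch, not code from this file): a field
 * annotated "L:" above, e.g. gcwq->worklist, is only ever touched with
 * the gcwq lock held, roughly like this:
 *
 *	spin_lock_irq(&gcwq->lock);
 *	list_add_tail(&work->entry, &gcwq->worklist);
 *	spin_unlock_irq(&gcwq->lock);
 */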
116 
117 struct global_cwq;
118 
119 /*
120  * The poor guys doing the actual heavy lifting.  All on-duty workers
121  * are either serving the manager role, on the idle list or on the busy hash.
122  */
123 struct worker {
124 	/* on idle list while idle, on busy hash table while busy */
125 	union {
126 		struct list_head	entry;	/* L: while idle */
127 		struct hlist_node	hentry;	/* L: while busy */
128 	};
129 
130 	struct work_struct	*current_work;	/* L: work being processed */
131 	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
132 	struct list_head	scheduled;	/* L: scheduled works */
133 	struct task_struct	*task;		/* I: worker task */
134 	struct global_cwq	*gcwq;		/* I: the associated gcwq */
135 	/* 64 bytes boundary on 64bit, 32 on 32bit */
136 	unsigned long		last_active;	/* L: last active timestamp */
137 	unsigned int		flags;		/* X: flags */
138 	int			id;		/* I: worker id */
139 	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
140 };
141 
142 /*
143  * Global per-cpu workqueue.  There's one and only one for each cpu
144  * and all works are queued and processed here regardless of their
145  * target workqueues.
146  */
147 struct global_cwq {
148 	spinlock_t		lock;		/* the gcwq lock */
149 	struct list_head	worklist;	/* L: list of pending works */
150 	unsigned int		cpu;		/* I: the associated cpu */
151 	unsigned int		flags;		/* L: GCWQ_* flags */
152 
153 	int			nr_workers;	/* L: total number of workers */
154 	int			nr_idle;	/* L: currently idle ones */
155 
156 	/* workers are chained either in the idle_list or busy_hash */
157 	struct list_head	idle_list;	/* X: list of idle workers */
158 	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
159 						/* L: hash of busy workers */
160 
161 	struct timer_list	idle_timer;	/* L: worker idle timeout */
162 	struct timer_list	mayday_timer;	/* L: SOS timer for workers */
163 
164 	struct ida		worker_ida;	/* L: for worker IDs */
165 
166 	struct task_struct	*trustee;	/* L: for gcwq shutdown */
167 	unsigned int		trustee_state;	/* L: trustee state */
168 	wait_queue_head_t	trustee_wait;	/* trustee wait */
169 	struct worker		*first_idle;	/* L: first idle worker */
170 } ____cacheline_aligned_in_smp;
171 
172 /*
173  * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
174  * work_struct->data are used for flags and thus cwqs need to be
175  * aligned on a (1 << WORK_STRUCT_FLAG_BITS) byte boundary.
176  */
177 struct cpu_workqueue_struct {
178 	struct global_cwq	*gcwq;		/* I: the associated gcwq */
179 	struct workqueue_struct *wq;		/* I: the owning workqueue */
180 	int			work_color;	/* L: current color */
181 	int			flush_color;	/* L: flushing color */
182 	int			nr_in_flight[WORK_NR_COLORS];
183 						/* L: nr of in_flight works */
184 	int			nr_active;	/* L: nr of active works */
185 	int			max_active;	/* L: max active works */
186 	struct list_head	delayed_works;	/* L: delayed works */
187 };
188 
189 /*
190  * Structure used to wait for workqueue flush.
191  */
192 struct wq_flusher {
193 	struct list_head	list;		/* F: list of flushers */
194 	int			flush_color;	/* F: flush color waiting for */
195 	struct completion	done;		/* flush completion */
196 };
197 
198 /*
199  * All cpumasks are assumed to be always set on UP and thus can't be
200  * used to determine whether there's something to be done.
201  */
202 #ifdef CONFIG_SMP
203 typedef cpumask_var_t mayday_mask_t;
204 #define mayday_test_and_set_cpu(cpu, mask)	\
205 	cpumask_test_and_set_cpu((cpu), (mask))
206 #define mayday_clear_cpu(cpu, mask)		cpumask_clear_cpu((cpu), (mask))
207 #define for_each_mayday_cpu(cpu, mask)		for_each_cpu((cpu), (mask))
208 #define alloc_mayday_mask(maskp, gfp)		zalloc_cpumask_var((maskp), (gfp))
209 #define free_mayday_mask(mask)			free_cpumask_var((mask))
210 #else
211 typedef unsigned long mayday_mask_t;
212 #define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask))
213 #define mayday_clear_cpu(cpu, mask)		clear_bit(0, &(mask))
214 #define for_each_mayday_cpu(cpu, mask)		if ((cpu) = 0, (mask))
215 #define alloc_mayday_mask(maskp, gfp)		true
216 #define free_mayday_mask(mask)			do { } while (0)
217 #endif
218 
219 /*
220  * The externally visible workqueue abstraction is an array of
221  * per-CPU workqueues:
222  */
223 struct workqueue_struct {
224 	unsigned int		flags;		/* W: WQ_* flags */
225 	union {
226 		struct cpu_workqueue_struct __percpu	*pcpu;
227 		struct cpu_workqueue_struct		*single;
228 		unsigned long				v;
229 	} cpu_wq;				/* I: cwq's */
230 	struct list_head	list;		/* W: list of all workqueues */
231 
232 	struct mutex		flush_mutex;	/* protects wq flushing */
233 	int			work_color;	/* F: current work color */
234 	int			flush_color;	/* F: current flush color */
235 	atomic_t		nr_cwqs_to_flush; /* flush in progress */
236 	struct wq_flusher	*first_flusher;	/* F: first flusher */
237 	struct list_head	flusher_queue;	/* F: flush waiters */
238 	struct list_head	flusher_overflow; /* F: flush overflow list */
239 
240 	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
241 	struct worker		*rescuer;	/* I: rescue worker */
242 
243 	int			nr_drainers;	/* W: drain in progress */
244 	int			saved_max_active; /* W: saved cwq max_active */
245 #ifdef CONFIG_LOCKDEP
246 	struct lockdep_map	lockdep_map;
247 #endif
248 	char			name[];		/* I: workqueue name */
249 };
250 
251 struct workqueue_struct *system_wq __read_mostly;
252 struct workqueue_struct *system_long_wq __read_mostly;
253 struct workqueue_struct *system_nrt_wq __read_mostly;
254 struct workqueue_struct *system_unbound_wq __read_mostly;
255 struct workqueue_struct *system_freezable_wq __read_mostly;
256 EXPORT_SYMBOL_GPL(system_wq);
257 EXPORT_SYMBOL_GPL(system_long_wq);
258 EXPORT_SYMBOL_GPL(system_nrt_wq);
259 EXPORT_SYMBOL_GPL(system_unbound_wq);
260 EXPORT_SYMBOL_GPL(system_freezable_wq);
261 
262 #define CREATE_TRACE_POINTS
263 #include <trace/events/workqueue.h>
264 
265 #define for_each_busy_worker(worker, i, pos, gcwq)			\
266 	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
267 		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
268 
269 static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
270 				  unsigned int sw)
271 {
272 	if (cpu < nr_cpu_ids) {
273 		if (sw & 1) {
274 			cpu = cpumask_next(cpu, mask);
275 			if (cpu < nr_cpu_ids)
276 				return cpu;
277 		}
278 		if (sw & 2)
279 			return WORK_CPU_UNBOUND;
280 	}
281 	return WORK_CPU_NONE;
282 }
283 
284 static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
285 				struct workqueue_struct *wq)
286 {
287 	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
288 }
289 
290 /*
291  * CPU iterators
292  *
293  * An extra gcwq is defined for an invalid cpu number
294  * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
295  * specific CPU.  The following iterators are similar to
296  * for_each_*_cpu() iterators but also considers the unbound gcwq.
297  *
298  * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
299  * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND
300  * for_each_cwq_cpu()		: possible CPUs for bound workqueues,
301  *				  WORK_CPU_UNBOUND for unbound workqueues
302  */
303 #define for_each_gcwq_cpu(cpu)						\
304 	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
305 	     (cpu) < WORK_CPU_NONE;					\
306 	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
307 
308 #define for_each_online_gcwq_cpu(cpu)					\
309 	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
310 	     (cpu) < WORK_CPU_NONE;					\
311 	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
312 
313 #define for_each_cwq_cpu(cpu, wq)					\
314 	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
315 	     (cpu) < WORK_CPU_NONE;					\
316 	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
317 
318 #ifdef CONFIG_DEBUG_OBJECTS_WORK
319 
320 static struct debug_obj_descr work_debug_descr;
321 
322 static void *work_debug_hint(void *addr)
323 {
324 	return ((struct work_struct *) addr)->func;
325 }
326 
327 /*
328  * fixup_init is called when:
329  * - an active object is initialized
330  */
331 static int work_fixup_init(void *addr, enum debug_obj_state state)
332 {
333 	struct work_struct *work = addr;
334 
335 	switch (state) {
336 	case ODEBUG_STATE_ACTIVE:
337 		cancel_work_sync(work);
338 		debug_object_init(work, &work_debug_descr);
339 		return 1;
340 	default:
341 		return 0;
342 	}
343 }
344 
345 /*
346  * fixup_activate is called when:
347  * - an active object is activated
348  * - an unknown object is activated (might be a statically initialized object)
349  */
350 static int work_fixup_activate(void *addr, enum debug_obj_state state)
351 {
352 	struct work_struct *work = addr;
353 
354 	switch (state) {
355 
356 	case ODEBUG_STATE_NOTAVAILABLE:
357 		/*
358 		 * This is not really a fixup. The work struct was
359 		 * statically initialized. We just make sure that it
360 		 * is tracked in the object tracker.
361 		 */
362 		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
363 			debug_object_init(work, &work_debug_descr);
364 			debug_object_activate(work, &work_debug_descr);
365 			return 0;
366 		}
367 		WARN_ON_ONCE(1);
368 		return 0;
369 
370 	case ODEBUG_STATE_ACTIVE:
371 		WARN_ON(1);
372 
373 	default:
374 		return 0;
375 	}
376 }
377 
378 /*
379  * fixup_free is called when:
380  * - an active object is freed
381  */
382 static int work_fixup_free(void *addr, enum debug_obj_state state)
383 {
384 	struct work_struct *work = addr;
385 
386 	switch (state) {
387 	case ODEBUG_STATE_ACTIVE:
388 		cancel_work_sync(work);
389 		debug_object_free(work, &work_debug_descr);
390 		return 1;
391 	default:
392 		return 0;
393 	}
394 }
395 
396 static struct debug_obj_descr work_debug_descr = {
397 	.name		= "work_struct",
398 	.debug_hint	= work_debug_hint,
399 	.fixup_init	= work_fixup_init,
400 	.fixup_activate	= work_fixup_activate,
401 	.fixup_free	= work_fixup_free,
402 };
403 
404 static inline void debug_work_activate(struct work_struct *work)
405 {
406 	debug_object_activate(work, &work_debug_descr);
407 }
408 
409 static inline void debug_work_deactivate(struct work_struct *work)
410 {
411 	debug_object_deactivate(work, &work_debug_descr);
412 }
413 
414 void __init_work(struct work_struct *work, int onstack)
415 {
416 	if (onstack)
417 		debug_object_init_on_stack(work, &work_debug_descr);
418 	else
419 		debug_object_init(work, &work_debug_descr);
420 }
421 EXPORT_SYMBOL_GPL(__init_work);
422 
423 void destroy_work_on_stack(struct work_struct *work)
424 {
425 	debug_object_free(work, &work_debug_descr);
426 }
427 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
428 
429 #else
430 static inline void debug_work_activate(struct work_struct *work) { }
431 static inline void debug_work_deactivate(struct work_struct *work) { }
432 #endif
433 
434 /* Serializes the accesses to the list of workqueues. */
435 static DEFINE_SPINLOCK(workqueue_lock);
436 static LIST_HEAD(workqueues);
437 static bool workqueue_freezing;		/* W: have wqs started freezing? */
438 
439 /*
440  * The almighty global cpu workqueues.  nr_running is the only field
441  * which is expected to be used frequently by other cpus via
442  * try_to_wake_up().  Put it in a separate cacheline.
443  */
444 static DEFINE_PER_CPU(struct global_cwq, global_cwq);
445 static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
446 
447 /*
448  * Global cpu workqueue and nr_running counter for unbound gcwq.  The
449  * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
450  * workers have WORKER_UNBOUND set.
451  */
452 static struct global_cwq unbound_global_cwq;
453 static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0);	/* always 0 */
454 
455 static int worker_thread(void *__worker);
456 
457 static struct global_cwq *get_gcwq(unsigned int cpu)
458 {
459 	if (cpu != WORK_CPU_UNBOUND)
460 		return &per_cpu(global_cwq, cpu);
461 	else
462 		return &unbound_global_cwq;
463 }
464 
465 static atomic_t *get_gcwq_nr_running(unsigned int cpu)
466 {
467 	if (cpu != WORK_CPU_UNBOUND)
468 		return &per_cpu(gcwq_nr_running, cpu);
469 	else
470 		return &unbound_gcwq_nr_running;
471 }
472 
473 static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
474 					    struct workqueue_struct *wq)
475 {
476 	if (!(wq->flags & WQ_UNBOUND)) {
477 		if (likely(cpu < nr_cpu_ids)) {
478 #ifdef CONFIG_SMP
479 			return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
480 #else
481 			return wq->cpu_wq.single;
482 #endif
483 		}
484 	} else if (likely(cpu == WORK_CPU_UNBOUND))
485 		return wq->cpu_wq.single;
486 	return NULL;
487 }
488 
489 static unsigned int work_color_to_flags(int color)
490 {
491 	return color << WORK_STRUCT_COLOR_SHIFT;
492 }
493 
494 static int get_work_color(struct work_struct *work)
495 {
496 	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
497 		((1 << WORK_STRUCT_COLOR_BITS) - 1);
498 }
499 
500 static int work_next_color(int color)
501 {
502 	return (color + 1) % WORK_NR_COLORS;
503 }
504 
505 /*
506  * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
507  * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
508  * cleared and the work data contains the cpu number it was last on.
509  *
510  * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
511  * cwq, cpu or clear work->data.  These functions should only be
512  * called while the work is owned - ie. while the PENDING bit is set.
513  *
514  * get_work_[g]cwq() can be used to obtain the gcwq or cwq
515  * corresponding to a work.  gcwq is available once the work has been
516  * queued anywhere after initialization.  cwq is available only from
517  * queueing until execution starts.
518  */
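/*
 * Roughly, the encoding described above is (a sketch, not a literal bit
 * layout; the real flag definitions live in include/linux/workqueue.h):
 *
 *	while queued:    data = cwq pointer | color | WORK_STRUCT_CWQ | flags
 *	while executing: data = cpu << WORK_STRUCT_FLAG_BITS | flags
 */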
519 static inline void set_work_data(struct work_struct *work, unsigned long data,
520 				 unsigned long flags)
521 {
522 	BUG_ON(!work_pending(work));
523 	atomic_long_set(&work->data, data | flags | work_static(work));
524 }
525 
526 static void set_work_cwq(struct work_struct *work,
527 			 struct cpu_workqueue_struct *cwq,
528 			 unsigned long extra_flags)
529 {
530 	set_work_data(work, (unsigned long)cwq,
531 		      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
532 }
533 
534 static void set_work_cpu(struct work_struct *work, unsigned int cpu)
535 {
536 	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
537 }
538 
539 static void clear_work_data(struct work_struct *work)
540 {
541 	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
542 }
543 
544 static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
545 {
546 	unsigned long data = atomic_long_read(&work->data);
547 
548 	if (data & WORK_STRUCT_CWQ)
549 		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
550 	else
551 		return NULL;
552 }
553 
554 static struct global_cwq *get_work_gcwq(struct work_struct *work)
555 {
556 	unsigned long data = atomic_long_read(&work->data);
557 	unsigned int cpu;
558 
559 	if (data & WORK_STRUCT_CWQ)
560 		return ((struct cpu_workqueue_struct *)
561 			(data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;
562 
563 	cpu = data >> WORK_STRUCT_FLAG_BITS;
564 	if (cpu == WORK_CPU_NONE)
565 		return NULL;
566 
567 	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
568 	return get_gcwq(cpu);
569 }
570 
571 /*
572  * Policy functions.  These define the policies on how the global
573  * worker pool is managed.  Unless noted otherwise, these functions
574  * assume that they're being called with gcwq->lock held.
575  */
576 
577 static bool __need_more_worker(struct global_cwq *gcwq)
578 {
579 	return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
580 		gcwq->flags & GCWQ_HIGHPRI_PENDING;
581 }
582 
583 /*
584  * Need to wake up a worker?  Called from anything but currently
585  * running workers.
586  */
587 static bool need_more_worker(struct global_cwq *gcwq)
588 {
589 	return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
590 }
591 
592 /* Can I start working?  Called from busy but !running workers. */
593 static bool may_start_working(struct global_cwq *gcwq)
594 {
595 	return gcwq->nr_idle;
596 }
597 
598 /* Do I need to keep working?  Called from currently running workers. */
599 static bool keep_working(struct global_cwq *gcwq)
600 {
601 	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
602 
603 	return !list_empty(&gcwq->worklist) &&
604 		(atomic_read(nr_running) <= 1 ||
605 		 gcwq->flags & GCWQ_HIGHPRI_PENDING);
606 }
607 
608 /* Do we need a new worker?  Called from manager. */
609 static bool need_to_create_worker(struct global_cwq *gcwq)
610 {
611 	return need_more_worker(gcwq) && !may_start_working(gcwq);
612 }
613 
614 /* Do I need to be the manager? */
615 static bool need_to_manage_workers(struct global_cwq *gcwq)
616 {
617 	return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
618 }
619 
620 /* Do we have too many workers and should some go away? */
621 static bool too_many_workers(struct global_cwq *gcwq)
622 {
623 	bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
624 	int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
625 	int nr_busy = gcwq->nr_workers - nr_idle;
626 
627 	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
628 }
629 
630 /*
631  * Wake up functions.
632  */
633 
634 /* Return the first worker.  Safe with preemption disabled */
635 static struct worker *first_worker(struct global_cwq *gcwq)
636 {
637 	if (unlikely(list_empty(&gcwq->idle_list)))
638 		return NULL;
639 
640 	return list_first_entry(&gcwq->idle_list, struct worker, entry);
641 }
642 
643 /**
644  * wake_up_worker - wake up an idle worker
645  * @gcwq: gcwq to wake worker for
646  *
647  * Wake up the first idle worker of @gcwq.
648  *
649  * CONTEXT:
650  * spin_lock_irq(gcwq->lock).
651  */
652 static void wake_up_worker(struct global_cwq *gcwq)
653 {
654 	struct worker *worker = first_worker(gcwq);
655 
656 	if (likely(worker))
657 		wake_up_process(worker->task);
658 }
659 
660 /**
661  * wq_worker_waking_up - a worker is waking up
662  * @task: task waking up
663  * @cpu: CPU @task is waking up to
664  *
665  * This function is called during try_to_wake_up() when a worker is
666  * being awoken.
667  *
668  * CONTEXT:
669  * spin_lock_irq(rq->lock)
670  */
671 void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
672 {
673 	struct worker *worker = kthread_data(task);
674 
675 	if (!(worker->flags & WORKER_NOT_RUNNING))
676 		atomic_inc(get_gcwq_nr_running(cpu));
677 }
678 
679 /**
680  * wq_worker_sleeping - a worker is going to sleep
681  * @task: task going to sleep
682  * @cpu: CPU in question, must be the current CPU number
683  *
684  * This function is called during schedule() when a busy worker is
685  * going to sleep.  A worker on the same cpu can be woken up by
686  * returning a pointer to its task.
687  *
688  * CONTEXT:
689  * spin_lock_irq(rq->lock)
690  *
691  * RETURNS:
692  * Worker task on @cpu to wake up, %NULL if none.
693  */
694 struct task_struct *wq_worker_sleeping(struct task_struct *task,
695 				       unsigned int cpu)
696 {
697 	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
698 	struct global_cwq *gcwq = get_gcwq(cpu);
699 	atomic_t *nr_running = get_gcwq_nr_running(cpu);
700 
701 	if (worker->flags & WORKER_NOT_RUNNING)
702 		return NULL;
703 
704 	/* this can only happen on the local cpu */
705 	BUG_ON(cpu != raw_smp_processor_id());
706 
707 	/*
708 	 * The counterpart of the following dec_and_test, implied mb,
709 	 * worklist not empty test sequence is in insert_work().
710 	 * Please read comment there.
711 	 *
712 	 * NOT_RUNNING is clear.  This means that the trustee is not in
713 	 * charge and we're running on the local cpu w/ rq lock held
714 	 * and preemption disabled, which in turn means that no one else
715 	 * could be manipulating idle_list, so dereferencing idle_list
716 	 * without the gcwq lock is safe.
717 	 */
718 	if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
719 		to_wakeup = first_worker(gcwq);
720 	return to_wakeup ? to_wakeup->task : NULL;
721 }
722 
723 /**
724  * worker_set_flags - set worker flags and adjust nr_running accordingly
725  * @worker: self
726  * @flags: flags to set
727  * @wakeup: wakeup an idle worker if necessary
728  *
729  * Set @flags in @worker->flags and adjust nr_running accordingly.  If
730  * nr_running becomes zero and @wakeup is %true, an idle worker is
731  * woken up.
732  *
733  * CONTEXT:
734  * spin_lock_irq(gcwq->lock)
735  */
736 static inline void worker_set_flags(struct worker *worker, unsigned int flags,
737 				    bool wakeup)
738 {
739 	struct global_cwq *gcwq = worker->gcwq;
740 
741 	WARN_ON_ONCE(worker->task != current);
742 
743 	/*
744 	 * If transitioning into NOT_RUNNING, adjust nr_running and
745 	 * wake up an idle worker as necessary if requested by
746 	 * @wakeup.
747 	 */
748 	if ((flags & WORKER_NOT_RUNNING) &&
749 	    !(worker->flags & WORKER_NOT_RUNNING)) {
750 		atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
751 
752 		if (wakeup) {
753 			if (atomic_dec_and_test(nr_running) &&
754 			    !list_empty(&gcwq->worklist))
755 				wake_up_worker(gcwq);
756 		} else
757 			atomic_dec(nr_running);
758 	}
759 
760 	worker->flags |= flags;
761 }
762 
763 /**
764  * worker_clr_flags - clear worker flags and adjust nr_running accordingly
765  * @worker: self
766  * @flags: flags to clear
767  *
768  * Clear @flags in @worker->flags and adjust nr_running accordingly.
769  *
770  * CONTEXT:
771  * spin_lock_irq(gcwq->lock)
772  */
773 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
774 {
775 	struct global_cwq *gcwq = worker->gcwq;
776 	unsigned int oflags = worker->flags;
777 
778 	WARN_ON_ONCE(worker->task != current);
779 
780 	worker->flags &= ~flags;
781 
782 	/*
783 	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
784 	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a mask
785 	 * of multiple flags, not a single flag.
786 	 */
787 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
788 		if (!(worker->flags & WORKER_NOT_RUNNING))
789 			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
790 }
791 
792 /**
793  * busy_worker_head - return the busy hash head for a work
794  * @gcwq: gcwq of interest
795  * @work: work to be hashed
796  *
797  * Return hash head of @gcwq for @work.
798  *
799  * CONTEXT:
800  * spin_lock_irq(gcwq->lock).
801  *
802  * RETURNS:
803  * Pointer to the hash head.
804  */
805 static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
806 					   struct work_struct *work)
807 {
808 	const int base_shift = ilog2(sizeof(struct work_struct));
809 	unsigned long v = (unsigned long)work;
810 
811 	/* simple shift and fold hash, do we need something better? */
812 	v >>= base_shift;
813 	v += v >> BUSY_WORKER_HASH_ORDER;
814 	v &= BUSY_WORKER_HASH_MASK;
815 
816 	return &gcwq->busy_hash[v];
817 }
818 
819 /**
820  * __find_worker_executing_work - find worker which is executing a work
821  * @gcwq: gcwq of interest
822  * @bwh: hash head as returned by busy_worker_head()
823  * @work: work to find worker for
824  *
825  * Find a worker which is executing @work on @gcwq.  @bwh should be
826  * the hash head obtained by calling busy_worker_head() with the same
827  * work.
828  *
829  * CONTEXT:
830  * spin_lock_irq(gcwq->lock).
831  *
832  * RETURNS:
833  * Pointer to worker which is executing @work if found, NULL
834  * otherwise.
835  */
836 static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
837 						   struct hlist_head *bwh,
838 						   struct work_struct *work)
839 {
840 	struct worker *worker;
841 	struct hlist_node *tmp;
842 
843 	hlist_for_each_entry(worker, tmp, bwh, hentry)
844 		if (worker->current_work == work)
845 			return worker;
846 	return NULL;
847 }
848 
849 /**
850  * find_worker_executing_work - find worker which is executing a work
851  * @gcwq: gcwq of interest
852  * @work: work to find worker for
853  *
854  * Find a worker which is executing @work on @gcwq.  This function is
855  * identical to __find_worker_executing_work() except that this
856  * function calculates @bwh itself.
857  *
858  * CONTEXT:
859  * spin_lock_irq(gcwq->lock).
860  *
861  * RETURNS:
862  * Pointer to worker which is executing @work if found, NULL
863  * otherwise.
864  */
865 static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
866 						 struct work_struct *work)
867 {
868 	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
869 					    work);
870 }
871 
872 /**
873  * gcwq_determine_ins_pos - find insertion position
874  * @gcwq: gcwq of interest
875  * @cwq: cwq a work is being queued for
876  *
877  * A work for @cwq is about to be queued on @gcwq, determine insertion
878  * position for the work.  If @cwq is for HIGHPRI wq, the work is
879  * queued at the head of the queue but in FIFO order with respect to
880  * other HIGHPRI works; otherwise, at the end of the queue.  This
881  * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
882  * there are HIGHPRI works pending.
883  *
884  * CONTEXT:
885  * spin_lock_irq(gcwq->lock).
886  *
887  * RETURNS:
888  * Pointer to insertion position.
889  */
890 static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
891 					       struct cpu_workqueue_struct *cwq)
892 {
893 	struct work_struct *twork;
894 
895 	if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
896 		return &gcwq->worklist;
897 
898 	list_for_each_entry(twork, &gcwq->worklist, entry) {
899 		struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);
900 
901 		if (!(tcwq->wq->flags & WQ_HIGHPRI))
902 			break;
903 	}
904 
905 	gcwq->flags |= GCWQ_HIGHPRI_PENDING;
906 	return &twork->entry;
907 }
908 
909 /**
910  * insert_work - insert a work into gcwq
911  * @cwq: cwq @work belongs to
912  * @work: work to insert
913  * @head: insertion point
914  * @extra_flags: extra WORK_STRUCT_* flags to set
915  *
916  * Insert @work which belongs to @cwq into @gcwq after @head.
917  * @extra_flags is or'd to work_struct flags.
918  *
919  * CONTEXT:
920  * spin_lock_irq(gcwq->lock).
921  */
922 static void insert_work(struct cpu_workqueue_struct *cwq,
923 			struct work_struct *work, struct list_head *head,
924 			unsigned int extra_flags)
925 {
926 	struct global_cwq *gcwq = cwq->gcwq;
927 
928 	/* we own @work, set data and link */
929 	set_work_cwq(work, cwq, extra_flags);
930 
931 	/*
932 	 * Ensure that we get the right work->data if we see the
933 	 * result of list_add() below, see try_to_grab_pending().
934 	 */
935 	smp_wmb();
936 
937 	list_add_tail(&work->entry, head);
938 
939 	/*
940 	 * Ensure either wq_worker_sleeping() sees the above
941 	 * list_add_tail() or we see zero nr_running to avoid workers
942 	 * lying around lazily while there are works to be processed.
943 	 */
944 	smp_mb();
945 
946 	if (__need_more_worker(gcwq))
947 		wake_up_worker(gcwq);
948 }
949 
950 /*
951  * Test whether @work is being queued from another work executing on the
952  * same workqueue.  This is rather expensive and should only be used from
953  * cold paths.
954  */
955 static bool is_chained_work(struct workqueue_struct *wq)
956 {
957 	unsigned long flags;
958 	unsigned int cpu;
959 
960 	for_each_gcwq_cpu(cpu) {
961 		struct global_cwq *gcwq = get_gcwq(cpu);
962 		struct worker *worker;
963 		struct hlist_node *pos;
964 		int i;
965 
966 		spin_lock_irqsave(&gcwq->lock, flags);
967 		for_each_busy_worker(worker, i, pos, gcwq) {
968 			if (worker->task != current)
969 				continue;
970 			spin_unlock_irqrestore(&gcwq->lock, flags);
971 			/*
972 			 * I'm @worker, no locking necessary.  See if @work
973 			 * is headed to the same workqueue.
974 			 */
975 			return worker->current_cwq->wq == wq;
976 		}
977 		spin_unlock_irqrestore(&gcwq->lock, flags);
978 	}
979 	return false;
980 }
981 
982 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
983 			 struct work_struct *work)
984 {
985 	struct global_cwq *gcwq;
986 	struct cpu_workqueue_struct *cwq;
987 	struct list_head *worklist;
988 	unsigned int work_flags;
989 	unsigned long flags;
990 
991 	debug_work_activate(work);
992 
993 	/* if dying, only works from the same workqueue are allowed */
994 	if (unlikely(wq->flags & WQ_DRAINING) &&
995 	    WARN_ON_ONCE(!is_chained_work(wq)))
996 		return;
997 
998 	/* determine gcwq to use */
999 	if (!(wq->flags & WQ_UNBOUND)) {
1000 		struct global_cwq *last_gcwq;
1001 
1002 		if (unlikely(cpu == WORK_CPU_UNBOUND))
1003 			cpu = raw_smp_processor_id();
1004 
1005 		/*
1006 		 * It's multi cpu.  If @wq is non-reentrant and @work
1007 		 * was previously on a different cpu, it might still
1008 		 * be running there, in which case the work needs to
1009 		 * be queued on that cpu to guarantee non-reentrance.
1010 		 */
1011 		gcwq = get_gcwq(cpu);
1012 		if (wq->flags & WQ_NON_REENTRANT &&
1013 		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
1014 			struct worker *worker;
1015 
1016 			spin_lock_irqsave(&last_gcwq->lock, flags);
1017 
1018 			worker = find_worker_executing_work(last_gcwq, work);
1019 
1020 			if (worker && worker->current_cwq->wq == wq)
1021 				gcwq = last_gcwq;
1022 			else {
1023 				/* meh... not running there, queue here */
1024 				spin_unlock_irqrestore(&last_gcwq->lock, flags);
1025 				spin_lock_irqsave(&gcwq->lock, flags);
1026 			}
1027 		} else
1028 			spin_lock_irqsave(&gcwq->lock, flags);
1029 	} else {
1030 		gcwq = get_gcwq(WORK_CPU_UNBOUND);
1031 		spin_lock_irqsave(&gcwq->lock, flags);
1032 	}
1033 
1034 	/* gcwq determined, get cwq and queue */
1035 	cwq = get_cwq(gcwq->cpu, wq);
1036 	trace_workqueue_queue_work(cpu, cwq, work);
1037 
1038 	BUG_ON(!list_empty(&work->entry));
1039 
1040 	cwq->nr_in_flight[cwq->work_color]++;
1041 	work_flags = work_color_to_flags(cwq->work_color);
1042 
1043 	if (likely(cwq->nr_active < cwq->max_active)) {
1044 		trace_workqueue_activate_work(work);
1045 		cwq->nr_active++;
1046 		worklist = gcwq_determine_ins_pos(gcwq, cwq);
1047 	} else {
1048 		work_flags |= WORK_STRUCT_DELAYED;
1049 		worklist = &cwq->delayed_works;
1050 	}
1051 
1052 	insert_work(cwq, work, worklist, work_flags);
1053 
1054 	spin_unlock_irqrestore(&gcwq->lock, flags);
1055 }
1056 
1057 /**
1058  * queue_work - queue work on a workqueue
1059  * @wq: workqueue to use
1060  * @work: work to queue
1061  *
1062  * Returns 0 if @work was already on a queue, non-zero otherwise.
1063  *
1064  * We queue the work to the CPU on which it was submitted, but if the CPU dies
1065  * it can be processed by another CPU.
1066  */
1067 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
1068 {
1069 	int ret;
1070 
1071 	ret = queue_work_on(get_cpu(), wq, work);
1072 	put_cpu();
1073 
1074 	return ret;
1075 }
1076 EXPORT_SYMBOL_GPL(queue_work);
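/*
 * Illustrative usage (a sketch, not part of this file; the function and
 * work item names are made up):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		... runs later in process context on a kworker ...
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(system_wq, &my_work);
 *
 * The call returns non-zero if it queued the work, 0 if the work was
 * already pending somewhere.
 */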
1077 
1078 /**
1079  * queue_work_on - queue work on specific cpu
1080  * @cpu: CPU number to execute work on
1081  * @wq: workqueue to use
1082  * @work: work to queue
1083  *
1084  * Returns 0 if @work was already on a queue, non-zero otherwise.
1085  *
1086  * We queue the work to a specific CPU, the caller must ensure it
1087  * can't go away.
1088  */
1089 int
1090 queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
1091 {
1092 	int ret = 0;
1093 
1094 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1095 		__queue_work(cpu, wq, work);
1096 		ret = 1;
1097 	}
1098 	return ret;
1099 }
1100 EXPORT_SYMBOL_GPL(queue_work_on);
1101 
1102 static void delayed_work_timer_fn(unsigned long __data)
1103 {
1104 	struct delayed_work *dwork = (struct delayed_work *)__data;
1105 	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
1106 
1107 	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
1108 }
1109 
1110 /**
1111  * queue_delayed_work - queue work on a workqueue after delay
1112  * @wq: workqueue to use
1113  * @dwork: delayable work to queue
1114  * @delay: number of jiffies to wait before queueing
1115  *
1116  * Returns 0 if @dwork was already on a queue, non-zero otherwise.
1117  */
1118 int queue_delayed_work(struct workqueue_struct *wq,
1119 			struct delayed_work *dwork, unsigned long delay)
1120 {
1121 	if (delay == 0)
1122 		return queue_work(wq, &dwork->work);
1123 
1124 	return queue_delayed_work_on(-1, wq, dwork, delay);
1125 }
1126 EXPORT_SYMBOL_GPL(queue_delayed_work);
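/*
 * Illustrative usage (a sketch, not part of this file; names are made up):
 *
 *	static void my_poll_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);
 *
 *	queue_delayed_work(system_wq, &my_poll, msecs_to_jiffies(100));
 *
 * The work runs roughly 100ms later; a delay of 0 degenerates to a plain
 * queue_work() as seen above.
 */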
1127 
1128 /**
1129  * queue_delayed_work_on - queue work on specific CPU after delay
1130  * @cpu: CPU number to execute work on
1131  * @wq: workqueue to use
1132  * @dwork: work to queue
1133  * @delay: number of jiffies to wait before queueing
1134  *
1135  * Returns 0 if @dwork was already on a queue, non-zero otherwise.
1136  */
1137 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1138 			struct delayed_work *dwork, unsigned long delay)
1139 {
1140 	int ret = 0;
1141 	struct timer_list *timer = &dwork->timer;
1142 	struct work_struct *work = &dwork->work;
1143 
1144 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1145 		unsigned int lcpu;
1146 
1147 		BUG_ON(timer_pending(timer));
1148 		BUG_ON(!list_empty(&work->entry));
1149 
1150 		timer_stats_timer_set_start_info(&dwork->timer);
1151 
1152 		/*
1153 		 * This stores cwq for the moment, for the timer_fn.
1154 		 * Note that the work's gcwq is preserved to allow
1155 		 * reentrance detection for delayed works.
1156 		 */
1157 		if (!(wq->flags & WQ_UNBOUND)) {
1158 			struct global_cwq *gcwq = get_work_gcwq(work);
1159 
1160 			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
1161 				lcpu = gcwq->cpu;
1162 			else
1163 				lcpu = raw_smp_processor_id();
1164 		} else
1165 			lcpu = WORK_CPU_UNBOUND;
1166 
1167 		set_work_cwq(work, get_cwq(lcpu, wq), 0);
1168 
1169 		timer->expires = jiffies + delay;
1170 		timer->data = (unsigned long)dwork;
1171 		timer->function = delayed_work_timer_fn;
1172 
1173 		if (unlikely(cpu >= 0))
1174 			add_timer_on(timer, cpu);
1175 		else
1176 			add_timer(timer);
1177 		ret = 1;
1178 	}
1179 	return ret;
1180 }
1181 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
1182 
1183 /**
1184  * worker_enter_idle - enter idle state
1185  * @worker: worker which is entering idle state
1186  *
1187  * @worker is entering idle state.  Update stats and idle timer if
1188  * necessary.
1189  *
1190  * LOCKING:
1191  * spin_lock_irq(gcwq->lock).
1192  */
1193 static void worker_enter_idle(struct worker *worker)
1194 {
1195 	struct global_cwq *gcwq = worker->gcwq;
1196 
1197 	BUG_ON(worker->flags & WORKER_IDLE);
1198 	BUG_ON(!list_empty(&worker->entry) &&
1199 	       (worker->hentry.next || worker->hentry.pprev));
1200 
1201 	/* can't use worker_set_flags(), also called from start_worker() */
1202 	worker->flags |= WORKER_IDLE;
1203 	gcwq->nr_idle++;
1204 	worker->last_active = jiffies;
1205 
1206 	/* idle_list is LIFO */
1207 	list_add(&worker->entry, &gcwq->idle_list);
1208 
1209 	if (likely(!(worker->flags & WORKER_ROGUE))) {
1210 		if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
1211 			mod_timer(&gcwq->idle_timer,
1212 				  jiffies + IDLE_WORKER_TIMEOUT);
1213 	} else
1214 		wake_up_all(&gcwq->trustee_wait);
1215 
1216 	/* sanity check nr_running */
1217 	WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
1218 		     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
1219 }
1220 
1221 /**
1222  * worker_leave_idle - leave idle state
1223  * @worker: worker which is leaving idle state
1224  *
1225  * @worker is leaving idle state.  Update stats.
1226  *
1227  * LOCKING:
1228  * spin_lock_irq(gcwq->lock).
1229  */
1230 static void worker_leave_idle(struct worker *worker)
1231 {
1232 	struct global_cwq *gcwq = worker->gcwq;
1233 
1234 	BUG_ON(!(worker->flags & WORKER_IDLE));
1235 	worker_clr_flags(worker, WORKER_IDLE);
1236 	gcwq->nr_idle--;
1237 	list_del_init(&worker->entry);
1238 }
1239 
1240 /**
1241  * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1242  * @worker: self
1243  *
1244  * Works which are scheduled while the cpu is online must at least be
1245  * scheduled to a worker which is bound to the cpu so that if they are
1246  * flushed from cpu callbacks while cpu is going down, they are
1247  * guaranteed to execute on the cpu.
1248  *
1249  * This function is to be used by rogue workers and rescuers to bind
1250  * themselves to the target cpu and may race with cpu going down or
1251  * coming online.  kthread_bind() can't be used because it may put the
1252  * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
1253  * verbatim as it's best effort and blocking and gcwq may be
1254  * [dis]associated in the meantime.
1255  *
1256  * This function tries set_cpus_allowed() and locks gcwq and verifies
1257  * the binding against GCWQ_DISASSOCIATED which is set during
1258  * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
1259  * idle state or fetches works without dropping lock, it can guarantee
1260  * the scheduling requirement described in the first paragraph.
1261  *
1262  * CONTEXT:
1263  * Might sleep.  Called without any lock but returns with gcwq->lock
1264  * held.
1265  *
1266  * RETURNS:
1267  * %true if the associated gcwq is online (@worker is successfully
1268  * bound), %false if offline.
1269  */
1270 static bool worker_maybe_bind_and_lock(struct worker *worker)
1271 __acquires(&gcwq->lock)
1272 {
1273 	struct global_cwq *gcwq = worker->gcwq;
1274 	struct task_struct *task = worker->task;
1275 
1276 	while (true) {
1277 		/*
1278 		 * The following call may fail, succeed or succeed
1279 		 * without actually migrating the task to the cpu if
1280 		 * it races with cpu hotunplug operation.  Verify
1281 		 * against GCWQ_DISASSOCIATED.
1282 		 */
1283 		if (!(gcwq->flags & GCWQ_DISASSOCIATED))
1284 			set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
1285 
1286 		spin_lock_irq(&gcwq->lock);
1287 		if (gcwq->flags & GCWQ_DISASSOCIATED)
1288 			return false;
1289 		if (task_cpu(task) == gcwq->cpu &&
1290 		    cpumask_equal(&current->cpus_allowed,
1291 				  get_cpu_mask(gcwq->cpu)))
1292 			return true;
1293 		spin_unlock_irq(&gcwq->lock);
1294 
1295 		/*
1296 		 * We've raced with CPU hot[un]plug.  Give it a breather
1297 		 * and retry migration.  cond_resched() is required here;
1298 		 * otherwise, we might deadlock against cpu_stop trying to
1299 		 * bring down the CPU on a non-preemptive kernel.
1300 		 */
1301 		cpu_relax();
1302 		cond_resched();
1303 	}
1304 }
1305 
1306 /*
1307  * Function for worker->rebind_work used to rebind rogue busy workers
1308  * to the associated cpu which is coming back online.  This is
1309  * scheduled by cpu up but can race with other cpu hotplug operations
1310  * and may be executed twice without intervening cpu down.
1311  */
1312 static void worker_rebind_fn(struct work_struct *work)
1313 {
1314 	struct worker *worker = container_of(work, struct worker, rebind_work);
1315 	struct global_cwq *gcwq = worker->gcwq;
1316 
1317 	if (worker_maybe_bind_and_lock(worker))
1318 		worker_clr_flags(worker, WORKER_REBIND);
1319 
1320 	spin_unlock_irq(&gcwq->lock);
1321 }
1322 
1323 static struct worker *alloc_worker(void)
1324 {
1325 	struct worker *worker;
1326 
1327 	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1328 	if (worker) {
1329 		INIT_LIST_HEAD(&worker->entry);
1330 		INIT_LIST_HEAD(&worker->scheduled);
1331 		INIT_WORK(&worker->rebind_work, worker_rebind_fn);
1332 		/* on creation a worker is in !idle && prep state */
1333 		worker->flags = WORKER_PREP;
1334 	}
1335 	return worker;
1336 }
1337 
1338 /**
1339  * create_worker - create a new workqueue worker
1340  * @gcwq: gcwq the new worker will belong to
1341  * @bind: whether to set affinity to @gcwq's cpu or not
1342  *
1343  * Create a new worker which is bound to @gcwq.  The returned worker
1344  * can be started by calling start_worker() or destroyed using
1345  * destroy_worker().
1346  *
1347  * CONTEXT:
1348  * Might sleep.  Does GFP_KERNEL allocations.
1349  *
1350  * RETURNS:
1351  * Pointer to the newly created worker.
1352  */
1353 static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
1354 {
1355 	bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
1356 	struct worker *worker = NULL;
1357 	int id = -1;
1358 
1359 	spin_lock_irq(&gcwq->lock);
1360 	while (ida_get_new(&gcwq->worker_ida, &id)) {
1361 		spin_unlock_irq(&gcwq->lock);
1362 		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
1363 			goto fail;
1364 		spin_lock_irq(&gcwq->lock);
1365 	}
1366 	spin_unlock_irq(&gcwq->lock);
1367 
1368 	worker = alloc_worker();
1369 	if (!worker)
1370 		goto fail;
1371 
1372 	worker->gcwq = gcwq;
1373 	worker->id = id;
1374 
1375 	if (!on_unbound_cpu)
1376 		worker->task = kthread_create_on_node(worker_thread,
1377 						      worker,
1378 						      cpu_to_node(gcwq->cpu),
1379 						      "kworker/%u:%d", gcwq->cpu, id);
1380 	else
1381 		worker->task = kthread_create(worker_thread, worker,
1382 					      "kworker/u:%d", id);
1383 	if (IS_ERR(worker->task))
1384 		goto fail;
1385 
1386 	/*
1387 	 * A rogue worker will become a regular one if CPU comes
1388 	 * online later on.  Make sure every worker has
1389 	 * PF_THREAD_BOUND set.
1390 	 */
1391 	if (bind && !on_unbound_cpu)
1392 		kthread_bind(worker->task, gcwq->cpu);
1393 	else {
1394 		worker->task->flags |= PF_THREAD_BOUND;
1395 		if (on_unbound_cpu)
1396 			worker->flags |= WORKER_UNBOUND;
1397 	}
1398 
1399 	return worker;
1400 fail:
1401 	if (id >= 0) {
1402 		spin_lock_irq(&gcwq->lock);
1403 		ida_remove(&gcwq->worker_ida, id);
1404 		spin_unlock_irq(&gcwq->lock);
1405 	}
1406 	kfree(worker);
1407 	return NULL;
1408 }
1409 
1410 /**
1411  * start_worker - start a newly created worker
1412  * @worker: worker to start
1413  *
1414  * Make the gcwq aware of @worker and start it.
1415  *
1416  * CONTEXT:
1417  * spin_lock_irq(gcwq->lock).
1418  */
1419 static void start_worker(struct worker *worker)
1420 {
1421 	worker->flags |= WORKER_STARTED;
1422 	worker->gcwq->nr_workers++;
1423 	worker_enter_idle(worker);
1424 	wake_up_process(worker->task);
1425 }
1426 
1427 /**
1428  * destroy_worker - destroy a workqueue worker
1429  * @worker: worker to be destroyed
1430  *
1431  * Destroy @worker and adjust @gcwq stats accordingly.
1432  *
1433  * CONTEXT:
1434  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1435  */
1436 static void destroy_worker(struct worker *worker)
1437 {
1438 	struct global_cwq *gcwq = worker->gcwq;
1439 	int id = worker->id;
1440 
1441 	/* sanity check frenzy */
1442 	BUG_ON(worker->current_work);
1443 	BUG_ON(!list_empty(&worker->scheduled));
1444 
1445 	if (worker->flags & WORKER_STARTED)
1446 		gcwq->nr_workers--;
1447 	if (worker->flags & WORKER_IDLE)
1448 		gcwq->nr_idle--;
1449 
1450 	list_del_init(&worker->entry);
1451 	worker->flags |= WORKER_DIE;
1452 
1453 	spin_unlock_irq(&gcwq->lock);
1454 
1455 	kthread_stop(worker->task);
1456 	kfree(worker);
1457 
1458 	spin_lock_irq(&gcwq->lock);
1459 	ida_remove(&gcwq->worker_ida, id);
1460 }
1461 
1462 static void idle_worker_timeout(unsigned long __gcwq)
1463 {
1464 	struct global_cwq *gcwq = (void *)__gcwq;
1465 
1466 	spin_lock_irq(&gcwq->lock);
1467 
1468 	if (too_many_workers(gcwq)) {
1469 		struct worker *worker;
1470 		unsigned long expires;
1471 
1472 		/* idle_list is kept in LIFO order, check the last one */
1473 		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1474 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1475 
1476 		if (time_before(jiffies, expires))
1477 			mod_timer(&gcwq->idle_timer, expires);
1478 		else {
1479 			/* it's been idle for too long, wake up manager */
1480 			gcwq->flags |= GCWQ_MANAGE_WORKERS;
1481 			wake_up_worker(gcwq);
1482 		}
1483 	}
1484 
1485 	spin_unlock_irq(&gcwq->lock);
1486 }
1487 
1488 static bool send_mayday(struct work_struct *work)
1489 {
1490 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1491 	struct workqueue_struct *wq = cwq->wq;
1492 	unsigned int cpu;
1493 
1494 	if (!(wq->flags & WQ_RESCUER))
1495 		return false;
1496 
1497 	/* mayday mayday mayday */
1498 	cpu = cwq->gcwq->cpu;
1499 	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1500 	if (cpu == WORK_CPU_UNBOUND)
1501 		cpu = 0;
1502 	if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
1503 		wake_up_process(wq->rescuer->task);
1504 	return true;
1505 }
1506 
1507 static void gcwq_mayday_timeout(unsigned long __gcwq)
1508 {
1509 	struct global_cwq *gcwq = (void *)__gcwq;
1510 	struct work_struct *work;
1511 
1512 	spin_lock_irq(&gcwq->lock);
1513 
1514 	if (need_to_create_worker(gcwq)) {
1515 		/*
1516 		 * We've been trying to create a new worker but
1517 		 * haven't been successful.  We might be hitting an
1518 		 * allocation deadlock.  Send distress signals to
1519 		 * rescuers.
1520 		 */
1521 		list_for_each_entry(work, &gcwq->worklist, entry)
1522 			send_mayday(work);
1523 	}
1524 
1525 	spin_unlock_irq(&gcwq->lock);
1526 
1527 	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
1528 }
1529 
1530 /**
1531  * maybe_create_worker - create a new worker if necessary
1532  * @gcwq: gcwq to create a new worker for
1533  *
1534  * Create a new worker for @gcwq if necessary.  @gcwq is guaranteed to
1535  * have at least one idle worker on return from this function.  If
1536  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1537  * sent to all rescuers with works scheduled on @gcwq to resolve
1538  * possible allocation deadlock.
1539  *
1540  * On return, need_to_create_worker() is guaranteed to be false and
1541  * may_start_working() true.
1542  *
1543  * LOCKING:
1544  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1545  * multiple times.  Does GFP_KERNEL allocations.  Called only from
1546  * manager.
1547  *
1548  * RETURNS:
1549  * false if no action was taken and gcwq->lock stayed locked, true
1550  * otherwise.
1551  */
1552 static bool maybe_create_worker(struct global_cwq *gcwq)
1553 __releases(&gcwq->lock)
1554 __acquires(&gcwq->lock)
1555 {
1556 	if (!need_to_create_worker(gcwq))
1557 		return false;
1558 restart:
1559 	spin_unlock_irq(&gcwq->lock);
1560 
1561 	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1562 	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1563 
1564 	while (true) {
1565 		struct worker *worker;
1566 
1567 		worker = create_worker(gcwq, true);
1568 		if (worker) {
1569 			del_timer_sync(&gcwq->mayday_timer);
1570 			spin_lock_irq(&gcwq->lock);
1571 			start_worker(worker);
1572 			BUG_ON(need_to_create_worker(gcwq));
1573 			return true;
1574 		}
1575 
1576 		if (!need_to_create_worker(gcwq))
1577 			break;
1578 
1579 		__set_current_state(TASK_INTERRUPTIBLE);
1580 		schedule_timeout(CREATE_COOLDOWN);
1581 
1582 		if (!need_to_create_worker(gcwq))
1583 			break;
1584 	}
1585 
1586 	del_timer_sync(&gcwq->mayday_timer);
1587 	spin_lock_irq(&gcwq->lock);
1588 	if (need_to_create_worker(gcwq))
1589 		goto restart;
1590 	return true;
1591 }
1592 
1593 /**
1594  * maybe_destroy_workers - destroy workers which have been idle for a while
1595  * @gcwq: gcwq to destroy workers for
1596  *
1597  * Destroy @gcwq workers which have been idle for longer than
1598  * IDLE_WORKER_TIMEOUT.
1599  *
1600  * LOCKING:
1601  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1602  * multiple times.  Called only from manager.
1603  *
1604  * RETURNS:
1605  * false if no action was taken and gcwq->lock stayed locked, true
1606  * otherwise.
1607  */
1608 static bool maybe_destroy_workers(struct global_cwq *gcwq)
1609 {
1610 	bool ret = false;
1611 
1612 	while (too_many_workers(gcwq)) {
1613 		struct worker *worker;
1614 		unsigned long expires;
1615 
1616 		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1617 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1618 
1619 		if (time_before(jiffies, expires)) {
1620 			mod_timer(&gcwq->idle_timer, expires);
1621 			break;
1622 		}
1623 
1624 		destroy_worker(worker);
1625 		ret = true;
1626 	}
1627 
1628 	return ret;
1629 }
1630 
1631 /**
1632  * manage_workers - manage worker pool
1633  * @worker: self
1634  *
1635  * Assume the manager role and manage gcwq worker pool @worker belongs
1636  * to.  At any given time, there can be only zero or one manager per
1637  * gcwq.  The exclusion is handled automatically by this function.
1638  *
1639  * The caller can safely start processing works on false return.  On
1640  * true return, it's guaranteed that need_to_create_worker() is false
1641  * and may_start_working() is true.
1642  *
1643  * CONTEXT:
1644  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1645  * multiple times.  Does GFP_KERNEL allocations.
1646  *
1647  * RETURNS:
1648  * false if no action was taken and gcwq->lock stayed locked, true if
1649  * some action was taken.
1650  */
1651 static bool manage_workers(struct worker *worker)
1652 {
1653 	struct global_cwq *gcwq = worker->gcwq;
1654 	bool ret = false;
1655 
1656 	if (gcwq->flags & GCWQ_MANAGING_WORKERS)
1657 		return ret;
1658 
1659 	gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
1660 	gcwq->flags |= GCWQ_MANAGING_WORKERS;
1661 
1662 	/*
1663 	 * Destroy and then create so that may_start_working() is true
1664 	 * on return.
1665 	 */
1666 	ret |= maybe_destroy_workers(gcwq);
1667 	ret |= maybe_create_worker(gcwq);
1668 
1669 	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
1670 
1671 	/*
1672 	 * The trustee might be waiting to take over the manager
1673 	 * position, tell it we're done.
1674 	 */
1675 	if (unlikely(gcwq->trustee))
1676 		wake_up_all(&gcwq->trustee_wait);
1677 
1678 	return ret;
1679 }
1680 
1681 /**
1682  * move_linked_works - move linked works to a list
1683  * @work: start of series of works to be scheduled
1684  * @head: target list to append @work to
1685  * @nextp: out parameter for nested worklist walking
1686  *
1687  * Schedule linked works starting from @work to @head.  Work series to
1688  * be scheduled starts at @work and includes any consecutive work with
1689  * WORK_STRUCT_LINKED set in its predecessor.
1690  *
1691  * If @nextp is not NULL, it's updated to point to the next work of
1692  * the last scheduled work.  This allows move_linked_works() to be
1693  * nested inside outer list_for_each_entry_safe().
1694  *
1695  * CONTEXT:
1696  * spin_lock_irq(gcwq->lock).
1697  */
1698 static void move_linked_works(struct work_struct *work, struct list_head *head,
1699 			      struct work_struct **nextp)
1700 {
1701 	struct work_struct *n;
1702 
1703 	/*
1704 	 * Linked worklist will always end before the end of the list,
1705 	 * use NULL for list head.
1706 	 */
1707 	list_for_each_entry_safe_from(work, n, NULL, entry) {
1708 		list_move_tail(&work->entry, head);
1709 		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1710 			break;
1711 	}
1712 
1713 	/*
1714 	 * If we're already inside safe list traversal and have moved
1715 	 * multiple works to the scheduled queue, the next position
1716 	 * needs to be updated.
1717 	 */
1718 	if (nextp)
1719 		*nextp = n;
1720 }
1721 
1722 static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1723 {
1724 	struct work_struct *work = list_first_entry(&cwq->delayed_works,
1725 						    struct work_struct, entry);
1726 	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
1727 
1728 	trace_workqueue_activate_work(work);
1729 	move_linked_works(work, pos, NULL);
1730 	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1731 	cwq->nr_active++;
1732 }
1733 
1734 /**
1735  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
1736  * @cwq: cwq of interest
1737  * @color: color of work which left the queue
1738  * @delayed: for a delayed work
1739  *
1740  * A work either has completed or is removed from pending queue,
1741  * decrement nr_in_flight of its cwq and handle workqueue flushing.
1742  *
1743  * CONTEXT:
1744  * spin_lock_irq(gcwq->lock).
1745  */
1746 static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
1747 				 bool delayed)
1748 {
1749 	/* ignore uncolored works */
1750 	if (color == WORK_NO_COLOR)
1751 		return;
1752 
1753 	cwq->nr_in_flight[color]--;
1754 
1755 	if (!delayed) {
1756 		cwq->nr_active--;
1757 		if (!list_empty(&cwq->delayed_works)) {
1758 			/* one down, submit a delayed one */
1759 			if (cwq->nr_active < cwq->max_active)
1760 				cwq_activate_first_delayed(cwq);
1761 		}
1762 	}
1763 
1764 	/* is flush in progress and are we at the flushing tip? */
1765 	if (likely(cwq->flush_color != color))
1766 		return;
1767 
1768 	/* are there still in-flight works? */
1769 	if (cwq->nr_in_flight[color])
1770 		return;
1771 
1772 	/* this cwq is done, clear flush_color */
1773 	cwq->flush_color = -1;
1774 
1775 	/*
1776 	 * If this was the last cwq, wake up the first flusher.  It
1777 	 * will handle the rest.
1778 	 */
1779 	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1780 		complete(&cwq->wq->first_flusher->done);
1781 }
1782 
1783 /**
1784  * process_one_work - process single work
1785  * @worker: self
1786  * @work: work to process
1787  *
1788  * Process @work.  This function contains all the logic necessary to
1789  * process a single work item, including synchronization against and
1790  * interaction with other workers on the same cpu, queueing and
1791  * flushing.  As long as the context requirement is met, any worker can
1792  * call this function to process a work.
1793  *
1794  * CONTEXT:
1795  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1796  */
1797 static void process_one_work(struct worker *worker, struct work_struct *work)
1798 __releases(&gcwq->lock)
1799 __acquires(&gcwq->lock)
1800 {
1801 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1802 	struct global_cwq *gcwq = cwq->gcwq;
1803 	struct hlist_head *bwh = busy_worker_head(gcwq, work);
1804 	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
1805 	work_func_t f = work->func;
1806 	int work_color;
1807 	struct worker *collision;
1808 #ifdef CONFIG_LOCKDEP
1809 	/*
1810 	 * It is permissible to free the struct work_struct from
1811 	 * inside the function that is called from it, this we need to
1812 	 * take into account for lockdep too.  To avoid bogus "held
1813 	 * lock freed" warnings as well as problems when looking into
1814 	 * work->lockdep_map, make a copy and use that here.
1815 	 */
1816 	struct lockdep_map lockdep_map = work->lockdep_map;
1817 #endif
1818 	/*
1819 	 * A single work shouldn't be executed concurrently by
1820 	 * multiple workers on a single cpu.  Check whether anyone is
1821 	 * already processing the work.  If so, defer the work to the
1822 	 * currently executing one.
1823 	 */
1824 	collision = __find_worker_executing_work(gcwq, bwh, work);
1825 	if (unlikely(collision)) {
1826 		move_linked_works(work, &collision->scheduled, NULL);
1827 		return;
1828 	}
1829 
1830 	/* claim and process */
1831 	debug_work_deactivate(work);
1832 	hlist_add_head(&worker->hentry, bwh);
1833 	worker->current_work = work;
1834 	worker->current_cwq = cwq;
1835 	work_color = get_work_color(work);
1836 
1837 	/* record the current cpu number in the work data and dequeue */
1838 	set_work_cpu(work, gcwq->cpu);
1839 	list_del_init(&work->entry);
1840 
1841 	/*
1842 	 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
1843 	 * wake up another worker; otherwise, clear HIGHPRI_PENDING.
1844 	 */
1845 	if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
1846 		struct work_struct *nwork = list_first_entry(&gcwq->worklist,
1847 						struct work_struct, entry);
1848 
1849 		if (!list_empty(&gcwq->worklist) &&
1850 		    get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
1851 			wake_up_worker(gcwq);
1852 		else
1853 			gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
1854 	}
1855 
1856 	/*
1857 	 * CPU intensive works don't participate in concurrency
1858 	 * management.  They're the scheduler's responsibility.
1859 	 */
1860 	if (unlikely(cpu_intensive))
1861 		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
1862 
1863 	spin_unlock_irq(&gcwq->lock);
1864 
1865 	work_clear_pending(work);
1866 	lock_map_acquire_read(&cwq->wq->lockdep_map);
1867 	lock_map_acquire(&lockdep_map);
1868 	trace_workqueue_execute_start(work);
1869 	f(work);
1870 	/*
1871 	 * While we must be careful to not use "work" after this, the trace
1872 	 * point will only record its address.
1873 	 */
1874 	trace_workqueue_execute_end(work);
1875 	lock_map_release(&lockdep_map);
1876 	lock_map_release(&cwq->wq->lockdep_map);
1877 
1878 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1879 		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
1880 		       "%s/0x%08x/%d\n",
1881 		       current->comm, preempt_count(), task_pid_nr(current));
1882 		printk(KERN_ERR "    last function: ");
1883 		print_symbol("%s\n", (unsigned long)f);
1884 		debug_show_held_locks(current);
1885 		dump_stack();
1886 	}
1887 
1888 	spin_lock_irq(&gcwq->lock);
1889 
1890 	/* clear cpu intensive status */
1891 	if (unlikely(cpu_intensive))
1892 		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
1893 
1894 	/* we're done with it, release */
1895 	hlist_del_init(&worker->hentry);
1896 	worker->current_work = NULL;
1897 	worker->current_cwq = NULL;
1898 	cwq_dec_nr_in_flight(cwq, work_color, false);
1899 }
1900 
1901 /**
1902  * process_scheduled_works - process scheduled works
1903  * @worker: self
1904  *
1905  * Process all scheduled works.  Please note that the scheduled list
1906  * may change while processing a work, so this function repeatedly
1907  * fetches a work from the top and executes it.
1908  *
1909  * CONTEXT:
1910  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1911  * multiple times.
1912  */
1913 static void process_scheduled_works(struct worker *worker)
1914 {
1915 	while (!list_empty(&worker->scheduled)) {
1916 		struct work_struct *work = list_first_entry(&worker->scheduled,
1917 						struct work_struct, entry);
1918 		process_one_work(worker, work);
1919 	}
1920 }
1921 
1922 /**
1923  * worker_thread - the worker thread function
1924  * @__worker: self
1925  *
1926  * The gcwq worker thread function.  There's a single dynamic pool of
1927  * these per cpu.  These workers process all works regardless of
1928  * their specific target workqueue.  The only exception is works which
1929  * belong to workqueues with a rescuer, which will be explained in
1930  * rescuer_thread().
1931  */
1932 static int worker_thread(void *__worker)
1933 {
1934 	struct worker *worker = __worker;
1935 	struct global_cwq *gcwq = worker->gcwq;
1936 
1937 	/* tell the scheduler that this is a workqueue worker */
1938 	worker->task->flags |= PF_WQ_WORKER;
1939 woke_up:
1940 	spin_lock_irq(&gcwq->lock);
1941 
1942 	/* DIE can be set only while we're idle, checking here is enough */
1943 	if (worker->flags & WORKER_DIE) {
1944 		spin_unlock_irq(&gcwq->lock);
1945 		worker->task->flags &= ~PF_WQ_WORKER;
1946 		return 0;
1947 	}
1948 
1949 	worker_leave_idle(worker);
1950 recheck:
1951 	/* no more worker necessary? */
1952 	if (!need_more_worker(gcwq))
1953 		goto sleep;
1954 
1955 	/* do we need to manage? */
1956 	if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
1957 		goto recheck;
1958 
1959 	/*
1960 	 * ->scheduled list can only be filled while a worker is
1961 	 * preparing to process a work or actually processing it.
1962 	 * Make sure nobody diddled with it while I was sleeping.
1963 	 */
1964 	BUG_ON(!list_empty(&worker->scheduled));
1965 
1966 	/*
1967 	 * When control reaches this point, we're guaranteed to have
1968 	 * at least one idle worker or that someone else has already
1969 	 * assumed the manager role.
1970 	 */
1971 	worker_clr_flags(worker, WORKER_PREP);
1972 
1973 	do {
1974 		struct work_struct *work =
1975 			list_first_entry(&gcwq->worklist,
1976 					 struct work_struct, entry);
1977 
1978 		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1979 			/* optimization path, not strictly necessary */
1980 			process_one_work(worker, work);
1981 			if (unlikely(!list_empty(&worker->scheduled)))
1982 				process_scheduled_works(worker);
1983 		} else {
1984 			move_linked_works(work, &worker->scheduled, NULL);
1985 			process_scheduled_works(worker);
1986 		}
1987 	} while (keep_working(gcwq));
1988 
1989 	worker_set_flags(worker, WORKER_PREP, false);
1990 sleep:
1991 	if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
1992 		goto recheck;
1993 
1994 	/*
1995 	 * gcwq->lock is held and there's no work to process and no
1996 	 * need to manage, sleep.  Workers are woken up only while
1997 	 * holding gcwq->lock or from local cpu, so setting the
1998 	 * current state before releasing gcwq->lock is enough to
1999 	 * prevent losing any event.
2000 	 */
2001 	worker_enter_idle(worker);
2002 	__set_current_state(TASK_INTERRUPTIBLE);
2003 	spin_unlock_irq(&gcwq->lock);
2004 	schedule();
2005 	goto woke_up;
2006 }
2007 
2008 /**
2009  * rescuer_thread - the rescuer thread function
2010  * @__wq: the associated workqueue
2011  *
2012  * Workqueue rescuer thread function.  There's one rescuer for each
2013  * workqueue which has WQ_RESCUER set.
2014  *
2015  * Regular work processing on a gcwq may block trying to create a new
2016  * worker, which uses a GFP_KERNEL allocation.  That allocation has a
2017  * slight chance of developing into a deadlock if some works currently
2018  * on the same queue need to be processed to satisfy it.  This is the
2019  * problem the rescuer solves.
2020  *
2021  * When such condition is possible, the gcwq summons rescuers of all
2022  * workqueues which have works queued on the gcwq and let them process
2023  * those works so that forward progress can be guaranteed.
2024  *
2025  * This should happen rarely.
2026  */
2027 static int rescuer_thread(void *__wq)
2028 {
2029 	struct workqueue_struct *wq = __wq;
2030 	struct worker *rescuer = wq->rescuer;
2031 	struct list_head *scheduled = &rescuer->scheduled;
2032 	bool is_unbound = wq->flags & WQ_UNBOUND;
2033 	unsigned int cpu;
2034 
2035 	set_user_nice(current, RESCUER_NICE_LEVEL);
2036 repeat:
2037 	set_current_state(TASK_INTERRUPTIBLE);
2038 
2039 	if (kthread_should_stop())
2040 		return 0;
2041 
2042 	/*
2043  * See whether any cpu is asking for help.  Unbound
2044 	 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
2045 	 */
2046 	for_each_mayday_cpu(cpu, wq->mayday_mask) {
2047 		unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
2048 		struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
2049 		struct global_cwq *gcwq = cwq->gcwq;
2050 		struct work_struct *work, *n;
2051 
2052 		__set_current_state(TASK_RUNNING);
2053 		mayday_clear_cpu(cpu, wq->mayday_mask);
2054 
2055 		/* migrate to the target cpu if possible */
2056 		rescuer->gcwq = gcwq;
2057 		worker_maybe_bind_and_lock(rescuer);
2058 
2059 		/*
2060 		 * Slurp in all works issued via this workqueue and
2061 		 * process'em.
2062 		 */
2063 		BUG_ON(!list_empty(&rescuer->scheduled));
2064 		list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
2065 			if (get_work_cwq(work) == cwq)
2066 				move_linked_works(work, scheduled, &n);
2067 
2068 		process_scheduled_works(rescuer);
2069 
2070 		/*
2071 		 * Leave this gcwq.  If keep_working() is %true, notify a
2072 		 * regular worker; otherwise, we end up with 0 concurrency
2073 		 * and stalling the execution.
2074 		 */
2075 		if (keep_working(gcwq))
2076 			wake_up_worker(gcwq);
2077 
2078 		spin_unlock_irq(&gcwq->lock);
2079 	}
2080 
2081 	schedule();
2082 	goto repeat;
2083 }
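
/*
 * Example - a minimal illustrative sketch; my_reclaim_wq and
 * my_writeback_work are made-up names.  A driver whose work items may run
 * as part of memory reclaim requests a rescuer by passing WQ_MEM_RECLAIM
 * when allocating its workqueue, which is what ends up creating the
 * rescuer_thread() above.
 *
 *	struct workqueue_struct *my_reclaim_wq;
 *
 *	my_reclaim_wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 1);
 *	if (!my_reclaim_wq)
 *		return -ENOMEM;
 *	queue_work(my_reclaim_wq, &my_writeback_work);
 */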
2084 
2085 struct wq_barrier {
2086 	struct work_struct	work;
2087 	struct completion	done;
2088 };
2089 
2090 static void wq_barrier_func(struct work_struct *work)
2091 {
2092 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2093 	complete(&barr->done);
2094 }
2095 
2096 /**
2097  * insert_wq_barrier - insert a barrier work
2098  * @cwq: cwq to insert barrier into
2099  * @barr: wq_barrier to insert
2100  * @target: target work to attach @barr to
2101  * @worker: worker currently executing @target, NULL if @target is not executing
2102  *
2103  * @barr is linked to @target such that @barr is completed only after
2104  * @target finishes execution.  Please note that the ordering
2105  * guarantee is observed only with respect to @target and on the local
2106  * cpu.
2107  *
2108  * Currently, a queued barrier can't be canceled.  This is because
2109  * try_to_grab_pending() can't determine whether the work to be
2110  * grabbed is at the head of the queue and thus can't clear the LINKED
2111  * flag of the previous work, while there must always be a valid next
2112  * work after a work with the LINKED flag set.
2113  *
2114  * Note that when @worker is non-NULL, @target may be modified
2115  * underneath us, so we can't reliably determine cwq from @target.
2116  *
2117  * CONTEXT:
2118  * spin_lock_irq(gcwq->lock).
2119  */
2120 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
2121 			      struct wq_barrier *barr,
2122 			      struct work_struct *target, struct worker *worker)
2123 {
2124 	struct list_head *head;
2125 	unsigned int linked = 0;
2126 
2127 	/*
2128 	 * debugobject calls are safe here even with gcwq->lock locked
2129 	 * as we know for sure that this will not trigger any of the
2130 	 * checks and call back into the fixup functions where we
2131 	 * might deadlock.
2132 	 */
2133 	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2134 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2135 	init_completion(&barr->done);
2136 
2137 	/*
2138 	 * If @target is currently being executed, schedule the
2139 	 * barrier to the worker; otherwise, put it after @target.
2140 	 */
2141 	if (worker)
2142 		head = worker->scheduled.next;
2143 	else {
2144 		unsigned long *bits = work_data_bits(target);
2145 
2146 		head = target->entry.next;
2147 		/* there can already be other linked works, inherit and set */
2148 		linked = *bits & WORK_STRUCT_LINKED;
2149 		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
2150 	}
2151 
2152 	debug_work_activate(&barr->work);
2153 	insert_work(cwq, &barr->work, head,
2154 		    work_color_to_flags(WORK_NO_COLOR) | linked);
2155 }
2156 
2157 /**
2158  * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
2159  * @wq: workqueue being flushed
2160  * @flush_color: new flush color, < 0 for no-op
2161  * @work_color: new work color, < 0 for no-op
2162  *
2163  * Prepare cwqs for workqueue flushing.
2164  *
2165  * If @flush_color is non-negative, flush_color on all cwqs should be
2166  * -1.  If no cwq has in-flight commands at the specified color, all
2167  * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
2168  * has in flight commands, its cwq->flush_color is set to
2169  * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
2170  * wakeup logic is armed and %true is returned.
2171  *
2172  * The caller should have initialized @wq->first_flusher prior to
2173  * calling this function with non-negative @flush_color.  If
2174  * @flush_color is negative, no flush color update is done and %false
2175  * is returned.
2176  *
2177  * If @work_color is non-negative, all cwqs should have the same
2178  * work_color which is previous to @work_color and all will be
2179  * advanced to @work_color.
2180  *
2181  * CONTEXT:
2182  * mutex_lock(wq->flush_mutex).
2183  *
2184  * RETURNS:
2185  * %true if @flush_color >= 0 and there's something to flush.  %false
2186  * otherwise.
2187  */
2188 static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
2189 				      int flush_color, int work_color)
2190 {
2191 	bool wait = false;
2192 	unsigned int cpu;
2193 
2194 	if (flush_color >= 0) {
2195 		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
2196 		atomic_set(&wq->nr_cwqs_to_flush, 1);
2197 	}
2198 
2199 	for_each_cwq_cpu(cpu, wq) {
2200 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2201 		struct global_cwq *gcwq = cwq->gcwq;
2202 
2203 		spin_lock_irq(&gcwq->lock);
2204 
2205 		if (flush_color >= 0) {
2206 			BUG_ON(cwq->flush_color != -1);
2207 
2208 			if (cwq->nr_in_flight[flush_color]) {
2209 				cwq->flush_color = flush_color;
2210 				atomic_inc(&wq->nr_cwqs_to_flush);
2211 				wait = true;
2212 			}
2213 		}
2214 
2215 		if (work_color >= 0) {
2216 			BUG_ON(work_color != work_next_color(cwq->work_color));
2217 			cwq->work_color = work_color;
2218 		}
2219 
2220 		spin_unlock_irq(&gcwq->lock);
2221 	}
2222 
2223 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
2224 		complete(&wq->first_flusher->done);
2225 
2226 	return wait;
2227 }
2228 
2229 /**
2230  * flush_workqueue - ensure that any scheduled work has run to completion.
2231  * @wq: workqueue to flush
2232  *
2233  * Forces execution of the workqueue and blocks until its completion.
2234  * This is typically used in driver shutdown handlers.
2235  *
2236  * We sleep until all works which were queued on entry have been handled,
2237  * but we are not livelocked by new incoming ones.
2238  */
2239 void flush_workqueue(struct workqueue_struct *wq)
2240 {
2241 	struct wq_flusher this_flusher = {
2242 		.list = LIST_HEAD_INIT(this_flusher.list),
2243 		.flush_color = -1,
2244 		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2245 	};
2246 	int next_color;
2247 
2248 	lock_map_acquire(&wq->lockdep_map);
2249 	lock_map_release(&wq->lockdep_map);
2250 
2251 	mutex_lock(&wq->flush_mutex);
2252 
2253 	/*
2254 	 * Start-to-wait phase
2255 	 */
2256 	next_color = work_next_color(wq->work_color);
2257 
2258 	if (next_color != wq->flush_color) {
2259 		/*
2260 		 * Color space is not full.  The current work_color
2261 		 * becomes our flush_color and work_color is advanced
2262 		 * by one.
2263 		 */
2264 		BUG_ON(!list_empty(&wq->flusher_overflow));
2265 		this_flusher.flush_color = wq->work_color;
2266 		wq->work_color = next_color;
2267 
2268 		if (!wq->first_flusher) {
2269 			/* no flush in progress, become the first flusher */
2270 			BUG_ON(wq->flush_color != this_flusher.flush_color);
2271 
2272 			wq->first_flusher = &this_flusher;
2273 
2274 			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
2275 						       wq->work_color)) {
2276 				/* nothing to flush, done */
2277 				wq->flush_color = next_color;
2278 				wq->first_flusher = NULL;
2279 				goto out_unlock;
2280 			}
2281 		} else {
2282 			/* wait in queue */
2283 			BUG_ON(wq->flush_color == this_flusher.flush_color);
2284 			list_add_tail(&this_flusher.list, &wq->flusher_queue);
2285 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2286 		}
2287 	} else {
2288 		/*
2289 		 * Oops, color space is full, wait on overflow queue.
2290 		 * The next flush completion will assign us
2291 		 * flush_color and transfer to flusher_queue.
2292 		 */
2293 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2294 	}
2295 
2296 	mutex_unlock(&wq->flush_mutex);
2297 
2298 	wait_for_completion(&this_flusher.done);
2299 
2300 	/*
2301 	 * Wake-up-and-cascade phase
2302 	 *
2303 	 * First flushers are responsible for cascading flushes and
2304 	 * handling overflow.  Non-first flushers can simply return.
2305 	 */
2306 	if (wq->first_flusher != &this_flusher)
2307 		return;
2308 
2309 	mutex_lock(&wq->flush_mutex);
2310 
2311 	/* we might have raced, check again with mutex held */
2312 	if (wq->first_flusher != &this_flusher)
2313 		goto out_unlock;
2314 
2315 	wq->first_flusher = NULL;
2316 
2317 	BUG_ON(!list_empty(&this_flusher.list));
2318 	BUG_ON(wq->flush_color != this_flusher.flush_color);
2319 
2320 	while (true) {
2321 		struct wq_flusher *next, *tmp;
2322 
2323 		/* complete all the flushers sharing the current flush color */
2324 		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2325 			if (next->flush_color != wq->flush_color)
2326 				break;
2327 			list_del_init(&next->list);
2328 			complete(&next->done);
2329 		}
2330 
2331 		BUG_ON(!list_empty(&wq->flusher_overflow) &&
2332 		       wq->flush_color != work_next_color(wq->work_color));
2333 
2334 		/* this flush_color is finished, advance by one */
2335 		wq->flush_color = work_next_color(wq->flush_color);
2336 
2337 		/* one color has been freed, handle overflow queue */
2338 		if (!list_empty(&wq->flusher_overflow)) {
2339 			/*
2340 			 * Assign the same color to all overflowed
2341 			 * flushers, advance work_color and append to
2342 			 * flusher_queue.  This is the start-to-wait
2343 			 * phase for these overflowed flushers.
2344 			 */
2345 			list_for_each_entry(tmp, &wq->flusher_overflow, list)
2346 				tmp->flush_color = wq->work_color;
2347 
2348 			wq->work_color = work_next_color(wq->work_color);
2349 
2350 			list_splice_tail_init(&wq->flusher_overflow,
2351 					      &wq->flusher_queue);
2352 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2353 		}
2354 
2355 		if (list_empty(&wq->flusher_queue)) {
2356 			BUG_ON(wq->flush_color != wq->work_color);
2357 			break;
2358 		}
2359 
2360 		/*
2361 		 * Need to flush more colors.  Make the next flusher
2362 		 * the new first flusher and arm cwqs.
2363 		 */
2364 		BUG_ON(wq->flush_color == wq->work_color);
2365 		BUG_ON(wq->flush_color != next->flush_color);
2366 
2367 		list_del_init(&next->list);
2368 		wq->first_flusher = next;
2369 
2370 		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
2371 			break;
2372 
2373 		/*
2374 		 * Meh... this color is already done, clear first
2375 		 * flusher and repeat cascading.
2376 		 */
2377 		wq->first_flusher = NULL;
2378 	}
2379 
2380 out_unlock:
2381 	mutex_unlock(&wq->flush_mutex);
2382 }
2383 EXPORT_SYMBOL_GPL(flush_workqueue);
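
/*
 * Example - a minimal illustrative sketch; my_wq, struct my_dev,
 * my_dev_shutdown() and my_dev_release_resources() are made-up names.  A
 * typical driver shutdown path flushes its private workqueue before
 * tearing down the data the queued works operate on.
 *
 *	static void my_dev_shutdown(struct my_dev *dev)
 *	{
 *		flush_workqueue(my_wq);
 *		my_dev_release_resources(dev);
 *	}
 */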
2384 
2385 /**
2386  * drain_workqueue - drain a workqueue
2387  * @wq: workqueue to drain
2388  *
2389  * Wait until the workqueue becomes empty.  While draining is in progress,
2390  * only chain queueing is allowed.  IOW, only currently pending or running
2391  * work items on @wq can queue further work items on it.  @wq is flushed
2392  * repeatedly until it becomes empty.  The number of flushes is determined
2393  * by the depth of chaining and should be relatively short.  Whine if it
2394  * takes too long.
2395  */
2396 void drain_workqueue(struct workqueue_struct *wq)
2397 {
2398 	unsigned int flush_cnt = 0;
2399 	unsigned int cpu;
2400 
2401 	/*
2402 	 * __queue_work() needs to test whether there are drainers, is much
2403 	 * hotter than drain_workqueue() and already looks at @wq->flags.
2404 	 * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
2405 	 */
2406 	spin_lock(&workqueue_lock);
2407 	if (!wq->nr_drainers++)
2408 		wq->flags |= WQ_DRAINING;
2409 	spin_unlock(&workqueue_lock);
2410 reflush:
2411 	flush_workqueue(wq);
2412 
2413 	for_each_cwq_cpu(cpu, wq) {
2414 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2415 		bool drained;
2416 
2417 		spin_lock_irq(&cwq->gcwq->lock);
2418 		drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
2419 		spin_unlock_irq(&cwq->gcwq->lock);
2420 
2421 		if (drained)
2422 			continue;
2423 
2424 		if (++flush_cnt == 10 ||
2425 		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2426 			pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
2427 				   wq->name, flush_cnt);
2428 		goto reflush;
2429 	}
2430 
2431 	spin_lock(&workqueue_lock);
2432 	if (!--wq->nr_drainers)
2433 		wq->flags &= ~WQ_DRAINING;
2434 	spin_unlock(&workqueue_lock);
2435 }
2436 EXPORT_SYMBOL_GPL(drain_workqueue);
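
/*
 * Example - a minimal illustrative sketch; my_wq, my_step_fn() and
 * my_more_steps() are made-up names.  A self-requeueing work is the kind
 * of chain queueing that drain_workqueue() still allows; the drain returns
 * once the work stops requeueing itself.
 *
 *	static void my_step_fn(struct work_struct *work)
 *	{
 *		if (my_more_steps())
 *			queue_work(my_wq, work);
 *	}
 *
 *	drain_workqueue(my_wq);
 */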
2437 
2438 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2439 			     bool wait_executing)
2440 {
2441 	struct worker *worker = NULL;
2442 	struct global_cwq *gcwq;
2443 	struct cpu_workqueue_struct *cwq;
2444 
2445 	might_sleep();
2446 	gcwq = get_work_gcwq(work);
2447 	if (!gcwq)
2448 		return false;
2449 
2450 	spin_lock_irq(&gcwq->lock);
2451 	if (!list_empty(&work->entry)) {
2452 		/*
2453 		 * See the comment near try_to_grab_pending()->smp_rmb().
2454 		 * If it was re-queued to a different gcwq under us, we
2455 		 * are not going to wait.
2456 		 */
2457 		smp_rmb();
2458 		cwq = get_work_cwq(work);
2459 		if (unlikely(!cwq || gcwq != cwq->gcwq))
2460 			goto already_gone;
2461 	} else if (wait_executing) {
2462 		worker = find_worker_executing_work(gcwq, work);
2463 		if (!worker)
2464 			goto already_gone;
2465 		cwq = worker->current_cwq;
2466 	} else
2467 		goto already_gone;
2468 
2469 	insert_wq_barrier(cwq, barr, work, worker);
2470 	spin_unlock_irq(&gcwq->lock);
2471 
2472 	/*
2473 	 * If @max_active is 1 or rescuer is in use, flushing another work
2474 	 * item on the same workqueue may lead to deadlock.  Make sure the
2475 	 * flusher is not running on the same workqueue by verifying write
2476 	 * access.
2477 	 */
2478 	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
2479 		lock_map_acquire(&cwq->wq->lockdep_map);
2480 	else
2481 		lock_map_acquire_read(&cwq->wq->lockdep_map);
2482 	lock_map_release(&cwq->wq->lockdep_map);
2483 
2484 	return true;
2485 already_gone:
2486 	spin_unlock_irq(&gcwq->lock);
2487 	return false;
2488 }
2489 
2490 /**
2491  * flush_work - wait for a work to finish executing the last queueing instance
2492  * @work: the work to flush
2493  *
2494  * Wait until @work has finished execution.  This function considers
2495  * only the last queueing instance of @work.  If @work has been
2496  * enqueued across different CPUs on a non-reentrant workqueue or on
2497  * multiple workqueues, @work might still be executing on return on
2498  * some of the CPUs from earlier queueing.
2499  *
2500  * If @work was queued only on a non-reentrant, ordered or unbound
2501  * workqueue, @work is guaranteed to be idle on return if it hasn't
2502  * been requeued since flush started.
2503  *
2504  * RETURNS:
2505  * %true if flush_work() waited for the work to finish execution,
2506  * %false if it was already idle.
2507  */
2508 bool flush_work(struct work_struct *work)
2509 {
2510 	struct wq_barrier barr;
2511 
2512 	if (start_flush_work(work, &barr, true)) {
2513 		wait_for_completion(&barr.done);
2514 		destroy_work_on_stack(&barr.work);
2515 		return true;
2516 	} else
2517 		return false;
2518 }
2519 EXPORT_SYMBOL_GPL(flush_work);
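
/*
 * Example - a minimal illustrative sketch; dev, irq_work and stats are
 * made-up names.  Waiting for the last queueing of a work before resetting
 * the data it updates.
 *
 *	flush_work(&dev->irq_work);
 *	memset(&dev->stats, 0, sizeof(dev->stats));
 */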
2520 
2521 static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2522 {
2523 	struct wq_barrier barr;
2524 	struct worker *worker;
2525 
2526 	spin_lock_irq(&gcwq->lock);
2527 
2528 	worker = find_worker_executing_work(gcwq, work);
2529 	if (unlikely(worker))
2530 		insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2531 
2532 	spin_unlock_irq(&gcwq->lock);
2533 
2534 	if (unlikely(worker)) {
2535 		wait_for_completion(&barr.done);
2536 		destroy_work_on_stack(&barr.work);
2537 		return true;
2538 	} else
2539 		return false;
2540 }
2541 
2542 static bool wait_on_work(struct work_struct *work)
2543 {
2544 	bool ret = false;
2545 	int cpu;
2546 
2547 	might_sleep();
2548 
2549 	lock_map_acquire(&work->lockdep_map);
2550 	lock_map_release(&work->lockdep_map);
2551 
2552 	for_each_gcwq_cpu(cpu)
2553 		ret |= wait_on_cpu_work(get_gcwq(cpu), work);
2554 	return ret;
2555 }
2556 
2557 /**
2558  * flush_work_sync - wait until a work has finished execution
2559  * @work: the work to flush
2560  *
2561  * Wait until @work has finished execution.  On return, it's
2562  * guaranteed that all queueing instances of @work which happened
2563  * before this function is called are finished.  In other words, if
2564  * @work hasn't been requeued since this function was called, @work is
2565  * guaranteed to be idle on return.
2566  *
2567  * RETURNS:
2568  * %true if flush_work_sync() waited for the work to finish execution,
2569  * %false if it was already idle.
2570  */
2571 bool flush_work_sync(struct work_struct *work)
2572 {
2573 	struct wq_barrier barr;
2574 	bool pending, waited;
2575 
2576 	/* we'll wait for executions separately, queue barr only if pending */
2577 	pending = start_flush_work(work, &barr, false);
2578 
2579 	/* wait for executions to finish */
2580 	waited = wait_on_work(work);
2581 
2582 	/* wait for the pending one */
2583 	if (pending) {
2584 		wait_for_completion(&barr.done);
2585 		destroy_work_on_stack(&barr.work);
2586 	}
2587 
2588 	return pending || waited;
2589 }
2590 EXPORT_SYMBOL_GPL(flush_work_sync);
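
/*
 * Example - a minimal illustrative sketch; dev, config_work and new_config
 * are made-up names.  Unlike flush_work(), this also waits for executions
 * started by earlier queueings on other CPUs, so it is the stronger choice
 * before changing the parameters the work function reads.
 *
 *	flush_work_sync(&dev->config_work);
 *	dev->config = new_config;
 */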
2591 
2592 /*
2593  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
2594  * so this work can't be re-armed in any way.
2595  */
2596 static int try_to_grab_pending(struct work_struct *work)
2597 {
2598 	struct global_cwq *gcwq;
2599 	int ret = -1;
2600 
2601 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
2602 		return 0;
2603 
2604 	/*
2605 	 * The queueing is in progress, or it is already queued. Try to
2606 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
2607 	 */
2608 	gcwq = get_work_gcwq(work);
2609 	if (!gcwq)
2610 		return ret;
2611 
2612 	spin_lock_irq(&gcwq->lock);
2613 	if (!list_empty(&work->entry)) {
2614 		/*
2615 		 * This work is queued, but perhaps we locked the wrong gcwq.
2616 		 * In that case we must see the new value after rmb(), see
2617 		 * insert_work()->wmb().
2618 		 */
2619 		smp_rmb();
2620 		if (gcwq == get_work_gcwq(work)) {
2621 			debug_work_deactivate(work);
2622 			list_del_init(&work->entry);
2623 			cwq_dec_nr_in_flight(get_work_cwq(work),
2624 				get_work_color(work),
2625 				*work_data_bits(work) & WORK_STRUCT_DELAYED);
2626 			ret = 1;
2627 		}
2628 	}
2629 	spin_unlock_irq(&gcwq->lock);
2630 
2631 	return ret;
2632 }
2633 
2634 static bool __cancel_work_timer(struct work_struct *work,
2635 				struct timer_list* timer)
2636 {
2637 	int ret;
2638 
2639 	do {
2640 		ret = (timer && likely(del_timer(timer)));
2641 		if (!ret)
2642 			ret = try_to_grab_pending(work);
2643 		wait_on_work(work);
2644 	} while (unlikely(ret < 0));
2645 
2646 	clear_work_data(work);
2647 	return ret;
2648 }
2649 
2650 /**
2651  * cancel_work_sync - cancel a work and wait for it to finish
2652  * @work: the work to cancel
2653  *
2654  * Cancel @work and wait for its execution to finish.  This function
2655  * can be used even if the work re-queues itself or migrates to
2656  * another workqueue.  On return from this function, @work is
2657  * guaranteed to be not pending or executing on any CPU.
2658  *
2659  * cancel_work_sync(&delayed_work->work) must not be used for
2660  * delayed_work's.  Use cancel_delayed_work_sync() instead.
2661  *
2662  * The caller must ensure that the workqueue on which @work was last
2663  * queued can't be destroyed before this function returns.
2664  *
2665  * RETURNS:
2666  * %true if @work was pending, %false otherwise.
2667  */
2668 bool cancel_work_sync(struct work_struct *work)
2669 {
2670 	return __cancel_work_timer(work, NULL);
2671 }
2672 EXPORT_SYMBOL_GPL(cancel_work_sync);
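
/*
 * Example - a minimal illustrative sketch; my_ctx is a made-up name.  The
 * usual teardown pattern: after cancel_work_sync() returns, the work is
 * neither pending nor running anywhere, so its context may be freed,
 * provided nothing can requeue it afterwards.
 *
 *	cancel_work_sync(&my_ctx->work);
 *	kfree(my_ctx);
 */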
2673 
2674 /**
2675  * flush_delayed_work - wait for a dwork to finish executing the last queueing
2676  * @dwork: the delayed work to flush
2677  *
2678  * Delayed timer is cancelled and the pending work is queued for
2679  * immediate execution.  Like flush_work(), this function only
2680  * considers the last queueing instance of @dwork.
2681  *
2682  * RETURNS:
2683  * %true if flush_work() waited for the work to finish execution,
2684  * %false if it was already idle.
2685  */
2686 bool flush_delayed_work(struct delayed_work *dwork)
2687 {
2688 	if (del_timer_sync(&dwork->timer))
2689 		__queue_work(raw_smp_processor_id(),
2690 			     get_work_cwq(&dwork->work)->wq, &dwork->work);
2691 	return flush_work(&dwork->work);
2692 }
2693 EXPORT_SYMBOL(flush_delayed_work);
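
/*
 * Example - a minimal illustrative sketch; dev, refresh_dwork, last_status
 * and my_report_status() are made-up names.  Forcing a delayed refresh to
 * run now instead of waiting out the remaining delay before reporting.
 *
 *	flush_delayed_work(&dev->refresh_dwork);
 *	my_report_status(dev->last_status);
 */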
2694 
2695 /**
2696  * flush_delayed_work_sync - wait for a dwork to finish
2697  * @dwork: the delayed work to flush
2698  *
2699  * Delayed timer is cancelled and the pending work is queued for
2700  * execution immediately.  Other than timer handling, its behavior
2701  * is identical to flush_work_sync().
2702  *
2703  * RETURNS:
2704  * %true if flush_work_sync() waited for the work to finish execution,
2705  * %false if it was already idle.
2706  */
2707 bool flush_delayed_work_sync(struct delayed_work *dwork)
2708 {
2709 	if (del_timer_sync(&dwork->timer))
2710 		__queue_work(raw_smp_processor_id(),
2711 			     get_work_cwq(&dwork->work)->wq, &dwork->work);
2712 	return flush_work_sync(&dwork->work);
2713 }
2714 EXPORT_SYMBOL(flush_delayed_work_sync);
2715 
2716 /**
2717  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
2718  * @dwork: the delayed work to cancel
2719  *
2720  * This is cancel_work_sync() for delayed works.
2721  *
2722  * RETURNS:
2723  * %true if @dwork was pending, %false otherwise.
2724  */
2725 bool cancel_delayed_work_sync(struct delayed_work *dwork)
2726 {
2727 	return __cancel_work_timer(&dwork->work, &dwork->timer);
2728 }
2729 EXPORT_SYMBOL(cancel_delayed_work_sync);
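
/*
 * Example - a minimal illustrative sketch; dev, poll_dwork and
 * my_dev_free() are made-up names.  Stopping a self-rearming poll on
 * device removal; cancel_delayed_work_sync() copes with the work
 * requeueing itself.
 *
 *	cancel_delayed_work_sync(&dev->poll_dwork);
 *	my_dev_free(dev);
 */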
2730 
2731 /**
2732  * schedule_work - put work task in global workqueue
2733  * @work: job to be done
2734  *
2735  * Returns zero if @work was already on the kernel-global workqueue and
2736  * non-zero otherwise.
2737  *
2738  * This puts a job in the kernel-global workqueue if it was not already
2739  * queued and leaves it in the same position on the kernel-global
2740  * workqueue otherwise.
2741  */
2742 int schedule_work(struct work_struct *work)
2743 {
2744 	return queue_work(system_wq, work);
2745 }
2746 EXPORT_SYMBOL(schedule_work);
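
/*
 * Example - a minimal illustrative sketch; my_irq_bh_work, my_irq_bh_fn()
 * and my_irq() are made-up names.  Deferring non-urgent processing from an
 * interrupt handler to process context via the kernel-global workqueue.
 *
 *	static DECLARE_WORK(my_irq_bh_work, my_irq_bh_fn);
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		schedule_work(&my_irq_bh_work);
 *		return IRQ_HANDLED;
 *	}
 */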
2747 
2748 /**
2749  * schedule_work_on - put work task on a specific cpu
2750  * @cpu: cpu to put the work task on
2751  * @work: job to be done
2752  *
2753  * This puts a job on a specific cpu.
2754  */
2755 int schedule_work_on(int cpu, struct work_struct *work)
2756 {
2757 	return queue_work_on(cpu, system_wq, work);
2758 }
2759 EXPORT_SYMBOL(schedule_work_on);
2760 
2761 /**
2762  * schedule_delayed_work - put work task in global workqueue after delay
2763  * @dwork: job to be done
2764  * @delay: number of jiffies to wait or 0 for immediate execution
2765  *
2766  * After waiting for a given time this puts a job in the kernel-global
2767  * workqueue.
2768  */
2769 int schedule_delayed_work(struct delayed_work *dwork,
2770 					unsigned long delay)
2771 {
2772 	return queue_delayed_work(system_wq, dwork, delay);
2773 }
2774 EXPORT_SYMBOL(schedule_delayed_work);
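
/*
 * Example - a minimal illustrative sketch; my_cleanup_dwork and
 * my_cleanup_fn() are made-up names.  Running a one-shot cleanup roughly
 * two seconds from now on the kernel-global workqueue.
 *
 *	static DECLARE_DELAYED_WORK(my_cleanup_dwork, my_cleanup_fn);
 *
 *	schedule_delayed_work(&my_cleanup_dwork, 2 * HZ);
 */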
2775 
2776 /**
2777  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
2778  * @cpu: cpu to use
2779  * @dwork: job to be done
2780  * @delay: number of jiffies to wait
2781  *
2782  * After waiting for a given time this puts a job in the kernel-global
2783  * workqueue on the specified CPU.
2784  */
2785 int schedule_delayed_work_on(int cpu,
2786 			struct delayed_work *dwork, unsigned long delay)
2787 {
2788 	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
2789 }
2790 EXPORT_SYMBOL(schedule_delayed_work_on);
2791 
2792 /**
2793  * schedule_on_each_cpu - execute a function synchronously on each online CPU
2794  * @func: the function to call
2795  *
2796  * schedule_on_each_cpu() executes @func on each online CPU using the
2797  * system workqueue and blocks until all CPUs have completed.
2798  * schedule_on_each_cpu() is very slow.
2799  *
2800  * RETURNS:
2801  * 0 on success, -errno on failure.
2802  */
2803 int schedule_on_each_cpu(work_func_t func)
2804 {
2805 	int cpu;
2806 	struct work_struct __percpu *works;
2807 
2808 	works = alloc_percpu(struct work_struct);
2809 	if (!works)
2810 		return -ENOMEM;
2811 
2812 	get_online_cpus();
2813 
2814 	for_each_online_cpu(cpu) {
2815 		struct work_struct *work = per_cpu_ptr(works, cpu);
2816 
2817 		INIT_WORK(work, func);
2818 		schedule_work_on(cpu, work);
2819 	}
2820 
2821 	for_each_online_cpu(cpu)
2822 		flush_work(per_cpu_ptr(works, cpu));
2823 
2824 	put_online_cpus();
2825 	free_percpu(works);
2826 	return 0;
2827 }
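
/*
 * Example - a minimal illustrative sketch; my_counter and
 * my_reset_local_counter() are made-up names.  Resetting a per-cpu counter
 * on every online CPU and waiting until all CPUs have done so.
 *
 *	static DEFINE_PER_CPU(unsigned long, my_counter);
 *
 *	static void my_reset_local_counter(struct work_struct *unused)
 *	{
 *		this_cpu_write(my_counter, 0);
 *	}
 *
 *	int ret = schedule_on_each_cpu(my_reset_local_counter);
 */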
2828 
2829 /**
2830  * flush_scheduled_work - ensure that any scheduled work has run to completion.
2831  *
2832  * Forces execution of the kernel-global workqueue and blocks until its
2833  * completion.
2834  *
2835  * Think twice before calling this function!  It's very easy to get into
2836  * trouble if you don't take great care.  Either of the following situations
2837  * will lead to deadlock:
2838  *
2839  *	One of the work items currently on the workqueue needs to acquire
2840  *	a lock held by your code or its caller.
2841  *
2842  *	Your code is running in the context of a work routine.
2843  *
2844  * They will be detected by lockdep when they occur, but the first might not
2845  * occur very often.  It depends on what work items are on the workqueue and
2846  * what locks they need, which you have no control over.
2847  *
2848  * In most situations flushing the entire workqueue is overkill; you merely
2849  * need to know that a particular work item isn't queued and isn't running.
2850  * In such cases you should use cancel_delayed_work_sync() or
2851  * cancel_work_sync() instead.
2852  */
2853 void flush_scheduled_work(void)
2854 {
2855 	flush_workqueue(system_wq);
2856 }
2857 EXPORT_SYMBOL(flush_scheduled_work);
2858 
2859 /**
2860  * execute_in_process_context - reliably execute the routine with user context
2861  * @fn:		the function to execute
2862  * @ew:		guaranteed storage for the execute work structure (must
2863  *		be available when the work executes)
2864  *
2865  * Executes the function immediately if process context is available,
2866  * otherwise schedules the function for delayed execution.
2867  *
2868  * Returns:	0 - function was executed
2869  *		1 - function was scheduled for execution
2870  */
2871 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
2872 {
2873 	if (!in_interrupt()) {
2874 		fn(&ew->work);
2875 		return 0;
2876 	}
2877 
2878 	INIT_WORK(&ew->work, fn);
2879 	schedule_work(&ew->work);
2880 
2881 	return 1;
2882 }
2883 EXPORT_SYMBOL_GPL(execute_in_process_context);
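
/*
 * Example - a minimal illustrative sketch; my_release_fn() and my_obj are
 * made-up names.  A release path which may be reached from either process
 * or interrupt context; the execute_work storage must stay valid until the
 * work has run.
 *
 *	execute_in_process_context(my_release_fn, &my_obj->ew);
 */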
2884 
2885 int keventd_up(void)
2886 {
2887 	return system_wq != NULL;
2888 }
2889 
2890 static int alloc_cwqs(struct workqueue_struct *wq)
2891 {
2892 	/*
2893 	 * cwqs are force-aligned according to WORK_STRUCT_FLAG_BITS.
2894 	 * Make sure that the alignment isn't lower than that of
2895 	 * unsigned long long.
2896 	 */
2897 	const size_t size = sizeof(struct cpu_workqueue_struct);
2898 	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
2899 				   __alignof__(unsigned long long));
2900 #ifdef CONFIG_SMP
2901 	bool percpu = !(wq->flags & WQ_UNBOUND);
2902 #else
2903 	bool percpu = false;
2904 #endif
2905 
2906 	if (percpu)
2907 		wq->cpu_wq.pcpu = __alloc_percpu(size, align);
2908 	else {
2909 		void *ptr;
2910 
2911 		/*
2912 		 * Allocate enough room to align cwq and put an extra
2913 		 * pointer at the end pointing back to the originally
2914 		 * allocated pointer, which will be used when freeing.
2915 		 */
2916 		ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
2917 		if (ptr) {
2918 			wq->cpu_wq.single = PTR_ALIGN(ptr, align);
2919 			*(void **)(wq->cpu_wq.single + 1) = ptr;
2920 		}
2921 	}
2922 
2923 	/* just in case, make sure it's actually aligned */
2924 	BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
2925 	return wq->cpu_wq.v ? 0 : -ENOMEM;
2926 }
2927 
2928 static void free_cwqs(struct workqueue_struct *wq)
2929 {
2930 #ifdef CONFIG_SMP
2931 	bool percpu = !(wq->flags & WQ_UNBOUND);
2932 #else
2933 	bool percpu = false;
2934 #endif
2935 
2936 	if (percpu)
2937 		free_percpu(wq->cpu_wq.pcpu);
2938 	else if (wq->cpu_wq.single) {
2939 		/* the pointer to free is stored right after the cwq */
2940 		kfree(*(void **)(wq->cpu_wq.single + 1));
2941 	}
2942 }
2943 
2944 static int wq_clamp_max_active(int max_active, unsigned int flags,
2945 			       const char *name)
2946 {
2947 	int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
2948 
2949 	if (max_active < 1 || max_active > lim)
2950 		printk(KERN_WARNING "workqueue: max_active %d requested for %s "
2951 		       "is out of range, clamping between %d and %d\n",
2952 		       max_active, name, 1, lim);
2953 
2954 	return clamp_val(max_active, 1, lim);
2955 }
2956 
2957 struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
2958 					       unsigned int flags,
2959 					       int max_active,
2960 					       struct lock_class_key *key,
2961 					       const char *lock_name, ...)
2962 {
2963 	va_list args, args1;
2964 	struct workqueue_struct *wq;
2965 	unsigned int cpu;
2966 	size_t namelen;
2967 
2968 	/* determine namelen, allocate wq and format name */
2969 	va_start(args, lock_name);
2970 	va_copy(args1, args);
2971 	namelen = vsnprintf(NULL, 0, fmt, args) + 1;
2972 
2973 	wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
2974 	if (!wq)
2975 		goto err;
2976 
2977 	vsnprintf(wq->name, namelen, fmt, args1);
2978 	va_end(args);
2979 	va_end(args1);
2980 
2981 	/*
2982 	 * Workqueues which may be used during memory reclaim should
2983 	 * have a rescuer to guarantee forward progress.
2984 	 */
2985 	if (flags & WQ_MEM_RECLAIM)
2986 		flags |= WQ_RESCUER;
2987 
2988 	/*
2989 	 * Unbound workqueues aren't concurrency managed and should be
2990 	 * dispatched to workers immediately.
2991 	 */
2992 	if (flags & WQ_UNBOUND)
2993 		flags |= WQ_HIGHPRI;
2994 
2995 	max_active = max_active ?: WQ_DFL_ACTIVE;
2996 	max_active = wq_clamp_max_active(max_active, flags, wq->name);
2997 
2998 	/* init wq */
2999 	wq->flags = flags;
3000 	wq->saved_max_active = max_active;
3001 	mutex_init(&wq->flush_mutex);
3002 	atomic_set(&wq->nr_cwqs_to_flush, 0);
3003 	INIT_LIST_HEAD(&wq->flusher_queue);
3004 	INIT_LIST_HEAD(&wq->flusher_overflow);
3005 
3006 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
3007 	INIT_LIST_HEAD(&wq->list);
3008 
3009 	if (alloc_cwqs(wq) < 0)
3010 		goto err;
3011 
3012 	for_each_cwq_cpu(cpu, wq) {
3013 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3014 		struct global_cwq *gcwq = get_gcwq(cpu);
3015 
3016 		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
3017 		cwq->gcwq = gcwq;
3018 		cwq->wq = wq;
3019 		cwq->flush_color = -1;
3020 		cwq->max_active = max_active;
3021 		INIT_LIST_HEAD(&cwq->delayed_works);
3022 	}
3023 
3024 	if (flags & WQ_RESCUER) {
3025 		struct worker *rescuer;
3026 
3027 		if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
3028 			goto err;
3029 
3030 		wq->rescuer = rescuer = alloc_worker();
3031 		if (!rescuer)
3032 			goto err;
3033 
3034 		rescuer->task = kthread_create(rescuer_thread, wq, "%s",
3035 					       wq->name);
3036 		if (IS_ERR(rescuer->task))
3037 			goto err;
3038 
3039 		rescuer->task->flags |= PF_THREAD_BOUND;
3040 		wake_up_process(rescuer->task);
3041 	}
3042 
3043 	/*
3044 	 * workqueue_lock protects global freeze state and workqueues
3045 	 * list.  Grab it, set max_active accordingly and add the new
3046 	 * workqueue to workqueues list.
3047 	 */
3048 	spin_lock(&workqueue_lock);
3049 
3050 	if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
3051 		for_each_cwq_cpu(cpu, wq)
3052 			get_cwq(cpu, wq)->max_active = 0;
3053 
3054 	list_add(&wq->list, &workqueues);
3055 
3056 	spin_unlock(&workqueue_lock);
3057 
3058 	return wq;
3059 err:
3060 	if (wq) {
3061 		free_cwqs(wq);
3062 		free_mayday_mask(wq->mayday_mask);
3063 		kfree(wq->rescuer);
3064 		kfree(wq);
3065 	}
3066 	return NULL;
3067 }
3068 EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
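
/*
 * Example - a minimal illustrative sketch; my_wq and my_work are made-up
 * names.  Callers normally go through the alloc_workqueue() wrapper, which
 * supplies the lockdep key and lock name and ends up here.  A max_active
 * of 0 selects WQ_DFL_ACTIVE.
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &my_work);
 *	destroy_workqueue(my_wq);
 */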
3069 
3070 /**
3071  * destroy_workqueue - safely terminate a workqueue
3072  * @wq: target workqueue
3073  *
3074  * Safely destroy a workqueue. All work currently pending will be done first.
3075  */
3076 void destroy_workqueue(struct workqueue_struct *wq)
3077 {
3078 	unsigned int cpu;
3079 
3080 	/* drain it before proceeding with destruction */
3081 	drain_workqueue(wq);
3082 
3083 	/*
3084 	 * wq list is used to freeze wq, remove from list after
3085 	 * flushing is complete in case freeze races us.
3086 	 */
3087 	spin_lock(&workqueue_lock);
3088 	list_del(&wq->list);
3089 	spin_unlock(&workqueue_lock);
3090 
3091 	/* sanity check */
3092 	for_each_cwq_cpu(cpu, wq) {
3093 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3094 		int i;
3095 
3096 		for (i = 0; i < WORK_NR_COLORS; i++)
3097 			BUG_ON(cwq->nr_in_flight[i]);
3098 		BUG_ON(cwq->nr_active);
3099 		BUG_ON(!list_empty(&cwq->delayed_works));
3100 	}
3101 
3102 	if (wq->flags & WQ_RESCUER) {
3103 		kthread_stop(wq->rescuer->task);
3104 		free_mayday_mask(wq->mayday_mask);
3105 		kfree(wq->rescuer);
3106 	}
3107 
3108 	free_cwqs(wq);
3109 	kfree(wq);
3110 }
3111 EXPORT_SYMBOL_GPL(destroy_workqueue);
3112 
3113 /**
3114  * workqueue_set_max_active - adjust max_active of a workqueue
3115  * @wq: target workqueue
3116  * @max_active: new max_active value.
3117  *
3118  * Set max_active of @wq to @max_active.
3119  *
3120  * CONTEXT:
3121  * Don't call from IRQ context.
3122  */
3123 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
3124 {
3125 	unsigned int cpu;
3126 
3127 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
3128 
3129 	spin_lock(&workqueue_lock);
3130 
3131 	wq->saved_max_active = max_active;
3132 
3133 	for_each_cwq_cpu(cpu, wq) {
3134 		struct global_cwq *gcwq = get_gcwq(cpu);
3135 
3136 		spin_lock_irq(&gcwq->lock);
3137 
3138 		if (!(wq->flags & WQ_FREEZABLE) ||
3139 		    !(gcwq->flags & GCWQ_FREEZING))
3140 			get_cwq(gcwq->cpu, wq)->max_active = max_active;
3141 
3142 		spin_unlock_irq(&gcwq->lock);
3143 	}
3144 
3145 	spin_unlock(&workqueue_lock);
3146 }
3147 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
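
/*
 * Example - a minimal illustrative sketch; my_wq and my_run_degraded() are
 * made-up names.  Throttling a workqueue to one in-flight work item per
 * cpu workqueue while a device is degraded, then restoring the default.
 *
 *	workqueue_set_max_active(my_wq, 1);
 *	my_run_degraded();
 *	workqueue_set_max_active(my_wq, WQ_DFL_ACTIVE);
 */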
3148 
3149 /**
3150  * workqueue_congested - test whether a workqueue is congested
3151  * @cpu: CPU in question
3152  * @wq: target workqueue
3153  *
3154  * Test whether @wq's cpu workqueue for @cpu is congested.  There is
3155  * no synchronization around this function and the test result is
3156  * unreliable and only useful as advisory hints or for debugging.
3157  *
3158  * RETURNS:
3159  * %true if congested, %false otherwise.
3160  */
3161 bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
3162 {
3163 	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3164 
3165 	return !list_empty(&cwq->delayed_works);
3166 }
3167 EXPORT_SYMBOL_GPL(workqueue_congested);
3168 
3169 /**
3170  * work_cpu - return the last known associated cpu for @work
3171  * @work: the work of interest
3172  *
3173  * RETURNS:
3174  * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
3175  */
3176 unsigned int work_cpu(struct work_struct *work)
3177 {
3178 	struct global_cwq *gcwq = get_work_gcwq(work);
3179 
3180 	return gcwq ? gcwq->cpu : WORK_CPU_NONE;
3181 }
3182 EXPORT_SYMBOL_GPL(work_cpu);
3183 
3184 /**
3185  * work_busy - test whether a work is currently pending or running
3186  * @work: the work to be tested
3187  *
3188  * Test whether @work is currently pending or running.  There is no
3189  * synchronization around this function and the test result is
3190  * unreliable and only useful as advisory hints or for debugging.
3191  * Especially for reentrant wqs, the pending state might hide the
3192  * running state.
3193  *
3194  * RETURNS:
3195  * OR'd bitmask of WORK_BUSY_* bits.
3196  */
3197 unsigned int work_busy(struct work_struct *work)
3198 {
3199 	struct global_cwq *gcwq = get_work_gcwq(work);
3200 	unsigned long flags;
3201 	unsigned int ret = 0;
3202 
3203 	if (!gcwq)
3204 		return false;
3205 
3206 	spin_lock_irqsave(&gcwq->lock, flags);
3207 
3208 	if (work_pending(work))
3209 		ret |= WORK_BUSY_PENDING;
3210 	if (find_worker_executing_work(gcwq, work))
3211 		ret |= WORK_BUSY_RUNNING;
3212 
3213 	spin_unlock_irqrestore(&gcwq->lock, flags);
3214 
3215 	return ret;
3216 }
3217 EXPORT_SYMBOL_GPL(work_busy);
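
/*
 * Example - a minimal illustrative sketch; my_work is a made-up name.
 * Using the advisory result for a debug printout only.
 *
 *	unsigned int busy = work_busy(&my_work);
 *
 *	pr_debug("my_work:%s%s\n",
 *		 busy & WORK_BUSY_PENDING ? " pending" : "",
 *		 busy & WORK_BUSY_RUNNING ? " running" : "");
 */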
3218 
3219 /*
3220  * CPU hotplug.
3221  *
3222  * There are two challenges in supporting CPU hotplug.  Firstly, there
3223  * are a lot of assumptions on strong associations among work, cwq and
3224  * gcwq which make migrating pending and scheduled works very
3225  * difficult to implement without impacting hot paths.  Secondly,
3226  * gcwqs serve a mix of short, long and very long running works, making
3227  * blocked draining impractical.
3228  *
3229  * This is solved by allowing a gcwq to be detached from CPU, running
3230  * it with unbound (rogue) workers and allowing it to be reattached
3231  * later if the cpu comes back online.  A separate thread is created
3232  * to govern a gcwq in such state and is called the trustee of the
3233  * gcwq.
3234  *
3235  * Trustee states and their descriptions.
3236  *
3237  * START	Command state used on startup.  On CPU_DOWN_PREPARE, a
3238  *		new trustee is started with this state.
3239  *
3240  * IN_CHARGE	Once started, trustee will enter this state after
3241  *		assuming the manager role and making all existing
3242  *		workers rogue.  DOWN_PREPARE waits for trustee to
3243  *		enter this state.  After reaching IN_CHARGE, trustee
3244  *		tries to execute the pending worklist until it's empty
3245  *		and the state is set to BUTCHER, or the state is set
3246  *		to RELEASE.
3247  *
3248  * BUTCHER	Command state which is set by the cpu callback after
3249  *		the cpu has gone down.  Once this state is set, the trustee
3250  *		knows that there will be no new works on the worklist
3251  *		and once the worklist is empty it can proceed to
3252  *		killing idle workers.
3253  *
3254  * RELEASE	Command state which is set by the cpu callback if the
3255  *		cpu down has been canceled or it has come online
3256  *		again.  After recognizing this state, trustee stops
3257  *		trying to drain or butcher and clears ROGUE, rebinds
3258  *		all remaining workers back to the cpu and releases
3259  *		manager role.
3260  *
3261  * DONE		Trustee will enter this state after BUTCHER or RELEASE
3262  *		is complete.
3263  *
3264  *          trustee                 CPU                draining
3265  *         took over                down               complete
3266  * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
3267  *                        |                     |                  ^
3268  *                        | CPU is back online  v   return workers |
3269  *                         ----------------> RELEASE --------------
3270  */
3271 
3272 /**
3273  * trustee_wait_event_timeout - timed event wait for trustee
3274  * @cond: condition to wait for
3275  * @timeout: timeout in jiffies
3276  *
3277  * wait_event_timeout() for trustee to use.  Handles locking and
3278  * checks for RELEASE request.
3279  *
3280  * CONTEXT:
3281  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3282  * multiple times.  To be used by trustee.
3283  *
3284  * RETURNS:
3285  * Positive indicating left time if @cond is satisfied, 0 if timed
3286  * out, -1 if canceled.
3287  */
3288 #define trustee_wait_event_timeout(cond, timeout) ({			\
3289 	long __ret = (timeout);						\
3290 	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\
3291 	       __ret) {							\
3292 		spin_unlock_irq(&gcwq->lock);				\
3293 		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
3294 			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
3295 			__ret);						\
3296 		spin_lock_irq(&gcwq->lock);				\
3297 	}								\
3298 	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
3299 })
3300 
3301 /**
3302  * trustee_wait_event - event wait for trustee
3303  * @cond: condition to wait for
3304  *
3305  * wait_event() for trustee to use.  Automatically handles locking and
3306  * checks for CANCEL request.
3307  *
3308  * CONTEXT:
3309  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3310  * multiple times.  To be used by trustee.
3311  *
3312  * RETURNS:
3313  * 0 if @cond is satisfied, -1 if canceled.
3314  */
3315 #define trustee_wait_event(cond) ({					\
3316 	long __ret1;							\
3317 	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
3318 	__ret1 < 0 ? -1 : 0;						\
3319 })
3320 
3321 static int __cpuinit trustee_thread(void *__gcwq)
3322 {
3323 	struct global_cwq *gcwq = __gcwq;
3324 	struct worker *worker;
3325 	struct work_struct *work;
3326 	struct hlist_node *pos;
3327 	long rc;
3328 	int i;
3329 
3330 	BUG_ON(gcwq->cpu != smp_processor_id());
3331 
3332 	spin_lock_irq(&gcwq->lock);
3333 	/*
3334 	 * Claim the manager position and make all workers rogue.
3335 	 * Trustee must be bound to the target cpu and can't be
3336 	 * cancelled.
3337 	 */
3338 	BUG_ON(gcwq->cpu != smp_processor_id());
3339 	rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
3340 	BUG_ON(rc < 0);
3341 
3342 	gcwq->flags |= GCWQ_MANAGING_WORKERS;
3343 
3344 	list_for_each_entry(worker, &gcwq->idle_list, entry)
3345 		worker->flags |= WORKER_ROGUE;
3346 
3347 	for_each_busy_worker(worker, i, pos, gcwq)
3348 		worker->flags |= WORKER_ROGUE;
3349 
3350 	/*
3351 	 * Call schedule() so that we cross rq->lock and thus can
3352 	 * guarantee sched callbacks see the rogue flag.  This is
3353 	 * necessary as scheduler callbacks may be invoked from other
3354 	 * cpus.
3355 	 */
3356 	spin_unlock_irq(&gcwq->lock);
3357 	schedule();
3358 	spin_lock_irq(&gcwq->lock);
3359 
3360 	/*
3361 	 * Sched callbacks are disabled now.  Zap nr_running.  After
3362 	 * this, nr_running stays zero and need_more_worker() and
3363 	 * keep_working() are always true as long as the worklist is
3364 	 * not empty.
3365 	 */
3366 	atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
3367 
3368 	spin_unlock_irq(&gcwq->lock);
3369 	del_timer_sync(&gcwq->idle_timer);
3370 	spin_lock_irq(&gcwq->lock);
3371 
3372 	/*
3373 	 * We're now in charge.  Notify and proceed to drain.  We need
3374 	 * to keep the gcwq running during the whole CPU down
3375 	 * procedure as other cpu hotunplug callbacks may need to
3376 	 * flush currently running tasks.
3377 	 */
3378 	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
3379 	wake_up_all(&gcwq->trustee_wait);
3380 
3381 	/*
3382 	 * The original cpu is in the process of dying and may go away
3383 	 * anytime now.  When that happens, we and all workers would
3384 	 * be migrated to other cpus.  Try draining any left work.  We
3385 	 * want to get it over with ASAP - spam rescuers, wake up as
3386 	 * many idlers as necessary and create new ones till the
3387 	 * worklist is empty.  Note that if the gcwq is frozen, there
3388 	 * may be frozen works in freezable cwqs.  Don't declare
3389 	 * completion while frozen.
3390 	 */
3391 	while (gcwq->nr_workers != gcwq->nr_idle ||
3392 	       gcwq->flags & GCWQ_FREEZING ||
3393 	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
3394 		int nr_works = 0;
3395 
3396 		list_for_each_entry(work, &gcwq->worklist, entry) {
3397 			send_mayday(work);
3398 			nr_works++;
3399 		}
3400 
3401 		list_for_each_entry(worker, &gcwq->idle_list, entry) {
3402 			if (!nr_works--)
3403 				break;
3404 			wake_up_process(worker->task);
3405 		}
3406 
3407 		if (need_to_create_worker(gcwq)) {
3408 			spin_unlock_irq(&gcwq->lock);
3409 			worker = create_worker(gcwq, false);
3410 			spin_lock_irq(&gcwq->lock);
3411 			if (worker) {
3412 				worker->flags |= WORKER_ROGUE;
3413 				start_worker(worker);
3414 			}
3415 		}
3416 
3417 		/* give a breather */
3418 		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
3419 			break;
3420 	}
3421 
3422 	/*
3423 	 * Either all works have been scheduled and cpu is down, or
3424 	 * cpu down has already been canceled.  Wait for and butcher
3425 	 * all workers till we're canceled.
3426 	 */
3427 	do {
3428 		rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
3429 		while (!list_empty(&gcwq->idle_list))
3430 			destroy_worker(list_first_entry(&gcwq->idle_list,
3431 							struct worker, entry));
3432 	} while (gcwq->nr_workers && rc >= 0);
3433 
3434 	/*
3435 	 * At this point, either draining has completed and no worker
3436 	 * is left, or cpu down has been canceled or the cpu is being
3437 	 * brought back up.  There shouldn't be any idle one left.
3438  * Tell each remaining busy one to rebind once it finishes its
3439 	 * currently scheduled works by scheduling the rebind_work.
3440 	 */
3441 	WARN_ON(!list_empty(&gcwq->idle_list));
3442 
3443 	for_each_busy_worker(worker, i, pos, gcwq) {
3444 		struct work_struct *rebind_work = &worker->rebind_work;
3445 
3446 		/*
3447 		 * Rebind_work may race with future cpu hotplug
3448 		 * operations.  Use a separate flag to mark that
3449 		 * rebinding is scheduled.
3450 		 */
3451 		worker->flags |= WORKER_REBIND;
3452 		worker->flags &= ~WORKER_ROGUE;
3453 
3454 		/* queue rebind_work, wq doesn't matter, use the default one */
3455 		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
3456 				     work_data_bits(rebind_work)))
3457 			continue;
3458 
3459 		debug_work_activate(rebind_work);
3460 		insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
3461 			    worker->scheduled.next,
3462 			    work_color_to_flags(WORK_NO_COLOR));
3463 	}
3464 
3465 	/* relinquish manager role */
3466 	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
3467 
3468 	/* notify completion */
3469 	gcwq->trustee = NULL;
3470 	gcwq->trustee_state = TRUSTEE_DONE;
3471 	wake_up_all(&gcwq->trustee_wait);
3472 	spin_unlock_irq(&gcwq->lock);
3473 	return 0;
3474 }
3475 
3476 /**
3477  * wait_trustee_state - wait for trustee to enter the specified state
3478  * @gcwq: gcwq the trustee of interest belongs to
3479  * @state: target state to wait for
3480  *
3481  * Wait for the trustee to reach @state.  DONE is already matched.
3482  *
3483  * CONTEXT:
3484  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3485  * multiple times.  To be used by cpu_callback.
3486  */
3487 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
3488 __releases(&gcwq->lock)
3489 __acquires(&gcwq->lock)
3490 {
3491 	if (!(gcwq->trustee_state == state ||
3492 	      gcwq->trustee_state == TRUSTEE_DONE)) {
3493 		spin_unlock_irq(&gcwq->lock);
3494 		__wait_event(gcwq->trustee_wait,
3495 			     gcwq->trustee_state == state ||
3496 			     gcwq->trustee_state == TRUSTEE_DONE);
3497 		spin_lock_irq(&gcwq->lock);
3498 	}
3499 }
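
/*
 * wait_trustee_state() is used by the hotplug callback below in two
 * places: CPU_DOWN_PREPARE waits for TRUSTEE_IN_CHARGE so the trustee
 * owns the gcwq before the CPU goes away, and CPU_DOWN_FAILED /
 * CPU_ONLINE wait for TRUSTEE_DONE before re-associating the gcwq.
 */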
3500 
3501 static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
3502 						unsigned long action,
3503 						void *hcpu)
3504 {
3505 	unsigned int cpu = (unsigned long)hcpu;
3506 	struct global_cwq *gcwq = get_gcwq(cpu);
3507 	struct task_struct *new_trustee = NULL;
3508 	struct worker *uninitialized_var(new_worker);
3509 	unsigned long flags;
3510 
3511 	action &= ~CPU_TASKS_FROZEN;
3512 
3513 	switch (action) {
3514 	case CPU_DOWN_PREPARE:
3515 		new_trustee = kthread_create(trustee_thread, gcwq,
3516 					     "workqueue_trustee/%d", cpu);
3517 		if (IS_ERR(new_trustee))
3518 			return notifier_from_errno(PTR_ERR(new_trustee));
3519 		kthread_bind(new_trustee, cpu);
3520 		/* fall through */
3521 	case CPU_UP_PREPARE:
3522 		BUG_ON(gcwq->first_idle);
3523 		new_worker = create_worker(gcwq, false);
3524 		if (!new_worker) {
3525 			if (new_trustee)
3526 				kthread_stop(new_trustee);
3527 			return NOTIFY_BAD;
3528 		}
3529 	}
3530 
3531 	/* some are called w/ irq disabled, don't disturb irq status */
3532 	spin_lock_irqsave(&gcwq->lock, flags);
3533 
3534 	switch (action) {
3535 	case CPU_DOWN_PREPARE:
3536 		/* initialize trustee and tell it to acquire the gcwq */
3537 		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
3538 		gcwq->trustee = new_trustee;
3539 		gcwq->trustee_state = TRUSTEE_START;
3540 		wake_up_process(gcwq->trustee);
3541 		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
3542 		/* fall through */
3543 	case CPU_UP_PREPARE:
3544 		BUG_ON(gcwq->first_idle);
3545 		gcwq->first_idle = new_worker;
3546 		break;
3547 
3548 	case CPU_DYING:
3549 		/*
3550 		 * Before this, the trustee and all workers except for
3551 		 * the ones which are still executing works from
3552 		 * before the last CPU down must be on the cpu.  After
3553 		 * this, they may all be migrated to other CPUs.
3554 		 */
3555 		gcwq->flags |= GCWQ_DISASSOCIATED;
3556 		break;
3557 
3558 	case CPU_POST_DEAD:
3559 		gcwq->trustee_state = TRUSTEE_BUTCHER;
3560 		/* fall through */
3561 	case CPU_UP_CANCELED:
3562 		destroy_worker(gcwq->first_idle);
3563 		gcwq->first_idle = NULL;
3564 		break;
3565 
3566 	case CPU_DOWN_FAILED:
3567 	case CPU_ONLINE:
3568 		gcwq->flags &= ~GCWQ_DISASSOCIATED;
3569 		if (gcwq->trustee_state != TRUSTEE_DONE) {
3570 			gcwq->trustee_state = TRUSTEE_RELEASE;
3571 			wake_up_process(gcwq->trustee);
3572 			wait_trustee_state(gcwq, TRUSTEE_DONE);
3573 		}
3574 
3575 		/*
3576 		 * Trustee is done and there might be no worker left.
3577 		 * Put the first_idle in and request a real manager to
3578 		 * take a look.
3579 		 */
3580 		spin_unlock_irq(&gcwq->lock);
3581 		kthread_bind(gcwq->first_idle->task, cpu);
3582 		spin_lock_irq(&gcwq->lock);
3583 		gcwq->flags |= GCWQ_MANAGE_WORKERS;
3584 		start_worker(gcwq->first_idle);
3585 		gcwq->first_idle = NULL;
3586 		break;
3587 	}
3588 
3589 	spin_unlock_irqrestore(&gcwq->lock, flags);
3590 
3591 	return notifier_from_errno(0);
3592 }
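
/*
 * Summary of the hotplug handling above:
 *
 *	CPU_UP_PREPARE		create a first_idle worker for the gcwq
 *	CPU_DOWN_PREPARE	ditto, plus start and install the trustee
 *	CPU_DYING		mark the gcwq GCWQ_DISASSOCIATED
 *	CPU_POST_DEAD		switch the trustee to BUTCHER, drop first_idle
 *	CPU_UP_CANCELED		drop the unused first_idle worker
 *	CPU_DOWN_FAILED
 *	CPU_ONLINE		release the trustee, put first_idle to work
 */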
3593 
3594 #ifdef CONFIG_SMP
3595 
3596 struct work_for_cpu {
3597 	struct completion completion;
3598 	long (*fn)(void *);
3599 	void *arg;
3600 	long ret;
3601 };
3602 
3603 static int do_work_for_cpu(void *_wfc)
3604 {
3605 	struct work_for_cpu *wfc = _wfc;
3606 	wfc->ret = wfc->fn(wfc->arg);
3607 	complete(&wfc->completion);
3608 	return 0;
3609 }
3610 
3611 /**
3612  * work_on_cpu - run a function in thread context on a particular cpu
3613  * @cpu: the cpu to run on
3614  * @fn: the function to run
3615  * @arg: the function arg
3616  *
3617  * This will return the value @fn returns.
3618  * It is up to the caller to ensure that the cpu doesn't go offline.
3619  * The caller must not hold any locks which would prevent @fn from completing.
3620  */
3621 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
3622 {
3623 	struct task_struct *sub_thread;
3624 	struct work_for_cpu wfc = {
3625 		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
3626 		.fn = fn,
3627 		.arg = arg,
3628 	};
3629 
3630 	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
3631 	if (IS_ERR(sub_thread))
3632 		return PTR_ERR(sub_thread);
3633 	kthread_bind(sub_thread, cpu);
3634 	wake_up_process(sub_thread);
3635 	wait_for_completion(&wfc.completion);
3636 	return wfc.ret;
3637 }
3638 EXPORT_SYMBOL_GPL(work_on_cpu);
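
/*
 * Illustrative use of work_on_cpu().  probe_on_cpu(), read_local_state(),
 * data and ret below are made-up names for this sketch and are not part
 * of this file; the caller is assumed to keep the target CPU online,
 * e.g. via get_online_cpus()/put_online_cpus().
 *
 *	static long probe_on_cpu(void *arg)
 *	{
 *		return read_local_state(arg);
 *	}
 *
 *	get_online_cpus();
 *	ret = work_on_cpu(1, probe_on_cpu, &data);
 *	put_online_cpus();
 */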
3639 #endif /* CONFIG_SMP */
3640 
3641 #ifdef CONFIG_FREEZER
3642 
3643 /**
3644  * freeze_workqueues_begin - begin freezing workqueues
3645  *
3646  * Start freezing workqueues.  After this function returns, all freezable
3647  * workqueues will queue new works to their delayed_works list instead of
3648  * gcwq->worklist.
3649  *
3650  * CONTEXT:
3651  * Grabs and releases workqueue_lock and gcwq->lock's.
3652  */
3653 void freeze_workqueues_begin(void)
3654 {
3655 	unsigned int cpu;
3656 
3657 	spin_lock(&workqueue_lock);
3658 
3659 	BUG_ON(workqueue_freezing);
3660 	workqueue_freezing = true;
3661 
3662 	for_each_gcwq_cpu(cpu) {
3663 		struct global_cwq *gcwq = get_gcwq(cpu);
3664 		struct workqueue_struct *wq;
3665 
3666 		spin_lock_irq(&gcwq->lock);
3667 
3668 		BUG_ON(gcwq->flags & GCWQ_FREEZING);
3669 		gcwq->flags |= GCWQ_FREEZING;
3670 
3671 		list_for_each_entry(wq, &workqueues, list) {
3672 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3673 
3674 			if (cwq && wq->flags & WQ_FREEZABLE)
3675 				cwq->max_active = 0;
3676 		}
3677 
3678 		spin_unlock_irq(&gcwq->lock);
3679 	}
3680 
3681 	spin_unlock(&workqueue_lock);
3682 }
3683 
3684 /**
3685  * freeze_workqueues_busy - are freezable workqueues still busy?
3686  *
3687  * Check whether freezing is complete.  This function must be called
3688  * between freeze_workqueues_begin() and thaw_workqueues().
3689  *
3690  * CONTEXT:
3691  * Grabs and releases workqueue_lock.
3692  *
3693  * RETURNS:
3694  * %true if some freezable workqueues are still busy.  %false if freezing
3695  * is complete.
3696  */
3697 bool freeze_workqueues_busy(void)
3698 {
3699 	unsigned int cpu;
3700 	bool busy = false;
3701 
3702 	spin_lock(&workqueue_lock);
3703 
3704 	BUG_ON(!workqueue_freezing);
3705 
3706 	for_each_gcwq_cpu(cpu) {
3707 		struct workqueue_struct *wq;
3708 		/*
3709 		 * nr_active is monotonically decreasing.  It's safe
3710 		 * to peek without lock.
3711 		 */
3712 		list_for_each_entry(wq, &workqueues, list) {
3713 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3714 
3715 			if (!cwq || !(wq->flags & WQ_FREEZABLE))
3716 				continue;
3717 
3718 			BUG_ON(cwq->nr_active < 0);
3719 			if (cwq->nr_active) {
3720 				busy = true;
3721 				goto out_unlock;
3722 			}
3723 		}
3724 	}
3725 out_unlock:
3726 	spin_unlock(&workqueue_lock);
3727 	return busy;
3728 }
3729 
3730 /**
3731  * thaw_workqueues - thaw workqueues
3732  *
3733  * Thaw workqueues.  Normal queueing is restored and all collected
3734  * frozen works are transferred to their respective gcwq worklists.
3735  *
3736  * CONTEXT:
3737  * Grabs and releases workqueue_lock and gcwq->lock's.
3738  */
3739 void thaw_workqueues(void)
3740 {
3741 	unsigned int cpu;
3742 
3743 	spin_lock(&workqueue_lock);
3744 
3745 	if (!workqueue_freezing)
3746 		goto out_unlock;
3747 
3748 	for_each_gcwq_cpu(cpu) {
3749 		struct global_cwq *gcwq = get_gcwq(cpu);
3750 		struct workqueue_struct *wq;
3751 
3752 		spin_lock_irq(&gcwq->lock);
3753 
3754 		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
3755 		gcwq->flags &= ~GCWQ_FREEZING;
3756 
3757 		list_for_each_entry(wq, &workqueues, list) {
3758 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3759 
3760 			if (!cwq || !(wq->flags & WQ_FREEZABLE))
3761 				continue;
3762 
3763 			/* restore max_active and repopulate worklist */
3764 			cwq->max_active = wq->saved_max_active;
3765 
3766 			while (!list_empty(&cwq->delayed_works) &&
3767 			       cwq->nr_active < cwq->max_active)
3768 				cwq_activate_first_delayed(cwq);
3769 		}
3770 
3771 		wake_up_worker(gcwq);
3772 
3773 		spin_unlock_irq(&gcwq->lock);
3774 	}
3775 
3776 	workqueue_freezing = false;
3777 out_unlock:
3778 	spin_unlock(&workqueue_lock);
3779 }
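
/*
 * Sketch of how the freezer is expected to drive the three functions
 * above; the real caller lives in the suspend/hibernation code and the
 * loop below is only an illustration of the intended ordering:
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);
 *	... create image / suspend devices ...
 *	thaw_workqueues();
 */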
3780 #endif /* CONFIG_FREEZER */
3781 
3782 static int __init init_workqueues(void)
3783 {
3784 	unsigned int cpu;
3785 	int i;
3786 
3787 	cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
3788 
3789 	/* initialize gcwqs */
3790 	for_each_gcwq_cpu(cpu) {
3791 		struct global_cwq *gcwq = get_gcwq(cpu);
3792 
3793 		spin_lock_init(&gcwq->lock);
3794 		INIT_LIST_HEAD(&gcwq->worklist);
3795 		gcwq->cpu = cpu;
3796 		gcwq->flags |= GCWQ_DISASSOCIATED;
3797 
3798 		INIT_LIST_HEAD(&gcwq->idle_list);
3799 		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3800 			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3801 
3802 		init_timer_deferrable(&gcwq->idle_timer);
3803 		gcwq->idle_timer.function = idle_worker_timeout;
3804 		gcwq->idle_timer.data = (unsigned long)gcwq;
3805 
3806 		setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
3807 			    (unsigned long)gcwq);
3808 
3809 		ida_init(&gcwq->worker_ida);
3810 
3811 		gcwq->trustee_state = TRUSTEE_DONE;
3812 		init_waitqueue_head(&gcwq->trustee_wait);
3813 	}
3814 
3815 	/* create the initial worker */
3816 	for_each_online_gcwq_cpu(cpu) {
3817 		struct global_cwq *gcwq = get_gcwq(cpu);
3818 		struct worker *worker;
3819 
3820 		if (cpu != WORK_CPU_UNBOUND)
3821 			gcwq->flags &= ~GCWQ_DISASSOCIATED;
3822 		worker = create_worker(gcwq, true);
3823 		BUG_ON(!worker);
3824 		spin_lock_irq(&gcwq->lock);
3825 		start_worker(worker);
3826 		spin_unlock_irq(&gcwq->lock);
3827 	}
3828 
3829 	system_wq = alloc_workqueue("events", 0, 0);
3830 	system_long_wq = alloc_workqueue("events_long", 0, 0);
3831 	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
3832 	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
3833 					    WQ_UNBOUND_MAX_ACTIVE);
3834 	system_freezable_wq = alloc_workqueue("events_freezable",
3835 					      WQ_FREEZABLE, 0);
3836 	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
3837 	       !system_unbound_wq || !system_freezable_wq);
3838 	return 0;
3839 }
3840 early_initcall(init_workqueues);
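
/*
 * Once init_workqueues() has run (early_initcall, so before regular
 * initcalls), the system workqueues above are usable.  A minimal
 * illustrative user; my_fn and my_work are made-up names:
 *
 *	static void my_fn(struct work_struct *work)
 *	{
 *		pr_info("deferred work ran\n");
 *	}
 *	static DECLARE_WORK(my_work, my_fn);
 *
 *	schedule_work(&my_work);
 *
 * schedule_work() queues on system_wq; works that need not run on the
 * submitting CPU can be queued with queue_work(system_unbound_wq, ...).
 */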
3841