1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * kernel/workqueue.c - generic async execution with shared worker pool
4 *
5 * Copyright (C) 2002 Ingo Molnar
6 *
7 * Derived from the taskqueue/keventd code by:
8 * David Woodhouse <dwmw2@infradead.org>
9 * Andrew Morton
10 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
11 * Theodore Ts'o <tytso@mit.edu>
12 *
13 * Made to use alloc_percpu by Christoph Lameter.
14 *
15 * Copyright (C) 2010 SUSE Linux Products GmbH
16 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
17 *
 * This is the generic async execution mechanism. Work items are
 * executed in process context. The worker pool is shared and
20 * automatically managed. There are two worker pools for each CPU (one for
21 * normal work items and the other for high priority ones) and some extra
22 * pools for workqueues which are not bound to any specific CPU - the
23 * number of these backing pools is dynamic.
24 *
25 * Please read Documentation/core-api/workqueue.rst for details.
26 */
27
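/*
 * Illustrative sketch (frob_fn and frob_work are hypothetical names): users
 * normally only touch the API from <linux/workqueue.h>; everything in this
 * file is the shared-pool machinery behind calls such as:
 *
 *	static void frob_fn(struct work_struct *work);	(runs in process context)
 *	static DECLARE_WORK(frob_work, frob_fn);
 *	...
 *	schedule_work(&frob_work);			(queued on system_wq)
 */
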
28 #include <linux/export.h>
29 #include <linux/kernel.h>
30 #include <linux/sched.h>
31 #include <linux/init.h>
32 #include <linux/signal.h>
33 #include <linux/completion.h>
34 #include <linux/workqueue.h>
35 #include <linux/slab.h>
36 #include <linux/cpu.h>
37 #include <linux/notifier.h>
38 #include <linux/kthread.h>
39 #include <linux/hardirq.h>
40 #include <linux/mempolicy.h>
41 #include <linux/freezer.h>
42 #include <linux/debug_locks.h>
43 #include <linux/lockdep.h>
44 #include <linux/idr.h>
45 #include <linux/jhash.h>
46 #include <linux/hashtable.h>
47 #include <linux/rculist.h>
48 #include <linux/nodemask.h>
49 #include <linux/moduleparam.h>
50 #include <linux/uaccess.h>
51 #include <linux/sched/isolation.h>
52 #include <linux/sched/debug.h>
53 #include <linux/nmi.h>
54 #include <linux/kvm_para.h>
55 #include <linux/delay.h>
56
57 #include "workqueue_internal.h"
58
59 enum {
60 /*
61 * worker_pool flags
62 *
	 * A bound pool is either associated with or disassociated from its CPU.
64 * While associated (!DISASSOCIATED), all workers are bound to the
65 * CPU and none has %WORKER_UNBOUND set and concurrency management
66 * is in effect.
67 *
68 * While DISASSOCIATED, the cpu may be offline and all workers have
69 * %WORKER_UNBOUND set and concurrency management disabled, and may
70 * be executing on any CPU. The pool behaves as an unbound one.
71 *
72 * Note that DISASSOCIATED should be flipped only while holding
73 * wq_pool_attach_mutex to avoid changing binding state while
74 * worker_attach_to_pool() is in progress.
75 */
76 POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */
77 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
78
79 /* worker flags */
80 WORKER_DIE = 1 << 1, /* die die die */
81 WORKER_IDLE = 1 << 2, /* is idle */
82 WORKER_PREP = 1 << 3, /* preparing to run works */
83 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
84 WORKER_UNBOUND = 1 << 7, /* worker is unbound */
85 WORKER_REBOUND = 1 << 8, /* worker was rebound */
86
87 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE |
88 WORKER_UNBOUND | WORKER_REBOUND,
89
90 NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */
91
92 UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */
93 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
94
95 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
96 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
97
	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks); worked
						   example below the enum */
101 MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
	CREATE_COOLDOWN		= HZ,			/* time to breathe after fail */
103
104 /*
	 * Rescue workers are used only in emergencies and shared by
106 * all cpus. Give MIN_NICE.
107 */
108 RESCUER_NICE_LEVEL = MIN_NICE,
109 HIGHPRI_NICE_LEVEL = MIN_NICE,
110
111 WQ_NAME_LEN = 24,
112 };
113
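/*
 * Worked example for MAYDAY_INITIAL_TIMEOUT above: with HZ == 1000, HZ / 100
 * is 10 ticks, i.e. 10ms; with HZ == 100 it would be a single tick, so the
 * expression clamps it to the two-tick minimum (20ms).
 */
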
114 /*
115 * Structure fields follow one of the following exclusion rules.
116 *
117 * I: Modifiable by initialization/destruction paths and read-only for
118 * everyone else.
119 *
 * P: Preemption protected. Disabling preemption is enough. The field should
 *    only be modified and accessed from the local cpu.
122 *
123 * L: pool->lock protected. Access with pool->lock held.
124 *
125 * K: Only modified by worker while holding pool->lock. Can be safely read by
126 * self, while holding pool->lock or from IRQ context if %current is the
127 * kworker.
128 *
129 * S: Only modified by worker self.
130 *
131 * A: wq_pool_attach_mutex protected.
132 *
133 * PL: wq_pool_mutex protected.
134 *
135 * PR: wq_pool_mutex protected for writes. RCU protected for reads.
136 *
137 * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
138 *
139 * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
140 * RCU for reads.
141 *
142 * WQ: wq->mutex protected.
143 *
144 * WR: wq->mutex protected for writes. RCU protected for reads.
145 *
146 * MD: wq_mayday_lock protected.
147 *
148 * WD: Used internally by the watchdog.
149 */
150
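/*
 * For example, worker_pool->worklist below is annotated "L" and thus must only
 * be accessed with pool->lock held, while workqueue_struct->pwqs is "WR" and
 * may be walked under either wq->mutex or an RCU read-side critical section,
 * with wq->mutex required for modification.
 */
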
151 /* struct worker is defined in workqueue_internal.h */
152
153 struct worker_pool {
154 raw_spinlock_t lock; /* the pool lock */
155 int cpu; /* I: the associated cpu */
156 int node; /* I: the associated node ID */
157 int id; /* I: pool ID */
158 unsigned int flags; /* L: flags */
159
160 unsigned long watchdog_ts; /* L: watchdog timestamp */
161 bool cpu_stall; /* WD: stalled cpu bound pool */
162
163 /*
164 * The counter is incremented in a process context on the associated CPU
165 * w/ preemption disabled, and decremented or reset in the same context
166 * but w/ pool->lock held. The readers grab pool->lock and are
167 * guaranteed to see if the counter reached zero.
168 */
169 int nr_running;
170
171 struct list_head worklist; /* L: list of pending works */
172
173 int nr_workers; /* L: total number of workers */
174 int nr_idle; /* L: currently idle workers */
175
176 struct list_head idle_list; /* L: list of idle workers */
177 struct timer_list idle_timer; /* L: worker idle timeout */
178 struct work_struct idle_cull_work; /* L: worker idle cleanup */
179
180 struct timer_list mayday_timer; /* L: SOS timer for workers */
181
	/* a worker is either on busy_hash or idle_list, or the manager */
183 DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
184 /* L: hash of busy workers */
185
186 struct worker *manager; /* L: purely informational */
187 struct list_head workers; /* A: attached workers */
188 struct list_head dying_workers; /* A: workers about to die */
189 struct completion *detach_completion; /* all workers detached */
190
191 struct ida worker_ida; /* worker IDs for task name */
192
193 struct workqueue_attrs *attrs; /* I: worker attributes */
194 struct hlist_node hash_node; /* PL: unbound_pool_hash node */
195 int refcnt; /* PL: refcnt for unbound pools */
196
197 /*
198 * Destruction of pool is RCU protected to allow dereferences
199 * from get_work_pool().
200 */
201 struct rcu_head rcu;
202 };
203
204 /*
205 * Per-pool_workqueue statistics. These can be monitored using
206 * tools/workqueue/wq_monitor.py.
207 */
208 enum pool_workqueue_stats {
209 PWQ_STAT_STARTED, /* work items started execution */
210 PWQ_STAT_COMPLETED, /* work items completed execution */
211 PWQ_STAT_CPU_TIME, /* total CPU time consumed */
212 PWQ_STAT_CPU_INTENSIVE, /* wq_cpu_intensive_thresh_us violations */
213 PWQ_STAT_CM_WAKEUP, /* concurrency-management worker wakeups */
214 PWQ_STAT_REPATRIATED, /* unbound workers brought back into scope */
215 PWQ_STAT_MAYDAY, /* maydays to rescuer */
216 PWQ_STAT_RESCUED, /* linked work items executed by rescuer */
217
218 PWQ_NR_STATS,
219 };
220
221 /*
222 * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
223 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned on a
 * (1 << WORK_STRUCT_FLAG_BITS) boundary.
226 */
227 struct pool_workqueue {
228 struct worker_pool *pool; /* I: the associated pool */
229 struct workqueue_struct *wq; /* I: the owning workqueue */
230 int work_color; /* L: current color */
231 int flush_color; /* L: flushing color */
232 int refcnt; /* L: reference count */
233 int nr_in_flight[WORK_NR_COLORS];
234 /* L: nr of in_flight works */
235
236 /*
237 * nr_active management and WORK_STRUCT_INACTIVE:
238 *
	 * When pwq->nr_active >= max_active, a new work item is queued to
240 * pwq->inactive_works instead of pool->worklist and marked with
241 * WORK_STRUCT_INACTIVE.
242 *
243 * All work items marked with WORK_STRUCT_INACTIVE do not participate
244 * in pwq->nr_active and all work items in pwq->inactive_works are
245 * marked with WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE
246 * work items are in pwq->inactive_works. Some of them are ready to
	 * run in pool->worklist or worker->scheduled. Those work items are
248 * only struct wq_barrier which is used for flush_work() and should
249 * not participate in pwq->nr_active. For non-barrier work item, it
250 * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
251 */
252 int nr_active; /* L: nr of active works */
253 int max_active; /* L: max active works */
254 struct list_head inactive_works; /* L: inactive works */
255 struct list_head pwqs_node; /* WR: node on wq->pwqs */
256 struct list_head mayday_node; /* MD: node on wq->maydays */
257
258 u64 stats[PWQ_NR_STATS];
259
260 /*
261 * Release of unbound pwq is punted to a kthread_worker. See put_pwq()
262 * and pwq_release_workfn() for details. pool_workqueue itself is also
263 * RCU protected so that the first pwq can be determined without
264 * grabbing wq->mutex.
265 */
266 struct kthread_work release_work;
267 struct rcu_head rcu;
268 } __aligned(1 << WORK_STRUCT_FLAG_BITS);
269
270 /*
271 * Structure used to wait for workqueue flush.
272 */
273 struct wq_flusher {
274 struct list_head list; /* WQ: list of flushers */
275 int flush_color; /* WQ: flush color waiting for */
276 struct completion done; /* flush completion */
277 };
278
279 struct wq_device;
280
281 /*
282 * The externally visible workqueue. It relays the issued work items to
283 * the appropriate worker_pool through its pool_workqueues.
284 */
285 struct workqueue_struct {
286 struct list_head pwqs; /* WR: all pwqs of this wq */
287 struct list_head list; /* PR: list of all workqueues */
288
289 struct mutex mutex; /* protects this wq */
290 int work_color; /* WQ: current work color */
291 int flush_color; /* WQ: current flush color */
292 atomic_t nr_pwqs_to_flush; /* flush in progress */
293 struct wq_flusher *first_flusher; /* WQ: first flusher */
294 struct list_head flusher_queue; /* WQ: flush waiters */
295 struct list_head flusher_overflow; /* WQ: flush overflow list */
296
297 struct list_head maydays; /* MD: pwqs requesting rescue */
298 struct worker *rescuer; /* MD: rescue worker */
299
300 int nr_drainers; /* WQ: drain in progress */
301 int saved_max_active; /* WQ: saved pwq max_active */
302
303 struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */
304 struct pool_workqueue *dfl_pwq; /* PW: only for unbound wqs */
305
306 #ifdef CONFIG_SYSFS
307 struct wq_device *wq_dev; /* I: for sysfs interface */
308 #endif
309 #ifdef CONFIG_LOCKDEP
310 char *lock_name;
311 struct lock_class_key key;
312 struct lockdep_map lockdep_map;
313 #endif
314 char name[WQ_NAME_LEN]; /* I: workqueue name */
315
316 /*
317 * Destruction of workqueue_struct is RCU protected to allow walking
318 * the workqueues list without grabbing wq_pool_mutex.
319 * This is used to dump all workqueues from sysrq.
320 */
321 struct rcu_head rcu;
322
323 /* hot fields used during command issue, aligned to cacheline */
324 unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */
325 struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
326 };
327
328 static struct kmem_cache *pwq_cache;
329
330 /*
331 * Each pod type describes how CPUs should be grouped for unbound workqueues.
332 * See the comment above workqueue_attrs->affn_scope.
333 */
334 struct wq_pod_type {
335 int nr_pods; /* number of pods */
336 cpumask_var_t *pod_cpus; /* pod -> cpus */
337 int *pod_node; /* pod -> node */
338 int *cpu_pod; /* cpu -> pod */
339 };
340
341 static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
342 static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;
343
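/*
 * Illustrative sketch: resolving the pod of a CPU for a given scope is a pair
 * of table lookups on wq_pod_types[] above, e.g. for the cache scope:
 *
 *	struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_CACHE];
 *	int pod = pt->cpu_pod[cpu];
 *	const struct cpumask *mask = pt->pod_cpus[pod];	(CPUs in @cpu's pod)
 *	int node = pt->pod_node[pod];			(backing NUMA node)
 */
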
344 static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
345 [WQ_AFFN_DFL] = "default",
346 [WQ_AFFN_CPU] = "cpu",
347 [WQ_AFFN_SMT] = "smt",
348 [WQ_AFFN_CACHE] = "cache",
349 [WQ_AFFN_NUMA] = "numa",
350 [WQ_AFFN_SYSTEM] = "system",
351 };
352
353 /*
354 * Per-cpu work items which run for longer than the following threshold are
355 * automatically considered CPU intensive and excluded from concurrency
356 * management to prevent them from noticeably delaying other per-cpu work items.
357 * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter.
358 * The actual value is initialized in wq_cpu_intensive_thresh_init().
359 */
360 static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
361 module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);
362
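/*
 * Note (illustrative): because this is a module_param in built-in code, the
 * threshold can also be set on the kernel command line, e.g.
 * "workqueue.cpu_intensive_thresh_us=10000", or changed at runtime through
 * /sys/module/workqueue/parameters/cpu_intensive_thresh_us (mode 0644 above).
 */
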
363 /* see the comment above the definition of WQ_POWER_EFFICIENT */
364 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
365 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
366
367 static bool wq_online; /* can kworkers be created yet? */
368
369 /* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
370 static struct workqueue_attrs *wq_update_pod_attrs_buf;
371
372 static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
373 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
374 static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
375 /* wait for manager to go away */
376 static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
377
378 static LIST_HEAD(workqueues); /* PR: list of all workqueues */
379 static bool workqueue_freezing; /* PL: have wqs started freezing? */
380
381 /* PL&A: allowable cpus for unbound wqs and work items */
382 static cpumask_var_t wq_unbound_cpumask;
383
/* to further constrain wq_unbound_cpumask by the cmdline parameter */
385 static struct cpumask wq_cmdline_cpumask __initdata;
386
387 /* CPU where unbound work was last round robin scheduled from this CPU */
388 static DEFINE_PER_CPU(int, wq_rr_cpu_last);
389
390 /*
391 * Local execution of unbound work items is no longer guaranteed. The
392 * following always forces round-robin CPU selection on unbound work items
393 * to uncover usages which depend on it.
394 */
395 #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
396 static bool wq_debug_force_rr_cpu = true;
397 #else
398 static bool wq_debug_force_rr_cpu = false;
399 #endif
400 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
401
402 /* the per-cpu worker pools */
403 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
404
405 static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
406
407 /* PL: hash of all unbound pools keyed by pool->attrs */
408 static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
409
410 /* I: attributes used when instantiating standard unbound pools on demand */
411 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
412
413 /* I: attributes used when instantiating ordered pools on demand */
414 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
415
416 /*
 * I: kthread_worker to release pwq's. A pwq release may be requested while
 * holding a pool lock, so it is bounced to process context; a dedicated
 * kthread worker is used to avoid A-A deadlocks.
420 */
421 static struct kthread_worker *pwq_release_worker;
422
423 struct workqueue_struct *system_wq __read_mostly;
424 EXPORT_SYMBOL(system_wq);
425 struct workqueue_struct *system_highpri_wq __read_mostly;
426 EXPORT_SYMBOL_GPL(system_highpri_wq);
427 struct workqueue_struct *system_long_wq __read_mostly;
428 EXPORT_SYMBOL_GPL(system_long_wq);
429 struct workqueue_struct *system_unbound_wq __read_mostly;
430 EXPORT_SYMBOL_GPL(system_unbound_wq);
431 struct workqueue_struct *system_freezable_wq __read_mostly;
432 EXPORT_SYMBOL_GPL(system_freezable_wq);
433 struct workqueue_struct *system_power_efficient_wq __read_mostly;
434 EXPORT_SYMBOL_GPL(system_power_efficient_wq);
435 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
436 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
437
438 static int worker_thread(void *__worker);
439 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
440 static void show_pwq(struct pool_workqueue *pwq);
441 static void show_one_worker_pool(struct worker_pool *pool);
442
443 #define CREATE_TRACE_POINTS
444 #include <trace/events/workqueue.h>
445
446 #define assert_rcu_or_pool_mutex() \
447 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
448 !lockdep_is_held(&wq_pool_mutex), \
449 "RCU or wq_pool_mutex should be held")
450
451 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
452 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
453 !lockdep_is_held(&wq->mutex) && \
454 !lockdep_is_held(&wq_pool_mutex), \
455 "RCU, wq->mutex or wq_pool_mutex should be held")
456
457 #define for_each_cpu_worker_pool(pool, cpu) \
458 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
459 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
460 (pool)++)
461
462 /**
463 * for_each_pool - iterate through all worker_pools in the system
464 * @pool: iteration cursor
465 * @pi: integer used for iteration
466 *
467 * This must be called either with wq_pool_mutex held or RCU read
468 * locked. If the pool needs to be used beyond the locking in effect, the
469 * caller is responsible for guaranteeing that the pool stays online.
470 *
471 * The if/else clause exists only for the lockdep assertion and can be
472 * ignored.
473 */
474 #define for_each_pool(pool, pi) \
475 idr_for_each_entry(&worker_pool_idr, pool, pi) \
476 if (({ assert_rcu_or_pool_mutex(); false; })) { } \
477 else
478
479 /**
480 * for_each_pool_worker - iterate through all workers of a worker_pool
481 * @worker: iteration cursor
482 * @pool: worker_pool to iterate workers of
483 *
484 * This must be called with wq_pool_attach_mutex.
485 *
486 * The if/else clause exists only for the lockdep assertion and can be
487 * ignored.
488 */
489 #define for_each_pool_worker(worker, pool) \
490 list_for_each_entry((worker), &(pool)->workers, node) \
491 if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
492 else
493
494 /**
495 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
496 * @pwq: iteration cursor
497 * @wq: the target workqueue
498 *
499 * This must be called either with wq->mutex held or RCU read locked.
500 * If the pwq needs to be used beyond the locking in effect, the caller is
501 * responsible for guaranteeing that the pwq stays online.
502 *
503 * The if/else clause exists only for the lockdep assertion and can be
504 * ignored.
505 */
506 #define for_each_pwq(pwq, wq) \
507 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
508 lockdep_is_held(&(wq->mutex)))
509
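/*
 * Illustrative sketch (hypothetical read-side caller): iterating a wq's pwqs
 * only needs the RCU read lock; writers hold wq->mutex instead, which
 * satisfies the same lockdep condition:
 *
 *	rcu_read_lock();
 *	for_each_pwq(pwq, wq)
 *		pr_info("%s: pwq on pool %d\n", wq->name, pwq->pool->id);
 *	rcu_read_unlock();
 */
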
510 #ifdef CONFIG_DEBUG_OBJECTS_WORK
511
512 static const struct debug_obj_descr work_debug_descr;
513
static void *work_debug_hint(void *addr)
515 {
516 return ((struct work_struct *) addr)->func;
517 }
518
static bool work_is_static_object(void *addr)
520 {
521 struct work_struct *work = addr;
522
523 return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
524 }
525
526 /*
527 * fixup_init is called when:
528 * - an active object is initialized
529 */
static bool work_fixup_init(void *addr, enum debug_obj_state state)
531 {
532 struct work_struct *work = addr;
533
534 switch (state) {
535 case ODEBUG_STATE_ACTIVE:
536 cancel_work_sync(work);
537 debug_object_init(work, &work_debug_descr);
538 return true;
539 default:
540 return false;
541 }
542 }
543
544 /*
545 * fixup_free is called when:
546 * - an active object is freed
547 */
static bool work_fixup_free(void *addr, enum debug_obj_state state)
549 {
550 struct work_struct *work = addr;
551
552 switch (state) {
553 case ODEBUG_STATE_ACTIVE:
554 cancel_work_sync(work);
555 debug_object_free(work, &work_debug_descr);
556 return true;
557 default:
558 return false;
559 }
560 }
561
562 static const struct debug_obj_descr work_debug_descr = {
563 .name = "work_struct",
564 .debug_hint = work_debug_hint,
565 .is_static_object = work_is_static_object,
566 .fixup_init = work_fixup_init,
567 .fixup_free = work_fixup_free,
568 };
569
static inline void debug_work_activate(struct work_struct *work)
571 {
572 debug_object_activate(work, &work_debug_descr);
573 }
574
static inline void debug_work_deactivate(struct work_struct *work)
576 {
577 debug_object_deactivate(work, &work_debug_descr);
578 }
579
void __init_work(struct work_struct *work, int onstack)
581 {
582 if (onstack)
583 debug_object_init_on_stack(work, &work_debug_descr);
584 else
585 debug_object_init(work, &work_debug_descr);
586 }
587 EXPORT_SYMBOL_GPL(__init_work);
588
void destroy_work_on_stack(struct work_struct *work)
590 {
591 debug_object_free(work, &work_debug_descr);
592 }
593 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
594
void destroy_delayed_work_on_stack(struct delayed_work *work)
596 {
597 destroy_timer_on_stack(&work->timer);
598 debug_object_free(&work->work, &work_debug_descr);
599 }
600 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
601
602 #else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
605 #endif
606
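/*
 * Illustrative sketch (frob_fn is hypothetical): stack-allocated work items
 * pair the _ONSTACK initializers with the destroy helpers above so that
 * CONFIG_DEBUG_OBJECTS_WORK can track their lifetime:
 *
 *	struct work_struct work;
 *
 *	INIT_WORK_ONSTACK(&work, frob_fn);
 *	schedule_work(&work);
 *	flush_work(&work);
 *	destroy_work_on_stack(&work);
 */
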
607 /**
608 * worker_pool_assign_id - allocate ID and assign it to @pool
609 * @pool: the pool pointer of interest
610 *
611 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
612 * successfully, -errno on failure.
613 */
static int worker_pool_assign_id(struct worker_pool *pool)
615 {
616 int ret;
617
618 lockdep_assert_held(&wq_pool_mutex);
619
620 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
621 GFP_KERNEL);
622 if (ret >= 0) {
623 pool->id = ret;
624 return 0;
625 }
626 return ret;
627 }
628
static unsigned int work_color_to_flags(int color)
630 {
631 return color << WORK_STRUCT_COLOR_SHIFT;
632 }
633
static int get_work_color(unsigned long work_data)
635 {
636 return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
637 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
638 }
639
static int work_next_color(int color)
641 {
642 return (color + 1) % WORK_NR_COLORS;
643 }
644
645 /*
646 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
647 * contain the pointer to the queued pwq. Once execution starts, the flag
648 * is cleared and the high bits contain OFFQ flags and pool ID.
649 *
650 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
651 * and clear_work_data() can be used to set the pwq, pool or clear
652 * work->data. These functions should only be called while the work is
653 * owned - ie. while the PENDING bit is set.
654 *
655 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
656 * corresponding to a work. Pool is available once the work has been
657 * queued anywhere after initialization until it is sync canceled. pwq is
658 * available only while the work item is queued.
659 *
660 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
661 * canceled. While being canceled, a work item may have its PENDING set
662 * but stay off timer and worklist for arbitrarily long and nobody should
663 * try to steal the PENDING bit.
664 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
667 {
668 WARN_ON_ONCE(!work_pending(work));
669 atomic_long_set(&work->data, data | flags | work_static(work));
670 }
671
static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
			 unsigned long extra_flags)
674 {
675 set_work_data(work, (unsigned long)pwq,
676 WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
677 }
678
static void set_work_pool_and_keep_pending(struct work_struct *work,
					   int pool_id)
681 {
682 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
683 WORK_STRUCT_PENDING);
684 }
685
static void set_work_pool_and_clear_pending(struct work_struct *work,
					    int pool_id)
688 {
689 /*
690 * The following wmb is paired with the implied mb in
691 * test_and_set_bit(PENDING) and ensures all updates to @work made
692 * here are visible to and precede any updates by the next PENDING
693 * owner.
694 */
695 smp_wmb();
696 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
697 /*
698 * The following mb guarantees that previous clear of a PENDING bit
699 * will not be reordered with any speculative LOADS or STORES from
700 * work->current_func, which is executed afterwards. This possible
701 * reordering can lead to a missed execution on attempt to queue
702 * the same @work. E.g. consider this case:
703 *
704 * CPU#0 CPU#1
705 * ---------------------------- --------------------------------
706 *
707 * 1 STORE event_indicated
708 * 2 queue_work_on() {
709 * 3 test_and_set_bit(PENDING)
710 * 4 } set_..._and_clear_pending() {
711 * 5 set_work_data() # clear bit
712 * 6 smp_mb()
713 * 7 work->current_func() {
714 * 8 LOAD event_indicated
715 * }
716 *
	 * Without an explicit full barrier, the speculative LOAD on line 8 can
	 * be executed before CPU#0 does the STORE on line 1. If that happens,
	 * CPU#0 observes that the PENDING bit is still set and a new execution
	 * of @work is not queued, in the hope that CPU#1 will eventually
	 * finish the queued @work. Meanwhile CPU#1 does not see that
	 * event_indicated is set, because the speculative LOAD was executed
	 * before the actual STORE.
724 */
725 smp_mb();
726 }
727
static void clear_work_data(struct work_struct *work)
729 {
730 smp_wmb(); /* see set_work_pool_and_clear_pending() */
731 set_work_data(work, WORK_STRUCT_NO_POOL, 0);
732 }
733
static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
735 {
736 return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
737 }
738
static struct pool_workqueue *get_work_pwq(struct work_struct *work)
740 {
741 unsigned long data = atomic_long_read(&work->data);
742
743 if (data & WORK_STRUCT_PWQ)
744 return work_struct_pwq(data);
745 else
746 return NULL;
747 }
748
749 /**
750 * get_work_pool - return the worker_pool a given work was associated with
751 * @work: the work item of interest
752 *
 * Pools are created and destroyed under wq_pool_mutex, and allow read
754 * access under RCU read lock. As such, this function should be
755 * called under wq_pool_mutex or inside of a rcu_read_lock() region.
756 *
757 * All fields of the returned pool are accessible as long as the above
758 * mentioned locking is in effect. If the returned pool needs to be used
759 * beyond the critical section, the caller is responsible for ensuring the
760 * returned pool is and stays online.
761 *
762 * Return: The worker_pool @work was last associated with. %NULL if none.
763 */
static struct worker_pool *get_work_pool(struct work_struct *work)
765 {
766 unsigned long data = atomic_long_read(&work->data);
767 int pool_id;
768
769 assert_rcu_or_pool_mutex();
770
771 if (data & WORK_STRUCT_PWQ)
772 return work_struct_pwq(data)->pool;
773
774 pool_id = data >> WORK_OFFQ_POOL_SHIFT;
775 if (pool_id == WORK_OFFQ_POOL_NONE)
776 return NULL;
777
778 return idr_find(&worker_pool_idr, pool_id);
779 }
780
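/*
 * Illustrative sketch (hypothetical caller): the helpers above are the
 * sanctioned way to decode work->data, e.g.:
 *
 *	rcu_read_lock();
 *	pool = get_work_pool(work);	(NULL if never queued or sync canceled)
 *	if (pool)
 *		pr_info("work last associated with pool %d\n", pool->id);
 *	rcu_read_unlock();
 *
 * get_work_pwq() is only meaningful while the item is actually queued.
 */
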
781 /**
782 * get_work_pool_id - return the worker pool ID a given work is associated with
783 * @work: the work item of interest
784 *
785 * Return: The worker_pool ID @work was last associated with.
786 * %WORK_OFFQ_POOL_NONE if none.
787 */
static int get_work_pool_id(struct work_struct *work)
789 {
790 unsigned long data = atomic_long_read(&work->data);
791
792 if (data & WORK_STRUCT_PWQ)
793 return work_struct_pwq(data)->pool->id;
794
795 return data >> WORK_OFFQ_POOL_SHIFT;
796 }
797
static void mark_work_canceling(struct work_struct *work)
799 {
800 unsigned long pool_id = get_work_pool_id(work);
801
802 pool_id <<= WORK_OFFQ_POOL_SHIFT;
803 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
804 }
805
static bool work_is_canceling(struct work_struct *work)
807 {
808 unsigned long data = atomic_long_read(&work->data);
809
810 return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
811 }
812
813 /*
814 * Policy functions. These define the policies on how the global worker
815 * pools are managed. Unless noted otherwise, these functions assume that
816 * they're being called with pool->lock held.
817 */
818
819 /*
820 * Need to wake up a worker? Called from anything but currently
821 * running workers.
822 *
823 * Note that, because unbound workers never contribute to nr_running, this
824 * function will always return %true for unbound pools as long as the
825 * worklist isn't empty.
826 */
static bool need_more_worker(struct worker_pool *pool)
828 {
829 return !list_empty(&pool->worklist) && !pool->nr_running;
830 }
831
832 /* Can I start working? Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
834 {
835 return pool->nr_idle;
836 }
837
838 /* Do I need to keep working? Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
840 {
841 return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
842 }
843
844 /* Do we need a new worker? Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
846 {
847 return need_more_worker(pool) && !may_start_working(pool);
848 }
849
850 /* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
852 {
853 bool managing = pool->flags & POOL_MANAGER_ACTIVE;
854 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
855 int nr_busy = pool->nr_workers - nr_idle;
856
857 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
858 }
859
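/*
 * Worked example for too_many_workers() above: with MAX_IDLE_WORKERS_RATIO of
 * 4 and 16 busy workers, the pool is considered over-provisioned once a 6th
 * worker goes idle, since (6 - 2) * 4 >= 16; i.e. roughly two idle workers
 * plus a quarter of the busy count are tolerated before idle culling starts.
 */
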
860 /**
861 * worker_set_flags - set worker flags and adjust nr_running accordingly
862 * @worker: self
863 * @flags: flags to set
864 *
865 * Set @flags in @worker->flags and adjust nr_running accordingly.
866 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
868 {
869 struct worker_pool *pool = worker->pool;
870
871 lockdep_assert_held(&pool->lock);
872
873 /* If transitioning into NOT_RUNNING, adjust nr_running. */
874 if ((flags & WORKER_NOT_RUNNING) &&
875 !(worker->flags & WORKER_NOT_RUNNING)) {
876 pool->nr_running--;
877 }
878
879 worker->flags |= flags;
880 }
881
882 /**
883 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
884 * @worker: self
885 * @flags: flags to clear
886 *
887 * Clear @flags in @worker->flags and adjust nr_running accordingly.
888 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
890 {
891 struct worker_pool *pool = worker->pool;
892 unsigned int oflags = worker->flags;
893
894 lockdep_assert_held(&pool->lock);
895
896 worker->flags &= ~flags;
897
898 /*
899 * If transitioning out of NOT_RUNNING, increment nr_running. Note
900 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask
901 * of multiple flags, not a single flag.
902 */
903 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
904 if (!(worker->flags & WORKER_NOT_RUNNING))
905 pool->nr_running++;
906 }
907
908 /* Return the first idle worker. Called with pool->lock held. */
static struct worker *first_idle_worker(struct worker_pool *pool)
910 {
911 if (unlikely(list_empty(&pool->idle_list)))
912 return NULL;
913
914 return list_first_entry(&pool->idle_list, struct worker, entry);
915 }
916
917 /**
918 * worker_enter_idle - enter idle state
919 * @worker: worker which is entering idle state
920 *
921 * @worker is entering idle state. Update stats and idle timer if
922 * necessary.
923 *
924 * LOCKING:
925 * raw_spin_lock_irq(pool->lock).
926 */
static void worker_enter_idle(struct worker *worker)
928 {
929 struct worker_pool *pool = worker->pool;
930
931 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
932 WARN_ON_ONCE(!list_empty(&worker->entry) &&
933 (worker->hentry.next || worker->hentry.pprev)))
934 return;
935
936 /* can't use worker_set_flags(), also called from create_worker() */
937 worker->flags |= WORKER_IDLE;
938 pool->nr_idle++;
939 worker->last_active = jiffies;
940
941 /* idle_list is LIFO */
942 list_add(&worker->entry, &pool->idle_list);
943
944 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
945 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
946
947 /* Sanity check nr_running. */
948 WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
949 }
950
951 /**
952 * worker_leave_idle - leave idle state
953 * @worker: worker which is leaving idle state
954 *
955 * @worker is leaving idle state. Update stats.
956 *
957 * LOCKING:
958 * raw_spin_lock_irq(pool->lock).
959 */
static void worker_leave_idle(struct worker *worker)
961 {
962 struct worker_pool *pool = worker->pool;
963
964 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
965 return;
966 worker_clr_flags(worker, WORKER_IDLE);
967 pool->nr_idle--;
968 list_del_init(&worker->entry);
969 }
970
971 /**
972 * find_worker_executing_work - find worker which is executing a work
973 * @pool: pool of interest
974 * @work: work to find worker for
975 *
976 * Find a worker which is executing @work on @pool by searching
977 * @pool->busy_hash which is keyed by the address of @work. For a worker
978 * to match, its current execution should match the address of @work and
979 * its work function. This is to avoid unwanted dependency between
980 * unrelated work executions through a work item being recycled while still
981 * being executed.
982 *
983 * This is a bit tricky. A work item may be freed once its execution
984 * starts and nothing prevents the freed area from being recycled for
985 * another work item. If the same work item address ends up being reused
986 * before the original execution finishes, workqueue will identify the
987 * recycled work item as currently executing and make it wait until the
988 * current execution finishes, introducing an unwanted dependency.
989 *
990 * This function checks the work item address and work function to avoid
991 * false positives. Note that this isn't complete as one may construct a
992 * work function which can introduce dependency onto itself through a
993 * recycled work item. Well, if somebody wants to shoot oneself in the
994 * foot that badly, there's only so much we can do, and if such deadlock
995 * actually occurs, it should be easy to locate the culprit work function.
996 *
997 * CONTEXT:
998 * raw_spin_lock_irq(pool->lock).
999 *
1000 * Return:
1001 * Pointer to worker which is executing @work if found, %NULL
1002 * otherwise.
1003 */
static struct worker *find_worker_executing_work(struct worker_pool *pool,
						 struct work_struct *work)
1006 {
1007 struct worker *worker;
1008
1009 hash_for_each_possible(pool->busy_hash, worker, hentry,
1010 (unsigned long)work)
1011 if (worker->current_work == work &&
1012 worker->current_func == work->func)
1013 return worker;
1014
1015 return NULL;
1016 }
1017
1018 /**
1019 * move_linked_works - move linked works to a list
1020 * @work: start of series of works to be scheduled
1021 * @head: target list to append @work to
1022 * @nextp: out parameter for nested worklist walking
1023 *
1024 * Schedule linked works starting from @work to @head. Work series to be
1025 * scheduled starts at @work and includes any consecutive work with
1026 * WORK_STRUCT_LINKED set in its predecessor. See assign_work() for details on
1027 * @nextp.
1028 *
1029 * CONTEXT:
1030 * raw_spin_lock_irq(pool->lock).
1031 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
1034 {
1035 struct work_struct *n;
1036
1037 /*
1038 * Linked worklist will always end before the end of the list,
1039 * use NULL for list head.
1040 */
1041 list_for_each_entry_safe_from(work, n, NULL, entry) {
1042 list_move_tail(&work->entry, head);
1043 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1044 break;
1045 }
1046
1047 /*
1048 * If we're already inside safe list traversal and have moved
1049 * multiple works to the scheduled queue, the next position
1050 * needs to be updated.
1051 */
1052 if (nextp)
1053 *nextp = n;
1054 }
1055
1056 /**
1057 * assign_work - assign a work item and its linked work items to a worker
1058 * @work: work to assign
1059 * @worker: worker to assign to
1060 * @nextp: out parameter for nested worklist walking
1061 *
1062 * Assign @work and its linked work items to @worker. If @work is already being
1063 * executed by another worker in the same pool, it'll be punted there.
1064 *
1065 * If @nextp is not NULL, it's updated to point to the next work of the last
1066 * scheduled work. This allows assign_work() to be nested inside
1067 * list_for_each_entry_safe().
1068 *
1069 * Returns %true if @work was successfully assigned to @worker. %false if @work
1070 * was punted to another worker already executing it.
1071 */
static bool assign_work(struct work_struct *work, struct worker *worker,
			struct work_struct **nextp)
1074 {
1075 struct worker_pool *pool = worker->pool;
1076 struct worker *collision;
1077
1078 lockdep_assert_held(&pool->lock);
1079
1080 /*
1081 * A single work shouldn't be executed concurrently by multiple workers.
1082 * __queue_work() ensures that @work doesn't jump to a different pool
1083 * while still running in the previous pool. Here, we should ensure that
1084 * @work is not executed concurrently by multiple workers from the same
1085 * pool. Check whether anyone is already processing the work. If so,
1086 * defer the work to the currently executing one.
1087 */
1088 collision = find_worker_executing_work(pool, work);
1089 if (unlikely(collision)) {
1090 move_linked_works(work, &collision->scheduled, nextp);
1091 return false;
1092 }
1093
1094 move_linked_works(work, &worker->scheduled, nextp);
1095 return true;
1096 }
1097
1098 /**
1099 * kick_pool - wake up an idle worker if necessary
1100 * @pool: pool to kick
1101 *
1102 * @pool may have pending work items. Wake up worker if necessary. Returns
1103 * whether a worker was woken up.
1104 */
static bool kick_pool(struct worker_pool *pool)
1106 {
1107 struct worker *worker = first_idle_worker(pool);
1108 struct task_struct *p;
1109
1110 lockdep_assert_held(&pool->lock);
1111
1112 if (!need_more_worker(pool) || !worker)
1113 return false;
1114
1115 p = worker->task;
1116
1117 #ifdef CONFIG_SMP
1118 /*
1119 * Idle @worker is about to execute @work and waking up provides an
1120 * opportunity to migrate @worker at a lower cost by setting the task's
1121 * wake_cpu field. Let's see if we want to move @worker to improve
1122 * execution locality.
1123 *
1124 * We're waking the worker that went idle the latest and there's some
1125 * chance that @worker is marked idle but hasn't gone off CPU yet. If
1126 * so, setting the wake_cpu won't do anything. As this is a best-effort
1127 * optimization and the race window is narrow, let's leave as-is for
1128 * now. If this becomes pronounced, we can skip over workers which are
1129 * still on cpu when picking an idle worker.
1130 *
1131 * If @pool has non-strict affinity, @worker might have ended up outside
1132 * its affinity scope. Repatriate.
1133 */
1134 if (!pool->attrs->affn_strict &&
1135 !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
1136 struct work_struct *work = list_first_entry(&pool->worklist,
1137 struct work_struct, entry);
1138 int wake_cpu = cpumask_any_and_distribute(pool->attrs->__pod_cpumask,
1139 cpu_online_mask);
1140 if (wake_cpu < nr_cpu_ids) {
1141 p->wake_cpu = wake_cpu;
1142 get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
1143 }
1144 }
1145 #endif
1146 wake_up_process(p);
1147 return true;
1148 }
1149
1150 #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
1151
1152 /*
1153 * Concurrency-managed per-cpu work items that hog CPU for longer than
1154 * wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism,
1155 * which prevents them from stalling other concurrency-managed work items. If a
1156 * work function keeps triggering this mechanism, it's likely that the work item
1157 * should be using an unbound workqueue instead.
1158 *
1159 * wq_cpu_intensive_report() tracks work functions which trigger such conditions
 * and reports them so that they can be examined and converted to use unbound
1161 * workqueues as appropriate. To avoid flooding the console, each violating work
1162 * function is tracked and reported with exponential backoff.
1163 */
1164 #define WCI_MAX_ENTS 128
1165
1166 struct wci_ent {
1167 work_func_t func;
1168 atomic64_t cnt;
1169 struct hlist_node hash_node;
1170 };
1171
1172 static struct wci_ent wci_ents[WCI_MAX_ENTS];
1173 static int wci_nr_ents;
1174 static DEFINE_RAW_SPINLOCK(wci_lock);
1175 static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS));
1176
static struct wci_ent *wci_find_ent(work_func_t func)
1178 {
1179 struct wci_ent *ent;
1180
1181 hash_for_each_possible_rcu(wci_hash, ent, hash_node,
1182 (unsigned long)func) {
1183 if (ent->func == func)
1184 return ent;
1185 }
1186 return NULL;
1187 }
1188
static void wq_cpu_intensive_report(work_func_t func)
1190 {
1191 struct wci_ent *ent;
1192
1193 restart:
1194 ent = wci_find_ent(func);
1195 if (ent) {
1196 u64 cnt;
1197
1198 /*
1199 * Start reporting from the fourth time and back off
1200 * exponentially.
1201 */
1202 cnt = atomic64_inc_return_relaxed(&ent->cnt);
1203 if (cnt >= 4 && is_power_of_2(cnt))
1204 printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n",
1205 ent->func, wq_cpu_intensive_thresh_us,
1206 atomic64_read(&ent->cnt));
1207 return;
1208 }
1209
1210 /*
	 * @func is a new violation. Allocate a new entry for it. If wci_ents[]
1212 * is exhausted, something went really wrong and we probably made enough
1213 * noise already.
1214 */
1215 if (wci_nr_ents >= WCI_MAX_ENTS)
1216 return;
1217
1218 raw_spin_lock(&wci_lock);
1219
1220 if (wci_nr_ents >= WCI_MAX_ENTS) {
1221 raw_spin_unlock(&wci_lock);
1222 return;
1223 }
1224
1225 if (wci_find_ent(func)) {
1226 raw_spin_unlock(&wci_lock);
1227 goto restart;
1228 }
1229
1230 ent = &wci_ents[wci_nr_ents++];
1231 ent->func = func;
1232 atomic64_set(&ent->cnt, 1);
1233 hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func);
1234
1235 raw_spin_unlock(&wci_lock);
1236 }
1237
1238 #else /* CONFIG_WQ_CPU_INTENSIVE_REPORT */
static void wq_cpu_intensive_report(work_func_t func) {}
1240 #endif /* CONFIG_WQ_CPU_INTENSIVE_REPORT */
1241
1242 /**
1243 * wq_worker_running - a worker is running again
1244 * @task: task waking up
1245 *
1246 * This function is called when a worker returns from schedule()
1247 */
void wq_worker_running(struct task_struct *task)
1249 {
1250 struct worker *worker = kthread_data(task);
1251
1252 if (!READ_ONCE(worker->sleeping))
1253 return;
1254
1255 /*
1256 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
1257 * and the nr_running increment below, we may ruin the nr_running reset
1258 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
1259 * pool. Protect against such race.
1260 */
1261 preempt_disable();
1262 if (!(worker->flags & WORKER_NOT_RUNNING))
1263 worker->pool->nr_running++;
1264 preempt_enable();
1265
1266 /*
1267 * CPU intensive auto-detection cares about how long a work item hogged
1268 * CPU without sleeping. Reset the starting timestamp on wakeup.
1269 */
1270 worker->current_at = worker->task->se.sum_exec_runtime;
1271
1272 WRITE_ONCE(worker->sleeping, 0);
1273 }
1274
1275 /**
1276 * wq_worker_sleeping - a worker is going to sleep
1277 * @task: task going to sleep
1278 *
1279 * This function is called from schedule() when a busy worker is
1280 * going to sleep.
1281 */
void wq_worker_sleeping(struct task_struct *task)
1283 {
1284 struct worker *worker = kthread_data(task);
1285 struct worker_pool *pool;
1286
1287 /*
1288 * Rescuers, which may not have all the fields set up like normal
1289 * workers, also reach here, let's not access anything before
1290 * checking NOT_RUNNING.
1291 */
1292 if (worker->flags & WORKER_NOT_RUNNING)
1293 return;
1294
1295 pool = worker->pool;
1296
1297 /* Return if preempted before wq_worker_running() was reached */
1298 if (READ_ONCE(worker->sleeping))
1299 return;
1300
1301 WRITE_ONCE(worker->sleeping, 1);
1302 raw_spin_lock_irq(&pool->lock);
1303
1304 /*
1305 * Recheck in case unbind_workers() preempted us. We don't
1306 * want to decrement nr_running after the worker is unbound
1307 * and nr_running has been reset.
1308 */
1309 if (worker->flags & WORKER_NOT_RUNNING) {
1310 raw_spin_unlock_irq(&pool->lock);
1311 return;
1312 }
1313
1314 pool->nr_running--;
1315 if (kick_pool(pool))
1316 worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;
1317
1318 raw_spin_unlock_irq(&pool->lock);
1319 }
1320
1321 /**
1322 * wq_worker_tick - a scheduler tick occurred while a kworker is running
1323 * @task: task currently running
1324 *
1325 * Called from scheduler_tick(). We're in the IRQ context and the current
1326 * worker's fields which follow the 'K' locking rule can be accessed safely.
1327 */
void wq_worker_tick(struct task_struct *task)
1329 {
1330 struct worker *worker = kthread_data(task);
1331 struct pool_workqueue *pwq = worker->current_pwq;
1332 struct worker_pool *pool = worker->pool;
1333
1334 if (!pwq)
1335 return;
1336
1337 pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC;
1338
1339 if (!wq_cpu_intensive_thresh_us)
1340 return;
1341
1342 /*
1343 * If the current worker is concurrency managed and hogged the CPU for
1344 * longer than wq_cpu_intensive_thresh_us, it's automatically marked
1345 * CPU_INTENSIVE to avoid stalling other concurrency-managed work items.
1346 *
	 * @worker->sleeping being set means that @worker is in the process of
1348 * switching out voluntarily and won't be contributing to
1349 * @pool->nr_running until it wakes up. As wq_worker_sleeping() also
1350 * decrements ->nr_running, setting CPU_INTENSIVE here can lead to
1351 * double decrements. The task is releasing the CPU anyway. Let's skip.
1352 * We probably want to make this prettier in the future.
1353 */
1354 if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
1355 worker->task->se.sum_exec_runtime - worker->current_at <
1356 wq_cpu_intensive_thresh_us * NSEC_PER_USEC)
1357 return;
1358
1359 raw_spin_lock(&pool->lock);
1360
1361 worker_set_flags(worker, WORKER_CPU_INTENSIVE);
1362 wq_cpu_intensive_report(worker->current_func);
1363 pwq->stats[PWQ_STAT_CPU_INTENSIVE]++;
1364
1365 if (kick_pool(pool))
1366 pwq->stats[PWQ_STAT_CM_WAKEUP]++;
1367
1368 raw_spin_unlock(&pool->lock);
1369 }
1370
1371 /**
1372 * wq_worker_last_func - retrieve worker's last work function
1373 * @task: Task to retrieve last work function of.
1374 *
1375 * Determine the last function a worker executed. This is called from
1376 * the scheduler to get a worker's last known identity.
1377 *
1378 * CONTEXT:
1379 * raw_spin_lock_irq(rq->lock)
1380 *
1381 * This function is called during schedule() when a kworker is going
1382 * to sleep. It's used by psi to identify aggregation workers during
1383 * dequeuing, to allow periodic aggregation to shut-off when that
1384 * worker is the last task in the system or cgroup to go to sleep.
1385 *
1386 * As this function doesn't involve any workqueue-related locking, it
1387 * only returns stable values when called from inside the scheduler's
1388 * queuing and dequeuing paths, when @task, which must be a kworker,
1389 * is guaranteed to not be processing any works.
1390 *
1391 * Return:
1392 * The last work function %current executed as a worker, NULL if it
1393 * hasn't executed any work yet.
1394 */
work_func_t wq_worker_last_func(struct task_struct *task)
1396 {
1397 struct worker *worker = kthread_data(task);
1398
1399 return worker->last_func;
1400 }
1401
1402 /**
1403 * get_pwq - get an extra reference on the specified pool_workqueue
1404 * @pwq: pool_workqueue to get
1405 *
1406 * Obtain an extra reference on @pwq. The caller should guarantee that
1407 * @pwq has positive refcnt and be holding the matching pool->lock.
1408 */
static void get_pwq(struct pool_workqueue *pwq)
1410 {
1411 lockdep_assert_held(&pwq->pool->lock);
1412 WARN_ON_ONCE(pwq->refcnt <= 0);
1413 pwq->refcnt++;
1414 }
1415
1416 /**
1417 * put_pwq - put a pool_workqueue reference
1418 * @pwq: pool_workqueue to put
1419 *
1420 * Drop a reference of @pwq. If its refcnt reaches zero, schedule its
1421 * destruction. The caller should be holding the matching pool->lock.
1422 */
static void put_pwq(struct pool_workqueue *pwq)
1424 {
1425 lockdep_assert_held(&pwq->pool->lock);
1426 if (likely(--pwq->refcnt))
1427 return;
1428 /*
1429 * @pwq can't be released under pool->lock, bounce to a dedicated
1430 * kthread_worker to avoid A-A deadlocks.
1431 */
1432 kthread_queue_work(pwq_release_worker, &pwq->release_work);
1433 }
1434
1435 /**
1436 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1437 * @pwq: pool_workqueue to put (can be %NULL)
1438 *
1439 * put_pwq() with locking. This function also allows %NULL @pwq.
1440 */
static void put_pwq_unlocked(struct pool_workqueue *pwq)
1442 {
1443 if (pwq) {
1444 /*
1445 * As both pwqs and pools are RCU protected, the
1446 * following lock operations are safe.
1447 */
1448 raw_spin_lock_irq(&pwq->pool->lock);
1449 put_pwq(pwq);
1450 raw_spin_unlock_irq(&pwq->pool->lock);
1451 }
1452 }
1453
static void pwq_activate_inactive_work(struct work_struct *work)
1455 {
1456 struct pool_workqueue *pwq = get_work_pwq(work);
1457
1458 trace_workqueue_activate_work(work);
1459 if (list_empty(&pwq->pool->worklist))
1460 pwq->pool->watchdog_ts = jiffies;
1461 move_linked_works(work, &pwq->pool->worklist, NULL);
1462 __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
1463 pwq->nr_active++;
1464 }
1465
static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
1467 {
1468 struct work_struct *work = list_first_entry(&pwq->inactive_works,
1469 struct work_struct, entry);
1470
1471 pwq_activate_inactive_work(work);
1472 }
1473
1474 /**
1475 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1476 * @pwq: pwq of interest
1477 * @work_data: work_data of work which left the queue
1478 *
1479 * A work either has completed or is removed from pending queue,
1480 * decrement nr_in_flight of its pwq and handle workqueue flushing.
1481 *
1482 * CONTEXT:
1483 * raw_spin_lock_irq(pool->lock).
1484 */
static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
1486 {
1487 int color = get_work_color(work_data);
1488
1489 if (!(work_data & WORK_STRUCT_INACTIVE)) {
1490 pwq->nr_active--;
1491 if (!list_empty(&pwq->inactive_works)) {
1492 /* one down, submit an inactive one */
1493 if (pwq->nr_active < pwq->max_active)
1494 pwq_activate_first_inactive(pwq);
1495 }
1496 }
1497
1498 pwq->nr_in_flight[color]--;
1499
1500 /* is flush in progress and are we at the flushing tip? */
1501 if (likely(pwq->flush_color != color))
1502 goto out_put;
1503
1504 /* are there still in-flight works? */
1505 if (pwq->nr_in_flight[color])
1506 goto out_put;
1507
1508 /* this pwq is done, clear flush_color */
1509 pwq->flush_color = -1;
1510
1511 /*
1512 * If this was the last pwq, wake up the first flusher. It
1513 * will handle the rest.
1514 */
1515 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1516 complete(&pwq->wq->first_flusher->done);
1517 out_put:
1518 put_pwq(pwq);
1519 }
1520
1521 /**
1522 * try_to_grab_pending - steal work item from worklist and disable irq
1523 * @work: work item to steal
1524 * @is_dwork: @work is a delayed_work
1525 * @flags: place to store irq state
1526 *
1527 * Try to grab PENDING bit of @work. This function can handle @work in any
1528 * stable state - idle, on timer or on worklist.
1529 *
1530 * Return:
1531 *
1532 * ======== ================================================================
1533 * 1 if @work was pending and we successfully stole PENDING
1534 * 0 if @work was idle and we claimed PENDING
1535 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry
1536 * -ENOENT if someone else is canceling @work, this state may persist
1537 * for arbitrarily long
1538 * ======== ================================================================
1539 *
1540 * Note:
1541 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
1542 * interrupted while holding PENDING and @work off queue, irq must be
1543 * disabled on entry. This, combined with delayed_work->timer being
 * irqsafe, ensures that we return -EAGAIN for a finite, short period of time.
1545 *
1546 * On successful return, >= 0, irq is disabled and the caller is
1547 * responsible for releasing it using local_irq_restore(*@flags).
1548 *
1549 * This function is safe to call from any context including IRQ handler.
1550 */
static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
			       unsigned long *flags)
1553 {
1554 struct worker_pool *pool;
1555 struct pool_workqueue *pwq;
1556
1557 local_irq_save(*flags);
1558
1559 /* try to steal the timer if it exists */
1560 if (is_dwork) {
1561 struct delayed_work *dwork = to_delayed_work(work);
1562
1563 /*
1564 * dwork->timer is irqsafe. If del_timer() fails, it's
1565 * guaranteed that the timer is not queued anywhere and not
1566 * running on the local CPU.
1567 */
1568 if (likely(del_timer(&dwork->timer)))
1569 return 1;
1570 }
1571
1572 /* try to claim PENDING the normal way */
1573 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1574 return 0;
1575
1576 rcu_read_lock();
1577 /*
1578 * The queueing is in progress, or it is already queued. Try to
1579 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1580 */
1581 pool = get_work_pool(work);
1582 if (!pool)
1583 goto fail;
1584
1585 raw_spin_lock(&pool->lock);
1586 /*
1587 * work->data is guaranteed to point to pwq only while the work
1588 * item is queued on pwq->wq, and both updating work->data to point
1589 * to pwq on queueing and to pool on dequeueing are done under
1590 * pwq->pool->lock. This in turn guarantees that, if work->data
1591 * points to pwq which is associated with a locked pool, the work
1592 * item is currently queued on that pool.
1593 */
1594 pwq = get_work_pwq(work);
1595 if (pwq && pwq->pool == pool) {
1596 debug_work_deactivate(work);
1597
1598 /*
1599 * A cancelable inactive work item must be in the
1600 * pwq->inactive_works since a queued barrier can't be
1601 * canceled (see the comments in insert_wq_barrier()).
1602 *
1603 * An inactive work item cannot be grabbed directly because
1604 * it might have linked barrier work items which, if left
1605 * on the inactive_works list, will confuse pwq->nr_active
1606 * management later on and cause stall. Make sure the work
1607 * item is activated before grabbing.
1608 */
1609 if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
1610 pwq_activate_inactive_work(work);
1611
1612 list_del_init(&work->entry);
1613 pwq_dec_nr_in_flight(pwq, *work_data_bits(work));
1614
1615 /* work->data points to pwq iff queued, point to pool */
1616 set_work_pool_and_keep_pending(work, pool->id);
1617
1618 raw_spin_unlock(&pool->lock);
1619 rcu_read_unlock();
1620 return 1;
1621 }
1622 raw_spin_unlock(&pool->lock);
1623 fail:
1624 rcu_read_unlock();
1625 local_irq_restore(*flags);
1626 if (work_is_canceling(work))
1627 return -ENOENT;
1628 cpu_relax();
1629 return -EAGAIN;
1630 }
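
/*
 * Illustrative sketch, not part of the kernel source: callers in this file
 * (e.g. mod_delayed_work_on() below) consume the return values by spinning
 * on -EAGAIN and treating -ENOENT as "someone else is already canceling":
 *
 *	do {
 *		ret = try_to_grab_pending(work, is_dwork, &flags);
 *	} while (unlikely(ret == -EAGAIN));
 */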
1631
1632 /**
1633 * insert_work - insert a work into a pool
1634 * @pwq: pwq @work belongs to
1635 * @work: work to insert
1636 * @head: insertion point
1637 * @extra_flags: extra WORK_STRUCT_* flags to set
1638 *
1639 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
1640 * work_struct flags.
1641 *
1642 * CONTEXT:
1643 * raw_spin_lock_irq(pool->lock).
1644 */
1645 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1646 struct list_head *head, unsigned int extra_flags)
1647 {
1648 debug_work_activate(work);
1649
1650 /* record the work call stack in order to print it in KASAN reports */
1651 kasan_record_aux_stack_noalloc(work);
1652
1653 /* we own @work, set data and link */
1654 set_work_pwq(work, pwq, extra_flags);
1655 list_add_tail(&work->entry, head);
1656 get_pwq(pwq);
1657 }
1658
1659 /*
1660 * Test whether @work is being queued from another work executing on the
1661 * same workqueue.
1662 */
1663 static bool is_chained_work(struct workqueue_struct *wq)
1664 {
1665 struct worker *worker;
1666
1667 worker = current_wq_worker();
1668 /*
1669 * Return %true iff I'm a worker executing a work item on @wq. If
1670 * I'm @worker, it's safe to dereference it without locking.
1671 */
1672 return worker && worker->current_pwq->wq == wq;
1673 }
1674
1675 /*
1676 * When queueing an unbound work item to a wq, prefer local CPU if allowed
1677 * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to
1678 * avoid perturbing sensitive tasks.
1679 */
1680 static int wq_select_unbound_cpu(int cpu)
1681 {
1682 int new_cpu;
1683
1684 if (likely(!wq_debug_force_rr_cpu)) {
1685 if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
1686 return cpu;
1687 } else {
1688 pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n");
1689 }
1690
1691 new_cpu = __this_cpu_read(wq_rr_cpu_last);
1692 new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
1693 if (unlikely(new_cpu >= nr_cpu_ids)) {
1694 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
1695 if (unlikely(new_cpu >= nr_cpu_ids))
1696 return cpu;
1697 }
1698 __this_cpu_write(wq_rr_cpu_last, new_cpu);
1699
1700 return new_cpu;
1701 }
1702
1703 static void __queue_work(int cpu, struct workqueue_struct *wq,
1704 struct work_struct *work)
1705 {
1706 struct pool_workqueue *pwq;
1707 struct worker_pool *last_pool, *pool;
1708 unsigned int work_flags;
1709 unsigned int req_cpu = cpu;
1710
1711 /*
1712 * While a work item is PENDING && off queue, a task trying to
1713 * steal the PENDING will busy-loop waiting for it to either get
1714 * queued or lose PENDING. Grabbing PENDING and queueing should
1715 * happen with IRQ disabled.
1716 */
1717 lockdep_assert_irqs_disabled();
1718
1719
1720 /*
1721 * For a draining wq, only works from the same workqueue are
1722 * allowed. The __WQ_DESTROYING helps to spot the issue that
1723 * queues a new work item to a wq after destroy_workqueue(wq).
1724 */
1725 if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) &&
1726 WARN_ON_ONCE(!is_chained_work(wq))))
1727 return;
1728 rcu_read_lock();
1729 retry:
1730 /* pwq which will be used unless @work is executing elsewhere */
1731 if (req_cpu == WORK_CPU_UNBOUND) {
1732 if (wq->flags & WQ_UNBOUND)
1733 cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1734 else
1735 cpu = raw_smp_processor_id();
1736 }
1737
1738 pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu));
1739 pool = pwq->pool;
1740
1741 /*
1742 * If @work was previously on a different pool, it might still be
1743 * running there, in which case the work needs to be queued on that
1744 * pool to guarantee non-reentrancy.
1745 */
1746 last_pool = get_work_pool(work);
1747 if (last_pool && last_pool != pool) {
1748 struct worker *worker;
1749
1750 raw_spin_lock(&last_pool->lock);
1751
1752 worker = find_worker_executing_work(last_pool, work);
1753
1754 if (worker && worker->current_pwq->wq == wq) {
1755 pwq = worker->current_pwq;
1756 pool = pwq->pool;
1757 WARN_ON_ONCE(pool != last_pool);
1758 } else {
1759 /* meh... not running there, queue here */
1760 raw_spin_unlock(&last_pool->lock);
1761 raw_spin_lock(&pool->lock);
1762 }
1763 } else {
1764 raw_spin_lock(&pool->lock);
1765 }
1766
1767 /*
1768 * pwq is determined and locked. For unbound pools, we could have raced
1769 * with pwq release and it could already be dead. If its refcnt is zero,
1770 * repeat pwq selection. Note that an unbound pwq never dies without
1771 * another pwq replacing it in cpu_pwq or while work items are executing
1772 * on it, so retrying is guaranteed to make forward progress.
1773 */
1774 if (unlikely(!pwq->refcnt)) {
1775 if (wq->flags & WQ_UNBOUND) {
1776 raw_spin_unlock(&pool->lock);
1777 cpu_relax();
1778 goto retry;
1779 }
1780 /* oops */
1781 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1782 wq->name, cpu);
1783 }
1784
1785 /* pwq determined, queue */
1786 trace_workqueue_queue_work(req_cpu, pwq, work);
1787
1788 if (WARN_ON(!list_empty(&work->entry)))
1789 goto out;
1790
1791 pwq->nr_in_flight[pwq->work_color]++;
1792 work_flags = work_color_to_flags(pwq->work_color);
1793
1794 if (likely(pwq->nr_active < pwq->max_active)) {
1795 if (list_empty(&pool->worklist))
1796 pool->watchdog_ts = jiffies;
1797
1798 trace_workqueue_activate_work(work);
1799 pwq->nr_active++;
1800 insert_work(pwq, work, &pool->worklist, work_flags);
1801 kick_pool(pool);
1802 } else {
1803 work_flags |= WORK_STRUCT_INACTIVE;
1804 insert_work(pwq, work, &pwq->inactive_works, work_flags);
1805 }
1806
1807 out:
1808 raw_spin_unlock(&pool->lock);
1809 rcu_read_unlock();
1810 }
1811
1812 /**
1813 * queue_work_on - queue work on specific cpu
1814 * @cpu: CPU number to execute work on
1815 * @wq: workqueue to use
1816 * @work: work to queue
1817 *
1818 * We queue the work to a specific CPU; the caller must ensure that
1819 * the CPU can't go away. If the caller fails to ensure that, the work
1820 * may end up executing on a randomly chosen CPU. Note well that
1821 * callers specifying a CPU that has never been online will get a
1822 * splat.
1823 *
1824 * Return: %false if @work was already on a queue, %true otherwise.
1825 */
1826 bool queue_work_on(int cpu, struct workqueue_struct *wq,
1827 struct work_struct *work)
1828 {
1829 bool ret = false;
1830 unsigned long flags;
1831
1832 local_irq_save(flags);
1833
1834 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1835 __queue_work(cpu, wq, work);
1836 ret = true;
1837 }
1838
1839 local_irq_restore(flags);
1840 return ret;
1841 }
1842 EXPORT_SYMBOL(queue_work_on);
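
/*
 * Illustrative sketch, not part of the kernel source: a caller wanting the
 * work to run on the CPU it is currently on can pin that CPU with get_cpu()
 * so it can't go offline between the lookup and the queueing. "my_work" is
 * a hypothetical work item initialized elsewhere with INIT_WORK().
 *
 *	static void queue_on_this_cpu(void)
 *	{
 *		int cpu = get_cpu();	// disables preemption
 *
 *		queue_work_on(cpu, system_wq, &my_work);
 *		put_cpu();
 *	}
 */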
1843
1844 /**
1845 * select_numa_node_cpu - Select a CPU based on NUMA node
1846 * @node: NUMA node ID that we want to select a CPU from
1847 *
1848 * This function will attempt to find a "random" cpu available on a given
1849 * node. If there are no CPUs available on the given node it will return
1850 * WORK_CPU_UNBOUND indicating that we should just schedule to any
1851 * available CPU if we need to schedule this work.
1852 */
1853 static int select_numa_node_cpu(int node)
1854 {
1855 int cpu;
1856
1857 /* Delay binding to CPU if node is invalid or offline */
1858 if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
1859 return WORK_CPU_UNBOUND;
1860
1861 /* Use local node/cpu if we are already there */
1862 cpu = raw_smp_processor_id();
1863 if (node == cpu_to_node(cpu))
1864 return cpu;
1865
1866 /* Use "random", otherwise known as "first", online CPU of node */
1867 cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
1868
1869 /* If CPU is valid return that, otherwise just defer */
1870 return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
1871 }
1872
1873 /**
1874 * queue_work_node - queue work on a "random" cpu for a given NUMA node
1875 * @node: NUMA node that we are targeting the work for
1876 * @wq: workqueue to use
1877 * @work: work to queue
1878 *
1879 * We queue the work to a "random" CPU within a given NUMA node. The basic
1880 * idea here is to provide a way to somehow associate work with a given
1881 * NUMA node.
1882 *
1883 * This function will only make a best effort attempt at getting this onto
1884 * the right NUMA node. If no node is requested or the requested node is
1885 * offline then we just fall back to standard queue_work behavior.
1886 *
1887 * Currently the "random" CPU ends up being the first available CPU in the
1888 * intersection of cpu_online_mask and the cpumask of the node, unless we
1889 * are running on the node. In that case we just use the current CPU.
1890 *
1891 * Return: %false if @work was already on a queue, %true otherwise.
1892 */
1893 bool queue_work_node(int node, struct workqueue_struct *wq,
1894 struct work_struct *work)
1895 {
1896 unsigned long flags;
1897 bool ret = false;
1898
1899 /*
1900 * This current implementation is specific to unbound workqueues.
1901 * Specifically we only return the first available CPU for a given
1902 * node instead of cycling through individual CPUs within the node.
1903 *
1904 * If this is used with a per-cpu workqueue then the logic in
1905 * workqueue_select_cpu_near would need to be updated to allow for
1906 * some round robin type logic.
1907 */
1908 WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
1909
1910 local_irq_save(flags);
1911
1912 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1913 int cpu = select_numa_node_cpu(node);
1914
1915 __queue_work(cpu, wq, work);
1916 ret = true;
1917 }
1918
1919 local_irq_restore(flags);
1920 return ret;
1921 }
1922 EXPORT_SYMBOL_GPL(queue_work_node);
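
/*
 * Illustrative sketch, not part of the kernel source: queueing NUMA-local
 * work for a device. "my_unbound_wq" is a hypothetical unbound workqueue
 * created with alloc_workqueue("my_unbound", WQ_UNBOUND, 0) and "my_work"
 * a work item initialized with INIT_WORK().
 *
 *	static void queue_near_device(struct device *dev)
 *	{
 *		queue_work_node(dev_to_node(dev), my_unbound_wq, &my_work);
 *	}
 */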
1923
1924 void delayed_work_timer_fn(struct timer_list *t)
1925 {
1926 struct delayed_work *dwork = from_timer(dwork, t, timer);
1927
1928 /* should have been called from irqsafe timer with irq already off */
1929 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1930 }
1931 EXPORT_SYMBOL(delayed_work_timer_fn);
1932
1933 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1934 struct delayed_work *dwork, unsigned long delay)
1935 {
1936 struct timer_list *timer = &dwork->timer;
1937 struct work_struct *work = &dwork->work;
1938
1939 WARN_ON_ONCE(!wq);
1940 WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
1941 WARN_ON_ONCE(timer_pending(timer));
1942 WARN_ON_ONCE(!list_empty(&work->entry));
1943
1944 /*
1945 * If @delay is 0, queue @dwork->work immediately. This is for
1946 * both optimization and correctness. The earliest @timer can
1947 * expire is on the closest next tick and delayed_work users depend
1948 * on that there's no such delay when @delay is 0.
1949 */
1950 if (!delay) {
1951 __queue_work(cpu, wq, &dwork->work);
1952 return;
1953 }
1954
1955 dwork->wq = wq;
1956 dwork->cpu = cpu;
1957 timer->expires = jiffies + delay;
1958
1959 if (unlikely(cpu != WORK_CPU_UNBOUND))
1960 add_timer_on(timer, cpu);
1961 else
1962 add_timer(timer);
1963 }
1964
1965 /**
1966 * queue_delayed_work_on - queue work on specific CPU after delay
1967 * @cpu: CPU number to execute work on
1968 * @wq: workqueue to use
1969 * @dwork: work to queue
1970 * @delay: number of jiffies to wait before queueing
1971 *
1972 * Return: %false if @work was already on a queue, %true otherwise. If
1973 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1974 * execution.
1975 */
1976 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1977 struct delayed_work *dwork, unsigned long delay)
1978 {
1979 struct work_struct *work = &dwork->work;
1980 bool ret = false;
1981 unsigned long flags;
1982
1983 /* read the comment in __queue_work() */
1984 local_irq_save(flags);
1985
1986 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1987 __queue_delayed_work(cpu, wq, dwork, delay);
1988 ret = true;
1989 }
1990
1991 local_irq_restore(flags);
1992 return ret;
1993 }
1994 EXPORT_SYMBOL(queue_delayed_work_on);
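
/*
 * Illustrative sketch, not part of the kernel source: arm a hypothetical
 * "my_dwork" (set up with INIT_DELAYED_WORK()) to run roughly a second
 * later on a given CPU. Passing a zero delay queues it for immediate
 * execution, as documented above.
 *
 *	static void arm_poll(int cpu)
 *	{
 *		queue_delayed_work_on(cpu, system_wq, &my_dwork,
 *				      msecs_to_jiffies(1000));
 *	}
 */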
1995
1996 /**
1997 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1998 * @cpu: CPU number to execute work on
1999 * @wq: workqueue to use
2000 * @dwork: work to queue
2001 * @delay: number of jiffies to wait before queueing
2002 *
2003 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
2004 * modify @dwork's timer so that it expires after @delay. If @delay is
2005 * zero, @work is guaranteed to be scheduled immediately regardless of its
2006 * current state.
2007 *
2008 * Return: %false if @dwork was idle and queued, %true if @dwork was
2009 * pending and its timer was modified.
2010 *
2011 * This function is safe to call from any context including IRQ handler.
2012 * See try_to_grab_pending() for details.
2013 */
2014 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
2015 struct delayed_work *dwork, unsigned long delay)
2016 {
2017 unsigned long flags;
2018 int ret;
2019
2020 do {
2021 ret = try_to_grab_pending(&dwork->work, true, &flags);
2022 } while (unlikely(ret == -EAGAIN));
2023
2024 if (likely(ret >= 0)) {
2025 __queue_delayed_work(cpu, wq, dwork, delay);
2026 local_irq_restore(flags);
2027 }
2028
2029 /* -ENOENT from try_to_grab_pending() becomes %true */
2030 return ret;
2031 }
2032 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
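
/*
 * Illustrative sketch, not part of the kernel source: because each call
 * simply pushes the timer out, mod_delayed_work_on() works well for
 * debouncing - the hypothetical "my_dwork" handler runs only once events
 * have been quiet for 100ms, however often this is called.
 *
 *	static void note_event(void)
 *	{
 *		mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &my_dwork,
 *				    msecs_to_jiffies(100));
 *	}
 */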
2033
2034 static void rcu_work_rcufn(struct rcu_head *rcu)
2035 {
2036 struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
2037
2038 /* read the comment in __queue_work() */
2039 local_irq_disable();
2040 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
2041 local_irq_enable();
2042 }
2043
2044 /**
2045 * queue_rcu_work - queue work after a RCU grace period
2046 * @wq: workqueue to use
2047 * @rwork: work to queue
2048 *
2049 * Return: %false if @rwork was already pending, %true otherwise. Note
2050 * that a full RCU grace period is guaranteed only after a %true return.
2051 * While @rwork is guaranteed to be executed after a %false return, the
2052 * execution may happen before a full RCU grace period has passed.
2053 */
2054 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
2055 {
2056 struct work_struct *work = &rwork->work;
2057
2058 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
2059 rwork->wq = wq;
2060 call_rcu_hurry(&rwork->rcu, rcu_work_rcufn);
2061 return true;
2062 }
2063
2064 return false;
2065 }
2066 EXPORT_SYMBOL(queue_rcu_work);
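
/*
 * Illustrative sketch, not part of the kernel source: deferring teardown of
 * an RCU-protected object until after a grace period. "struct my_obj" and
 * "my_obj_free_fn" are hypothetical; the rcu_work is set up with
 * INIT_RCU_WORK() before queueing.
 *
 *	struct my_obj {
 *		struct rcu_work rwork;
 *		// ... payload ...
 *	};
 *
 *	static void my_obj_release(struct my_obj *obj)
 *	{
 *		INIT_RCU_WORK(&obj->rwork, my_obj_free_fn);
 *		queue_rcu_work(system_wq, &obj->rwork);
 *	}
 */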
2067
2068 static struct worker *alloc_worker(int node)
2069 {
2070 struct worker *worker;
2071
2072 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
2073 if (worker) {
2074 INIT_LIST_HEAD(&worker->entry);
2075 INIT_LIST_HEAD(&worker->scheduled);
2076 INIT_LIST_HEAD(&worker->node);
2077 /* on creation a worker is in !idle && prep state */
2078 worker->flags = WORKER_PREP;
2079 }
2080 return worker;
2081 }
2082
2083 static cpumask_t *pool_allowed_cpus(struct worker_pool *pool)
2084 {
2085 if (pool->cpu < 0 && pool->attrs->affn_strict)
2086 return pool->attrs->__pod_cpumask;
2087 else
2088 return pool->attrs->cpumask;
2089 }
2090
2091 /**
2092 * worker_attach_to_pool() - attach a worker to a pool
2093 * @worker: worker to be attached
2094 * @pool: the target pool
2095 *
2096 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and
2097 * cpu-binding of @worker are kept coordinated with the pool across
2098 * cpu-[un]hotplugs.
2099 */
2100 static void worker_attach_to_pool(struct worker *worker,
2101 struct worker_pool *pool)
2102 {
2103 mutex_lock(&wq_pool_attach_mutex);
2104
2105 /*
2106 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
2107 * stable across this function. See the comments above the flag
2108 * definition for details.
2109 */
2110 if (pool->flags & POOL_DISASSOCIATED)
2111 worker->flags |= WORKER_UNBOUND;
2112 else
2113 kthread_set_per_cpu(worker->task, pool->cpu);
2114
2115 if (worker->rescue_wq)
2116 set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool));
2117
2118 list_add_tail(&worker->node, &pool->workers);
2119 worker->pool = pool;
2120
2121 mutex_unlock(&wq_pool_attach_mutex);
2122 }
2123
2124 /**
2125 * worker_detach_from_pool() - detach a worker from its pool
2126 * @worker: worker which is attached to its pool
2127 *
2128 * Undo the attaching done in worker_attach_to_pool(). The caller
2129 * worker shouldn't access the pool after detaching unless it holds
2130 * another reference to the pool.
2131 */
2132 static void worker_detach_from_pool(struct worker *worker)
2133 {
2134 struct worker_pool *pool = worker->pool;
2135 struct completion *detach_completion = NULL;
2136
2137 mutex_lock(&wq_pool_attach_mutex);
2138
2139 kthread_set_per_cpu(worker->task, -1);
2140 list_del(&worker->node);
2141 worker->pool = NULL;
2142
2143 if (list_empty(&pool->workers) && list_empty(&pool->dying_workers))
2144 detach_completion = pool->detach_completion;
2145 mutex_unlock(&wq_pool_attach_mutex);
2146
2147 /* clear leftover flags without pool->lock after it is detached */
2148 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
2149
2150 if (detach_completion)
2151 complete(detach_completion);
2152 }
2153
2154 /**
2155 * create_worker - create a new workqueue worker
2156 * @pool: pool the new worker will belong to
2157 *
2158 * Create and start a new worker which is attached to @pool.
2159 *
2160 * CONTEXT:
2161 * Might sleep. Does GFP_KERNEL allocations.
2162 *
2163 * Return:
2164 * Pointer to the newly created worker.
2165 */
2166 static struct worker *create_worker(struct worker_pool *pool)
2167 {
2168 struct worker *worker;
2169 int id;
2170 char id_buf[23];
2171
2172 /* ID is needed to determine kthread name */
2173 id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
2174 if (id < 0) {
2175 pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n",
2176 ERR_PTR(id));
2177 return NULL;
2178 }
2179
2180 worker = alloc_worker(pool->node);
2181 if (!worker) {
2182 pr_err_once("workqueue: Failed to allocate a worker\n");
2183 goto fail;
2184 }
2185
2186 worker->id = id;
2187
2188 if (pool->cpu >= 0)
2189 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
2190 pool->attrs->nice < 0 ? "H" : "");
2191 else
2192 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
2193
2194 worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
2195 "kworker/%s", id_buf);
2196 if (IS_ERR(worker->task)) {
2197 if (PTR_ERR(worker->task) == -EINTR) {
2198 pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n",
2199 id_buf);
2200 } else {
2201 pr_err_once("workqueue: Failed to create a worker thread: %pe",
2202 worker->task);
2203 }
2204 goto fail;
2205 }
2206
2207 set_user_nice(worker->task, pool->attrs->nice);
2208 kthread_bind_mask(worker->task, pool_allowed_cpus(pool));
2209
2210 /* successful, attach the worker to the pool */
2211 worker_attach_to_pool(worker, pool);
2212
2213 /* start the newly created worker */
2214 raw_spin_lock_irq(&pool->lock);
2215
2216 worker->pool->nr_workers++;
2217 worker_enter_idle(worker);
2218 kick_pool(pool);
2219
2220 /*
2221 * @worker is waiting on a completion in kthread() and will trigger hung
2222 * check if not woken up soon. As kick_pool() might not have woken it
2223 * up, wake it up explicitly once more.
2224 */
2225 wake_up_process(worker->task);
2226
2227 raw_spin_unlock_irq(&pool->lock);
2228
2229 return worker;
2230
2231 fail:
2232 ida_free(&pool->worker_ida, id);
2233 kfree(worker);
2234 return NULL;
2235 }
2236
2237 static void unbind_worker(struct worker *worker)
2238 {
2239 lockdep_assert_held(&wq_pool_attach_mutex);
2240
2241 kthread_set_per_cpu(worker->task, -1);
2242 if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
2243 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
2244 else
2245 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
2246 }
2247
2248 static void wake_dying_workers(struct list_head *cull_list)
2249 {
2250 struct worker *worker, *tmp;
2251
2252 list_for_each_entry_safe(worker, tmp, cull_list, entry) {
2253 list_del_init(&worker->entry);
2254 unbind_worker(worker);
2255 /*
2256 * If the worker was somehow already running, then it had to be
2257 * in pool->idle_list when set_worker_dying() happened or we
2258 * wouldn't have gotten here.
2259 *
2260 * Thus, the worker must either have observed the WORKER_DIE
2261 * flag, or have set its state to TASK_IDLE. Either way, the
2262 * below will be observed by the worker and is safe to do
2263 * outside of pool->lock.
2264 */
2265 wake_up_process(worker->task);
2266 }
2267 }
2268
2269 /**
2270 * set_worker_dying - Tag a worker for destruction
2271 * @worker: worker to be destroyed
2272 * @list: transfer worker away from its pool->idle_list and into list
2273 *
2274 * Tag @worker for destruction and adjust @pool stats accordingly. The worker
2275 * should be idle.
2276 *
2277 * CONTEXT:
2278 * raw_spin_lock_irq(pool->lock).
2279 */
2280 static void set_worker_dying(struct worker *worker, struct list_head *list)
2281 {
2282 struct worker_pool *pool = worker->pool;
2283
2284 lockdep_assert_held(&pool->lock);
2285 lockdep_assert_held(&wq_pool_attach_mutex);
2286
2287 /* sanity check frenzy */
2288 if (WARN_ON(worker->current_work) ||
2289 WARN_ON(!list_empty(&worker->scheduled)) ||
2290 WARN_ON(!(worker->flags & WORKER_IDLE)))
2291 return;
2292
2293 pool->nr_workers--;
2294 pool->nr_idle--;
2295
2296 worker->flags |= WORKER_DIE;
2297
2298 list_move(&worker->entry, list);
2299 list_move(&worker->node, &pool->dying_workers);
2300 }
2301
2302 /**
2303 * idle_worker_timeout - check if some idle workers can now be deleted.
2304 * @t: The pool's idle_timer that just expired
2305 *
2306 * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in
2307 * worker_leave_idle(), as a worker flicking between idle and active while its
2308 * pool is at the too_many_workers() tipping point would cause too much timer
2309 * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let
2310 * it expire and re-evaluate things from there.
2311 */
2312 static void idle_worker_timeout(struct timer_list *t)
2313 {
2314 struct worker_pool *pool = from_timer(pool, t, idle_timer);
2315 bool do_cull = false;
2316
2317 if (work_pending(&pool->idle_cull_work))
2318 return;
2319
2320 raw_spin_lock_irq(&pool->lock);
2321
2322 if (too_many_workers(pool)) {
2323 struct worker *worker;
2324 unsigned long expires;
2325
2326 /* idle_list is kept in LIFO order, check the last one */
2327 worker = list_entry(pool->idle_list.prev, struct worker, entry);
2328 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2329 do_cull = !time_before(jiffies, expires);
2330
2331 if (!do_cull)
2332 mod_timer(&pool->idle_timer, expires);
2333 }
2334 raw_spin_unlock_irq(&pool->lock);
2335
2336 if (do_cull)
2337 queue_work(system_unbound_wq, &pool->idle_cull_work);
2338 }
2339
2340 /**
2341 * idle_cull_fn - cull workers that have been idle for too long.
2342 * @work: the pool's work for handling these idle workers
2343 *
2344 * This goes through a pool's idle workers and gets rid of those that have been
2345 * idle for at least IDLE_WORKER_TIMEOUT (5 minutes).
2346 *
2347 * We don't want to disturb isolated CPUs because of a pcpu kworker being
2348 * culled, so this also resets worker affinity. This requires a sleepable
2349 * context, hence the split between timer callback and work item.
2350 */
2351 static void idle_cull_fn(struct work_struct *work)
2352 {
2353 struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
2354 LIST_HEAD(cull_list);
2355
2356 /*
2357 * Grabbing wq_pool_attach_mutex here ensures an already-running worker
2358 * cannot proceed beyond worker_detach_from_pool() in its self-destruct
2359 * path. This is required as a previously-preempted worker could run after
2360 * set_worker_dying() has happened but before wake_dying_workers() did.
2361 */
2362 mutex_lock(&wq_pool_attach_mutex);
2363 raw_spin_lock_irq(&pool->lock);
2364
2365 while (too_many_workers(pool)) {
2366 struct worker *worker;
2367 unsigned long expires;
2368
2369 worker = list_entry(pool->idle_list.prev, struct worker, entry);
2370 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2371
2372 if (time_before(jiffies, expires)) {
2373 mod_timer(&pool->idle_timer, expires);
2374 break;
2375 }
2376
2377 set_worker_dying(worker, &cull_list);
2378 }
2379
2380 raw_spin_unlock_irq(&pool->lock);
2381 wake_dying_workers(&cull_list);
2382 mutex_unlock(&wq_pool_attach_mutex);
2383 }
2384
2385 static void send_mayday(struct work_struct *work)
2386 {
2387 struct pool_workqueue *pwq = get_work_pwq(work);
2388 struct workqueue_struct *wq = pwq->wq;
2389
2390 lockdep_assert_held(&wq_mayday_lock);
2391
2392 if (!wq->rescuer)
2393 return;
2394
2395 /* mayday mayday mayday */
2396 if (list_empty(&pwq->mayday_node)) {
2397 /*
2398 * If @pwq is for an unbound wq, its base ref may be put at
2399 * any time due to an attribute change. Pin @pwq until the
2400 * rescuer is done with it.
2401 */
2402 get_pwq(pwq);
2403 list_add_tail(&pwq->mayday_node, &wq->maydays);
2404 wake_up_process(wq->rescuer->task);
2405 pwq->stats[PWQ_STAT_MAYDAY]++;
2406 }
2407 }
2408
2409 static void pool_mayday_timeout(struct timer_list *t)
2410 {
2411 struct worker_pool *pool = from_timer(pool, t, mayday_timer);
2412 struct work_struct *work;
2413
2414 raw_spin_lock_irq(&pool->lock);
2415 raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */
2416
2417 if (need_to_create_worker(pool)) {
2418 /*
2419 * We've been trying to create a new worker but
2420 * haven't been successful. We might be hitting an
2421 * allocation deadlock. Send distress signals to
2422 * rescuers.
2423 */
2424 list_for_each_entry(work, &pool->worklist, entry)
2425 send_mayday(work);
2426 }
2427
2428 raw_spin_unlock(&wq_mayday_lock);
2429 raw_spin_unlock_irq(&pool->lock);
2430
2431 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
2432 }
2433
2434 /**
2435 * maybe_create_worker - create a new worker if necessary
2436 * @pool: pool to create a new worker for
2437 *
2438 * Create a new worker for @pool if necessary. @pool is guaranteed to
2439 * have at least one idle worker on return from this function. If
2440 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
2441 * sent to all rescuers with works scheduled on @pool to resolve
2442 * possible allocation deadlock.
2443 *
2444 * On return, need_to_create_worker() is guaranteed to be %false and
2445 * may_start_working() %true.
2446 *
2447 * LOCKING:
2448 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2449 * multiple times. Does GFP_KERNEL allocations. Called only from
2450 * manager.
2451 */
2452 static void maybe_create_worker(struct worker_pool *pool)
2453 __releases(&pool->lock)
2454 __acquires(&pool->lock)
2455 {
2456 restart:
2457 raw_spin_unlock_irq(&pool->lock);
2458
2459 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
2460 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
2461
2462 while (true) {
2463 if (create_worker(pool) || !need_to_create_worker(pool))
2464 break;
2465
2466 schedule_timeout_interruptible(CREATE_COOLDOWN);
2467
2468 if (!need_to_create_worker(pool))
2469 break;
2470 }
2471
2472 del_timer_sync(&pool->mayday_timer);
2473 raw_spin_lock_irq(&pool->lock);
2474 /*
2475 * This is necessary even after a new worker was just successfully
2476 * created as @pool->lock was dropped and the new worker might have
2477 * already become busy.
2478 */
2479 if (need_to_create_worker(pool))
2480 goto restart;
2481 }
2482
2483 /**
2484 * manage_workers - manage worker pool
2485 * @worker: self
2486 *
2487 * Assume the manager role and manage the worker pool @worker belongs
2488 * to. At any given time, there can be only zero or one manager per
2489 * pool. The exclusion is handled automatically by this function.
2490 *
2491 * The caller can safely start processing works on false return. On
2492 * true return, it's guaranteed that need_to_create_worker() is false
2493 * and may_start_working() is true.
2494 *
2495 * CONTEXT:
2496 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2497 * multiple times. Does GFP_KERNEL allocations.
2498 *
2499 * Return:
2500 * %false if the pool doesn't need management and the caller can safely
2501 * start processing works, %true if management function was performed and
2502 * the conditions that the caller verified before calling the function may
2503 * no longer be true.
2504 */
2505 static bool manage_workers(struct worker *worker)
2506 {
2507 struct worker_pool *pool = worker->pool;
2508
2509 if (pool->flags & POOL_MANAGER_ACTIVE)
2510 return false;
2511
2512 pool->flags |= POOL_MANAGER_ACTIVE;
2513 pool->manager = worker;
2514
2515 maybe_create_worker(pool);
2516
2517 pool->manager = NULL;
2518 pool->flags &= ~POOL_MANAGER_ACTIVE;
2519 rcuwait_wake_up(&manager_wait);
2520 return true;
2521 }
2522
2523 /**
2524 * process_one_work - process single work
2525 * @worker: self
2526 * @work: work to process
2527 *
2528 * Process @work. This function contains all the logic necessary to
2529 * process a single work including synchronization against and
2530 * interaction with other workers on the same cpu, queueing and
2531 * flushing. As long as context requirement is met, any worker can
2532 * call this function to process a work.
2533 *
2534 * CONTEXT:
2535 * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
2536 */
2537 static void process_one_work(struct worker *worker, struct work_struct *work)
2538 __releases(&pool->lock)
2539 __acquires(&pool->lock)
2540 {
2541 struct pool_workqueue *pwq = get_work_pwq(work);
2542 struct worker_pool *pool = worker->pool;
2543 unsigned long work_data;
2544 int lockdep_start_depth, rcu_start_depth;
2545 #ifdef CONFIG_LOCKDEP
2546 /*
2547 * It is permissible to free the struct work_struct from
2548 * inside the function that is called from it, this we need to
2549 * take into account for lockdep too. To avoid bogus "held
2550 * lock freed" warnings as well as problems when looking into
2551 * work->lockdep_map, make a copy and use that here.
2552 */
2553 struct lockdep_map lockdep_map;
2554
2555 lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2556 #endif
2557 /* ensure we're on the correct CPU */
2558 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2559 raw_smp_processor_id() != pool->cpu);
2560
2561 /* claim and dequeue */
2562 debug_work_deactivate(work);
2563 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2564 worker->current_work = work;
2565 worker->current_func = work->func;
2566 worker->current_pwq = pwq;
2567 worker->current_at = worker->task->se.sum_exec_runtime;
2568 work_data = *work_data_bits(work);
2569 worker->current_color = get_work_color(work_data);
2570
2571 /*
2572 * Record wq name for cmdline and debug reporting, may get
2573 * overridden through set_worker_desc().
2574 */
2575 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
2576
2577 list_del_init(&work->entry);
2578
2579 /*
2580 * CPU intensive works don't participate in concurrency management.
2581 * They're the scheduler's responsibility. This takes @worker out
2582 * of concurrency management and the next code block will chain
2583 * execution of the pending work items.
2584 */
2585 if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE))
2586 worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2587
2588 /*
2589 * Kick @pool if necessary. It's always noop for per-cpu worker pools
2590 * since nr_running would always be >= 1 at this point. This is used to
2591 * chain execution of the pending work items for WORKER_NOT_RUNNING
2592 * workers such as the UNBOUND and CPU_INTENSIVE ones.
2593 */
2594 kick_pool(pool);
2595
2596 /*
2597 * Record the last pool and clear PENDING which should be the last
2598 * update to @work. Also, do this inside @pool->lock so that
2599 * PENDING and queued state changes happen together while IRQ is
2600 * disabled.
2601 */
2602 set_work_pool_and_clear_pending(work, pool->id);
2603
2604 pwq->stats[PWQ_STAT_STARTED]++;
2605 raw_spin_unlock_irq(&pool->lock);
2606
2607 rcu_start_depth = rcu_preempt_depth();
2608 lockdep_start_depth = lockdep_depth(current);
2609 lock_map_acquire(&pwq->wq->lockdep_map);
2610 lock_map_acquire(&lockdep_map);
2611 /*
2612 * Strictly speaking we should mark the invariant state without holding
2613 * any locks, that is, before these two lock_map_acquire()'s.
2614 *
2615 * However, that would result in:
2616 *
2617 * A(W1)
2618 * WFC(C)
2619 * A(W1)
2620 * C(C)
2621 *
2622 * Which would create W1->C->W1 dependencies, even though there is no
2623 * actual deadlock possible. There are two solutions, using a
2624 * read-recursive acquire on the work(queue) 'locks', but this will then
2625 * hit the lockdep limitation on recursive locks, or simply discard
2626 * these locks.
2627 *
2628 * AFAICT there is no possible deadlock scenario between the
2629 * flush_work() and complete() primitives (except for single-threaded
2630 * workqueues), so hiding them isn't a problem.
2631 */
2632 lockdep_invariant_state(true);
2633 trace_workqueue_execute_start(work);
2634 worker->current_func(work);
2635 /*
2636 * While we must be careful to not use "work" after this, the trace
2637 * point will only record its address.
2638 */
2639 trace_workqueue_execute_end(work, worker->current_func);
2640 pwq->stats[PWQ_STAT_COMPLETED]++;
2641 lock_map_release(&lockdep_map);
2642 lock_map_release(&pwq->wq->lockdep_map);
2643
2644 if (unlikely((worker->task && in_atomic()) ||
2645 lockdep_depth(current) != lockdep_start_depth ||
2646 rcu_preempt_depth() != rcu_start_depth)) {
2647 pr_err("BUG: workqueue leaked atomic, lock or RCU: %s[%d]\n"
2648 " preempt=0x%08x lock=%d->%d RCU=%d->%d workfn=%ps\n",
2649 current->comm, task_pid_nr(current), preempt_count(),
2650 lockdep_start_depth, lockdep_depth(current),
2651 rcu_start_depth, rcu_preempt_depth(),
2652 worker->current_func);
2653 debug_show_held_locks(current);
2654 dump_stack();
2655 }
2656
2657 /*
2658 * The following prevents a kworker from hogging CPU on !PREEMPTION
2659 * kernels, where a requeueing work item waiting for something to
2660 * happen could deadlock with stop_machine as such work item could
2661 * indefinitely requeue itself while all other CPUs are trapped in
2662 * stop_machine. At the same time, report a quiescent RCU state so
2663 * the same condition doesn't freeze RCU.
2664 */
2665 cond_resched();
2666
2667 raw_spin_lock_irq(&pool->lock);
2668
2669 /*
2670 * In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked
2671 * CPU intensive by wq_worker_tick() if @work hogged CPU longer than
2672 * wq_cpu_intensive_thresh_us. Clear it.
2673 */
2674 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2675
2676 /* tag the worker for identification in schedule() */
2677 worker->last_func = worker->current_func;
2678
2679 /* we're done with it, release */
2680 hash_del(&worker->hentry);
2681 worker->current_work = NULL;
2682 worker->current_func = NULL;
2683 worker->current_pwq = NULL;
2684 worker->current_color = INT_MAX;
2685 pwq_dec_nr_in_flight(pwq, work_data);
2686 }
2687
2688 /**
2689 * process_scheduled_works - process scheduled works
2690 * @worker: self
2691 *
2692 * Process all scheduled works. Please note that the scheduled list
2693 * may change while processing a work, so this function repeatedly
2694 * fetches a work from the top and executes it.
2695 *
2696 * CONTEXT:
2697 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2698 * multiple times.
2699 */
2700 static void process_scheduled_works(struct worker *worker)
2701 {
2702 struct work_struct *work;
2703 bool first = true;
2704
2705 while ((work = list_first_entry_or_null(&worker->scheduled,
2706 struct work_struct, entry))) {
2707 if (first) {
2708 worker->pool->watchdog_ts = jiffies;
2709 first = false;
2710 }
2711 process_one_work(worker, work);
2712 }
2713 }
2714
2715 static void set_pf_worker(bool val)
2716 {
2717 mutex_lock(&wq_pool_attach_mutex);
2718 if (val)
2719 current->flags |= PF_WQ_WORKER;
2720 else
2721 current->flags &= ~PF_WQ_WORKER;
2722 mutex_unlock(&wq_pool_attach_mutex);
2723 }
2724
2725 /**
2726 * worker_thread - the worker thread function
2727 * @__worker: self
2728 *
2729 * The worker thread function. All workers belong to a worker_pool -
2730 * either a per-cpu one or dynamic unbound one. These workers process all
2731 * work items regardless of their specific target workqueue. The only
2732 * exception is work items which belong to workqueues with a rescuer which
2733 * will be explained in rescuer_thread().
2734 *
2735 * Return: 0
2736 */
2737 static int worker_thread(void *__worker)
2738 {
2739 struct worker *worker = __worker;
2740 struct worker_pool *pool = worker->pool;
2741
2742 /* tell the scheduler that this is a workqueue worker */
2743 set_pf_worker(true);
2744 woke_up:
2745 raw_spin_lock_irq(&pool->lock);
2746
2747 /* am I supposed to die? */
2748 if (unlikely(worker->flags & WORKER_DIE)) {
2749 raw_spin_unlock_irq(&pool->lock);
2750 set_pf_worker(false);
2751
2752 set_task_comm(worker->task, "kworker/dying");
2753 ida_free(&pool->worker_ida, worker->id);
2754 worker_detach_from_pool(worker);
2755 WARN_ON_ONCE(!list_empty(&worker->entry));
2756 kfree(worker);
2757 return 0;
2758 }
2759
2760 worker_leave_idle(worker);
2761 recheck:
2762 /* no more worker necessary? */
2763 if (!need_more_worker(pool))
2764 goto sleep;
2765
2766 /* do we need to manage? */
2767 if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2768 goto recheck;
2769
2770 /*
2771 * ->scheduled list can only be filled while a worker is
2772 * preparing to process a work or actually processing it.
2773 * Make sure nobody diddled with it while I was sleeping.
2774 */
2775 WARN_ON_ONCE(!list_empty(&worker->scheduled));
2776
2777 /*
2778 * Finish PREP stage. We're guaranteed to have at least one idle
2779 * worker or that someone else has already assumed the manager
2780 * role. This is where @worker starts participating in concurrency
2781 * management if applicable and concurrency management is restored
2782 * after being rebound. See rebind_workers() for details.
2783 */
2784 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2785
2786 do {
2787 struct work_struct *work =
2788 list_first_entry(&pool->worklist,
2789 struct work_struct, entry);
2790
2791 if (assign_work(work, worker, NULL))
2792 process_scheduled_works(worker);
2793 } while (keep_working(pool));
2794
2795 worker_set_flags(worker, WORKER_PREP);
2796 sleep:
2797 /*
2798 * pool->lock is held and there's no work to process and no need to
2799 * manage, sleep. Workers are woken up only while holding
2800 * pool->lock or from local cpu, so setting the current state
2801 * before releasing pool->lock is enough to prevent losing any
2802 * event.
2803 */
2804 worker_enter_idle(worker);
2805 __set_current_state(TASK_IDLE);
2806 raw_spin_unlock_irq(&pool->lock);
2807 schedule();
2808 goto woke_up;
2809 }
2810
2811 /**
2812 * rescuer_thread - the rescuer thread function
2813 * @__rescuer: self
2814 *
2815 * Workqueue rescuer thread function. There's one rescuer for each
2816 * workqueue which has WQ_MEM_RECLAIM set.
2817 *
2818 * Regular work processing on a pool may block trying to create a new
2819 * worker, which uses a GFP_KERNEL allocation that has a slight chance
2820 * of developing into a deadlock if some work items currently on the
2821 * same queue need to be processed to satisfy that allocation. This is
2822 * the problem the rescuer solves.
2823 *
2824 * When such condition is possible, the pool summons rescuers of all
2825 * workqueues which have works queued on the pool and let them process
2826 * those works so that forward progress can be guaranteed.
2827 *
2828 * This should happen rarely.
2829 *
2830 * Return: 0
2831 */
2832 static int rescuer_thread(void *__rescuer)
2833 {
2834 struct worker *rescuer = __rescuer;
2835 struct workqueue_struct *wq = rescuer->rescue_wq;
2836 bool should_stop;
2837
2838 set_user_nice(current, RESCUER_NICE_LEVEL);
2839
2840 /*
2841 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it
2842 * doesn't participate in concurrency management.
2843 */
2844 set_pf_worker(true);
2845 repeat:
2846 set_current_state(TASK_IDLE);
2847
2848 /*
2849 * By the time the rescuer is requested to stop, the workqueue
2850 * shouldn't have any work pending, but @wq->maydays may still have
2851 * pwq(s) queued. This can happen by non-rescuer workers consuming
2852 * all the work items before the rescuer got to them. Go through
2853 * @wq->maydays processing before acting on should_stop so that the
2854 * list is always empty on exit.
2855 */
2856 should_stop = kthread_should_stop();
2857
2858 /* see whether any pwq is asking for help */
2859 raw_spin_lock_irq(&wq_mayday_lock);
2860
2861 while (!list_empty(&wq->maydays)) {
2862 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2863 struct pool_workqueue, mayday_node);
2864 struct worker_pool *pool = pwq->pool;
2865 struct work_struct *work, *n;
2866
2867 __set_current_state(TASK_RUNNING);
2868 list_del_init(&pwq->mayday_node);
2869
2870 raw_spin_unlock_irq(&wq_mayday_lock);
2871
2872 worker_attach_to_pool(rescuer, pool);
2873
2874 raw_spin_lock_irq(&pool->lock);
2875
2876 /*
2877 * Slurp in all works issued via this workqueue and
2878 * process'em.
2879 */
2880 WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
2881 list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2882 if (get_work_pwq(work) == pwq &&
2883 assign_work(work, rescuer, &n))
2884 pwq->stats[PWQ_STAT_RESCUED]++;
2885 }
2886
2887 if (!list_empty(&rescuer->scheduled)) {
2888 process_scheduled_works(rescuer);
2889
2890 /*
2891 * The above execution of rescued work items could
2892 * have created more to rescue through
2893 * pwq_activate_first_inactive() or chained
2894 * queueing. Let's put @pwq back on mayday list so
2895 * that such back-to-back work items, which may be
2896 * being used to relieve memory pressure, don't
2897 * incur MAYDAY_INTERVAL delay in between.
2898 */
2899 if (pwq->nr_active && need_to_create_worker(pool)) {
2900 raw_spin_lock(&wq_mayday_lock);
2901 /*
2902 * Queue iff we aren't racing destruction
2903 * and somebody else hasn't queued it already.
2904 */
2905 if (wq->rescuer && list_empty(&pwq->mayday_node)) {
2906 get_pwq(pwq);
2907 list_add_tail(&pwq->mayday_node, &wq->maydays);
2908 }
2909 raw_spin_unlock(&wq_mayday_lock);
2910 }
2911 }
2912
2913 /*
2914 * Put the reference grabbed by send_mayday(). @pool won't
2915 * go away while we're still attached to it.
2916 */
2917 put_pwq(pwq);
2918
2919 /*
2920 * Leave this pool. Notify regular workers; otherwise, we end up
2921 * with 0 concurrency and stalling the execution.
2922 */
2923 kick_pool(pool);
2924
2925 raw_spin_unlock_irq(&pool->lock);
2926
2927 worker_detach_from_pool(rescuer);
2928
2929 raw_spin_lock_irq(&wq_mayday_lock);
2930 }
2931
2932 raw_spin_unlock_irq(&wq_mayday_lock);
2933
2934 if (should_stop) {
2935 __set_current_state(TASK_RUNNING);
2936 set_pf_worker(false);
2937 return 0;
2938 }
2939
2940 /* rescuers should never participate in concurrency management */
2941 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2942 schedule();
2943 goto repeat;
2944 }
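
/*
 * Illustrative sketch, not part of the kernel source: a rescuer exists only
 * for workqueues created with WQ_MEM_RECLAIM, e.g. a hypothetical
 * writeback-style queue:
 *
 *	wq = alloc_workqueue("my_writeback", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
 */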
2945
2946 /**
2947 * check_flush_dependency - check for flush dependency sanity
2948 * @target_wq: workqueue being flushed
2949 * @target_work: work item being flushed (NULL for workqueue flushes)
2950 * @from_cancel: are we called from the work cancel path
2951 *
2952 * %current is trying to flush the whole @target_wq or @target_work on it.
2953 * If this is not the cancel path (which implies work being flushed is either
2954 * already running, or will not be at all), check if @target_wq doesn't have
2955 * %WQ_MEM_RECLAIM and verify that %current is not reclaiming memory or running
2956 * on a workqueue which doesn't have %WQ_MEM_RECLAIM as that can break forward-
2957 * progress guarantee leading to a deadlock.
2958 */
2959 static void check_flush_dependency(struct workqueue_struct *target_wq,
2960 struct work_struct *target_work,
2961 bool from_cancel)
2962 {
2963 work_func_t target_func;
2964 struct worker *worker;
2965
2966 if (from_cancel || target_wq->flags & WQ_MEM_RECLAIM)
2967 return;
2968
2969 worker = current_wq_worker();
2970 target_func = target_work ? target_work->func : NULL;
2971
2972 WARN_ONCE(current->flags & PF_MEMALLOC,
2973 "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
2974 current->pid, current->comm, target_wq->name, target_func);
2975 WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2976 (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
2977 "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
2978 worker->current_pwq->wq->name, worker->current_func,
2979 target_wq->name, target_func);
2980 }
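
/*
 * Illustrative sketch, not part of the kernel source, of the dependency the
 * warnings above catch: a work item running on a WQ_MEM_RECLAIM workqueue
 * must not wait on a workqueue lacking that flag, since the latter has no
 * rescuer and may stall under memory pressure, breaking the forward-progress
 * guarantee. Assuming hypothetical "reclaim_wq" (WQ_MEM_RECLAIM) and
 * "plain_wq" (no flags) created with alloc_workqueue():
 *
 *	static void reclaim_work_fn(struct work_struct *work)
 *	{
 *		// queued on reclaim_wq; this flush trips the WARN_ONCE() above
 *		flush_workqueue(plain_wq);
 *	}
 */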
2981
2982 struct wq_barrier {
2983 struct work_struct work;
2984 struct completion done;
2985 struct task_struct *task; /* purely informational */
2986 };
2987
2988 static void wq_barrier_func(struct work_struct *work)
2989 {
2990 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2991 complete(&barr->done);
2992 }
2993
2994 /**
2995 * insert_wq_barrier - insert a barrier work
2996 * @pwq: pwq to insert barrier into
2997 * @barr: wq_barrier to insert
2998 * @target: target work to attach @barr to
2999 * @worker: worker currently executing @target, NULL if @target is not executing
3000 *
3001 * @barr is linked to @target such that @barr is completed only after
3002 * @target finishes execution. Please note that the ordering
3003 * guarantee is observed only with respect to @target and on the local
3004 * cpu.
3005 *
3006 * Currently, a queued barrier can't be canceled. This is because
3007 * try_to_grab_pending() can't determine whether the work to be
3008 * grabbed is at the head of the queue and thus can't clear LINKED
3009 * flag of the previous work while there must be a valid next work
3010 * after a work with LINKED flag set.
3011 *
3012 * Note that when @worker is non-NULL, @target may be modified
3013 * underneath us, so we can't reliably determine pwq from @target.
3014 *
3015 * CONTEXT:
3016 * raw_spin_lock_irq(pool->lock).
3017 */
3018 static void insert_wq_barrier(struct pool_workqueue *pwq,
3019 struct wq_barrier *barr,
3020 struct work_struct *target, struct worker *worker)
3021 {
3022 unsigned int work_flags = 0;
3023 unsigned int work_color;
3024 struct list_head *head;
3025
3026 /*
3027 * debugobject calls are safe here even with pool->lock locked
3028 * as we know for sure that this will not trigger any of the
3029 * checks and call back into the fixup functions where we
3030 * might deadlock.
3031 */
3032 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
3033 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
3034
3035 init_completion_map(&barr->done, &target->lockdep_map);
3036
3037 barr->task = current;
3038
3039 /* The barrier work item does not participate in pwq->nr_active. */
3040 work_flags |= WORK_STRUCT_INACTIVE;
3041
3042 /*
3043 * If @target is currently being executed, schedule the
3044 * barrier to the worker; otherwise, put it after @target.
3045 */
3046 if (worker) {
3047 head = worker->scheduled.next;
3048 work_color = worker->current_color;
3049 } else {
3050 unsigned long *bits = work_data_bits(target);
3051
3052 head = target->entry.next;
3053 /* there can already be other linked works, inherit and set */
3054 work_flags |= *bits & WORK_STRUCT_LINKED;
3055 work_color = get_work_color(*bits);
3056 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
3057 }
3058
3059 pwq->nr_in_flight[work_color]++;
3060 work_flags |= work_color_to_flags(work_color);
3061
3062 insert_work(pwq, &barr->work, head, work_flags);
3063 }
3064
3065 /**
3066 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
3067 * @wq: workqueue being flushed
3068 * @flush_color: new flush color, < 0 for no-op
3069 * @work_color: new work color, < 0 for no-op
3070 *
3071 * Prepare pwqs for workqueue flushing.
3072 *
3073 * If @flush_color is non-negative, flush_color on all pwqs should be
3074 * -1. If no pwq has in-flight commands at the specified color, all
3075 * pwq->flush_color's stay at -1 and %false is returned. If any pwq
3076 * has in flight commands, its pwq->flush_color is set to
3077 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
3078 * wakeup logic is armed and %true is returned.
3079 *
3080 * The caller should have initialized @wq->first_flusher prior to
3081 * calling this function with non-negative @flush_color. If
3082 * @flush_color is negative, no flush color update is done and %false
3083 * is returned.
3084 *
3085 * If @work_color is non-negative, all pwqs should have the same
3086 * work_color which is previous to @work_color and all will be
3087 * advanced to @work_color.
3088 *
3089 * CONTEXT:
3090 * mutex_lock(wq->mutex).
3091 *
3092 * Return:
3093 * %true if @flush_color >= 0 and there's something to flush. %false
3094 * otherwise.
3095 */
3096 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
3097 int flush_color, int work_color)
3098 {
3099 bool wait = false;
3100 struct pool_workqueue *pwq;
3101
3102 if (flush_color >= 0) {
3103 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
3104 atomic_set(&wq->nr_pwqs_to_flush, 1);
3105 }
3106
3107 for_each_pwq(pwq, wq) {
3108 struct worker_pool *pool = pwq->pool;
3109
3110 raw_spin_lock_irq(&pool->lock);
3111
3112 if (flush_color >= 0) {
3113 WARN_ON_ONCE(pwq->flush_color != -1);
3114
3115 if (pwq->nr_in_flight[flush_color]) {
3116 pwq->flush_color = flush_color;
3117 atomic_inc(&wq->nr_pwqs_to_flush);
3118 wait = true;
3119 }
3120 }
3121
3122 if (work_color >= 0) {
3123 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
3124 pwq->work_color = work_color;
3125 }
3126
3127 raw_spin_unlock_irq(&pool->lock);
3128 }
3129
3130 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
3131 complete(&wq->first_flusher->done);
3132
3133 return wait;
3134 }
3135
3136 static void touch_wq_lockdep_map(struct workqueue_struct *wq)
3137 {
3138 lock_map_acquire(&wq->lockdep_map);
3139 lock_map_release(&wq->lockdep_map);
3140 }
3141
3142 static void touch_work_lockdep_map(struct work_struct *work,
3143 struct workqueue_struct *wq)
3144 {
3145 lock_map_acquire(&work->lockdep_map);
3146 lock_map_release(&work->lockdep_map);
3147 }
3148
3149 /**
3150 * __flush_workqueue - ensure that any scheduled work has run to completion.
3151 * @wq: workqueue to flush
3152 *
3153 * This function sleeps until all work items which were queued on entry
3154 * have finished execution, but it is not livelocked by new incoming ones.
3155 */
3156 void __flush_workqueue(struct workqueue_struct *wq)
3157 {
3158 struct wq_flusher this_flusher = {
3159 .list = LIST_HEAD_INIT(this_flusher.list),
3160 .flush_color = -1,
3161 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
3162 };
3163 int next_color;
3164
3165 if (WARN_ON(!wq_online))
3166 return;
3167
3168 touch_wq_lockdep_map(wq);
3169
3170 mutex_lock(&wq->mutex);
3171
3172 /*
3173 * Start-to-wait phase
3174 */
3175 next_color = work_next_color(wq->work_color);
3176
3177 if (next_color != wq->flush_color) {
3178 /*
3179 * Color space is not full. The current work_color
3180 * becomes our flush_color and work_color is advanced
3181 * by one.
3182 */
3183 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
3184 this_flusher.flush_color = wq->work_color;
3185 wq->work_color = next_color;
3186
3187 if (!wq->first_flusher) {
3188 /* no flush in progress, become the first flusher */
3189 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
3190
3191 wq->first_flusher = &this_flusher;
3192
3193 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
3194 wq->work_color)) {
3195 /* nothing to flush, done */
3196 wq->flush_color = next_color;
3197 wq->first_flusher = NULL;
3198 goto out_unlock;
3199 }
3200 } else {
3201 /* wait in queue */
3202 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
3203 list_add_tail(&this_flusher.list, &wq->flusher_queue);
3204 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
3205 }
3206 } else {
3207 /*
3208 * Oops, color space is full, wait on overflow queue.
3209 * The next flush completion will assign us
3210 * flush_color and transfer to flusher_queue.
3211 */
3212 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
3213 }
3214
3215 check_flush_dependency(wq, NULL, false);
3216
3217 mutex_unlock(&wq->mutex);
3218
3219 wait_for_completion(&this_flusher.done);
3220
3221 /*
3222 * Wake-up-and-cascade phase
3223 *
3224 * First flushers are responsible for cascading flushes and
3225 * handling overflow. Non-first flushers can simply return.
3226 */
3227 if (READ_ONCE(wq->first_flusher) != &this_flusher)
3228 return;
3229
3230 mutex_lock(&wq->mutex);
3231
3232 /* we might have raced, check again with mutex held */
3233 if (wq->first_flusher != &this_flusher)
3234 goto out_unlock;
3235
3236 WRITE_ONCE(wq->first_flusher, NULL);
3237
3238 WARN_ON_ONCE(!list_empty(&this_flusher.list));
3239 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
3240
3241 while (true) {
3242 struct wq_flusher *next, *tmp;
3243
3244 /* complete all the flushers sharing the current flush color */
3245 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
3246 if (next->flush_color != wq->flush_color)
3247 break;
3248 list_del_init(&next->list);
3249 complete(&next->done);
3250 }
3251
3252 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
3253 wq->flush_color != work_next_color(wq->work_color));
3254
3255 /* this flush_color is finished, advance by one */
3256 wq->flush_color = work_next_color(wq->flush_color);
3257
3258 /* one color has been freed, handle overflow queue */
3259 if (!list_empty(&wq->flusher_overflow)) {
3260 /*
3261 * Assign the same color to all overflowed
3262 * flushers, advance work_color and append to
3263 * flusher_queue. This is the start-to-wait
3264 * phase for these overflowed flushers.
3265 */
3266 list_for_each_entry(tmp, &wq->flusher_overflow, list)
3267 tmp->flush_color = wq->work_color;
3268
3269 wq->work_color = work_next_color(wq->work_color);
3270
3271 list_splice_tail_init(&wq->flusher_overflow,
3272 &wq->flusher_queue);
3273 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
3274 }
3275
3276 if (list_empty(&wq->flusher_queue)) {
3277 WARN_ON_ONCE(wq->flush_color != wq->work_color);
3278 break;
3279 }
3280
3281 /*
3282 * Need to flush more colors. Make the next flusher
3283 * the new first flusher and arm pwqs.
3284 */
3285 WARN_ON_ONCE(wq->flush_color == wq->work_color);
3286 WARN_ON_ONCE(wq->flush_color != next->flush_color);
3287
3288 list_del_init(&next->list);
3289 wq->first_flusher = next;
3290
3291 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
3292 break;
3293
3294 /*
3295 * Meh... this color is already done, clear first
3296 * flusher and repeat cascading.
3297 */
3298 wq->first_flusher = NULL;
3299 }
3300
3301 out_unlock:
3302 mutex_unlock(&wq->mutex);
3303 }
3304 EXPORT_SYMBOL(__flush_workqueue);
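
/*
 * Illustrative sketch (not part of this file): a user of a dedicated
 * workqueue would normally call flush_workqueue() - which wraps
 * __flush_workqueue() - to wait for everything already queued on it.
 * my_wq, my_work and my_workfn are hypothetical names:
 *
 *	static struct workqueue_struct *my_wq;
 *	static DECLARE_WORK(my_work, my_workfn);
 *
 *	queue_work(my_wq, &my_work);
 *	...
 *	flush_workqueue(my_wq);		// every item queued above has now run
 */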
3305
3306 /**
3307 * drain_workqueue - drain a workqueue
3308 * @wq: workqueue to drain
3309 *
3310 * Wait until the workqueue becomes empty. While draining is in progress,
3311 * only chain queueing is allowed. IOW, only currently pending or running
3312 * work items on @wq can queue further work items on it. @wq is flushed
3313  * repeatedly until it becomes empty. The number of flushes required is
3314  * determined by the depth of chaining and should be relatively small. Whine if it
3315 * takes too long.
3316 */
3317 void drain_workqueue(struct workqueue_struct *wq)
3318 {
3319 unsigned int flush_cnt = 0;
3320 struct pool_workqueue *pwq;
3321
3322 /*
3323 * __queue_work() needs to test whether there are drainers, is much
3324 * hotter than drain_workqueue() and already looks at @wq->flags.
3325 	 * Use __WQ_DRAINING so that queue_work() doesn't have to check nr_drainers.
3326 */
3327 mutex_lock(&wq->mutex);
3328 if (!wq->nr_drainers++)
3329 wq->flags |= __WQ_DRAINING;
3330 mutex_unlock(&wq->mutex);
3331 reflush:
3332 __flush_workqueue(wq);
3333
3334 mutex_lock(&wq->mutex);
3335
3336 for_each_pwq(pwq, wq) {
3337 bool drained;
3338
3339 raw_spin_lock_irq(&pwq->pool->lock);
3340 drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
3341 raw_spin_unlock_irq(&pwq->pool->lock);
3342
3343 if (drained)
3344 continue;
3345
3346 if (++flush_cnt == 10 ||
3347 (flush_cnt % 100 == 0 && flush_cnt <= 1000))
3348 pr_warn("workqueue %s: %s() isn't complete after %u tries\n",
3349 wq->name, __func__, flush_cnt);
3350
3351 mutex_unlock(&wq->mutex);
3352 goto reflush;
3353 }
3354
3355 if (!--wq->nr_drainers)
3356 wq->flags &= ~__WQ_DRAINING;
3357 mutex_unlock(&wq->mutex);
3358 }
3359 EXPORT_SYMBOL_GPL(drain_workqueue);
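
/*
 * Illustrative sketch (not part of this file): drain_workqueue() is mainly
 * used by destroy_workqueue(), but it can also wind down a self-requeueing
 * work item directly. my_wq, my_stop and my_workfn are hypothetical names:
 *
 *	static void my_workfn(struct work_struct *work)
 *	{
 *		do_one_step();
 *		if (!READ_ONCE(my_stop))
 *			queue_work(my_wq, work);	// chain queueing, allowed while draining
 *	}
 *
 *	WRITE_ONCE(my_stop, true);
 *	drain_workqueue(my_wq);			// returns once the chain has died out
 */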
3360
3361 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
3362 bool from_cancel)
3363 {
3364 struct worker *worker = NULL;
3365 struct worker_pool *pool;
3366 struct pool_workqueue *pwq;
3367 struct workqueue_struct *wq;
3368
3369 might_sleep();
3370
3371 rcu_read_lock();
3372 pool = get_work_pool(work);
3373 if (!pool) {
3374 rcu_read_unlock();
3375 return false;
3376 }
3377
3378 raw_spin_lock_irq(&pool->lock);
3379 /* see the comment in try_to_grab_pending() with the same code */
3380 pwq = get_work_pwq(work);
3381 if (pwq) {
3382 if (unlikely(pwq->pool != pool))
3383 goto already_gone;
3384 } else {
3385 worker = find_worker_executing_work(pool, work);
3386 if (!worker)
3387 goto already_gone;
3388 pwq = worker->current_pwq;
3389 }
3390
3391 wq = pwq->wq;
3392 check_flush_dependency(wq, work, from_cancel);
3393
3394 insert_wq_barrier(pwq, barr, work, worker);
3395 raw_spin_unlock_irq(&pool->lock);
3396
3397 touch_work_lockdep_map(work, wq);
3398
3399 /*
3400 * Force a lock recursion deadlock when using flush_work() inside a
3401 * single-threaded or rescuer equipped workqueue.
3402 *
3403 	 * For single threaded workqueues the deadlock happens when the flushed
3404 	 * work is queued after the work issuing the flush_work(). For rescuer equipped
3405 * workqueues the deadlock happens when the rescuer stalls, blocking
3406 * forward progress.
3407 */
3408 if (!from_cancel && (wq->saved_max_active == 1 || wq->rescuer))
3409 touch_wq_lockdep_map(wq);
3410
3411 rcu_read_unlock();
3412 return true;
3413 already_gone:
3414 raw_spin_unlock_irq(&pool->lock);
3415 rcu_read_unlock();
3416 return false;
3417 }
3418
3419 static bool __flush_work(struct work_struct *work, bool from_cancel)
3420 {
3421 struct wq_barrier barr;
3422
3423 if (WARN_ON(!wq_online))
3424 return false;
3425
3426 if (WARN_ON(!work->func))
3427 return false;
3428
3429 if (start_flush_work(work, &barr, from_cancel)) {
3430 wait_for_completion(&barr.done);
3431 destroy_work_on_stack(&barr.work);
3432 return true;
3433 } else {
3434 return false;
3435 }
3436 }
3437
3438 /**
3439 * flush_work - wait for a work to finish executing the last queueing instance
3440 * @work: the work to flush
3441 *
3442 * Wait until @work has finished execution. @work is guaranteed to be idle
3443 * on return if it hasn't been requeued since flush started.
3444 *
3445 * Return:
3446 * %true if flush_work() waited for the work to finish execution,
3447 * %false if it was already idle.
3448 */
3449 bool flush_work(struct work_struct *work)
3450 {
3451 return __flush_work(work, false);
3452 }
3453 EXPORT_SYMBOL_GPL(flush_work);
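
/*
 * Illustrative sketch (not part of this file): flush_work() is typically
 * used to make sure a work item has stopped touching an object before the
 * caller inspects or reuses that object. my_dev and update_work are
 * hypothetical names (locking elided):
 *
 *	struct my_dev {
 *		struct work_struct update_work;
 *		u32 latest_value;
 *	};
 *
 *	u32 my_dev_read_latest(struct my_dev *dev)
 *	{
 *		flush_work(&dev->update_work);	// wait for any in-flight update
 *		return dev->latest_value;
 *	}
 */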
3454
3455 struct cwt_wait {
3456 wait_queue_entry_t wait;
3457 struct work_struct *work;
3458 };
3459
3460 static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
3461 {
3462 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
3463
3464 if (cwait->work != key)
3465 return 0;
3466 return autoremove_wake_function(wait, mode, sync, key);
3467 }
3468
3469 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
3470 {
3471 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
3472 unsigned long flags;
3473 int ret;
3474
3475 do {
3476 ret = try_to_grab_pending(work, is_dwork, &flags);
3477 /*
3478 * If someone else is already canceling, wait for it to
3479 * finish. flush_work() doesn't work for PREEMPT_NONE
3480 * because we may get scheduled between @work's completion
3481 * and the other canceling task resuming and clearing
3482 * CANCELING - flush_work() will return false immediately
3483 * as @work is no longer busy, try_to_grab_pending() will
3484 * return -ENOENT as @work is still being canceled and the
3485 * other canceling task won't be able to clear CANCELING as
3486 * we're hogging the CPU.
3487 *
3488 * Let's wait for completion using a waitqueue. As this
3489 * may lead to the thundering herd problem, use a custom
3490 * wake function which matches @work along with exclusive
3491 * wait and wakeup.
3492 */
3493 if (unlikely(ret == -ENOENT)) {
3494 struct cwt_wait cwait;
3495
3496 init_wait(&cwait.wait);
3497 cwait.wait.func = cwt_wakefn;
3498 cwait.work = work;
3499
3500 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
3501 TASK_UNINTERRUPTIBLE);
3502 if (work_is_canceling(work))
3503 schedule();
3504 finish_wait(&cancel_waitq, &cwait.wait);
3505 }
3506 } while (unlikely(ret < 0));
3507
3508 /* tell other tasks trying to grab @work to back off */
3509 mark_work_canceling(work);
3510 local_irq_restore(flags);
3511
3512 /*
3513 * This allows canceling during early boot. We know that @work
3514 * isn't executing.
3515 */
3516 if (wq_online)
3517 __flush_work(work, true);
3518
3519 clear_work_data(work);
3520
3521 /*
3522 * Paired with prepare_to_wait() above so that either
3523 * waitqueue_active() is visible here or !work_is_canceling() is
3524 * visible there.
3525 */
3526 smp_mb();
3527 if (waitqueue_active(&cancel_waitq))
3528 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
3529
3530 return ret;
3531 }
3532
3533 /**
3534 * cancel_work_sync - cancel a work and wait for it to finish
3535 * @work: the work to cancel
3536 *
3537 * Cancel @work and wait for its execution to finish. This function
3538 * can be used even if the work re-queues itself or migrates to
3539 * another workqueue. On return from this function, @work is
3540 * guaranteed to be not pending or executing on any CPU.
3541 *
3542 * cancel_work_sync(&delayed_work->work) must not be used for
3543  * delayed_work items. Use cancel_delayed_work_sync() instead.
3544 *
3545 * The caller must ensure that the workqueue on which @work was last
3546 * queued can't be destroyed before this function returns.
3547 *
3548 * Return:
3549 * %true if @work was pending, %false otherwise.
3550 */
3551 bool cancel_work_sync(struct work_struct *work)
3552 {
3553 return __cancel_work_timer(work, false);
3554 }
3555 EXPORT_SYMBOL_GPL(cancel_work_sync);
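
/*
 * Illustrative sketch (not part of this file): the usual pattern is to
 * cancel synchronously in a teardown path so the object embedding the work
 * item can be freed safely afterwards. my_dev and update_work are
 * hypothetical names:
 *
 *	void my_dev_destroy(struct my_dev *dev)
 *	{
 *		cancel_work_sync(&dev->update_work);	// neither pending nor running now
 *		kfree(dev);
 *	}
 */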
3556
3557 /**
3558 * flush_delayed_work - wait for a dwork to finish executing the last queueing
3559 * @dwork: the delayed work to flush
3560 *
3561 * Delayed timer is cancelled and the pending work is queued for
3562 * immediate execution. Like flush_work(), this function only
3563 * considers the last queueing instance of @dwork.
3564 *
3565 * Return:
3566 * %true if flush_work() waited for the work to finish execution,
3567 * %false if it was already idle.
3568 */
3569 bool flush_delayed_work(struct delayed_work *dwork)
3570 {
3571 local_irq_disable();
3572 if (del_timer_sync(&dwork->timer))
3573 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
3574 local_irq_enable();
3575 return flush_work(&dwork->work);
3576 }
3577 EXPORT_SYMBOL(flush_delayed_work);
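
/*
 * Illustrative sketch (not part of this file): flush_delayed_work() is handy
 * when a deferred or debounced update must be forced to happen "now", e.g.
 * right before reporting state to user space. dev->stats_work is a
 * hypothetical delayed_work:
 *
 *	schedule_delayed_work(&dev->stats_work, msecs_to_jiffies(1000));
 *	...
 *	flush_delayed_work(&dev->stats_work);	// run the pending update immediately and wait
 */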
3578
3579 /**
3580 * flush_rcu_work - wait for a rwork to finish executing the last queueing
3581 * @rwork: the rcu work to flush
3582 *
3583 * Return:
3584 * %true if flush_rcu_work() waited for the work to finish execution,
3585 * %false if it was already idle.
3586 */
3587 bool flush_rcu_work(struct rcu_work *rwork)
3588 {
3589 if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
3590 rcu_barrier();
3591 flush_work(&rwork->work);
3592 return true;
3593 } else {
3594 return flush_work(&rwork->work);
3595 }
3596 }
3597 EXPORT_SYMBOL(flush_rcu_work);
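
/*
 * Illustrative sketch (not part of this file): an rcu_work is queued with
 * queue_rcu_work() and only executes after an RCU grace period has elapsed;
 * flush_rcu_work() waits for both the grace period and the execution.
 * obj->sync_rwork and my_sync_fn are hypothetical names:
 *
 *	INIT_RCU_WORK(&obj->sync_rwork, my_sync_fn);
 *	queue_rcu_work(system_wq, &obj->sync_rwork);
 *	...
 *	flush_rcu_work(&obj->sync_rwork);	// grace period over and my_sync_fn() finished
 */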
3598
3599 static bool __cancel_work(struct work_struct *work, bool is_dwork)
3600 {
3601 unsigned long flags;
3602 int ret;
3603
3604 do {
3605 ret = try_to_grab_pending(work, is_dwork, &flags);
3606 } while (unlikely(ret == -EAGAIN));
3607
3608 if (unlikely(ret < 0))
3609 return false;
3610
3611 set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3612 local_irq_restore(flags);
3613 return ret;
3614 }
3615
3616 /*
3617 * See cancel_delayed_work()
3618 */
3619 bool cancel_work(struct work_struct *work)
3620 {
3621 return __cancel_work(work, false);
3622 }
3623 EXPORT_SYMBOL(cancel_work);
3624
3625 /**
3626 * cancel_delayed_work - cancel a delayed work
3627 * @dwork: delayed_work to cancel
3628 *
3629 * Kill off a pending delayed_work.
3630 *
3631 * Return: %true if @dwork was pending and canceled; %false if it wasn't
3632 * pending.
3633 *
3634 * Note:
3635 * The work callback function may still be running on return, unless
3636  * this function returns %true and the work doesn't re-arm itself. Explicitly flush or
3637 * use cancel_delayed_work_sync() to wait on it.
3638 *
3639 * This function is safe to call from any context including IRQ handler.
3640 */
3641 bool cancel_delayed_work(struct delayed_work *dwork)
3642 {
3643 return __cancel_work(&dwork->work, true);
3644 }
3645 EXPORT_SYMBOL(cancel_delayed_work);
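
/*
 * Illustrative sketch (not part of this file): because it doesn't wait,
 * cancel_delayed_work() can be used from contexts where sleeping isn't
 * allowed, e.g. to drop a pending timeout from an interrupt handler.
 * my_dev and timeout_work are hypothetical names:
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		cancel_delayed_work(&dev->timeout_work);	// timeout dropped; callback may still be running
 *		return IRQ_HANDLED;
 *	}
 */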
3646
3647 /**
3648 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
3649  * @dwork: the delayed work to cancel
3650 *
3651 * This is cancel_work_sync() for delayed works.
3652 *
3653 * Return:
3654 * %true if @dwork was pending, %false otherwise.
3655 */
3656 bool cancel_delayed_work_sync(struct delayed_work *dwork)
3657 {
3658 return __cancel_work_timer(&dwork->work, true);
3659 }
3660 EXPORT_SYMBOL(cancel_delayed_work_sync);
3661
3662 /**
3663 * schedule_on_each_cpu - execute a function synchronously on each online CPU
3664 * @func: the function to call
3665 *
3666 * schedule_on_each_cpu() executes @func on each online CPU using the
3667 * system workqueue and blocks until all CPUs have completed.
3668 * schedule_on_each_cpu() is very slow.
3669 *
3670 * Return:
3671 * 0 on success, -errno on failure.
3672 */
3673 int schedule_on_each_cpu(work_func_t func)
3674 {
3675 int cpu;
3676 struct work_struct __percpu *works;
3677
3678 works = alloc_percpu(struct work_struct);
3679 if (!works)
3680 return -ENOMEM;
3681
3682 cpus_read_lock();
3683
3684 for_each_online_cpu(cpu) {
3685 struct work_struct *work = per_cpu_ptr(works, cpu);
3686
3687 INIT_WORK(work, func);
3688 schedule_work_on(cpu, work);
3689 }
3690
3691 for_each_online_cpu(cpu)
3692 flush_work(per_cpu_ptr(works, cpu));
3693
3694 cpus_read_unlock();
3695 free_percpu(works);
3696 return 0;
3697 }
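
/*
 * Illustrative sketch (not part of this file): schedule_on_each_cpu() is a
 * slow-path helper, e.g. for forcing every online CPU through a function
 * that drains per-cpu state. my_drain_fn and my_drain_this_cpu_state are
 * hypothetical names:
 *
 *	static void my_drain_fn(struct work_struct *unused)
 *	{
 *		my_drain_this_cpu_state();	// runs on the CPU it was scheduled for
 *	}
 *
 *	if (schedule_on_each_cpu(my_drain_fn))	// blocks until every online CPU has run it
 *		return -ENOMEM;
 */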
3698
3699 /**
3700 * execute_in_process_context - reliably execute the routine with user context
3701 * @fn: the function to execute
3702 * @ew: guaranteed storage for the execute work structure (must
3703 * be available when the work executes)
3704 *
3705 * Executes the function immediately if process context is available,
3706 * otherwise schedules the function for delayed execution.
3707 *
3708 * Return: 0 - function was executed
3709 * 1 - function was scheduled for execution
3710 */
3711 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3712 {
3713 if (!in_interrupt()) {
3714 fn(&ew->work);
3715 return 0;
3716 }
3717
3718 INIT_WORK(&ew->work, fn);
3719 schedule_work(&ew->work);
3720
3721 return 1;
3722 }
3723 EXPORT_SYMBOL_GPL(execute_in_process_context);
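
/*
 * Illustrative sketch (not part of this file): callers that may run in
 * either process or interrupt context can use this to avoid queueing when
 * it isn't needed. The execute_work storage must outlive a deferred call,
 * so it is usually embedded in a longer-lived object. my_obj, cleanup_ew
 * and my_cleanup_fn are hypothetical names:
 *
 *	struct my_obj {
 *		struct execute_work cleanup_ew;
 *		...
 *	};
 *
 *	execute_in_process_context(my_cleanup_fn, &obj->cleanup_ew);
 */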
3724
3725 /**
3726 * free_workqueue_attrs - free a workqueue_attrs
3727 * @attrs: workqueue_attrs to free
3728 *
3729 * Undo alloc_workqueue_attrs().
3730 */
3731 void free_workqueue_attrs(struct workqueue_attrs *attrs)
3732 {
3733 if (attrs) {
3734 free_cpumask_var(attrs->cpumask);
3735 free_cpumask_var(attrs->__pod_cpumask);
3736 kfree(attrs);
3737 }
3738 }
3739
3740 /**
3741 * alloc_workqueue_attrs - allocate a workqueue_attrs
3742 *
3743 * Allocate a new workqueue_attrs, initialize with default settings and
3744 * return it.
3745 *
3746  * Return: The newly allocated workqueue_attrs on success. %NULL on failure.
3747 */
3748 struct workqueue_attrs *alloc_workqueue_attrs(void)
3749 {
3750 struct workqueue_attrs *attrs;
3751
3752 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
3753 if (!attrs)
3754 goto fail;
3755 if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
3756 goto fail;
3757 if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL))
3758 goto fail;
3759
3760 cpumask_copy(attrs->cpumask, cpu_possible_mask);
3761 attrs->affn_scope = WQ_AFFN_DFL;
3762 return attrs;
3763 fail:
3764 free_workqueue_attrs(attrs);
3765 return NULL;
3766 }
3767
3768 static void copy_workqueue_attrs(struct workqueue_attrs *to,
3769 const struct workqueue_attrs *from)
3770 {
3771 to->nice = from->nice;
3772 cpumask_copy(to->cpumask, from->cpumask);
3773 cpumask_copy(to->__pod_cpumask, from->__pod_cpumask);
3774 to->affn_strict = from->affn_strict;
3775
3776 /*
3777 * Unlike hash and equality test, copying shouldn't ignore wq-only
3778 * fields as copying is used for both pool and wq attrs. Instead,
3779 * get_unbound_pool() explicitly clears the fields.
3780 */
3781 to->affn_scope = from->affn_scope;
3782 to->ordered = from->ordered;
3783 }
3784
3785 /*
3786 * Some attrs fields are workqueue-only. Clear them for worker_pool's. See the
3787 * comments in 'struct workqueue_attrs' definition.
3788 */
3789 static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs)
3790 {
3791 attrs->affn_scope = WQ_AFFN_NR_TYPES;
3792 attrs->ordered = false;
3793 }
3794
3795 /* hash value of the content of @attr */
3796 static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3797 {
3798 u32 hash = 0;
3799
3800 hash = jhash_1word(attrs->nice, hash);
3801 hash = jhash(cpumask_bits(attrs->cpumask),
3802 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3803 hash = jhash(cpumask_bits(attrs->__pod_cpumask),
3804 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3805 hash = jhash_1word(attrs->affn_strict, hash);
3806 return hash;
3807 }
3808
3809 /* content equality test */
3810 static bool wqattrs_equal(const struct workqueue_attrs *a,
3811 const struct workqueue_attrs *b)
3812 {
3813 if (a->nice != b->nice)
3814 return false;
3815 if (!cpumask_equal(a->cpumask, b->cpumask))
3816 return false;
3817 if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask))
3818 return false;
3819 if (a->affn_strict != b->affn_strict)
3820 return false;
3821 return true;
3822 }
3823
3824 /* Update @attrs with actually available CPUs */
3825 static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs,
3826 const cpumask_t *unbound_cpumask)
3827 {
3828 /*
3829 * Calculate the effective CPU mask of @attrs given @unbound_cpumask. If
3830 * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fallback to
3831 * @unbound_cpumask.
3832 */
3833 cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask);
3834 if (unlikely(cpumask_empty(attrs->cpumask)))
3835 cpumask_copy(attrs->cpumask, unbound_cpumask);
3836 }
3837
3838 /* find wq_pod_type to use for @attrs */
3839 static const struct wq_pod_type *
3840 wqattrs_pod_type(const struct workqueue_attrs *attrs)
3841 {
3842 enum wq_affn_scope scope;
3843 struct wq_pod_type *pt;
3844
3845 /* to synchronize access to wq_affn_dfl */
3846 lockdep_assert_held(&wq_pool_mutex);
3847
3848 if (attrs->affn_scope == WQ_AFFN_DFL)
3849 scope = wq_affn_dfl;
3850 else
3851 scope = attrs->affn_scope;
3852
3853 pt = &wq_pod_types[scope];
3854
3855 if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) &&
3856 likely(pt->nr_pods))
3857 return pt;
3858
3859 /*
3860 * Before workqueue_init_topology(), only SYSTEM is available which is
3861 * initialized in workqueue_init_early().
3862 */
3863 pt = &wq_pod_types[WQ_AFFN_SYSTEM];
3864 BUG_ON(!pt->nr_pods);
3865 return pt;
3866 }
3867
3868 /**
3869 * init_worker_pool - initialize a newly zalloc'd worker_pool
3870 * @pool: worker_pool to initialize
3871 *
3872 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs.
3873 *
3874 * Return: 0 on success, -errno on failure. Even on failure, all fields
3875 * inside @pool proper are initialized and put_unbound_pool() can be called
3876 * on @pool safely to release it.
3877 */
3878 static int init_worker_pool(struct worker_pool *pool)
3879 {
3880 raw_spin_lock_init(&pool->lock);
3881 pool->id = -1;
3882 pool->cpu = -1;
3883 pool->node = NUMA_NO_NODE;
3884 pool->flags |= POOL_DISASSOCIATED;
3885 pool->watchdog_ts = jiffies;
3886 INIT_LIST_HEAD(&pool->worklist);
3887 INIT_LIST_HEAD(&pool->idle_list);
3888 hash_init(pool->busy_hash);
3889
3890 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
3891 INIT_WORK(&pool->idle_cull_work, idle_cull_fn);
3892
3893 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
3894
3895 INIT_LIST_HEAD(&pool->workers);
3896 INIT_LIST_HEAD(&pool->dying_workers);
3897
3898 ida_init(&pool->worker_ida);
3899 INIT_HLIST_NODE(&pool->hash_node);
3900 pool->refcnt = 1;
3901
3902 /* shouldn't fail above this point */
3903 pool->attrs = alloc_workqueue_attrs();
3904 if (!pool->attrs)
3905 return -ENOMEM;
3906
3907 wqattrs_clear_for_pool(pool->attrs);
3908
3909 return 0;
3910 }
3911
3912 #ifdef CONFIG_LOCKDEP
3913 static void wq_init_lockdep(struct workqueue_struct *wq)
3914 {
3915 char *lock_name;
3916
3917 lockdep_register_key(&wq->key);
3918 lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
3919 if (!lock_name)
3920 lock_name = wq->name;
3921
3922 wq->lock_name = lock_name;
3923 lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
3924 }
3925
3926 static void wq_unregister_lockdep(struct workqueue_struct *wq)
3927 {
3928 lockdep_unregister_key(&wq->key);
3929 }
3930
3931 static void wq_free_lockdep(struct workqueue_struct *wq)
3932 {
3933 if (wq->lock_name != wq->name)
3934 kfree(wq->lock_name);
3935 }
3936 #else
3937 static void wq_init_lockdep(struct workqueue_struct *wq)
3938 {
3939 }
3940
3941 static void wq_unregister_lockdep(struct workqueue_struct *wq)
3942 {
3943 }
3944
3945 static void wq_free_lockdep(struct workqueue_struct *wq)
3946 {
3947 }
3948 #endif
3949
3950 static void rcu_free_wq(struct rcu_head *rcu)
3951 {
3952 struct workqueue_struct *wq =
3953 container_of(rcu, struct workqueue_struct, rcu);
3954
3955 wq_free_lockdep(wq);
3956 free_percpu(wq->cpu_pwq);
3957 free_workqueue_attrs(wq->unbound_attrs);
3958 kfree(wq);
3959 }
3960
3961 static void rcu_free_pool(struct rcu_head *rcu)
3962 {
3963 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3964
3965 ida_destroy(&pool->worker_ida);
3966 free_workqueue_attrs(pool->attrs);
3967 kfree(pool);
3968 }
3969
3970 /**
3971 * put_unbound_pool - put a worker_pool
3972 * @pool: worker_pool to put
3973 *
3974 * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
3975 * safe manner. get_unbound_pool() calls this function on its failure path
3976 * and this function should be able to release pools which went through,
3977 * successfully or not, init_worker_pool().
3978 *
3979 * Should be called with wq_pool_mutex held.
3980 */
3981 static void put_unbound_pool(struct worker_pool *pool)
3982 {
3983 DECLARE_COMPLETION_ONSTACK(detach_completion);
3984 struct worker *worker;
3985 LIST_HEAD(cull_list);
3986
3987 lockdep_assert_held(&wq_pool_mutex);
3988
3989 if (--pool->refcnt)
3990 return;
3991
3992 /* sanity checks */
3993 if (WARN_ON(!(pool->cpu < 0)) ||
3994 WARN_ON(!list_empty(&pool->worklist)))
3995 return;
3996
3997 /* release id and unhash */
3998 if (pool->id >= 0)
3999 idr_remove(&worker_pool_idr, pool->id);
4000 hash_del(&pool->hash_node);
4001
4002 /*
4003 * Become the manager and destroy all workers. This prevents
4004 * @pool's workers from blocking on attach_mutex. We're the last
4005 * manager and @pool gets freed with the flag set.
4006 *
4007 * Having a concurrent manager is quite unlikely to happen as we can
4008 * only get here with
4009 * pwq->refcnt == pool->refcnt == 0
4010 * which implies no work queued to the pool, which implies no worker can
4011 * become the manager. However a worker could have taken the role of
4012 * manager before the refcnts dropped to 0, since maybe_create_worker()
4013 	 * drops pool->lock.
4014 */
4015 while (true) {
4016 rcuwait_wait_event(&manager_wait,
4017 !(pool->flags & POOL_MANAGER_ACTIVE),
4018 TASK_UNINTERRUPTIBLE);
4019
4020 mutex_lock(&wq_pool_attach_mutex);
4021 raw_spin_lock_irq(&pool->lock);
4022 if (!(pool->flags & POOL_MANAGER_ACTIVE)) {
4023 pool->flags |= POOL_MANAGER_ACTIVE;
4024 break;
4025 }
4026 raw_spin_unlock_irq(&pool->lock);
4027 mutex_unlock(&wq_pool_attach_mutex);
4028 }
4029
4030 while ((worker = first_idle_worker(pool)))
4031 set_worker_dying(worker, &cull_list);
4032 WARN_ON(pool->nr_workers || pool->nr_idle);
4033 raw_spin_unlock_irq(&pool->lock);
4034
4035 wake_dying_workers(&cull_list);
4036
4037 if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers))
4038 pool->detach_completion = &detach_completion;
4039 mutex_unlock(&wq_pool_attach_mutex);
4040
4041 if (pool->detach_completion)
4042 wait_for_completion(pool->detach_completion);
4043
4044 /* shut down the timers */
4045 del_timer_sync(&pool->idle_timer);
4046 cancel_work_sync(&pool->idle_cull_work);
4047 del_timer_sync(&pool->mayday_timer);
4048
4049 /* RCU protected to allow dereferences from get_work_pool() */
4050 call_rcu(&pool->rcu, rcu_free_pool);
4051 }
4052
4053 /**
4054 * get_unbound_pool - get a worker_pool with the specified attributes
4055 * @attrs: the attributes of the worker_pool to get
4056 *
4057 * Obtain a worker_pool which has the same attributes as @attrs, bump the
4058 * reference count and return it. If there already is a matching
4059 * worker_pool, it will be used; otherwise, this function attempts to
4060 * create a new one.
4061 *
4062 * Should be called with wq_pool_mutex held.
4063 *
4064 * Return: On success, a worker_pool with the same attributes as @attrs.
4065 * On failure, %NULL.
4066 */
4067 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
4068 {
4069 struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA];
4070 u32 hash = wqattrs_hash(attrs);
4071 struct worker_pool *pool;
4072 int pod, node = NUMA_NO_NODE;
4073
4074 lockdep_assert_held(&wq_pool_mutex);
4075
4076 /* do we already have a matching pool? */
4077 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
4078 if (wqattrs_equal(pool->attrs, attrs)) {
4079 pool->refcnt++;
4080 return pool;
4081 }
4082 }
4083
4084 /* If __pod_cpumask is contained inside a NUMA pod, that's our node */
4085 for (pod = 0; pod < pt->nr_pods; pod++) {
4086 if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) {
4087 node = pt->pod_node[pod];
4088 break;
4089 }
4090 }
4091
4092 /* nope, create a new one */
4093 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node);
4094 if (!pool || init_worker_pool(pool) < 0)
4095 goto fail;
4096
4097 pool->node = node;
4098 copy_workqueue_attrs(pool->attrs, attrs);
4099 wqattrs_clear_for_pool(pool->attrs);
4100
4101 if (worker_pool_assign_id(pool) < 0)
4102 goto fail;
4103
4104 /* create and start the initial worker */
4105 if (wq_online && !create_worker(pool))
4106 goto fail;
4107
4108 /* install */
4109 hash_add(unbound_pool_hash, &pool->hash_node, hash);
4110
4111 return pool;
4112 fail:
4113 if (pool)
4114 put_unbound_pool(pool);
4115 return NULL;
4116 }
4117
4118 static void rcu_free_pwq(struct rcu_head *rcu)
4119 {
4120 kmem_cache_free(pwq_cache,
4121 container_of(rcu, struct pool_workqueue, rcu));
4122 }
4123
4124 /*
4125 * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero
4126 * refcnt and needs to be destroyed.
4127 */
4128 static void pwq_release_workfn(struct kthread_work *work)
4129 {
4130 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
4131 release_work);
4132 struct workqueue_struct *wq = pwq->wq;
4133 struct worker_pool *pool = pwq->pool;
4134 bool is_last = false;
4135
4136 /*
4137 * When @pwq is not linked, it doesn't hold any reference to the
4138 * @wq, and @wq is invalid to access.
4139 */
4140 if (!list_empty(&pwq->pwqs_node)) {
4141 mutex_lock(&wq->mutex);
4142 list_del_rcu(&pwq->pwqs_node);
4143 is_last = list_empty(&wq->pwqs);
4144 mutex_unlock(&wq->mutex);
4145 }
4146
4147 if (wq->flags & WQ_UNBOUND) {
4148 mutex_lock(&wq_pool_mutex);
4149 put_unbound_pool(pool);
4150 mutex_unlock(&wq_pool_mutex);
4151 }
4152
4153 call_rcu(&pwq->rcu, rcu_free_pwq);
4154
4155 /*
4156 * If we're the last pwq going away, @wq is already dead and no one
4157 * is gonna access it anymore. Schedule RCU free.
4158 */
4159 if (is_last) {
4160 wq_unregister_lockdep(wq);
4161 call_rcu(&wq->rcu, rcu_free_wq);
4162 }
4163 }
4164
4165 /**
4166 * pwq_adjust_max_active - update a pwq's max_active to the current setting
4167 * @pwq: target pool_workqueue
4168 *
4169 * If @pwq isn't freezing, set @pwq->max_active to the associated
4170 * workqueue's saved_max_active and activate inactive work items
4171 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
4172 */
4173 static void pwq_adjust_max_active(struct pool_workqueue *pwq)
4174 {
4175 struct workqueue_struct *wq = pwq->wq;
4176 bool freezable = wq->flags & WQ_FREEZABLE;
4177 unsigned long flags;
4178
4179 /* for @wq->saved_max_active */
4180 lockdep_assert_held(&wq->mutex);
4181
4182 /* fast exit for non-freezable wqs */
4183 if (!freezable && pwq->max_active == wq->saved_max_active)
4184 return;
4185
4186 /* this function can be called during early boot w/ irq disabled */
4187 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
4188
4189 /*
4190 * During [un]freezing, the caller is responsible for ensuring that
4191 * this function is called at least once after @workqueue_freezing
4192 * is updated and visible.
4193 */
4194 if (!freezable || !workqueue_freezing) {
4195 pwq->max_active = wq->saved_max_active;
4196
4197 while (!list_empty(&pwq->inactive_works) &&
4198 pwq->nr_active < pwq->max_active)
4199 pwq_activate_first_inactive(pwq);
4200
4201 kick_pool(pwq->pool);
4202 } else {
4203 pwq->max_active = 0;
4204 }
4205
4206 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
4207 }
4208
4209 /* initialize newly allocated @pwq which is associated with @wq and @pool */
4210 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
4211 struct worker_pool *pool)
4212 {
4213 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
4214
4215 memset(pwq, 0, sizeof(*pwq));
4216
4217 pwq->pool = pool;
4218 pwq->wq = wq;
4219 pwq->flush_color = -1;
4220 pwq->refcnt = 1;
4221 INIT_LIST_HEAD(&pwq->inactive_works);
4222 INIT_LIST_HEAD(&pwq->pwqs_node);
4223 INIT_LIST_HEAD(&pwq->mayday_node);
4224 kthread_init_work(&pwq->release_work, pwq_release_workfn);
4225 }
4226
4227 /* sync @pwq with the current state of its associated wq and link it */
4228 static void link_pwq(struct pool_workqueue *pwq)
4229 {
4230 struct workqueue_struct *wq = pwq->wq;
4231
4232 lockdep_assert_held(&wq->mutex);
4233
4234 /* may be called multiple times, ignore if already linked */
4235 if (!list_empty(&pwq->pwqs_node))
4236 return;
4237
4238 /* set the matching work_color */
4239 pwq->work_color = wq->work_color;
4240
4241 /* sync max_active to the current setting */
4242 pwq_adjust_max_active(pwq);
4243
4244 /* link in @pwq */
4245 list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
4246 }
4247
4248 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
4249 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
4250 const struct workqueue_attrs *attrs)
4251 {
4252 struct worker_pool *pool;
4253 struct pool_workqueue *pwq;
4254
4255 lockdep_assert_held(&wq_pool_mutex);
4256
4257 pool = get_unbound_pool(attrs);
4258 if (!pool)
4259 return NULL;
4260
4261 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
4262 if (!pwq) {
4263 put_unbound_pool(pool);
4264 return NULL;
4265 }
4266
4267 init_pwq(pwq, wq, pool);
4268 return pwq;
4269 }
4270
4271 /**
4272 * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod
4273 * @attrs: the wq_attrs of the default pwq of the target workqueue
4274 * @cpu: the target CPU
4275 * @cpu_going_down: if >= 0, the CPU to consider as offline
4276 *
4277  * Calculate the cpumask a workqueue with @attrs should use on the pod containing
4278  * @cpu. If @cpu_going_down is >= 0, that CPU is considered offline during the calculation.
4279 * The result is stored in @attrs->__pod_cpumask.
4280 *
4281 * If pod affinity is not enabled, @attrs->cpumask is always used. If enabled
4282 * and @pod has online CPUs requested by @attrs, the returned cpumask is the
4283 * intersection of the possible CPUs of @pod and @attrs->cpumask.
4284 *
4285 * The caller is responsible for ensuring that the cpumask of @pod stays stable.
4286 */
4287 static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu,
4288 int cpu_going_down)
4289 {
4290 const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
4291 int pod = pt->cpu_pod[cpu];
4292
4293 /* does @pod have any online CPUs @attrs wants? */
4294 cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask);
4295 cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask);
4296 if (cpu_going_down >= 0)
4297 cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask);
4298
4299 if (cpumask_empty(attrs->__pod_cpumask)) {
4300 cpumask_copy(attrs->__pod_cpumask, attrs->cpumask);
4301 return;
4302 }
4303
4304 /* yeap, return possible CPUs in @pod that @attrs wants */
4305 cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]);
4306
4307 if (cpumask_empty(attrs->__pod_cpumask))
4308 pr_warn_once("WARNING: workqueue cpumask: online intersect > "
4309 "possible intersect\n");
4310 }
4311
4312 /* install @pwq into @wq's cpu_pwq and return the old pwq */
4313 static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq,
4314 int cpu, struct pool_workqueue *pwq)
4315 {
4316 struct pool_workqueue *old_pwq;
4317
4318 lockdep_assert_held(&wq_pool_mutex);
4319 lockdep_assert_held(&wq->mutex);
4320
4321 /* link_pwq() can handle duplicate calls */
4322 link_pwq(pwq);
4323
4324 old_pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
4325 rcu_assign_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu), pwq);
4326 return old_pwq;
4327 }
4328
4329 /* context to store the prepared attrs & pwqs before applying */
4330 struct apply_wqattrs_ctx {
4331 struct workqueue_struct *wq; /* target workqueue */
4332 struct workqueue_attrs *attrs; /* attrs to apply */
4333 struct list_head list; /* queued for batching commit */
4334 struct pool_workqueue *dfl_pwq;
4335 struct pool_workqueue *pwq_tbl[];
4336 };
4337
4338 /* free the resources after success or abort */
4339 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
4340 {
4341 if (ctx) {
4342 int cpu;
4343
4344 for_each_possible_cpu(cpu)
4345 put_pwq_unlocked(ctx->pwq_tbl[cpu]);
4346 put_pwq_unlocked(ctx->dfl_pwq);
4347
4348 free_workqueue_attrs(ctx->attrs);
4349
4350 kfree(ctx);
4351 }
4352 }
4353
4354 /* allocate the attrs and pwqs for later installation */
4355 static struct apply_wqattrs_ctx *
4356 apply_wqattrs_prepare(struct workqueue_struct *wq,
4357 const struct workqueue_attrs *attrs,
4358 const cpumask_var_t unbound_cpumask)
4359 {
4360 struct apply_wqattrs_ctx *ctx;
4361 struct workqueue_attrs *new_attrs;
4362 int cpu;
4363
4364 lockdep_assert_held(&wq_pool_mutex);
4365
4366 if (WARN_ON(attrs->affn_scope < 0 ||
4367 attrs->affn_scope >= WQ_AFFN_NR_TYPES))
4368 return ERR_PTR(-EINVAL);
4369
4370 ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL);
4371
4372 new_attrs = alloc_workqueue_attrs();
4373 if (!ctx || !new_attrs)
4374 goto out_free;
4375
4376 /*
4377 * If something goes wrong during CPU up/down, we'll fall back to
4378 * the default pwq covering whole @attrs->cpumask. Always create
4379 * it even if we don't use it immediately.
4380 */
4381 copy_workqueue_attrs(new_attrs, attrs);
4382 wqattrs_actualize_cpumask(new_attrs, unbound_cpumask);
4383 cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
4384 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
4385 if (!ctx->dfl_pwq)
4386 goto out_free;
4387
4388 for_each_possible_cpu(cpu) {
4389 if (new_attrs->ordered) {
4390 ctx->dfl_pwq->refcnt++;
4391 ctx->pwq_tbl[cpu] = ctx->dfl_pwq;
4392 } else {
4393 wq_calc_pod_cpumask(new_attrs, cpu, -1);
4394 ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs);
4395 if (!ctx->pwq_tbl[cpu])
4396 goto out_free;
4397 }
4398 }
4399
4400 /* save the user configured attrs and sanitize it. */
4401 copy_workqueue_attrs(new_attrs, attrs);
4402 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
4403 cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
4404 ctx->attrs = new_attrs;
4405
4406 ctx->wq = wq;
4407 return ctx;
4408
4409 out_free:
4410 free_workqueue_attrs(new_attrs);
4411 apply_wqattrs_cleanup(ctx);
4412 return ERR_PTR(-ENOMEM);
4413 }
4414
4415 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
4416 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
4417 {
4418 int cpu;
4419
4420 /* all pwqs have been created successfully, let's install'em */
4421 mutex_lock(&ctx->wq->mutex);
4422
4423 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
4424
4425 /* save the previous pwq and install the new one */
4426 for_each_possible_cpu(cpu)
4427 ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu,
4428 ctx->pwq_tbl[cpu]);
4429
4430 /* @dfl_pwq might not have been used, ensure it's linked */
4431 link_pwq(ctx->dfl_pwq);
4432 swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
4433
4434 mutex_unlock(&ctx->wq->mutex);
4435 }
4436
4437 static void apply_wqattrs_lock(void)
4438 {
4439 /* CPUs should stay stable across pwq creations and installations */
4440 cpus_read_lock();
4441 mutex_lock(&wq_pool_mutex);
4442 }
4443
4444 static void apply_wqattrs_unlock(void)
4445 {
4446 mutex_unlock(&wq_pool_mutex);
4447 cpus_read_unlock();
4448 }
4449
4450 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
4451 const struct workqueue_attrs *attrs)
4452 {
4453 struct apply_wqattrs_ctx *ctx;
4454
4455 /* only unbound workqueues can change attributes */
4456 if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
4457 return -EINVAL;
4458
4459 /* creating multiple pwqs breaks ordering guarantee */
4460 if (!list_empty(&wq->pwqs)) {
4461 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4462 return -EINVAL;
4463
4464 wq->flags &= ~__WQ_ORDERED;
4465 }
4466
4467 ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask);
4468 if (IS_ERR(ctx))
4469 return PTR_ERR(ctx);
4470
4471 /* the ctx has been prepared successfully, let's commit it */
4472 apply_wqattrs_commit(ctx);
4473 apply_wqattrs_cleanup(ctx);
4474
4475 return 0;
4476 }
4477
4478 /**
4479 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
4480 * @wq: the target workqueue
4481 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
4482 *
4483 * Apply @attrs to an unbound workqueue @wq. Unless disabled, this function maps
4484  * a separate pwq to each CPU pod with possible CPUs in @attrs->cpumask so that
4485 * work items are affine to the pod it was issued on. Older pwqs are released as
4486 * in-flight work items finish. Note that a work item which repeatedly requeues
4487 * itself back-to-back will stay on its current pwq.
4488 *
4489 * Performs GFP_KERNEL allocations.
4490 *
4491 * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock().
4492 *
4493 * Return: 0 on success and -errno on failure.
4494 */
4495 int apply_workqueue_attrs(struct workqueue_struct *wq,
4496 const struct workqueue_attrs *attrs)
4497 {
4498 int ret;
4499
4500 lockdep_assert_cpus_held();
4501
4502 mutex_lock(&wq_pool_mutex);
4503 ret = apply_workqueue_attrs_locked(wq, attrs);
4504 mutex_unlock(&wq_pool_mutex);
4505
4506 return ret;
4507 }
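
/*
 * Illustrative sketch (not part of this file): an in-kernel user would
 * allocate attrs, tweak the fields it cares about and apply them to an
 * unbound workqueue while holding the CPU hotplug read lock.
 * my_unbound_wq and my_cpumask are hypothetical names:
 *
 *	struct workqueue_attrs *attrs = alloc_workqueue_attrs();
 *	int ret = -ENOMEM;
 *
 *	if (attrs) {
 *		attrs->nice = -5;			// run the workers at higher priority
 *		cpumask_copy(attrs->cpumask, my_cpumask);
 *		cpus_read_lock();
 *		ret = apply_workqueue_attrs(my_unbound_wq, attrs);
 *		cpus_read_unlock();
 *		free_workqueue_attrs(attrs);		// attrs are copied, safe to free
 *	}
 */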
4508
4509 /**
4510 * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug
4511 * @wq: the target workqueue
4512 * @cpu: the CPU to update pool association for
4513 * @hotplug_cpu: the CPU coming up or going down
4514 * @online: whether @cpu is coming up or going down
4515 *
4516 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
4517 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update pod affinity of
4518 * @wq accordingly.
4519 *
4520  *
4522 * back to @wq->dfl_pwq which may not be optimal but is always correct.
4523 *
4524 * Note that when the last allowed CPU of a pod goes offline for a workqueue
4525 * with a cpumask spanning multiple pods, the workers which were already
4526 * executing the work items for the workqueue will lose their CPU affinity and
4527 * may execute on any CPU. This is similar to how per-cpu workqueues behave on
4528 * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's
4529 * responsibility to flush the work item from CPU_DOWN_PREPARE.
4530 */
4531 static void wq_update_pod(struct workqueue_struct *wq, int cpu,
4532 int hotplug_cpu, bool online)
4533 {
4534 int off_cpu = online ? -1 : hotplug_cpu;
4535 struct pool_workqueue *old_pwq = NULL, *pwq;
4536 struct workqueue_attrs *target_attrs;
4537
4538 lockdep_assert_held(&wq_pool_mutex);
4539
4540 if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered)
4541 return;
4542
4543 /*
4544 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
4545 * Let's use a preallocated one. The following buf is protected by
4546 * CPU hotplug exclusion.
4547 */
4548 target_attrs = wq_update_pod_attrs_buf;
4549
4550 copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
4551 wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask);
4552
4553 /* nothing to do if the target cpumask matches the current pwq */
4554 wq_calc_pod_cpumask(target_attrs, cpu, off_cpu);
4555 pwq = rcu_dereference_protected(*per_cpu_ptr(wq->cpu_pwq, cpu),
4556 lockdep_is_held(&wq_pool_mutex));
4557 if (wqattrs_equal(target_attrs, pwq->pool->attrs))
4558 return;
4559
4560 /* create a new pwq */
4561 pwq = alloc_unbound_pwq(wq, target_attrs);
4562 if (!pwq) {
4563 pr_warn("workqueue: allocation failed while updating CPU pod affinity of \"%s\"\n",
4564 wq->name);
4565 goto use_dfl_pwq;
4566 }
4567
4568 /* Install the new pwq. */
4569 mutex_lock(&wq->mutex);
4570 old_pwq = install_unbound_pwq(wq, cpu, pwq);
4571 goto out_unlock;
4572
4573 use_dfl_pwq:
4574 mutex_lock(&wq->mutex);
4575 raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
4576 get_pwq(wq->dfl_pwq);
4577 raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
4578 old_pwq = install_unbound_pwq(wq, cpu, wq->dfl_pwq);
4579 out_unlock:
4580 mutex_unlock(&wq->mutex);
4581 put_pwq_unlocked(old_pwq);
4582 }
4583
4584 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4585 {
4586 bool highpri = wq->flags & WQ_HIGHPRI;
4587 int cpu, ret;
4588
4589 wq->cpu_pwq = alloc_percpu(struct pool_workqueue *);
4590 if (!wq->cpu_pwq)
4591 goto enomem;
4592
4593 if (!(wq->flags & WQ_UNBOUND)) {
4594 for_each_possible_cpu(cpu) {
4595 struct pool_workqueue **pwq_p =
4596 per_cpu_ptr(wq->cpu_pwq, cpu);
4597 struct worker_pool *pool =
4598 &(per_cpu_ptr(cpu_worker_pools, cpu)[highpri]);
4599
4600 *pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL,
4601 pool->node);
4602 if (!*pwq_p)
4603 goto enomem;
4604
4605 init_pwq(*pwq_p, wq, pool);
4606
4607 mutex_lock(&wq->mutex);
4608 link_pwq(*pwq_p);
4609 mutex_unlock(&wq->mutex);
4610 }
4611 return 0;
4612 }
4613
4614 cpus_read_lock();
4615 if (wq->flags & __WQ_ORDERED) {
4616 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4617 /* there should only be single pwq for ordering guarantee */
4618 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4619 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4620 "ordering guarantee broken for workqueue %s\n", wq->name);
4621 } else {
4622 ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
4623 }
4624 cpus_read_unlock();
4625
4626 	/* For unbound pwqs, flushing pwq_release_worker ensures that
4627 	 * pwq_release_workfn() completes before kfree(wq) is called.
4628 */
4629 if (ret)
4630 kthread_flush_worker(pwq_release_worker);
4631
4632 return ret;
4633
4634 enomem:
4635 if (wq->cpu_pwq) {
4636 for_each_possible_cpu(cpu) {
4637 struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
4638
4639 if (pwq)
4640 kmem_cache_free(pwq_cache, pwq);
4641 }
4642 free_percpu(wq->cpu_pwq);
4643 wq->cpu_pwq = NULL;
4644 }
4645 return -ENOMEM;
4646 }
4647
4648 static int wq_clamp_max_active(int max_active, unsigned int flags,
4649 const char *name)
4650 {
4651 if (max_active < 1 || max_active > WQ_MAX_ACTIVE)
4652 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
4653 max_active, name, 1, WQ_MAX_ACTIVE);
4654
4655 return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
4656 }
4657
4658 /*
4659 * Workqueues which may be used during memory reclaim should have a rescuer
4660 * to guarantee forward progress.
4661 */
4662 static int init_rescuer(struct workqueue_struct *wq)
4663 {
4664 struct worker *rescuer;
4665 int ret;
4666
4667 if (!(wq->flags & WQ_MEM_RECLAIM))
4668 return 0;
4669
4670 rescuer = alloc_worker(NUMA_NO_NODE);
4671 if (!rescuer) {
4672 pr_err("workqueue: Failed to allocate a rescuer for wq \"%s\"\n",
4673 wq->name);
4674 return -ENOMEM;
4675 }
4676
4677 rescuer->rescue_wq = wq;
4678 rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name);
4679 if (IS_ERR(rescuer->task)) {
4680 ret = PTR_ERR(rescuer->task);
4681 pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe",
4682 wq->name, ERR_PTR(ret));
4683 kfree(rescuer);
4684 return ret;
4685 }
4686
4687 wq->rescuer = rescuer;
4688 kthread_bind_mask(rescuer->task, cpu_possible_mask);
4689 wake_up_process(rescuer->task);
4690
4691 return 0;
4692 }
4693
4694 __printf(1, 4)
4695 struct workqueue_struct *alloc_workqueue(const char *fmt,
4696 unsigned int flags,
4697 int max_active, ...)
4698 {
4699 va_list args;
4700 struct workqueue_struct *wq;
4701 struct pool_workqueue *pwq;
4702
4703 /*
4704 * Unbound && max_active == 1 used to imply ordered, which is no longer
4705 * the case on many machines due to per-pod pools. While
4706 * alloc_ordered_workqueue() is the right way to create an ordered
4707 * workqueue, keep the previous behavior to avoid subtle breakages.
4708 */
4709 if ((flags & WQ_UNBOUND) && max_active == 1)
4710 flags |= __WQ_ORDERED;
4711
4712 /* see the comment above the definition of WQ_POWER_EFFICIENT */
4713 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
4714 flags |= WQ_UNBOUND;
4715
4716 /* allocate wq and format name */
4717 wq = kzalloc(sizeof(*wq), GFP_KERNEL);
4718 if (!wq)
4719 return NULL;
4720
4721 if (flags & WQ_UNBOUND) {
4722 wq->unbound_attrs = alloc_workqueue_attrs();
4723 if (!wq->unbound_attrs)
4724 goto err_free_wq;
4725 }
4726
4727 va_start(args, max_active);
4728 vsnprintf(wq->name, sizeof(wq->name), fmt, args);
4729 va_end(args);
4730
4731 max_active = max_active ?: WQ_DFL_ACTIVE;
4732 max_active = wq_clamp_max_active(max_active, flags, wq->name);
4733
4734 /* init wq */
4735 wq->flags = flags;
4736 wq->saved_max_active = max_active;
4737 mutex_init(&wq->mutex);
4738 atomic_set(&wq->nr_pwqs_to_flush, 0);
4739 INIT_LIST_HEAD(&wq->pwqs);
4740 INIT_LIST_HEAD(&wq->flusher_queue);
4741 INIT_LIST_HEAD(&wq->flusher_overflow);
4742 INIT_LIST_HEAD(&wq->maydays);
4743
4744 wq_init_lockdep(wq);
4745 INIT_LIST_HEAD(&wq->list);
4746
4747 if (alloc_and_link_pwqs(wq) < 0)
4748 goto err_unreg_lockdep;
4749
4750 if (wq_online && init_rescuer(wq) < 0)
4751 goto err_destroy;
4752
4753 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4754 goto err_destroy;
4755
4756 /*
4757 * wq_pool_mutex protects global freeze state and workqueues list.
4758 * Grab it, adjust max_active and add the new @wq to workqueues
4759 * list.
4760 */
4761 mutex_lock(&wq_pool_mutex);
4762
4763 mutex_lock(&wq->mutex);
4764 for_each_pwq(pwq, wq)
4765 pwq_adjust_max_active(pwq);
4766 mutex_unlock(&wq->mutex);
4767
4768 list_add_tail_rcu(&wq->list, &workqueues);
4769
4770 mutex_unlock(&wq_pool_mutex);
4771
4772 return wq;
4773
4774 err_unreg_lockdep:
4775 wq_unregister_lockdep(wq);
4776 wq_free_lockdep(wq);
4777 err_free_wq:
4778 free_workqueue_attrs(wq->unbound_attrs);
4779 kfree(wq);
4780 return NULL;
4781 err_destroy:
4782 destroy_workqueue(wq);
4783 return NULL;
4784 }
4785 EXPORT_SYMBOL_GPL(alloc_workqueue);
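
/*
 * Illustrative sketch (not part of this file): typical creation and
 * destruction patterns; "my_driver" and the variables are hypothetical:
 *
 *	struct workqueue_struct *wq, *ordered_wq;
 *
 *	// per-cpu workqueue usable on the memory reclaim path
 *	wq = alloc_workqueue("my_driver", WQ_MEM_RECLAIM, 0);
 *
 *	// unbound workqueue executing at most one item at a time, in order
 *	ordered_wq = alloc_ordered_workqueue("my_driver_ordered", 0);
 *
 *	if (!wq || !ordered_wq)
 *		goto err_free;
 *	...
 *	destroy_workqueue(ordered_wq);
 *	destroy_workqueue(wq);
 */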
4786
4787 static bool pwq_busy(struct pool_workqueue *pwq)
4788 {
4789 int i;
4790
4791 for (i = 0; i < WORK_NR_COLORS; i++)
4792 if (pwq->nr_in_flight[i])
4793 return true;
4794
4795 if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
4796 return true;
4797 if (pwq->nr_active || !list_empty(&pwq->inactive_works))
4798 return true;
4799
4800 return false;
4801 }
4802
4803 /**
4804 * destroy_workqueue - safely terminate a workqueue
4805 * @wq: target workqueue
4806 *
4807 * Safely destroy a workqueue. All work currently pending will be done first.
4808 */
4809 void destroy_workqueue(struct workqueue_struct *wq)
4810 {
4811 struct pool_workqueue *pwq;
4812 int cpu;
4813
4814 /*
4815 * Remove it from sysfs first so that sanity check failure doesn't
4816 * lead to sysfs name conflicts.
4817 */
4818 workqueue_sysfs_unregister(wq);
4819
4820 /* mark the workqueue destruction is in progress */
4821 mutex_lock(&wq->mutex);
4822 wq->flags |= __WQ_DESTROYING;
4823 mutex_unlock(&wq->mutex);
4824
4825 /* drain it before proceeding with destruction */
4826 drain_workqueue(wq);
4827
4828 /* kill rescuer, if sanity checks fail, leave it w/o rescuer */
4829 if (wq->rescuer) {
4830 struct worker *rescuer = wq->rescuer;
4831
4832 /* this prevents new queueing */
4833 raw_spin_lock_irq(&wq_mayday_lock);
4834 wq->rescuer = NULL;
4835 raw_spin_unlock_irq(&wq_mayday_lock);
4836
4837 /* rescuer will empty maydays list before exiting */
4838 kthread_stop(rescuer->task);
4839 kfree(rescuer);
4840 }
4841
4842 /*
4843 * Sanity checks - grab all the locks so that we wait for all
4844 * in-flight operations which may do put_pwq().
4845 */
4846 mutex_lock(&wq_pool_mutex);
4847 mutex_lock(&wq->mutex);
4848 for_each_pwq(pwq, wq) {
4849 raw_spin_lock_irq(&pwq->pool->lock);
4850 if (WARN_ON(pwq_busy(pwq))) {
4851 pr_warn("%s: %s has the following busy pwq\n",
4852 __func__, wq->name);
4853 show_pwq(pwq);
4854 raw_spin_unlock_irq(&pwq->pool->lock);
4855 mutex_unlock(&wq->mutex);
4856 mutex_unlock(&wq_pool_mutex);
4857 show_one_workqueue(wq);
4858 return;
4859 }
4860 raw_spin_unlock_irq(&pwq->pool->lock);
4861 }
4862 mutex_unlock(&wq->mutex);
4863
4864 /*
4865 * wq list is used to freeze wq, remove from list after
4866 * flushing is complete in case freeze races us.
4867 */
4868 list_del_rcu(&wq->list);
4869 mutex_unlock(&wq_pool_mutex);
4870
4871 /*
4872 * We're the sole accessor of @wq. Directly access cpu_pwq and dfl_pwq
4873 * to put the base refs. @wq will be auto-destroyed from the last
4874 * pwq_put. RCU read lock prevents @wq from going away from under us.
4875 */
4876 rcu_read_lock();
4877
4878 for_each_possible_cpu(cpu) {
4879 pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
4880 RCU_INIT_POINTER(*per_cpu_ptr(wq->cpu_pwq, cpu), NULL);
4881 put_pwq_unlocked(pwq);
4882 }
4883
4884 put_pwq_unlocked(wq->dfl_pwq);
4885 wq->dfl_pwq = NULL;
4886
4887 rcu_read_unlock();
4888 }
4889 EXPORT_SYMBOL_GPL(destroy_workqueue);
4890
4891 /**
4892 * workqueue_set_max_active - adjust max_active of a workqueue
4893 * @wq: target workqueue
4894 * @max_active: new max_active value.
4895 *
4896 * Set max_active of @wq to @max_active.
4897 *
4898 * CONTEXT:
4899 * Don't call from IRQ context.
4900 */
4901 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4902 {
4903 struct pool_workqueue *pwq;
4904
4905 /* disallow meddling with max_active for ordered workqueues */
4906 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4907 return;
4908
4909 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4910
4911 mutex_lock(&wq->mutex);
4912
4913 wq->flags &= ~__WQ_ORDERED;
4914 wq->saved_max_active = max_active;
4915
4916 for_each_pwq(pwq, wq)
4917 pwq_adjust_max_active(pwq);
4918
4919 mutex_unlock(&wq->mutex);
4920 }
4921 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
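
/*
 * Illustrative sketch (not part of this file): a driver might widen or
 * narrow concurrency on its own workqueue at runtime, e.g. in response to
 * a module parameter or sysfs knob. my_wq is a hypothetical name:
 *
 *	// allow up to 4 work items of my_wq to run concurrently per pwq
 *	workqueue_set_max_active(my_wq, 4);
 */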
4922
4923 /**
4924 * current_work - retrieve %current task's work struct
4925 *
4926 * Determine if %current task is a workqueue worker and what it's working on.
4927 * Useful to find out the context that the %current task is running in.
4928 *
4929 * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
4930 */
4931 struct work_struct *current_work(void)
4932 {
4933 struct worker *worker = current_wq_worker();
4934
4935 return worker ? worker->current_work : NULL;
4936 }
4937 EXPORT_SYMBOL(current_work);
4938
4939 /**
4940 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4941 *
4942 * Determine whether %current is a workqueue rescuer. Can be used from
4943 * work functions to determine whether it's being run off the rescuer task.
4944 *
4945 * Return: %true if %current is a workqueue rescuer. %false otherwise.
4946 */
4947 bool current_is_workqueue_rescuer(void)
4948 {
4949 struct worker *worker = current_wq_worker();
4950
4951 return worker && worker->rescue_wq;
4952 }
4953
4954 /**
4955 * workqueue_congested - test whether a workqueue is congested
4956 * @cpu: CPU in question
4957 * @wq: target workqueue
4958 *
4959 * Test whether @wq's cpu workqueue for @cpu is congested. There is
4960 * no synchronization around this function and the test result is
4961 * unreliable and only useful as advisory hints or for debugging.
4962 *
4963 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4964 *
4965 * With the exception of ordered workqueues, all workqueues have per-cpu
4966 * pool_workqueues, each with its own congested state. A workqueue being
4967  * congested on one CPU doesn't mean that the workqueue is congested on any
4968 * other CPUs.
4969 *
4970 * Return:
4971 * %true if congested, %false otherwise.
4972 */
4973 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4974 {
4975 struct pool_workqueue *pwq;
4976 bool ret;
4977
4978 rcu_read_lock();
4979 preempt_disable();
4980
4981 if (cpu == WORK_CPU_UNBOUND)
4982 cpu = smp_processor_id();
4983
4984 pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
4985 ret = !list_empty(&pwq->inactive_works);
4986
4987 preempt_enable();
4988 rcu_read_unlock();
4989
4990 return ret;
4991 }
4992 EXPORT_SYMBOL_GPL(workqueue_congested);
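/*
 * Illustrative sketch (not part of the original file): because the result is
 * only advisory, a typical use is load shedding rather than correctness.
 * example_wq, example_work and example_process_inline() are hypothetical.
 *
 *	static void example_try_offload(void)
 *	{
 *		// Fall back to inline processing if the local pwq looks backed up.
 *		if (workqueue_congested(WORK_CPU_UNBOUND, example_wq))
 *			example_process_inline();
 *		else
 *			queue_work(example_wq, &example_work);
 *	}
 */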
4993
4994 /**
4995 * work_busy - test whether a work is currently pending or running
4996 * @work: the work to be tested
4997 *
4998 * Test whether @work is currently pending or running. There is no
4999 * synchronization around this function and the test result is
5000 * unreliable and only useful as advisory hints or for debugging.
5001 *
5002 * Return:
5003 * OR'd bitmask of WORK_BUSY_* bits.
5004 */
5005 unsigned int work_busy(struct work_struct *work)
5006 {
5007 struct worker_pool *pool;
5008 unsigned long flags;
5009 unsigned int ret = 0;
5010
5011 if (work_pending(work))
5012 ret |= WORK_BUSY_PENDING;
5013
5014 rcu_read_lock();
5015 pool = get_work_pool(work);
5016 if (pool) {
5017 raw_spin_lock_irqsave(&pool->lock, flags);
5018 if (find_worker_executing_work(pool, work))
5019 ret |= WORK_BUSY_RUNNING;
5020 raw_spin_unlock_irqrestore(&pool->lock, flags);
5021 }
5022 rcu_read_unlock();
5023
5024 return ret;
5025 }
5026 EXPORT_SYMBOL_GPL(work_busy);
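/*
 * Illustrative sketch (not part of the original file): work_busy() is only a
 * hint, so it fits debug output better than synchronization. The pr_debug()
 * wording below is made up for the example.
 *
 *	static void example_dump_state(struct work_struct *work)
 *	{
 *		unsigned int busy = work_busy(work);
 *
 *		pr_debug("work %ps: pending=%d running=%d\n", work->func,
 *			 !!(busy & WORK_BUSY_PENDING),
 *			 !!(busy & WORK_BUSY_RUNNING));
 *	}
 */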
5027
5028 /**
5029 * set_worker_desc - set description for the current work item
5030 * @fmt: printf-style format string
5031 * @...: arguments for the format string
5032 *
5033 * This function can be called by a running work function to describe what
5034 * the work item is about. If the worker task gets dumped, this
5035 * information will be printed out together to help debugging. The
5036 * description can be at most WORKER_DESC_LEN including the trailing '\0'.
5037 */
5038 void set_worker_desc(const char *fmt, ...)
5039 {
5040 struct worker *worker = current_wq_worker();
5041 va_list args;
5042
5043 if (worker) {
5044 va_start(args, fmt);
5045 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
5046 va_end(args);
5047 }
5048 }
5049 EXPORT_SYMBOL_GPL(set_worker_desc);
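/*
 * Illustrative sketch (not part of the original file): a work function can
 * record which object it is operating on so that a later worker dump (e.g.
 * via print_worker_info()) is easier to attribute. example_dev and its
 * ->name field are hypothetical.
 *
 *	static void example_work_fn(struct work_struct *work)
 *	{
 *		struct example_dev *dev = container_of(work, struct example_dev, work);
 *
 *		set_worker_desc("example/%s", dev->name);
 *		// ... actual processing ...
 *	}
 */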
5050
5051 /**
5052 * print_worker_info - print out worker information and description
5053 * @log_lvl: the log level to use when printing
5054 * @task: target task
5055 *
5056 * If @task is a worker and currently executing a work item, print out the
5057 * name of the workqueue being serviced and worker description set with
5058 * set_worker_desc() by the currently executing work item.
5059 *
5060 * This function can be safely called on any task as long as the
5061 * task_struct itself is accessible. While safe, this function isn't
5062 * synchronized and may print mixed-up or garbled information of limited length.
5063 */
5064 void print_worker_info(const char *log_lvl, struct task_struct *task)
5065 {
5066 work_func_t *fn = NULL;
5067 char name[WQ_NAME_LEN] = { };
5068 char desc[WORKER_DESC_LEN] = { };
5069 struct pool_workqueue *pwq = NULL;
5070 struct workqueue_struct *wq = NULL;
5071 struct worker *worker;
5072
5073 if (!(task->flags & PF_WQ_WORKER))
5074 return;
5075
5076 /*
5077 * This function is called without any synchronization and @task
5078 * could be in any state. Be careful with dereferences.
5079 */
5080 worker = kthread_probe_data(task);
5081
5082 /*
5083 * Carefully copy the associated workqueue's workfn, name and desc.
5084 * Keep the original last '\0' in case the original is garbage.
5085 */
5086 copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
5087 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
5088 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
5089 copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1);
5090 copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
5091
5092 if (fn || name[0] || desc[0]) {
5093 printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
5094 if (strcmp(name, desc))
5095 pr_cont(" (%s)", desc);
5096 pr_cont("\n");
5097 }
5098 }
5099
5100 static void pr_cont_pool_info(struct worker_pool *pool)
5101 {
5102 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
5103 if (pool->node != NUMA_NO_NODE)
5104 pr_cont(" node=%d", pool->node);
5105 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
5106 }
5107
5108 struct pr_cont_work_struct {
5109 bool comma;
5110 work_func_t func;
5111 long ctr;
5112 };
5113
5114 static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp)
5115 {
5116 if (!pcwsp->ctr)
5117 goto out_record;
5118 if (func == pcwsp->func) {
5119 pcwsp->ctr++;
5120 return;
5121 }
5122 if (pcwsp->ctr == 1)
5123 pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func);
5124 else
5125 pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func);
5126 pcwsp->ctr = 0;
5127 out_record:
5128 if ((long)func == -1L)
5129 return;
5130 pcwsp->comma = comma;
5131 pcwsp->func = func;
5132 pcwsp->ctr = 1;
5133 }
5134
5135 static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp)
5136 {
5137 if (work->func == wq_barrier_func) {
5138 struct wq_barrier *barr;
5139
5140 barr = container_of(work, struct wq_barrier, work);
5141
5142 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
5143 pr_cont("%s BAR(%d)", comma ? "," : "",
5144 task_pid_nr(barr->task));
5145 } else {
5146 if (!comma)
5147 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
5148 pr_cont_work_flush(comma, work->func, pcwsp);
5149 }
5150 }
5151
5152 static void show_pwq(struct pool_workqueue *pwq)
5153 {
5154 struct pr_cont_work_struct pcws = { .ctr = 0, };
5155 struct worker_pool *pool = pwq->pool;
5156 struct work_struct *work;
5157 struct worker *worker;
5158 bool has_in_flight = false, has_pending = false;
5159 int bkt;
5160
5161 pr_info(" pwq %d:", pool->id);
5162 pr_cont_pool_info(pool);
5163
5164 pr_cont(" active=%d/%d refcnt=%d%s\n",
5165 pwq->nr_active, pwq->max_active, pwq->refcnt,
5166 !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
5167
5168 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
5169 if (worker->current_pwq == pwq) {
5170 has_in_flight = true;
5171 break;
5172 }
5173 }
5174 if (has_in_flight) {
5175 bool comma = false;
5176
5177 pr_info(" in-flight:");
5178 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
5179 if (worker->current_pwq != pwq)
5180 continue;
5181
5182 pr_cont("%s %d%s:%ps", comma ? "," : "",
5183 task_pid_nr(worker->task),
5184 worker->rescue_wq ? "(RESCUER)" : "",
5185 worker->current_func);
5186 list_for_each_entry(work, &worker->scheduled, entry)
5187 pr_cont_work(false, work, &pcws);
5188 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
5189 comma = true;
5190 }
5191 pr_cont("\n");
5192 }
5193
5194 list_for_each_entry(work, &pool->worklist, entry) {
5195 if (get_work_pwq(work) == pwq) {
5196 has_pending = true;
5197 break;
5198 }
5199 }
5200 if (has_pending) {
5201 bool comma = false;
5202
5203 pr_info(" pending:");
5204 list_for_each_entry(work, &pool->worklist, entry) {
5205 if (get_work_pwq(work) != pwq)
5206 continue;
5207
5208 pr_cont_work(comma, work, &pcws);
5209 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
5210 }
5211 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
5212 pr_cont("\n");
5213 }
5214
5215 if (!list_empty(&pwq->inactive_works)) {
5216 bool comma = false;
5217
5218 pr_info(" inactive:");
5219 list_for_each_entry(work, &pwq->inactive_works, entry) {
5220 pr_cont_work(comma, work, &pcws);
5221 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
5222 }
5223 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
5224 pr_cont("\n");
5225 }
5226 }
5227
5228 /**
5229 * show_one_workqueue - dump state of specified workqueue
5230 * @wq: workqueue whose state will be printed
5231 */
5232 void show_one_workqueue(struct workqueue_struct *wq)
5233 {
5234 struct pool_workqueue *pwq;
5235 bool idle = true;
5236 unsigned long flags;
5237
5238 for_each_pwq(pwq, wq) {
5239 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
5240 idle = false;
5241 break;
5242 }
5243 }
5244 if (idle) /* Nothing to print for idle workqueue */
5245 return;
5246
5247 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
5248
5249 for_each_pwq(pwq, wq) {
5250 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
5251 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
5252 /*
5253 * Defer printing to avoid deadlocks in console
5254 * drivers that queue work while holding locks
5255 * also taken in their write paths.
5256 */
5257 printk_deferred_enter();
5258 show_pwq(pwq);
5259 printk_deferred_exit();
5260 }
5261 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
5262 /*
5263 * We could be printing a lot from atomic context, e.g.
5264 * sysrq-t -> show_all_workqueues(). Avoid triggering
5265 * hard lockup.
5266 */
5267 touch_nmi_watchdog();
5268 }
5269
5270 }
5271
5272 /**
5273 * show_one_worker_pool - dump state of specified worker pool
5274 * @pool: worker pool whose state will be printed
5275 */
5276 static void show_one_worker_pool(struct worker_pool *pool)
5277 {
5278 struct worker *worker;
5279 bool first = true;
5280 unsigned long flags;
5281 unsigned long hung = 0;
5282
5283 raw_spin_lock_irqsave(&pool->lock, flags);
5284 if (pool->nr_workers == pool->nr_idle)
5285 goto next_pool;
5286
5287 /* How long the first pending work is waiting for a worker. */
5288 if (!list_empty(&pool->worklist))
5289 hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;
5290
5291 /*
5292 * Defer printing to avoid deadlocks in console drivers that
5293 * queue work while holding locks also taken in their write
5294 * paths.
5295 */
5296 printk_deferred_enter();
5297 pr_info("pool %d:", pool->id);
5298 pr_cont_pool_info(pool);
5299 pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers);
5300 if (pool->manager)
5301 pr_cont(" manager: %d",
5302 task_pid_nr(pool->manager->task));
5303 list_for_each_entry(worker, &pool->idle_list, entry) {
5304 pr_cont(" %s%d", first ? "idle: " : "",
5305 task_pid_nr(worker->task));
5306 first = false;
5307 }
5308 pr_cont("\n");
5309 printk_deferred_exit();
5310 next_pool:
5311 raw_spin_unlock_irqrestore(&pool->lock, flags);
5312 /*
5313 * We could be printing a lot from atomic context, e.g.
5314 * sysrq-t -> show_all_workqueues(). Avoid triggering
5315 * hard lockup.
5316 */
5317 touch_nmi_watchdog();
5318
5319 }
5320
5321 /**
5322 * show_all_workqueues - dump workqueue state
5323 *
5324 * Called from a sysrq handler and prints out all busy workqueues and pools.
5325 */
5326 void show_all_workqueues(void)
5327 {
5328 struct workqueue_struct *wq;
5329 struct worker_pool *pool;
5330 int pi;
5331
5332 rcu_read_lock();
5333
5334 pr_info("Showing busy workqueues and worker pools:\n");
5335
5336 list_for_each_entry_rcu(wq, &workqueues, list)
5337 show_one_workqueue(wq);
5338
5339 for_each_pool(pool, pi)
5340 show_one_worker_pool(pool);
5341
5342 rcu_read_unlock();
5343 }
5344
5345 /**
5346 * show_freezable_workqueues - dump freezable workqueue state
5347 *
5348 * Called from try_to_freeze_tasks() and prints out all freezable workqueues
5349 * still busy.
5350 */
5351 void show_freezable_workqueues(void)
5352 {
5353 struct workqueue_struct *wq;
5354
5355 rcu_read_lock();
5356
5357 pr_info("Showing freezable workqueues that are still busy:\n");
5358
5359 list_for_each_entry_rcu(wq, &workqueues, list) {
5360 if (!(wq->flags & WQ_FREEZABLE))
5361 continue;
5362 show_one_workqueue(wq);
5363 }
5364
5365 rcu_read_unlock();
5366 }
5367
5368 /* used to show worker information through /proc/PID/{comm,stat,status} */
5369 void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
5370 {
5371 int off;
5372
5373 /* always show the actual comm */
5374 off = strscpy(buf, task->comm, size);
5375 if (off < 0)
5376 return;
5377
5378 /* stabilize PF_WQ_WORKER and worker pool association */
5379 mutex_lock(&wq_pool_attach_mutex);
5380
5381 if (task->flags & PF_WQ_WORKER) {
5382 struct worker *worker = kthread_data(task);
5383 struct worker_pool *pool = worker->pool;
5384
5385 if (pool) {
5386 raw_spin_lock_irq(&pool->lock);
5387 /*
5388 * ->desc tracks information (wq name or
5389 * set_worker_desc()) for the latest execution. If
5390 * current, prepend '+', otherwise '-'.
5391 */
5392 if (worker->desc[0] != '\0') {
5393 if (worker->current_work)
5394 scnprintf(buf + off, size - off, "+%s",
5395 worker->desc);
5396 else
5397 scnprintf(buf + off, size - off, "-%s",
5398 worker->desc);
5399 }
5400 raw_spin_unlock_irq(&pool->lock);
5401 }
5402 }
5403
5404 mutex_unlock(&wq_pool_attach_mutex);
5405 }
5406
5407 #ifdef CONFIG_SMP
5408
5409 /*
5410 * CPU hotplug.
5411 *
5412 * There are two challenges in supporting CPU hotplug. Firstly, there
5413 * are a lot of assumptions on strong associations among work, pwq and
5414 * pool which make migrating pending and scheduled works very
5415 * difficult to implement without impacting hot paths. Secondly,
5416 * worker pools serve a mix of short, long and very long running works, making
5417 * blocked draining impractical.
5418 *
5419 * This is solved by allowing the pools to be disassociated from the CPU,
5420 * running as unbound ones, and allowing them to be reattached later if the
5421 * CPU comes back online.
5422 */
5423
5424 static void unbind_workers(int cpu)
5425 {
5426 struct worker_pool *pool;
5427 struct worker *worker;
5428
5429 for_each_cpu_worker_pool(pool, cpu) {
5430 mutex_lock(&wq_pool_attach_mutex);
5431 raw_spin_lock_irq(&pool->lock);
5432
5433 /*
5434 * We've blocked all attach/detach operations. Make all workers
5435 * unbound and set DISASSOCIATED. Before this, all workers
5436 * must be on the cpu. After this, they may become diasporas.
5437 * And the preemption disabled section in their sched callbacks
5438 * are guaranteed to see WORKER_UNBOUND since the code here
5439 * is on the same cpu.
5440 */
5441 for_each_pool_worker(worker, pool)
5442 worker->flags |= WORKER_UNBOUND;
5443
5444 pool->flags |= POOL_DISASSOCIATED;
5445
5446 /*
5447 * The handling of nr_running in sched callbacks is disabled
5448 * now. Zap nr_running. After this, nr_running stays zero and
5449 * need_more_worker() and keep_working() are always true as
5450 * long as the worklist is not empty. This pool now behaves as
5451 * an unbound (in terms of concurrency management) pool which
5452 * is served by workers tied to the pool.
5453 */
5454 pool->nr_running = 0;
5455
5456 /*
5457 * With concurrency management just turned off, a busy
5458 * worker blocking could lead to lengthy stalls. Kick off
5459 * unbound chain execution of currently pending work items.
5460 */
5461 kick_pool(pool);
5462
5463 raw_spin_unlock_irq(&pool->lock);
5464
5465 for_each_pool_worker(worker, pool)
5466 unbind_worker(worker);
5467
5468 mutex_unlock(&wq_pool_attach_mutex);
5469 }
5470 }
5471
5472 /**
5473 * rebind_workers - rebind all workers of a pool to the associated CPU
5474 * @pool: pool of interest
5475 *
5476 * @pool->cpu is coming online. Rebind all workers to the CPU.
5477 */
5478 static void rebind_workers(struct worker_pool *pool)
5479 {
5480 struct worker *worker;
5481
5482 lockdep_assert_held(&wq_pool_attach_mutex);
5483
5484 /*
5485 * Restore CPU affinity of all workers. As all idle workers should
5486 * be on the run-queue of the associated CPU before any local
5487 * wake-ups for concurrency management happen, restore CPU affinity
5488 * of all workers first and then clear UNBOUND. As we're called
5489 * from CPU_ONLINE, the following shouldn't fail.
5490 */
5491 for_each_pool_worker(worker, pool) {
5492 kthread_set_per_cpu(worker->task, pool->cpu);
5493 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
5494 pool_allowed_cpus(pool)) < 0);
5495 }
5496
5497 raw_spin_lock_irq(&pool->lock);
5498
5499 pool->flags &= ~POOL_DISASSOCIATED;
5500
5501 for_each_pool_worker(worker, pool) {
5502 unsigned int worker_flags = worker->flags;
5503
5504 /*
5505 * We want to clear UNBOUND but can't directly call
5506 * worker_clr_flags() or adjust nr_running. Atomically
5507 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
5508 * @worker will clear REBOUND using worker_clr_flags() when
5509 * it initiates the next execution cycle thus restoring
5510 * concurrency management. Note that when or whether
5511 * @worker clears REBOUND doesn't affect correctness.
5512 *
5513 * WRITE_ONCE() is necessary because @worker->flags may be
5514 * tested without holding any lock in
5515 * wq_worker_running(). Without it, NOT_RUNNING test may
5516 * fail incorrectly leading to premature concurrency
5517 * management operations.
5518 */
5519 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
5520 worker_flags |= WORKER_REBOUND;
5521 worker_flags &= ~WORKER_UNBOUND;
5522 WRITE_ONCE(worker->flags, worker_flags);
5523 }
5524
5525 raw_spin_unlock_irq(&pool->lock);
5526 }
5527
5528 /**
5529 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
5530 * @pool: unbound pool of interest
5531 * @cpu: the CPU which is coming up
5532 *
5533 * An unbound pool may end up with a cpumask which doesn't have any online
5534 * CPUs. When a worker of such a pool gets scheduled, the scheduler resets
5535 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
5536 * online CPU before, cpus_allowed of all its workers should be restored.
5537 */
5538 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
5539 {
5540 static cpumask_t cpumask;
5541 struct worker *worker;
5542
5543 lockdep_assert_held(&wq_pool_attach_mutex);
5544
5545 /* is @cpu allowed for @pool? */
5546 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
5547 return;
5548
5549 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
5550
5551 /* as we're called from CPU_ONLINE, the following shouldn't fail */
5552 for_each_pool_worker(worker, pool)
5553 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
5554 }
5555
5556 int workqueue_prepare_cpu(unsigned int cpu)
5557 {
5558 struct worker_pool *pool;
5559
5560 for_each_cpu_worker_pool(pool, cpu) {
5561 if (pool->nr_workers)
5562 continue;
5563 if (!create_worker(pool))
5564 return -ENOMEM;
5565 }
5566 return 0;
5567 }
5568
5569 int workqueue_online_cpu(unsigned int cpu)
5570 {
5571 struct worker_pool *pool;
5572 struct workqueue_struct *wq;
5573 int pi;
5574
5575 mutex_lock(&wq_pool_mutex);
5576
5577 for_each_pool(pool, pi) {
5578 mutex_lock(&wq_pool_attach_mutex);
5579
5580 if (pool->cpu == cpu)
5581 rebind_workers(pool);
5582 else if (pool->cpu < 0)
5583 restore_unbound_workers_cpumask(pool, cpu);
5584
5585 mutex_unlock(&wq_pool_attach_mutex);
5586 }
5587
5588 /* update pod affinity of unbound workqueues */
5589 list_for_each_entry(wq, &workqueues, list) {
5590 struct workqueue_attrs *attrs = wq->unbound_attrs;
5591
5592 if (attrs) {
5593 const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
5594 int tcpu;
5595
5596 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
5597 wq_update_pod(wq, tcpu, cpu, true);
5598 }
5599 }
5600
5601 mutex_unlock(&wq_pool_mutex);
5602 return 0;
5603 }
5604
5605 int workqueue_offline_cpu(unsigned int cpu)
5606 {
5607 struct workqueue_struct *wq;
5608
5609 /* unbinding per-cpu workers should happen on the local CPU */
5610 if (WARN_ON(cpu != smp_processor_id()))
5611 return -1;
5612
5613 unbind_workers(cpu);
5614
5615 /* update pod affinity of unbound workqueues */
5616 mutex_lock(&wq_pool_mutex);
5617 list_for_each_entry(wq, &workqueues, list) {
5618 struct workqueue_attrs *attrs = wq->unbound_attrs;
5619
5620 if (attrs) {
5621 const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
5622 int tcpu;
5623
5624 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
5625 wq_update_pod(wq, tcpu, cpu, false);
5626 }
5627 }
5628 mutex_unlock(&wq_pool_mutex);
5629
5630 return 0;
5631 }
5632
5633 struct work_for_cpu {
5634 struct work_struct work;
5635 long (*fn)(void *);
5636 void *arg;
5637 long ret;
5638 };
5639
5640 static void work_for_cpu_fn(struct work_struct *work)
5641 {
5642 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
5643
5644 wfc->ret = wfc->fn(wfc->arg);
5645 }
5646
5647 /**
5648 * work_on_cpu_key - run a function in thread context on a particular cpu
5649 * @cpu: the cpu to run on
5650 * @fn: the function to run
5651 * @arg: the function arg
5652 * @key: The lock class key for lock debugging purposes
5653 *
5654 * It is up to the caller to ensure that the cpu doesn't go offline.
5655 * The caller must not hold any locks which would prevent @fn from completing.
5656 *
5657 * Return: The value @fn returns.
5658 */
5659 long work_on_cpu_key(int cpu, long (*fn)(void *),
5660 void *arg, struct lock_class_key *key)
5661 {
5662 struct work_for_cpu wfc = { .fn = fn, .arg = arg };
5663
5664 INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key);
5665 schedule_work_on(cpu, &wfc.work);
5666 flush_work(&wfc.work);
5667 destroy_work_on_stack(&wfc.work);
5668 return wfc.ret;
5669 }
5670 EXPORT_SYMBOL_GPL(work_on_cpu_key);
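/*
 * Illustrative sketch (not part of the original file): callers normally reach
 * this through the work_on_cpu() wrapper in workqueue.h, which supplies the
 * lock class key. The per-CPU register read is a made-up example payload; as
 * noted above, the caller is responsible for keeping @cpu online.
 *
 *	static long example_read_on_cpu(void *arg)
 *	{
 *		return example_read_local_register();	// hypothetical helper
 *	}
 *
 *	static long example_query(int cpu)
 *	{
 *		return work_on_cpu(cpu, example_read_on_cpu, NULL);
 *	}
 */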
5671
5672 /**
5673 * work_on_cpu_safe_key - run a function in thread context on a particular cpu
5674 * @cpu: the cpu to run on
5675 * @fn: the function to run
5676 * @arg: the function argument
5677 * @key: The lock class key for lock debugging purposes
5678 *
5679 * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
5680 * any locks which would prevent @fn from completing.
5681 *
5682 * Return: The value @fn returns.
5683 */
5684 long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
5685 void *arg, struct lock_class_key *key)
5686 {
5687 long ret = -ENODEV;
5688
5689 cpus_read_lock();
5690 if (cpu_online(cpu))
5691 ret = work_on_cpu_key(cpu, fn, arg, key);
5692 cpus_read_unlock();
5693 return ret;
5694 }
5695 EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
5696 #endif /* CONFIG_SMP */
5697
5698 #ifdef CONFIG_FREEZER
5699
5700 /**
5701 * freeze_workqueues_begin - begin freezing workqueues
5702 *
5703 * Start freezing workqueues. After this function returns, all freezable
5704 * workqueues will queue new works to their inactive_works list instead of
5705 * pool->worklist.
5706 *
5707 * CONTEXT:
5708 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5709 */
5710 void freeze_workqueues_begin(void)
5711 {
5712 struct workqueue_struct *wq;
5713 struct pool_workqueue *pwq;
5714
5715 mutex_lock(&wq_pool_mutex);
5716
5717 WARN_ON_ONCE(workqueue_freezing);
5718 workqueue_freezing = true;
5719
5720 list_for_each_entry(wq, &workqueues, list) {
5721 mutex_lock(&wq->mutex);
5722 for_each_pwq(pwq, wq)
5723 pwq_adjust_max_active(pwq);
5724 mutex_unlock(&wq->mutex);
5725 }
5726
5727 mutex_unlock(&wq_pool_mutex);
5728 }
5729
5730 /**
5731 * freeze_workqueues_busy - are freezable workqueues still busy?
5732 *
5733 * Check whether freezing is complete. This function must be called
5734 * between freeze_workqueues_begin() and thaw_workqueues().
5735 *
5736 * CONTEXT:
5737 * Grabs and releases wq_pool_mutex.
5738 *
5739 * Return:
5740 * %true if some freezable workqueues are still busy. %false if freezing
5741 * is complete.
5742 */
5743 bool freeze_workqueues_busy(void)
5744 {
5745 bool busy = false;
5746 struct workqueue_struct *wq;
5747 struct pool_workqueue *pwq;
5748
5749 mutex_lock(&wq_pool_mutex);
5750
5751 WARN_ON_ONCE(!workqueue_freezing);
5752
5753 list_for_each_entry(wq, &workqueues, list) {
5754 if (!(wq->flags & WQ_FREEZABLE))
5755 continue;
5756 /*
5757 * nr_active is monotonically decreasing. It's safe
5758 * to peek without lock.
5759 */
5760 rcu_read_lock();
5761 for_each_pwq(pwq, wq) {
5762 WARN_ON_ONCE(pwq->nr_active < 0);
5763 if (pwq->nr_active) {
5764 busy = true;
5765 rcu_read_unlock();
5766 goto out_unlock;
5767 }
5768 }
5769 rcu_read_unlock();
5770 }
5771 out_unlock:
5772 mutex_unlock(&wq_pool_mutex);
5773 return busy;
5774 }
5775
5776 /**
5777 * thaw_workqueues - thaw workqueues
5778 *
5779 * Thaw workqueues. Normal queueing is restored and all collected
5780 * frozen works are transferred to their respective pool worklists.
5781 *
5782 * CONTEXT:
5783 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5784 */
5785 void thaw_workqueues(void)
5786 {
5787 struct workqueue_struct *wq;
5788 struct pool_workqueue *pwq;
5789
5790 mutex_lock(&wq_pool_mutex);
5791
5792 if (!workqueue_freezing)
5793 goto out_unlock;
5794
5795 workqueue_freezing = false;
5796
5797 /* restore max_active and repopulate worklist */
5798 list_for_each_entry(wq, &workqueues, list) {
5799 mutex_lock(&wq->mutex);
5800 for_each_pwq(pwq, wq)
5801 pwq_adjust_max_active(pwq);
5802 mutex_unlock(&wq->mutex);
5803 }
5804
5805 out_unlock:
5806 mutex_unlock(&wq_pool_mutex);
5807 }
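/*
 * Illustrative sketch (not part of the original file): the freezer core drives
 * the three functions above roughly in this order. This is a simplified
 * outline of the expected call sequence, not the actual freezer code.
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy()) {
 *		// Wait for in-flight freezable work items to drain, or report
 *		// which freezable workqueues are still busy and retry.
 *		show_freezable_workqueues();
 *		msleep(10);
 *	}
 *	// ... workqueues frozen, suspend/hibernate proceeds ...
 *	thaw_workqueues();
 */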
5808 #endif /* CONFIG_FREEZER */
5809
5810 static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
5811 {
5812 LIST_HEAD(ctxs);
5813 int ret = 0;
5814 struct workqueue_struct *wq;
5815 struct apply_wqattrs_ctx *ctx, *n;
5816
5817 lockdep_assert_held(&wq_pool_mutex);
5818
5819 list_for_each_entry(wq, &workqueues, list) {
5820 if (!(wq->flags & WQ_UNBOUND))
5821 continue;
5822 /* creating multiple pwqs breaks ordering guarantee */
5823 if (wq->flags & __WQ_ORDERED)
5824 continue;
5825
5826 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
5827 if (IS_ERR(ctx)) {
5828 ret = PTR_ERR(ctx);
5829 break;
5830 }
5831
5832 list_add_tail(&ctx->list, &ctxs);
5833 }
5834
5835 list_for_each_entry_safe(ctx, n, &ctxs, list) {
5836 if (!ret)
5837 apply_wqattrs_commit(ctx);
5838 apply_wqattrs_cleanup(ctx);
5839 }
5840
5841 if (!ret) {
5842 mutex_lock(&wq_pool_attach_mutex);
5843 cpumask_copy(wq_unbound_cpumask, unbound_cpumask);
5844 mutex_unlock(&wq_pool_attach_mutex);
5845 }
5846 return ret;
5847 }
5848
5849 /**
5850 * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
5851 * @cpumask: the cpumask to set
5852 *
5853 * The low-level workqueues cpumask is a global cpumask that limits
5854 * the affinity of all unbound workqueues. This function checks @cpumask,
5855 * applies it to all unbound workqueues and updates all their pwqs.
5856 *
5857 * Return: 0 - Success
5858 * -EINVAL - Invalid @cpumask
5859 * -ENOMEM - Failed to allocate memory for attrs or pwqs.
5860 */
5861 int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
5862 {
5863 int ret = -EINVAL;
5864
5865 /*
5866 * Not excluding isolated cpus on purpose.
5867 * If the user wishes to include them, we allow that.
5868 */
5869 cpumask_and(cpumask, cpumask, cpu_possible_mask);
5870 if (!cpumask_empty(cpumask)) {
5871 apply_wqattrs_lock();
5872 if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
5873 ret = 0;
5874 goto out_unlock;
5875 }
5876
5877 ret = workqueue_apply_unbound_cpumask(cpumask);
5878
5879 out_unlock:
5880 apply_wqattrs_unlock();
5881 }
5882
5883 return ret;
5884 }
5885
5886 static int parse_affn_scope(const char *val)
5887 {
5888 int i;
5889
5890 for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) {
5891 if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i])))
5892 return i;
5893 }
5894 return -EINVAL;
5895 }
5896
5897 static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp)
5898 {
5899 struct workqueue_struct *wq;
5900 int affn, cpu;
5901
5902 affn = parse_affn_scope(val);
5903 if (affn < 0)
5904 return affn;
5905 if (affn == WQ_AFFN_DFL)
5906 return -EINVAL;
5907
5908 cpus_read_lock();
5909 mutex_lock(&wq_pool_mutex);
5910
5911 wq_affn_dfl = affn;
5912
5913 list_for_each_entry(wq, &workqueues, list) {
5914 for_each_online_cpu(cpu) {
5915 wq_update_pod(wq, cpu, cpu, true);
5916 }
5917 }
5918
5919 mutex_unlock(&wq_pool_mutex);
5920 cpus_read_unlock();
5921
5922 return 0;
5923 }
5924
5925 static int wq_affn_dfl_get(char *buffer, const struct kernel_param *kp)
5926 {
5927 return scnprintf(buffer, PAGE_SIZE, "%s\n", wq_affn_names[wq_affn_dfl]);
5928 }
5929
5930 static const struct kernel_param_ops wq_affn_dfl_ops = {
5931 .set = wq_affn_dfl_set,
5932 .get = wq_affn_dfl_get,
5933 };
5934
5935 module_param_cb(default_affinity_scope, &wq_affn_dfl_ops, NULL, 0644);
5936
5937 #ifdef CONFIG_SYSFS
5938 /*
5939 * Workqueues with the WQ_SYSFS flag set are visible to userland via
5940 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the
5941 * following attributes.
5942 *
5943 * per_cpu RO bool : whether the workqueue is per-cpu or unbound
5944 * max_active RW int : maximum number of in-flight work items
5945 *
5946 * Unbound workqueues have the following extra attributes.
5947 *
5948 * nice RW int : nice value of the workers
5949 * cpumask RW mask : bitmask of allowed CPUs for the workers
5950 * affinity_scope RW str : worker CPU affinity scope (cache, numa, none)
5951 * affinity_strict RW bool : worker CPU affinity is strict
5952 */
5953 struct wq_device {
5954 struct workqueue_struct *wq;
5955 struct device dev;
5956 };
5957
5958 static struct workqueue_struct *dev_to_wq(struct device *dev)
5959 {
5960 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5961
5962 return wq_dev->wq;
5963 }
5964
5965 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
5966 char *buf)
5967 {
5968 struct workqueue_struct *wq = dev_to_wq(dev);
5969
5970 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
5971 }
5972 static DEVICE_ATTR_RO(per_cpu);
5973
5974 static ssize_t max_active_show(struct device *dev,
5975 struct device_attribute *attr, char *buf)
5976 {
5977 struct workqueue_struct *wq = dev_to_wq(dev);
5978
5979 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
5980 }
5981
5982 static ssize_t max_active_store(struct device *dev,
5983 struct device_attribute *attr, const char *buf,
5984 size_t count)
5985 {
5986 struct workqueue_struct *wq = dev_to_wq(dev);
5987 int val;
5988
5989 if (sscanf(buf, "%d", &val) != 1 || val <= 0)
5990 return -EINVAL;
5991
5992 workqueue_set_max_active(wq, val);
5993 return count;
5994 }
5995 static DEVICE_ATTR_RW(max_active);
5996
5997 static struct attribute *wq_sysfs_attrs[] = {
5998 &dev_attr_per_cpu.attr,
5999 &dev_attr_max_active.attr,
6000 NULL,
6001 };
6002 ATTRIBUTE_GROUPS(wq_sysfs);
6003
6004 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
6005 char *buf)
6006 {
6007 struct workqueue_struct *wq = dev_to_wq(dev);
6008 int written;
6009
6010 mutex_lock(&wq->mutex);
6011 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
6012 mutex_unlock(&wq->mutex);
6013
6014 return written;
6015 }
6016
6017 /* prepare workqueue_attrs for sysfs store operations */
6018 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
6019 {
6020 struct workqueue_attrs *attrs;
6021
6022 lockdep_assert_held(&wq_pool_mutex);
6023
6024 attrs = alloc_workqueue_attrs();
6025 if (!attrs)
6026 return NULL;
6027
6028 copy_workqueue_attrs(attrs, wq->unbound_attrs);
6029 return attrs;
6030 }
6031
6032 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
6033 const char *buf, size_t count)
6034 {
6035 struct workqueue_struct *wq = dev_to_wq(dev);
6036 struct workqueue_attrs *attrs;
6037 int ret = -ENOMEM;
6038
6039 apply_wqattrs_lock();
6040
6041 attrs = wq_sysfs_prep_attrs(wq);
6042 if (!attrs)
6043 goto out_unlock;
6044
6045 if (sscanf(buf, "%d", &attrs->nice) == 1 &&
6046 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
6047 ret = apply_workqueue_attrs_locked(wq, attrs);
6048 else
6049 ret = -EINVAL;
6050
6051 out_unlock:
6052 apply_wqattrs_unlock();
6053 free_workqueue_attrs(attrs);
6054 return ret ?: count;
6055 }
6056
6057 static ssize_t wq_cpumask_show(struct device *dev,
6058 struct device_attribute *attr, char *buf)
6059 {
6060 struct workqueue_struct *wq = dev_to_wq(dev);
6061 int written;
6062
6063 mutex_lock(&wq->mutex);
6064 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
6065 cpumask_pr_args(wq->unbound_attrs->cpumask));
6066 mutex_unlock(&wq->mutex);
6067 return written;
6068 }
6069
6070 static ssize_t wq_cpumask_store(struct device *dev,
6071 struct device_attribute *attr,
6072 const char *buf, size_t count)
6073 {
6074 struct workqueue_struct *wq = dev_to_wq(dev);
6075 struct workqueue_attrs *attrs;
6076 int ret = -ENOMEM;
6077
6078 apply_wqattrs_lock();
6079
6080 attrs = wq_sysfs_prep_attrs(wq);
6081 if (!attrs)
6082 goto out_unlock;
6083
6084 ret = cpumask_parse(buf, attrs->cpumask);
6085 if (!ret)
6086 ret = apply_workqueue_attrs_locked(wq, attrs);
6087
6088 out_unlock:
6089 apply_wqattrs_unlock();
6090 free_workqueue_attrs(attrs);
6091 return ret ?: count;
6092 }
6093
6094 static ssize_t wq_affn_scope_show(struct device *dev,
6095 struct device_attribute *attr, char *buf)
6096 {
6097 struct workqueue_struct *wq = dev_to_wq(dev);
6098 int written;
6099
6100 mutex_lock(&wq->mutex);
6101 if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL)
6102 written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n",
6103 wq_affn_names[WQ_AFFN_DFL],
6104 wq_affn_names[wq_affn_dfl]);
6105 else
6106 written = scnprintf(buf, PAGE_SIZE, "%s\n",
6107 wq_affn_names[wq->unbound_attrs->affn_scope]);
6108 mutex_unlock(&wq->mutex);
6109
6110 return written;
6111 }
6112
6113 static ssize_t wq_affn_scope_store(struct device *dev,
6114 struct device_attribute *attr,
6115 const char *buf, size_t count)
6116 {
6117 struct workqueue_struct *wq = dev_to_wq(dev);
6118 struct workqueue_attrs *attrs;
6119 int affn, ret = -ENOMEM;
6120
6121 affn = parse_affn_scope(buf);
6122 if (affn < 0)
6123 return affn;
6124
6125 apply_wqattrs_lock();
6126 attrs = wq_sysfs_prep_attrs(wq);
6127 if (attrs) {
6128 attrs->affn_scope = affn;
6129 ret = apply_workqueue_attrs_locked(wq, attrs);
6130 }
6131 apply_wqattrs_unlock();
6132 free_workqueue_attrs(attrs);
6133 return ret ?: count;
6134 }
6135
6136 static ssize_t wq_affinity_strict_show(struct device *dev,
6137 struct device_attribute *attr, char *buf)
6138 {
6139 struct workqueue_struct *wq = dev_to_wq(dev);
6140
6141 return scnprintf(buf, PAGE_SIZE, "%d\n",
6142 wq->unbound_attrs->affn_strict);
6143 }
6144
6145 static ssize_t wq_affinity_strict_store(struct device *dev,
6146 struct device_attribute *attr,
6147 const char *buf, size_t count)
6148 {
6149 struct workqueue_struct *wq = dev_to_wq(dev);
6150 struct workqueue_attrs *attrs;
6151 int v, ret = -ENOMEM;
6152
6153 if (sscanf(buf, "%d", &v) != 1)
6154 return -EINVAL;
6155
6156 apply_wqattrs_lock();
6157 attrs = wq_sysfs_prep_attrs(wq);
6158 if (attrs) {
6159 attrs->affn_strict = (bool)v;
6160 ret = apply_workqueue_attrs_locked(wq, attrs);
6161 }
6162 apply_wqattrs_unlock();
6163 free_workqueue_attrs(attrs);
6164 return ret ?: count;
6165 }
6166
6167 static struct device_attribute wq_sysfs_unbound_attrs[] = {
6168 __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
6169 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
6170 __ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store),
6171 __ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store),
6172 __ATTR_NULL,
6173 };
6174
6175 static struct bus_type wq_subsys = {
6176 .name = "workqueue",
6177 .dev_groups = wq_sysfs_groups,
6178 };
6179
6180 static ssize_t wq_unbound_cpumask_show(struct device *dev,
6181 struct device_attribute *attr, char *buf)
6182 {
6183 int written;
6184
6185 mutex_lock(&wq_pool_mutex);
6186 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
6187 cpumask_pr_args(wq_unbound_cpumask));
6188 mutex_unlock(&wq_pool_mutex);
6189
6190 return written;
6191 }
6192
6193 static ssize_t wq_unbound_cpumask_store(struct device *dev,
6194 struct device_attribute *attr, const char *buf, size_t count)
6195 {
6196 cpumask_var_t cpumask;
6197 int ret;
6198
6199 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
6200 return -ENOMEM;
6201
6202 ret = cpumask_parse(buf, cpumask);
6203 if (!ret)
6204 ret = workqueue_set_unbound_cpumask(cpumask);
6205
6206 free_cpumask_var(cpumask);
6207 return ret ? ret : count;
6208 }
6209
6210 static struct device_attribute wq_sysfs_cpumask_attr =
6211 __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
6212 wq_unbound_cpumask_store);
6213
6214 static int __init wq_sysfs_init(void)
6215 {
6216 struct device *dev_root;
6217 int err;
6218
6219 err = subsys_virtual_register(&wq_subsys, NULL);
6220 if (err)
6221 return err;
6222
6223 dev_root = bus_get_dev_root(&wq_subsys);
6224 if (dev_root) {
6225 err = device_create_file(dev_root, &wq_sysfs_cpumask_attr);
6226 put_device(dev_root);
6227 }
6228 return err;
6229 }
6230 core_initcall(wq_sysfs_init);
6231
6232 static void wq_device_release(struct device *dev)
6233 {
6234 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
6235
6236 kfree(wq_dev);
6237 }
6238
6239 /**
6240 * workqueue_sysfs_register - make a workqueue visible in sysfs
6241 * @wq: the workqueue to register
6242 *
6243 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
6244 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
6245 * which is the preferred method.
6246 *
6247 * A workqueue user should use this function directly iff it wants to apply
6248 * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
6249 * apply_workqueue_attrs() may race against userland updating the
6250 * attributes.
6251 *
6252 * Return: 0 on success, -errno on failure.
6253 */
6254 int workqueue_sysfs_register(struct workqueue_struct *wq)
6255 {
6256 struct wq_device *wq_dev;
6257 int ret;
6258
6259 /*
6260 * Adjusting max_active or creating new pwqs by applying
6261 * attributes breaks ordering guarantee. Disallow exposing ordered
6262 * workqueues.
6263 */
6264 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
6265 return -EINVAL;
6266
6267 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
6268 if (!wq_dev)
6269 return -ENOMEM;
6270
6271 wq_dev->wq = wq;
6272 wq_dev->dev.bus = &wq_subsys;
6273 wq_dev->dev.release = wq_device_release;
6274 dev_set_name(&wq_dev->dev, "%s", wq->name);
6275
6276 /*
6277 * unbound_attrs are created separately. Suppress uevent until
6278 * everything is ready.
6279 */
6280 dev_set_uevent_suppress(&wq_dev->dev, true);
6281
6282 ret = device_register(&wq_dev->dev);
6283 if (ret) {
6284 put_device(&wq_dev->dev);
6285 wq->wq_dev = NULL;
6286 return ret;
6287 }
6288
6289 if (wq->flags & WQ_UNBOUND) {
6290 struct device_attribute *attr;
6291
6292 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
6293 ret = device_create_file(&wq_dev->dev, attr);
6294 if (ret) {
6295 device_unregister(&wq_dev->dev);
6296 wq->wq_dev = NULL;
6297 return ret;
6298 }
6299 }
6300 }
6301
6302 dev_set_uevent_suppress(&wq_dev->dev, false);
6303 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
6304 return 0;
6305 }
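/*
 * Illustrative sketch (not part of the original file): the "apply attrs first,
 * then register" ordering described above could look roughly like this. The
 * workqueue name, nice value and error handling are example assumptions.
 *
 *	static int example_setup_wq(struct workqueue_struct **wqp)
 *	{
 *		struct workqueue_attrs *attrs;
 *		struct workqueue_struct *wq;
 *		int ret;
 *
 *		wq = alloc_workqueue("example_unbound", WQ_UNBOUND, 0);
 *		if (!wq)
 *			return -ENOMEM;
 *
 *		attrs = alloc_workqueue_attrs();
 *		if (!attrs) {
 *			destroy_workqueue(wq);
 *			return -ENOMEM;
 *		}
 *		attrs->nice = -5;
 *		ret = apply_workqueue_attrs(wq, attrs);
 *		free_workqueue_attrs(attrs);
 *		if (!ret)
 *			ret = workqueue_sysfs_register(wq);
 *		if (ret) {
 *			destroy_workqueue(wq);
 *			return ret;
 *		}
 *		*wqp = wq;
 *		return 0;
 *	}
 */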
6306
6307 /**
6308 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
6309 * @wq: the workqueue to unregister
6310 *
6311 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
6312 */
6313 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
6314 {
6315 struct wq_device *wq_dev = wq->wq_dev;
6316
6317 if (!wq->wq_dev)
6318 return;
6319
6320 wq->wq_dev = NULL;
6321 device_unregister(&wq_dev->dev);
6322 }
6323 #else /* CONFIG_SYSFS */
6324 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
6325 #endif /* CONFIG_SYSFS */
6326
6327 /*
6328 * Workqueue watchdog.
6329 *
6330 * Stalls may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal
6331 * flush dependency, a concurrency managed work item which stays RUNNING
6332 * indefinitely. Workqueue stalls can be very difficult to debug as the
6333 * usual warning mechanisms don't trigger and internal workqueue state is
6334 * largely opaque.
6335 *
6336 * Workqueue watchdog monitors all worker pools periodically and dumps
6337 * state if some pools failed to make forward progress for a while where
6338 * forward progress is defined as the first item on ->worklist changing.
6339 *
6340 * This mechanism is controlled through the kernel parameter
6341 * "workqueue.watchdog_thresh" which can be updated at runtime through the
6342 * corresponding sysfs parameter file.
6343 */
6344 #ifdef CONFIG_WQ_WATCHDOG
6345
6346 static unsigned long wq_watchdog_thresh = 30;
6347 static struct timer_list wq_watchdog_timer;
6348
6349 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
6350 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
6351
6352 /*
6353 * Show workers that might prevent the processing of pending work items.
6354 * The only candidates are CPU-bound workers in the running state.
6355 * Pending work items should be handled by another idle worker
6356 * in all other situations.
6357 */
6358 static void show_cpu_pool_hog(struct worker_pool *pool)
6359 {
6360 struct worker *worker;
6361 unsigned long flags;
6362 int bkt;
6363
6364 raw_spin_lock_irqsave(&pool->lock, flags);
6365
6366 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
6367 if (task_is_running(worker->task)) {
6368 /*
6369 * Defer printing to avoid deadlocks in console
6370 * drivers that queue work while holding locks
6371 * also taken in their write paths.
6372 */
6373 printk_deferred_enter();
6374
6375 pr_info("pool %d:\n", pool->id);
6376 sched_show_task(worker->task);
6377
6378 printk_deferred_exit();
6379 }
6380 }
6381
6382 raw_spin_unlock_irqrestore(&pool->lock, flags);
6383 }
6384
6385 static void show_cpu_pools_hogs(void)
6386 {
6387 struct worker_pool *pool;
6388 int pi;
6389
6390 pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n");
6391
6392 rcu_read_lock();
6393
6394 for_each_pool(pool, pi) {
6395 if (pool->cpu_stall)
6396 show_cpu_pool_hog(pool);
6397
6398 }
6399
6400 rcu_read_unlock();
6401 }
6402
6403 static void wq_watchdog_reset_touched(void)
6404 {
6405 int cpu;
6406
6407 wq_watchdog_touched = jiffies;
6408 for_each_possible_cpu(cpu)
6409 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
6410 }
6411
6412 static void wq_watchdog_timer_fn(struct timer_list *unused)
6413 {
6414 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
6415 bool lockup_detected = false;
6416 bool cpu_pool_stall = false;
6417 unsigned long now = jiffies;
6418 struct worker_pool *pool;
6419 int pi;
6420
6421 if (!thresh)
6422 return;
6423
6424 rcu_read_lock();
6425
6426 for_each_pool(pool, pi) {
6427 unsigned long pool_ts, touched, ts;
6428
6429 pool->cpu_stall = false;
6430 if (list_empty(&pool->worklist))
6431 continue;
6432
6433 /*
6434 * If a virtual machine is stopped by the host it can look to
6435 * the watchdog like a stall.
6436 */
6437 kvm_check_and_clear_guest_paused();
6438
6439 /* get the latest of pool and touched timestamps */
6440 if (pool->cpu >= 0)
6441 touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
6442 else
6443 touched = READ_ONCE(wq_watchdog_touched);
6444 pool_ts = READ_ONCE(pool->watchdog_ts);
6445
6446 if (time_after(pool_ts, touched))
6447 ts = pool_ts;
6448 else
6449 ts = touched;
6450
6451 /* did we stall? */
6452 if (time_after(now, ts + thresh)) {
6453 lockup_detected = true;
6454 if (pool->cpu >= 0) {
6455 pool->cpu_stall = true;
6456 cpu_pool_stall = true;
6457 }
6458 pr_emerg("BUG: workqueue lockup - pool");
6459 pr_cont_pool_info(pool);
6460 pr_cont(" stuck for %us!\n",
6461 jiffies_to_msecs(now - pool_ts) / 1000);
6462 }
6463
6464
6465 }
6466
6467 rcu_read_unlock();
6468
6469 if (lockup_detected)
6470 show_all_workqueues();
6471
6472 if (cpu_pool_stall)
6473 show_cpu_pools_hogs();
6474
6475 wq_watchdog_reset_touched();
6476 mod_timer(&wq_watchdog_timer, jiffies + thresh);
6477 }
6478
6479 notrace void wq_watchdog_touch(int cpu)
6480 {
6481 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
6482 unsigned long touch_ts = READ_ONCE(wq_watchdog_touched);
6483 unsigned long now = jiffies;
6484
6485 if (cpu >= 0)
6486 per_cpu(wq_watchdog_touched_cpu, cpu) = now;
6487 else
6488 WARN_ONCE(1, "%s should be called with valid CPU", __func__);
6489
6490 /* Don't unnecessarily store to global cacheline */
6491 if (time_after(now, touch_ts + thresh / 4))
6492 WRITE_ONCE(wq_watchdog_touched, jiffies);
6493 }
6494
6495 static void wq_watchdog_set_thresh(unsigned long thresh)
6496 {
6497 wq_watchdog_thresh = 0;
6498 del_timer_sync(&wq_watchdog_timer);
6499
6500 if (thresh) {
6501 wq_watchdog_thresh = thresh;
6502 wq_watchdog_reset_touched();
6503 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
6504 }
6505 }
6506
6507 static int wq_watchdog_param_set_thresh(const char *val,
6508 const struct kernel_param *kp)
6509 {
6510 unsigned long thresh;
6511 int ret;
6512
6513 ret = kstrtoul(val, 0, &thresh);
6514 if (ret)
6515 return ret;
6516
6517 if (system_wq)
6518 wq_watchdog_set_thresh(thresh);
6519 else
6520 wq_watchdog_thresh = thresh;
6521
6522 return 0;
6523 }
6524
6525 static const struct kernel_param_ops wq_watchdog_thresh_ops = {
6526 .set = wq_watchdog_param_set_thresh,
6527 .get = param_get_ulong,
6528 };
6529
6530 module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
6531 0644);
6532
6533 static void wq_watchdog_init(void)
6534 {
6535 timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
6536 wq_watchdog_set_thresh(wq_watchdog_thresh);
6537 }
6538
6539 #else /* CONFIG_WQ_WATCHDOG */
6540
6541 static inline void wq_watchdog_init(void) { }
6542
6543 #endif /* CONFIG_WQ_WATCHDOG */
6544
6545 static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask)
6546 {
6547 if (!cpumask_intersects(wq_unbound_cpumask, mask)) {
6548 pr_warn("workqueue: Restricting unbound_cpumask (%*pb) with %s (%*pb) leaves no CPU, ignoring\n",
6549 cpumask_pr_args(wq_unbound_cpumask), name, cpumask_pr_args(mask));
6550 return;
6551 }
6552
6553 cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask);
6554 }
6555
6556 /**
6557 * workqueue_init_early - early init for workqueue subsystem
6558 *
6559 * This is the first step of three-staged workqueue subsystem initialization and
6560 * invoked as soon as the bare basics - memory allocation, cpumasks and idr are
6561 * up. It sets up all the data structures and system workqueues and allows early
6562 * boot code to create workqueues and queue/cancel work items. Actual work item
6563 * execution starts only after kthreads can be created and scheduled right
6564 * before early initcalls.
6565 */
6566 void __init workqueue_init_early(void)
6567 {
6568 struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM];
6569 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
6570 int i, cpu;
6571
6572 BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
6573
6574 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
6575 cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
6576 restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ));
6577 restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN));
6578 if (!cpumask_empty(&wq_cmdline_cpumask))
6579 restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask);
6580
6581 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
6582
6583 wq_update_pod_attrs_buf = alloc_workqueue_attrs();
6584 BUG_ON(!wq_update_pod_attrs_buf);
6585
6586 /* initialize WQ_AFFN_SYSTEM pods */
6587 pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
6588 pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL);
6589 pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
6590 BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod);
6591
6592 BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE));
6593
6594 pt->nr_pods = 1;
6595 cpumask_copy(pt->pod_cpus[0], cpu_possible_mask);
6596 pt->pod_node[0] = NUMA_NO_NODE;
6597 pt->cpu_pod[0] = 0;
6598
6599 /* initialize CPU pools */
6600 for_each_possible_cpu(cpu) {
6601 struct worker_pool *pool;
6602
6603 i = 0;
6604 for_each_cpu_worker_pool(pool, cpu) {
6605 BUG_ON(init_worker_pool(pool));
6606 pool->cpu = cpu;
6607 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
6608 cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu));
6609 pool->attrs->nice = std_nice[i++];
6610 pool->attrs->affn_strict = true;
6611 pool->node = cpu_to_node(cpu);
6612
6613 /* alloc pool ID */
6614 mutex_lock(&wq_pool_mutex);
6615 BUG_ON(worker_pool_assign_id(pool));
6616 mutex_unlock(&wq_pool_mutex);
6617 }
6618 }
6619
6620 /* create default unbound and ordered wq attrs */
6621 for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
6622 struct workqueue_attrs *attrs;
6623
6624 BUG_ON(!(attrs = alloc_workqueue_attrs()));
6625 attrs->nice = std_nice[i];
6626 unbound_std_wq_attrs[i] = attrs;
6627
6628 /*
6629 * An ordered wq should have only one pwq as ordering is
6630 * guaranteed by max_active which is enforced by pwqs.
6631 */
6632 BUG_ON(!(attrs = alloc_workqueue_attrs()));
6633 attrs->nice = std_nice[i];
6634 attrs->ordered = true;
6635 ordered_wq_attrs[i] = attrs;
6636 }
6637
6638 system_wq = alloc_workqueue("events", 0, 0);
6639 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
6640 system_long_wq = alloc_workqueue("events_long", 0, 0);
6641 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
6642 WQ_MAX_ACTIVE);
6643 system_freezable_wq = alloc_workqueue("events_freezable",
6644 WQ_FREEZABLE, 0);
6645 system_power_efficient_wq = alloc_workqueue("events_power_efficient",
6646 WQ_POWER_EFFICIENT, 0);
6647 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
6648 WQ_FREEZABLE | WQ_POWER_EFFICIENT,
6649 0);
6650 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
6651 !system_unbound_wq || !system_freezable_wq ||
6652 !system_power_efficient_wq ||
6653 !system_freezable_power_efficient_wq);
6654 }
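/*
 * Illustrative sketch (not part of the original file): after
 * workqueue_init_early(), boot code may already queue work items even though
 * no kworker will execute them until workqueue_init(). example_early_setup(),
 * example_work and example_work_fn() are hypothetical.
 *
 *	static DECLARE_WORK(example_work, example_work_fn);
 *
 *	static void __init example_early_setup(void)
 *	{
 *		// Queued now, executed once workqueue_init() has created workers.
 *		queue_work(system_unbound_wq, &example_work);
 *	}
 */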
6655
6656 static void __init wq_cpu_intensive_thresh_init(void)
6657 {
6658 unsigned long thresh;
6659 unsigned long bogo;
6660
6661 pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release");
6662 BUG_ON(IS_ERR(pwq_release_worker));
6663
6664 /* if the user set it to a specific value, keep it */
6665 if (wq_cpu_intensive_thresh_us != ULONG_MAX)
6666 return;
6667
6668 /*
6669 * The default of 10ms is derived from the fact that most modern (as of
6670 * 2023) processors can do a lot in 10ms and that it's just below what
6671 * most consider human-perceivable. However, the kernel also runs on much
6672 * slower CPUs, including microcontrollers, where the threshold is way
6673 * too low.
6674 *
6675 * Let's scale up the threshold up to 1 second if BogoMIPS is below 4000.
6676 * This is by no means accurate but it doesn't have to be. The mechanism
6677 * is still useful even when the threshold is fully scaled up. Also, as
6678 * the reports would usually be applicable to everyone, some machines
6679 * operating on longer thresholds won't significantly diminish their
6680 * usefulness.
6681 */
6682 thresh = 10 * USEC_PER_MSEC;
6683
6684 /* see init/calibrate.c for lpj -> BogoMIPS calculation */
6685 bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1);
6686 if (bogo < 4000)
6687 thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC);
6688
6689 pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n",
6690 loops_per_jiffy, bogo, thresh);
6691
6692 wq_cpu_intensive_thresh_us = thresh;
6693 }
6694
6695 /**
6696 * workqueue_init - bring workqueue subsystem fully online
6697 *
6698 * This is the second step of three-staged workqueue subsystem initialization
6699 * and invoked as soon as kthreads can be created and scheduled. Workqueues have
6700 * been created and work items queued on them, but there are no kworkers
6701 * executing the work items yet. Populate the worker pools with the initial
6702 * workers and enable future kworker creations.
6703 */
6704 void __init workqueue_init(void)
6705 {
6706 struct workqueue_struct *wq;
6707 struct worker_pool *pool;
6708 int cpu, bkt;
6709
6710 wq_cpu_intensive_thresh_init();
6711
6712 mutex_lock(&wq_pool_mutex);
6713
6714 /*
6715 * Per-cpu pools created earlier could be missing node hint. Fix them
6716 * up. Also, create a rescuer for workqueues that requested it.
6717 */
6718 for_each_possible_cpu(cpu) {
6719 for_each_cpu_worker_pool(pool, cpu) {
6720 pool->node = cpu_to_node(cpu);
6721 }
6722 }
6723
6724 list_for_each_entry(wq, &workqueues, list) {
6725 WARN(init_rescuer(wq),
6726 "workqueue: failed to create early rescuer for %s",
6727 wq->name);
6728 }
6729
6730 mutex_unlock(&wq_pool_mutex);
6731
6732 /* create the initial workers */
6733 for_each_online_cpu(cpu) {
6734 for_each_cpu_worker_pool(pool, cpu) {
6735 pool->flags &= ~POOL_DISASSOCIATED;
6736 BUG_ON(!create_worker(pool));
6737 }
6738 }
6739
6740 hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
6741 BUG_ON(!create_worker(pool));
6742
6743 wq_online = true;
6744 wq_watchdog_init();
6745 }
6746
6747 /*
6748 * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to
6749 * @cpus_share_pod(). Each subset of CPUs that share a pod is assigned a unique
6750 * and consecutive pod ID. The rest of @pt is initialized accordingly.
6751 */
6752 static void __init init_pod_type(struct wq_pod_type *pt,
6753 bool (*cpus_share_pod)(int, int))
6754 {
6755 int cur, pre, cpu, pod;
6756
6757 pt->nr_pods = 0;
6758
6759 /* init @pt->cpu_pod[] according to @cpus_share_pod() */
6760 pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
6761 BUG_ON(!pt->cpu_pod);
6762
6763 for_each_possible_cpu(cur) {
6764 for_each_possible_cpu(pre) {
6765 if (pre >= cur) {
6766 pt->cpu_pod[cur] = pt->nr_pods++;
6767 break;
6768 }
6769 if (cpus_share_pod(cur, pre)) {
6770 pt->cpu_pod[cur] = pt->cpu_pod[pre];
6771 break;
6772 }
6773 }
6774 }
6775
6776 /* init the rest to match @pt->cpu_pod[] */
6777 pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
6778 pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL);
6779 BUG_ON(!pt->pod_cpus || !pt->pod_node);
6780
6781 for (pod = 0; pod < pt->nr_pods; pod++)
6782 BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL));
6783
6784 for_each_possible_cpu(cpu) {
6785 cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]);
6786 pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu);
6787 }
6788 }
6789
6790 static bool __init cpus_dont_share(int cpu0, int cpu1)
6791 {
6792 return false;
6793 }
6794
6795 static bool __init cpus_share_smt(int cpu0, int cpu1)
6796 {
6797 #ifdef CONFIG_SCHED_SMT
6798 return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1));
6799 #else
6800 return false;
6801 #endif
6802 }
6803
6804 static bool __init cpus_share_numa(int cpu0, int cpu1)
6805 {
6806 return cpu_to_node(cpu0) == cpu_to_node(cpu1);
6807 }
6808
6809 /**
6810 * workqueue_init_topology - initialize CPU pods for unbound workqueues
6811 *
6812 * This is the third step of three-staged workqueue subsystem initialization and
6813 * invoked after SMP and topology information are fully initialized. It
6814 * initializes the unbound CPU pods accordingly.
6815 */
6816 void __init workqueue_init_topology(void)
6817 {
6818 struct workqueue_struct *wq;
6819 int cpu;
6820
6821 init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share);
6822 init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);
6823 init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
6824 init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
6825
6826 mutex_lock(&wq_pool_mutex);
6827
6828 /*
6829 * Workqueues allocated earlier would have all CPUs sharing the default
6830 * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU
6831 * combinations to apply per-pod sharing.
6832 */
6833 list_for_each_entry(wq, &workqueues, list) {
6834 for_each_online_cpu(cpu) {
6835 wq_update_pod(wq, cpu, cpu, true);
6836 }
6837 }
6838
6839 mutex_unlock(&wq_pool_mutex);
6840 }
6841
6842 void __warn_flushing_systemwide_wq(void)
6843 {
6844 pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n");
6845 dump_stack();
6846 }
6847 EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
6848
6849 static int __init workqueue_unbound_cpus_setup(char *str)
6850 {
6851 if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) {
6852 cpumask_clear(&wq_cmdline_cpumask);
6853 pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n");
6854 }
6855
6856 return 1;
6857 }
6858 __setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup);
6859