/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 * Derived from the taskqueue/keventd code by:
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There are two worker pools for each CPU (one for
 * normal work items and the other for high priority ones) and some extra
 * pools for workqueues which are not bound to any specific CPU - the
 * number of these backing pools is dynamic.
 *
 * Please read Documentation/workqueue.txt for details.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>

#include "workqueue_internal.h"

enum {
	/*
	 * worker_pool flags
	 *
	 * A bound pool is either associated or disassociated with its CPU.
	 * While associated (!DISASSOCIATED), all workers are bound to the
	 * CPU and none has %WORKER_UNBOUND set and concurrency management
	 * is in effect.
	 *
	 * While DISASSOCIATED, the cpu may be offline and all workers have
	 * %WORKER_UNBOUND set and concurrency management disabled, and may
	 * be executing on any CPU.  The pool behaves as an unbound one.
	 *
	 * Note that DISASSOCIATED should be flipped only while holding
	 * attach_mutex to avoid changing binding state while
	 * worker_attach_to_pool() is in progress.
	 */
	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */

	/* worker flags */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
				  WORKER_UNBOUND | WORKER_REBOUND,

	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */

	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */

	/*
	 * Rescue workers are used only on emergencies and shared by
	 * all cpus.  Give MIN_NICE.
	 */
	RESCUER_NICE_LEVEL	= MIN_NICE,
	HIGHPRI_NICE_LEVEL	= MIN_NICE,

	WQ_NAME_LEN		= 24,
};

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected.  Access with pool->lock held.
 *
 * X: During normal operation, modification requires pool->lock and should
 *    be done only from local cpu.  Either disabling preemption on local
 *    cpu or grabbing pool->lock is enough for read access.  If
 *    POOL_DISASSOCIATED is set, it's identical to L.
 *
 * A: pool->attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes.  Sched-RCU protected for reads.
 *
 * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
 *
 * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
 *      sched-RCU for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes.  Sched-RCU protected for reads.
 *
 * MD: wq_mayday_lock protected.
 */

/* struct worker is defined in workqueue_internal.h */

struct worker_pool {
	spinlock_t		lock;		/* the pool lock */
	int			cpu;		/* I: the associated cpu */
	int			node;		/* I: the associated node ID */
	int			id;		/* I: pool ID */
	unsigned int		flags;		/* X: flags */

	struct list_head	worklist;	/* L: list of pending works */
	int			nr_workers;	/* L: total number of workers */

	/* nr_idle includes the ones off idle_list for rebinding */
	int			nr_idle;	/* L: currently idle ones */

	struct list_head	idle_list;	/* X: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	/* a worker is either on busy_hash or idle_list, or the manager */
	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
						/* L: hash of busy workers */

	/* see manage_workers() for details on the two manager mutexes */
	struct mutex		manager_arb;	/* manager arbitration */
	struct worker		*manager;	/* L: purely informational */
	struct mutex		attach_mutex;	/* attach/detach exclusion */
	struct list_head	workers;	/* A: attached workers */
	struct completion	*detach_completion; /* all workers detached */

	struct ida		worker_ida;	/* worker IDs for task name */

	struct workqueue_attrs	*attrs;		/* I: worker attributes */
	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
	int			refcnt;		/* PL: refcnt for unbound pools */

	/*
	 * The current concurrency level.  As it's likely to be accessed
	 * from other CPUs during try_to_wake_up(), put it in a separate
	 * cacheline.
	 */
	atomic_t		nr_running ____cacheline_aligned_in_smp;

	/*
	 * Destruction of pool is sched-RCU protected to allow dereferences
	 * from get_work_pool().
	 */
	struct rcu_head		rcu;
} ____cacheline_aligned_in_smp;

/*
 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 * number of flag bits.
 */
struct pool_workqueue {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			refcnt;		/* L: reference count */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
	struct list_head	mayday_node;	/* MD: node on wq->maydays */

	/*
	 * Release of unbound pwq is punted to system_wq.  See put_pwq()
	 * and pwq_unbound_release_workfn() for details.  pool_workqueue
	 * itself is also sched-RCU protected so that the first pwq can be
	 * determined without grabbing wq->mutex.
	 */
	struct work_struct	unbound_release_work;
	struct rcu_head		rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* WQ: list of flushers */
	int			flush_color;	/* WQ: flush color waiting for */
	struct completion	done;		/* flush completion */
};

struct wq_device;

/*
 * The externally visible workqueue.  It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
 */
struct workqueue_struct {
	struct list_head	pwqs;		/* WR: all pwqs of this wq */
	struct list_head	list;		/* PR: list of all workqueues */

	struct mutex		mutex;		/* protects this wq */
	int			work_color;	/* WQ: current work color */
	int			flush_color;	/* WQ: current flush color */
	atomic_t		nr_pwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
	struct list_head	flusher_queue;	/* WQ: flush waiters */
	struct list_head	flusher_overflow; /* WQ: flush overflow list */

	struct list_head	maydays;	/* MD: pwqs requesting rescue */
	struct worker		*rescuer;	/* I: rescue worker */

	int			nr_drainers;	/* WQ: drain in progress */
	int			saved_max_active; /* WQ: saved pwq max_active */

	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
	struct pool_workqueue	*dfl_pwq;	/* PW: only for unbound wqs */

#ifdef CONFIG_SYSFS
	struct wq_device	*wq_dev;	/* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
	char			name[WQ_NAME_LEN]; /* I: workqueue name */

	/*
	 * Destruction of workqueue_struct is sched-RCU protected to allow
	 * walking the workqueues list without grabbing wq_pool_mutex.
	 * This is used to dump all workqueues from sysrq.
	 */
	struct rcu_head		rcu;

	/* hot fields used during command issue, aligned to cacheline */
	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
	struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
};

static struct kmem_cache *pwq_cache;

static cpumask_var_t *wq_numa_possible_cpumask;
					/* possible CPUs of each node */

static bool wq_disable_numa;
module_param_named(disable_numa, wq_disable_numa, bool, 0444);

/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);

static bool wq_numa_enabled;		/* unbound NUMA affinity enabled */

/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */

static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
static bool workqueue_freezing;		/* PL: have wqs started freezing? */

static cpumask_var_t wq_unbound_cpumask; /* PL: low level cpumask for all unbound wqs */

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
				     cpu_worker_pools);

static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */

/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];

struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_wq);
struct workqueue_struct *system_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);

static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define assert_rcu_or_pool_mutex()					\
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&			\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "sched RCU or wq_pool_mutex should be held")

#define assert_rcu_or_wq_mutex(wq)					\
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&			\
			 !lockdep_is_held(&wq->mutex),			\
			 "sched RCU or wq->mutex should be held")

#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)			\
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&			\
			 !lockdep_is_held(&wq->mutex) &&		\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "sched RCU, wq->mutex or wq_pool_mutex should be held")

#define for_each_cpu_worker_pool(pool, cpu)				\
	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
	     (pool)++)

/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with wq_pool_mutex held or sched RCU read
 * locked.  If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool(pool, pi)						\
	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
		if (({ assert_rcu_or_pool_mutex(); false; })) { }	\
		else

/**
 * for_each_pool_worker - iterate through all workers of a worker_pool
 * @worker: iteration cursor
 * @pool: worker_pool to iterate workers of
 *
 * This must be called with @pool->attach_mutex.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool_worker(worker, pool)				\
	list_for_each_entry((worker), &(pool)->workers, node)		\
		if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
		else

/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with wq->mutex held or sched RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pwq(pwq, wq)						\
	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)		\
		if (({ assert_rcu_or_wq_mutex(wq); false; })) { }	\
		else

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
	return ((struct work_struct *) addr)->func;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

void destroy_delayed_work_on_stack(struct delayed_work *work)
{
	destroy_timer_on_stack(&work->timer);
	debug_object_free(&work->work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/**
 * worker_pool_assign_id - allocate ID and assign it to @pool
 * @pool: the pool pointer of interest
 *
 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 * successfully, -errno on failure.
 */
static int worker_pool_assign_id(struct worker_pool *pool)
{
	int ret;

	lockdep_assert_held(&wq_pool_mutex);

	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
			GFP_KERNEL);
	if (ret >= 0) {
		pool->id = ret;
		return 0;
	}
	return ret;
}

/**
 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
 * @wq: the target workqueue
 * @node: the node ID
 *
 * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
 * read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * Return: The unbound pool_workqueue for @node.
 */
static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
						  int node)
{
	assert_rcu_or_wq_mutex_or_pool_mutex(wq);
	return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

/*
 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
 * contain the pointer to the queued pwq.  Once execution starts, the flag
 * is cleared and the high bits contain OFFQ flags and pool ID.
 *
 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 * and clear_work_data() can be used to set the pwq, pool or clear
 * work->data.  These functions should only be called while the work is
 * owned - ie. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work.  Pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled.  pwq is
 * available only while the work item is queued.
 *
 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 * canceled.  While being canceled, a work item may have its PENDING set
 * but stay off timer and worklist for arbitrarily long and nobody should
 * try to steal the PENDING bit.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	WARN_ON_ONCE(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)pwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
}

static void set_work_pool_and_keep_pending(struct work_struct *work,
					   int pool_id)
{
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
		      WORK_STRUCT_PENDING);
}

static void set_work_pool_and_clear_pending(struct work_struct *work,
					    int pool_id)
{
	/*
	 * The following wmb is paired with the implied mb in
	 * test_and_set_bit(PENDING) and ensures all updates to @work made
	 * here are visible to and precede any updates by the next PENDING
	 * owner.
	 */
	smp_wmb();
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
}

static void clear_work_data(struct work_struct *work)
{
	smp_wmb();	/* see set_work_pool_and_clear_pending() */
	set_work_data(work, WORK_STRUCT_NO_POOL, 0);
}

static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
	else
		return NULL;
}
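/*
 * Illustrative sketch (not part of the upstream code) of how a reader
 * decodes the work->data word packed by the helpers above.  The flag and
 * shift constants come from include/linux/workqueue.h.
 *
 *	unsigned long data = atomic_long_read(&work->data);
 *
 *	if (data & WORK_STRUCT_PWQ) {
 *		struct pool_workqueue *pwq;
 *
 *		pwq = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *		...			queued: high bits are the pwq pointer
 *	} else {
 *		int pool_id = data >> WORK_OFFQ_POOL_SHIFT;
 *		bool canceling = data & WORK_OFFQ_CANCELING;
 *		...			off queue: OFFQ flags plus pool ID
 *	}
 *
 * get_work_pwq() above and get_work_pool() below implement exactly this
 * split.
 */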
/**
 * get_work_pool - return the worker_pool a given work was associated with
 * @work: the work item of interest
 *
 * Pools are created and destroyed under wq_pool_mutex, and allow read
 * access under sched-RCU read lock.  As such, this function should be
 * called under wq_pool_mutex or with preemption disabled.
 *
 * All fields of the returned pool are accessible as long as the above
 * mentioned locking is in effect.  If the returned pool needs to be used
 * beyond the critical section, the caller is responsible for ensuring the
 * returned pool is and stays online.
 *
 * Return: The worker_pool @work was last associated with.  %NULL if none.
 */
static struct worker_pool *get_work_pool(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	int pool_id;

	assert_rcu_or_pool_mutex();

	if (data & WORK_STRUCT_PWQ)
		return ((struct pool_workqueue *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->pool;

	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
	if (pool_id == WORK_OFFQ_POOL_NONE)
		return NULL;

	return idr_find(&worker_pool_idr, pool_id);
}

/**
 * get_work_pool_id - return the worker pool ID a given work is associated with
 * @work: the work item of interest
 *
 * Return: The worker_pool ID @work was last associated with.
 * %WORK_OFFQ_POOL_NONE if none.
 */
static int get_work_pool_id(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return ((struct pool_workqueue *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;

	return data >> WORK_OFFQ_POOL_SHIFT;
}

static void mark_work_canceling(struct work_struct *work)
{
	unsigned long pool_id = get_work_pool_id(work);

	pool_id <<= WORK_OFFQ_POOL_SHIFT;
	set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
}

static bool work_is_canceling(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
}

/*
 * Policy functions.  These define the policies on how the global worker
 * pools are managed.  Unless noted otherwise, these functions assume that
 * they're being called with pool->lock held.
 */

static bool __need_more_worker(struct worker_pool *pool)
{
	return !atomic_read(&pool->nr_running);
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound pools as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && __need_more_worker(pool);
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
	return pool->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) &&
		atomic_read(&pool->nr_running) <= 1;
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
	return need_more_worker(pool) && !may_start_working(pool);
}
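/*
 * Illustrative example (not part of the upstream code) of how the policy
 * helpers above play out on a bound pool:
 *
 *   - One worker is running an item (nr_running == 1) and more items are
 *     pending: need_more_worker() is false, so nobody else is woken and
 *     the items are processed one after another on that worker.
 *   - The running worker blocks: wq_worker_sleeping() (below) drops
 *     nr_running to 0, need_more_worker() becomes true and an idle
 *     worker is woken to keep the CPU busy.
 *   - A worker finishing an item checks keep_working(); with work still
 *     pending and nr_running <= 1 it keeps processing instead of idling.
 */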
/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
	bool managing = mutex_is_locked(&pool->manager_arb);
	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
	int nr_busy = pool->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}

/*
 * Wake up functions.
 */

/* Return the first idle worker.  Safe with preemption disabled */
static struct worker *first_idle_worker(struct worker_pool *pool)
{
	if (unlikely(list_empty(&pool->idle_list)))
		return NULL;

	return list_first_entry(&pool->idle_list, struct worker, entry);
}

/**
 * wake_up_worker - wake up an idle worker
 * @pool: worker pool to wake worker from
 *
 * Wake up the first idle worker of @pool.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void wake_up_worker(struct worker_pool *pool)
{
	struct worker *worker = first_idle_worker(pool);

	if (likely(worker))
		wake_up_process(worker->task);
}

/**
 * wq_worker_waking_up - a worker is waking up
 * @task: task waking up
 * @cpu: CPU @task is waking up to
 *
 * This function is called during try_to_wake_up() when a worker is
 * being awoken.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 */
void wq_worker_waking_up(struct task_struct *task, int cpu)
{
	struct worker *worker = kthread_data(task);

	if (!(worker->flags & WORKER_NOT_RUNNING)) {
		WARN_ON_ONCE(worker->pool->cpu != cpu);
		atomic_inc(&worker->pool->nr_running);
	}
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 * @cpu: CPU in question, must be the current CPU number
 *
 * This function is called during schedule() when a busy worker is
 * going to sleep.  A worker on the same cpu can be woken up by
 * returning a pointer to its task.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 *
 * Return:
 * Worker task on @cpu to wake up, %NULL if none.
 */
struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
{
	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
	struct worker_pool *pool;

	/*
	 * Rescuers, which may not have all the fields set up like normal
	 * workers, also reach here, let's not access anything before
	 * checking NOT_RUNNING.
	 */
	if (worker->flags & WORKER_NOT_RUNNING)
		return NULL;

	pool = worker->pool;

	/* this can only happen on the local cpu */
	if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu))
		return NULL;

	/*
	 * The counterpart of the following dec_and_test, implied mb,
	 * worklist not empty test sequence is in insert_work().
	 * Please read comment there.
	 *
	 * NOT_RUNNING is clear.  This means that we're bound to and
	 * running on the local cpu w/ rq lock held and preemption
	 * disabled, which in turn means that no one else could be
	 * manipulating idle_list, so dereferencing idle_list without pool
	 * lock is safe.
	 */
	if (atomic_dec_and_test(&pool->nr_running) &&
	    !list_empty(&pool->worklist))
		to_wakeup = first_idle_worker(pool);
	return to_wakeup ? to_wakeup->task : NULL;
}
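/*
 * Illustrative timeline (not part of the upstream code) of the two
 * scheduler hooks above keeping nr_running in sync on a bound pool:
 *
 *	worker A picks up an item		nr_running == 1
 *	worker A blocks on I/O			wq_worker_sleeping():  0
 *						-> idle worker B is woken
 *	worker B picks up an item		nr_running == 1
 *	worker A's I/O completes		wq_worker_waking_up(): 2
 *
 * The pool is momentarily over-provisioned; it settles back once one of
 * the two finishes its item and goes idle again.
 */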
/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock)
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
{
	struct worker_pool *pool = worker->pool;

	WARN_ON_ONCE(worker->task != current);

	/* If transitioning into NOT_RUNNING, adjust nr_running. */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		atomic_dec(&pool->nr_running);
	}

	worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock)
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct worker_pool *pool = worker->pool;
	unsigned int oflags = worker->flags;

	WARN_ON_ONCE(worker->task != current);

	worker->flags &= ~flags;

	/*
	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a mask
	 * of multiple flags, not a single flag.
	 */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
			atomic_inc(&pool->nr_running);
}
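/*
 * Illustrative example (not part of the upstream code): because
 * WORKER_NOT_RUNNING is a mask, stacking and unstacking its member flags
 * only touches nr_running on the first set and the last clear.
 *
 *	worker_set_flags(worker, WORKER_CPU_INTENSIVE);	nr_running--
 *	worker_set_flags(worker, WORKER_UNBOUND);		no change
 *	worker_clr_flags(worker, WORKER_CPU_INTENSIVE);	no change
 *	worker_clr_flags(worker, WORKER_UNBOUND);		nr_running++
 */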
/**
 * find_worker_executing_work - find worker which is executing a work
 * @pool: pool of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @pool by searching
 * @pool->busy_hash which is keyed by the address of @work.  For a worker
 * to match, its current execution should match the address of @work and
 * its work function.  This is to avoid unwanted dependency between
 * unrelated work executions through a work item being recycled while still
 * being executed.
 *
 * This is a bit tricky.  A work item may be freed once its execution
 * starts and nothing prevents the freed area from being recycled for
 * another work item.  If the same work item address ends up being reused
 * before the original execution finishes, workqueue will identify the
 * recycled work item as currently executing and make it wait until the
 * current execution finishes, introducing an unwanted dependency.
 *
 * This function checks the work item address and work function to avoid
 * false positives.  Note that this isn't complete as one may construct a
 * work function which can introduce dependency onto itself through a
 * recycled work item.  Well, if somebody wants to shoot oneself in the
 * foot that badly, there's only so much we can do, and if such deadlock
 * actually occurs, it should be easy to locate the culprit work function.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 *
 * Return:
 * Pointer to worker which is executing @work if found, %NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct worker_pool *pool,
						 struct work_struct *work)
{
	struct worker *worker;

	hash_for_each_possible(pool->busy_hash, worker, hentry,
			       (unsigned long)work)
		if (worker->current_work == work &&
		    worker->current_func == work->func)
			return worker;

	return NULL;
}

/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

/**
 * get_pwq - get an extra reference on the specified pool_workqueue
 * @pwq: pool_workqueue to get
 *
 * Obtain an extra reference on @pwq.  The caller should guarantee that
 * @pwq has positive refcnt and be holding the matching pool->lock.
 */
static void get_pwq(struct pool_workqueue *pwq)
{
	lockdep_assert_held(&pwq->pool->lock);
	WARN_ON_ONCE(pwq->refcnt <= 0);
	pwq->refcnt++;
}

/**
 * put_pwq - put a pool_workqueue reference
 * @pwq: pool_workqueue to put
 *
 * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
 * destruction.  The caller should be holding the matching pool->lock.
 */
static void put_pwq(struct pool_workqueue *pwq)
{
	lockdep_assert_held(&pwq->pool->lock);
	if (likely(--pwq->refcnt))
		return;
	if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
		return;
	/*
	 * @pwq can't be released under pool->lock, bounce to
	 * pwq_unbound_release_workfn().  This never recurses on the same
	 * pool->lock as this path is taken only for unbound workqueues and
	 * the release work item is scheduled on a per-cpu workqueue.  To
	 * avoid lockdep warning, unbound pool->locks are given lockdep
	 * subclass of 1 in get_unbound_pool().
	 */
	schedule_work(&pwq->unbound_release_work);
}
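/*
 * Illustrative sketch (not part of the upstream code) of the pwq refcount
 * lifecycle as used by the paths below: each queued work item pins its
 * pwq, and the pin is dropped when the item leaves the pwq.
 *
 *	spin_lock_irq(&pwq->pool->lock);
 *	get_pwq(pwq);			on queueing (see insert_work())
 *	...
 *	put_pwq(pwq);			on completion/removal (see
 *	spin_unlock_irq(&pwq->pool->lock);	pwq_dec_nr_in_flight())
 */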
/**
 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
 * @pwq: pool_workqueue to put (can be %NULL)
 *
 * put_pwq() with locking.  This function also allows %NULL @pwq.
 */
static void put_pwq_unlocked(struct pool_workqueue *pwq)
{
	if (pwq) {
		/*
		 * As both pwqs and pools are sched-RCU protected, the
		 * following lock operations are safe.
		 */
		spin_lock_irq(&pwq->pool->lock);
		put_pwq(pwq);
		spin_unlock_irq(&pwq->pool->lock);
	}
}

static void pwq_activate_delayed_work(struct work_struct *work)
{
	struct pool_workqueue *pwq = get_work_pwq(work);

	trace_workqueue_activate_work(work);
	move_linked_works(work, &pwq->pool->worklist, NULL);
	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
	pwq->nr_active++;
}

static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
{
	struct work_struct *work = list_first_entry(&pwq->delayed_works,
						    struct work_struct, entry);

	pwq_activate_delayed_work(work);
}

/**
 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
 * @pwq: pwq of interest
 * @color: color of work which left the queue
 *
 * A work has either completed or been removed from the pending queue;
 * decrement nr_in_flight of its pwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
{
	/* uncolored work items don't participate in flushing or nr_active */
	if (color == WORK_NO_COLOR)
		goto out_put;

	pwq->nr_in_flight[color]--;

	pwq->nr_active--;
	if (!list_empty(&pwq->delayed_works)) {
		/* one down, submit a delayed one */
		if (pwq->nr_active < pwq->max_active)
			pwq_activate_first_delayed(pwq);
	}

	/* is flush in progress and are we at the flushing tip? */
	if (likely(pwq->flush_color != color))
		goto out_put;

	/* are there still in-flight works? */
	if (pwq->nr_in_flight[color])
		goto out_put;

	/* this pwq is done, clear flush_color */
	pwq->flush_color = -1;

	/*
	 * If this was the last pwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
		complete(&pwq->wq->first_flusher->done);
out_put:
	put_pwq(pwq);
}
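/*
 * Illustrative example (not part of the upstream code) of the color
 * accounting above.  Suppose a flush starts while the pwq's work_color
 * is 5:
 *
 *	queue_work()	-> nr_in_flight[5]++		(work_color == 5)
 *	flush starts	-> pwq->flush_color = 5, work_color advances to 6
 *	item finishes	-> pwq_dec_nr_in_flight(pwq, 5)
 *			   when nr_in_flight[5] hits 0, flush_color is
 *			   cleared, nr_pwqs_to_flush is decremented and,
 *			   if this was the last pwq, the first flusher's
 *			   completion is signalled.
 */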
/**
 * try_to_grab_pending - steal work item from worklist and disable irq
 * @work: work item to steal
 * @is_dwork: @work is a delayed_work
 * @flags: place to store irq state
 *
 * Try to grab PENDING bit of @work.  This function can handle @work in any
 * stable state - idle, on timer or on worklist.
 *
 * Return:
 *  1		if @work was pending and we successfully stole PENDING
 *  0		if @work was idle and we claimed PENDING
 *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
 *  -ENOENT	if someone else is canceling @work, this state may persist
 *		for arbitrarily long
 *
 * Note:
 * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
 * interrupted while holding PENDING and @work off queue, irq must be
 * disabled on entry.  This, combined with delayed_work->timer being
 * irqsafe, ensures that we return -EAGAIN for finite short period of time.
 *
 * On successful return, >= 0, irq is disabled and the caller is
 * responsible for releasing it using local_irq_restore(*@flags).
 *
 * This function is safe to call from any context including IRQ handler.
 */
static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
			       unsigned long *flags)
{
	struct worker_pool *pool;
	struct pool_workqueue *pwq;

	local_irq_save(*flags);

	/* try to steal the timer if it exists */
	if (is_dwork) {
		struct delayed_work *dwork = to_delayed_work(work);

		/*
		 * dwork->timer is irqsafe.  If del_timer() fails, it's
		 * guaranteed that the timer is not queued anywhere and not
		 * running on the local CPU.
		 */
		if (likely(del_timer(&dwork->timer)))
			return 1;
	}

	/* try to claim PENDING the normal way */
	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued.  Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	pool = get_work_pool(work);
	if (!pool)
		goto fail;

	spin_lock(&pool->lock);
	/*
	 * work->data is guaranteed to point to pwq only while the work
	 * item is queued on pwq->wq, and both updating work->data to point
	 * to pwq on queueing and to pool on dequeueing are done under
	 * pwq->pool->lock.  This in turn guarantees that, if work->data
	 * points to pwq which is associated with a locked pool, the work
	 * item is currently queued on that pool.
	 */
	pwq = get_work_pwq(work);
	if (pwq && pwq->pool == pool) {
		debug_work_deactivate(work);

		/*
		 * A delayed work item cannot be grabbed directly because
		 * it might have linked NO_COLOR work items which, if left
		 * on the delayed_list, will confuse pwq->nr_active
		 * management later on and cause stall.  Make sure the work
		 * item is activated before grabbing.
		 */
		if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
			pwq_activate_delayed_work(work);

		list_del_init(&work->entry);
		pwq_dec_nr_in_flight(pwq, get_work_color(work));

		/* work->data points to pwq iff queued, point to pool */
		set_work_pool_and_keep_pending(work, pool->id);

		spin_unlock(&pool->lock);
		return 1;
	}
	spin_unlock(&pool->lock);
fail:
	local_irq_restore(*flags);
	if (work_is_canceling(work))
		return -ENOENT;
	cpu_relax();
	return -EAGAIN;
}

/**
 * insert_work - insert a work into a pool
 * @pwq: pwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
 * work_struct flags.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
			struct list_head *head, unsigned int extra_flags)
{
	struct worker_pool *pool = pwq->pool;

	/* we own @work, set data and link */
	set_work_pwq(work, pwq, extra_flags);
	list_add_tail(&work->entry, head);
	get_pwq(pwq);

	/*
	 * Ensure either wq_worker_sleeping() sees the above
	 * list_add_tail() or we see zero nr_running to avoid workers lying
	 * around lazily while there are works to be processed.
	 */
	smp_mb();

	if (__need_more_worker(pool))
		wake_up_worker(pool);
}
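/*
 * Illustrative sketch (not part of the upstream code) of the canonical
 * caller pattern for try_to_grab_pending(); mod_delayed_work_on() below
 * uses exactly this busy-retry loop:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	do {
 *		ret = try_to_grab_pending(work, is_dwork, &flags);
 *	} while (unlikely(ret == -EAGAIN));
 *
 *	if (ret >= 0) {
 *		... re-queue or mark the grabbed work as desired ...
 *		local_irq_restore(flags);
 *	}
 */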
/*
 * Test whether @work is being queued from another work executing on the
 * same workqueue.
 */
static bool is_chained_work(struct workqueue_struct *wq)
{
	struct worker *worker;

	worker = current_wq_worker();
	/*
	 * Return %true iff I'm a worker executing a work item on @wq.  If
	 * I'm @worker, it's safe to dereference it without locking.
	 */
	return worker && worker->current_pwq->wq == wq;
}

static void __queue_work(int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct pool_workqueue *pwq;
	struct worker_pool *last_pool;
	struct list_head *worklist;
	unsigned int work_flags;
	unsigned int req_cpu = cpu;

	/*
	 * While a work item is PENDING && off queue, a task trying to
	 * steal the PENDING will busy-loop waiting for it to either get
	 * queued or lose PENDING.  Grabbing PENDING and queueing should
	 * happen with IRQ disabled.
	 */
	WARN_ON_ONCE(!irqs_disabled());

	debug_work_activate(work);

	/* if draining, only works from the same workqueue are allowed */
	if (unlikely(wq->flags & __WQ_DRAINING) &&
	    WARN_ON_ONCE(!is_chained_work(wq)))
		return;
retry:
	if (req_cpu == WORK_CPU_UNBOUND)
		cpu = raw_smp_processor_id();

	/* pwq which will be used unless @work is executing elsewhere */
	if (!(wq->flags & WQ_UNBOUND))
		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
	else
		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));

	/*
	 * If @work was previously on a different pool, it might still be
	 * running there, in which case the work needs to be queued on that
	 * pool to guarantee non-reentrancy.
	 */
	last_pool = get_work_pool(work);
	if (last_pool && last_pool != pwq->pool) {
		struct worker *worker;

		spin_lock(&last_pool->lock);

		worker = find_worker_executing_work(last_pool, work);

		if (worker && worker->current_pwq->wq == wq) {
			pwq = worker->current_pwq;
		} else {
			/* meh... not running there, queue here */
			spin_unlock(&last_pool->lock);
			spin_lock(&pwq->pool->lock);
		}
	} else {
		spin_lock(&pwq->pool->lock);
	}

	/*
	 * pwq is determined and locked.  For unbound pools, we could have
	 * raced with pwq release and it could already be dead.  If its
	 * refcnt is zero, repeat pwq selection.  Note that pwqs never die
	 * without another pwq replacing it in the numa_pwq_tbl or while
	 * work items are executing on it, so the retrying is guaranteed to
	 * make forward-progress.
	 */
	if (unlikely(!pwq->refcnt)) {
		if (wq->flags & WQ_UNBOUND) {
			spin_unlock(&pwq->pool->lock);
			cpu_relax();
			goto retry;
		}
		/* oops */
		WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
			  wq->name, cpu);
	}

	/* pwq determined, queue */
	trace_workqueue_queue_work(req_cpu, pwq, work);

	if (WARN_ON(!list_empty(&work->entry))) {
		spin_unlock(&pwq->pool->lock);
		return;
	}

	pwq->nr_in_flight[pwq->work_color]++;
	work_flags = work_color_to_flags(pwq->work_color);

	if (likely(pwq->nr_active < pwq->max_active)) {
		trace_workqueue_activate_work(work);
		pwq->nr_active++;
		worklist = &pwq->pool->worklist;
	} else {
		work_flags |= WORK_STRUCT_DELAYED;
		worklist = &pwq->delayed_works;
	}

	insert_work(pwq, work, worklist, work_flags);

	spin_unlock(&pwq->pool->lock);
}

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 *
 * Return: %false if @work was already on a queue, %true otherwise.
 */
bool queue_work_on(int cpu, struct workqueue_struct *wq,
		   struct work_struct *work)
{
	bool ret = false;
	unsigned long flags;

	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = true;
	}

	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL(queue_work_on);

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;

	/* should have been called from irqsafe timer with irq already off */
	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
}
EXPORT_SYMBOL(delayed_work_timer_fn);

static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
				 struct delayed_work *dwork, unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
		     timer->data != (unsigned long)dwork);
	WARN_ON_ONCE(timer_pending(timer));
	WARN_ON_ONCE(!list_empty(&work->entry));

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		__queue_work(cpu, wq, &dwork->work);
		return;
	}

	timer_stats_timer_set_start_info(&dwork->timer);

	dwork->wq = wq;
	/* timer isn't guaranteed to run in this cpu, record earlier */
	if (cpu == WORK_CPU_UNBOUND)
		cpu = raw_smp_processor_id();
	dwork->cpu = cpu;
	timer->expires = jiffies + delay;

	add_timer_on(timer, cpu);
}
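/*
 * Illustrative usage sketch (not part of this file): how a driver would
 * typically feed the queueing paths above through the wrappers in
 * include/linux/workqueue.h.  "struct my_dev" and my_work_fn() are
 * hypothetical.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *		...
 *	}
 *
 *	INIT_WORK(&dev->work, my_work_fn);
 *	queue_work(system_wq, &dev->work);		current CPU's pool
 *	queue_work_on(3, system_wq, &dev->work);	pool of CPU 3
 *
 *	INIT_DELAYED_WORK(&dev->dwork, my_dwork_fn);
 *	queue_delayed_work(system_wq, &dev->dwork, HZ);	runs ~1s later
 */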
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Return: %false if @work was already on a queue, %true otherwise.  If
 * @delay is zero and @dwork is idle, it will be scheduled for immediate
 * execution.
 */
bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			   struct delayed_work *dwork, unsigned long delay)
{
	struct work_struct *work = &dwork->work;
	bool ret = false;
	unsigned long flags;

	/* read the comment in __queue_work() */
	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_delayed_work(cpu, wq, dwork, delay);
		ret = true;
	}

	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);

/**
 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
 * modify @dwork's timer so that it expires after @delay.  If @delay is
 * zero, @work is guaranteed to be scheduled immediately regardless of its
 * current state.
 *
 * Return: %false if @dwork was idle and queued, %true if @dwork was
 * pending and its timer was modified.
 *
 * This function is safe to call from any context including IRQ handler.
 * See try_to_grab_pending() for details.
 */
bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			 struct delayed_work *dwork, unsigned long delay)
{
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(&dwork->work, true, &flags);
	} while (unlikely(ret == -EAGAIN));

	if (likely(ret >= 0)) {
		__queue_delayed_work(cpu, wq, dwork, delay);
		local_irq_restore(flags);
	}

	/* -ENOENT from try_to_grab_pending() becomes %true */
	return ret;
}
EXPORT_SYMBOL_GPL(mod_delayed_work_on);

/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state.  Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * spin_lock_irq(pool->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
	    WARN_ON_ONCE(!list_empty(&worker->entry) &&
			 (worker->hentry.next || worker->hentry.pprev)))
		return;

	/* can't use worker_set_flags(), also called from create_worker() */
	worker->flags |= WORKER_IDLE;
	pool->nr_idle++;
	worker->last_active = jiffies;

	/* idle_list is LIFO */
	list_add(&worker->entry, &pool->idle_list);

	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);

	/*
	 * Sanity check nr_running.  Because wq_unbind_fn() releases
	 * pool->lock between setting %WORKER_UNBOUND and zapping
	 * nr_running, the warning may trigger spuriously.  Check iff
	 * unbind is not in progress.
	 */
	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
		     pool->nr_workers == pool->nr_idle &&
		     atomic_read(&pool->nr_running));
}
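/*
 * Illustrative usage sketch (not part of this file): the common
 * "debounce" pattern built on mod_delayed_work_on() via the
 * mod_delayed_work() wrapper from include/linux/workqueue.h.  Each call
 * pushes the expiry out again, so the (hypothetical) flush work runs
 * once, roughly a second after the last event.
 *
 *	static void my_event(struct my_dev *dev)
 *	{
 *		mod_delayed_work(system_wq, &dev->flush_dwork, HZ);
 *	}
 */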
/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * LOCKING:
 * spin_lock_irq(pool->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
		return;
	worker_clr_flags(worker, WORKER_IDLE);
	pool->nr_idle--;
	list_del_init(&worker->entry);
}

static struct worker *alloc_worker(int node)
{
	struct worker *worker;

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
	if (worker) {
		INIT_LIST_HEAD(&worker->entry);
		INIT_LIST_HEAD(&worker->scheduled);
		INIT_LIST_HEAD(&worker->node);
		/* on creation a worker is in !idle && prep state */
		worker->flags = WORKER_PREP;
	}
	return worker;
}

/**
 * worker_attach_to_pool() - attach a worker to a pool
 * @worker: worker to be attached
 * @pool: the target pool
 *
 * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
 * cpu-binding of @worker are kept coordinated with the pool across
 * cpu-[un]hotplugs.
 */
static void worker_attach_to_pool(struct worker *worker,
				  struct worker_pool *pool)
{
	mutex_lock(&pool->attach_mutex);

	/*
	 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
	 * online CPUs.  It'll be re-applied when any of the CPUs come up.
	 */
	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);

	/*
	 * The pool->attach_mutex ensures %POOL_DISASSOCIATED remains
	 * stable across this function.  See the comments above the
	 * flag definition for details.
	 */
	if (pool->flags & POOL_DISASSOCIATED)
		worker->flags |= WORKER_UNBOUND;

	list_add_tail(&worker->node, &pool->workers);

	mutex_unlock(&pool->attach_mutex);
}

/**
 * worker_detach_from_pool() - detach a worker from its pool
 * @worker: worker which is attached to its pool
 * @pool: the pool @worker is attached to
 *
 * Undo the attaching which had been done in worker_attach_to_pool().  The
 * caller worker shouldn't access the pool after detaching unless it holds
 * another reference to the pool.
 */
static void worker_detach_from_pool(struct worker *worker,
				    struct worker_pool *pool)
{
	struct completion *detach_completion = NULL;

	mutex_lock(&pool->attach_mutex);
	list_del(&worker->node);
	if (list_empty(&pool->workers))
		detach_completion = pool->detach_completion;
	mutex_unlock(&pool->attach_mutex);

	/* clear leftover flags without pool->lock after it is detached */
	worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);

	if (detach_completion)
		complete(detach_completion);
}
/**
 * create_worker - create a new workqueue worker
 * @pool: pool the new worker will belong to
 *
 * Create and start a new worker which is attached to @pool.
 *
 * CONTEXT:
 * Might sleep.  Does GFP_KERNEL allocations.
 *
 * Return:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct worker_pool *pool)
{
	struct worker *worker = NULL;
	int id = -1;
	char id_buf[16];

	/* ID is needed to determine kthread name */
	id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
	if (id < 0)
		goto fail;

	worker = alloc_worker(pool->node);
	if (!worker)
		goto fail;

	worker->pool = pool;
	worker->id = id;

	if (pool->cpu >= 0)
		snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
			 pool->attrs->nice < 0 ? "H" : "");
	else
		snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);

	worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
					      "kworker/%s", id_buf);
	if (IS_ERR(worker->task))
		goto fail;

	set_user_nice(worker->task, pool->attrs->nice);
	kthread_bind_mask(worker->task, pool->attrs->cpumask);

	/* successful, attach the worker to the pool */
	worker_attach_to_pool(worker, pool);

	/* start the newly created worker */
	spin_lock_irq(&pool->lock);
	worker->pool->nr_workers++;
	worker_enter_idle(worker);
	wake_up_process(worker->task);
	spin_unlock_irq(&pool->lock);

	return worker;

fail:
	if (id >= 0)
		ida_simple_remove(&pool->worker_ida, id);
	kfree(worker);
	return NULL;
}

/**
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
 * Destroy @worker and adjust @pool stats accordingly.  The worker should
 * be idle.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void destroy_worker(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	lockdep_assert_held(&pool->lock);

	/* sanity check frenzy */
	if (WARN_ON(worker->current_work) ||
	    WARN_ON(!list_empty(&worker->scheduled)) ||
	    WARN_ON(!(worker->flags & WORKER_IDLE)))
		return;

	pool->nr_workers--;
	pool->nr_idle--;

	list_del_init(&worker->entry);
	worker->flags |= WORKER_DIE;
	wake_up_process(worker->task);
}

static void idle_worker_timeout(unsigned long __pool)
{
	struct worker_pool *pool = (void *)__pool;

	spin_lock_irq(&pool->lock);

	while (too_many_workers(pool)) {
		struct worker *worker;
		unsigned long expires;

		/* idle_list is kept in LIFO order, check the last one */
		worker = list_entry(pool->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires)) {
			mod_timer(&pool->idle_timer, expires);
			break;
		}

		destroy_worker(worker);
	}

	spin_unlock_irq(&pool->lock);
}
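/*
 * Illustrative example (not part of the upstream code) of the culling
 * policy applied by idle_worker_timeout().  With MAX_IDLE_WORKERS_RATIO
 * of 4, a pool with 20 busy workers tolerates up to 6 idle ones:
 * too_many_workers() becomes true at the 7th, since (7 - 2) * 4 >= 20,
 * and any excess worker that has then been idle for IDLE_WORKER_TIMEOUT
 * (5 minutes) is destroyed, coldest first from the tail of the LIFO
 * idle_list.
 */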
1809 */ 1810 get_pwq(pwq); 1811 list_add_tail(&pwq->mayday_node, &wq->maydays); 1812 wake_up_process(wq->rescuer->task); 1813 } 1814 } 1815 1816 static void pool_mayday_timeout(unsigned long __pool) 1817 { 1818 struct worker_pool *pool = (void *)__pool; 1819 struct work_struct *work; 1820 1821 spin_lock_irq(&pool->lock); 1822 spin_lock(&wq_mayday_lock); /* for wq->maydays */ 1823 1824 if (need_to_create_worker(pool)) { 1825 /* 1826 * We've been trying to create a new worker but 1827 * haven't been successful. We might be hitting an 1828 * allocation deadlock. Send distress signals to 1829 * rescuers. 1830 */ 1831 list_for_each_entry(work, &pool->worklist, entry) 1832 send_mayday(work); 1833 } 1834 1835 spin_unlock(&wq_mayday_lock); 1836 spin_unlock_irq(&pool->lock); 1837 1838 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 1839 } 1840 1841 /** 1842 * maybe_create_worker - create a new worker if necessary 1843 * @pool: pool to create a new worker for 1844 * 1845 * Create a new worker for @pool if necessary. @pool is guaranteed to 1846 * have at least one idle worker on return from this function. If 1847 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is 1848 * sent to all rescuers with works scheduled on @pool to resolve 1849 * possible allocation deadlock. 1850 * 1851 * On return, need_to_create_worker() is guaranteed to be %false and 1852 * may_start_working() %true. 1853 * 1854 * LOCKING: 1855 * spin_lock_irq(pool->lock) which may be released and regrabbed 1856 * multiple times. Does GFP_KERNEL allocations. Called only from 1857 * manager. 1858 */ 1859 static void maybe_create_worker(struct worker_pool *pool) 1860 __releases(&pool->lock) 1861 __acquires(&pool->lock) 1862 { 1863 restart: 1864 spin_unlock_irq(&pool->lock); 1865 1866 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ 1867 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); 1868 1869 while (true) { 1870 if (create_worker(pool) || !need_to_create_worker(pool)) 1871 break; 1872 1873 schedule_timeout_interruptible(CREATE_COOLDOWN); 1874 1875 if (!need_to_create_worker(pool)) 1876 break; 1877 } 1878 1879 del_timer_sync(&pool->mayday_timer); 1880 spin_lock_irq(&pool->lock); 1881 /* 1882 * This is necessary even after a new worker was just successfully 1883 * created as @pool->lock was dropped and the new worker might have 1884 * already become busy. 1885 */ 1886 if (need_to_create_worker(pool)) 1887 goto restart; 1888 } 1889 1890 /** 1891 * manage_workers - manage worker pool 1892 * @worker: self 1893 * 1894 * Assume the manager role and manage the worker pool @worker belongs 1895 * to. At any given time, there can be only zero or one manager per 1896 * pool. The exclusion is handled automatically by this function. 1897 * 1898 * The caller can safely start processing works on false return. On 1899 * true return, it's guaranteed that need_to_create_worker() is false 1900 * and may_start_working() is true. 1901 * 1902 * CONTEXT: 1903 * spin_lock_irq(pool->lock) which may be released and regrabbed 1904 * multiple times. Does GFP_KERNEL allocations. 1905 * 1906 * Return: 1907 * %false if the pool doesn't need management and the caller can safely 1908 * start processing works, %true if management function was performed and 1909 * the conditions that the caller verified before calling the function may 1910 * no longer be true. 
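 *
 * For example, the caller in worker_thread() below rechecks its starting
 * condition after a %true return:
 *
 *	if (unlikely(!may_start_working(pool)) && manage_workers(worker))
 *		goto recheck;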
1911 */ 1912 static bool manage_workers(struct worker *worker) 1913 { 1914 struct worker_pool *pool = worker->pool; 1915 1916 /* 1917 * Anyone who successfully grabs manager_arb wins the arbitration 1918 * and becomes the manager. mutex_trylock() on pool->manager_arb 1919 * failure while holding pool->lock reliably indicates that someone 1920 * else is managing the pool and the worker which failed trylock 1921 * can proceed to executing work items. This means that anyone 1922 * grabbing manager_arb is responsible for actually performing 1923 * manager duties. If manager_arb is grabbed and released without 1924 * actual management, the pool may stall indefinitely. 1925 */ 1926 if (!mutex_trylock(&pool->manager_arb)) 1927 return false; 1928 pool->manager = worker; 1929 1930 maybe_create_worker(pool); 1931 1932 pool->manager = NULL; 1933 mutex_unlock(&pool->manager_arb); 1934 return true; 1935 } 1936 1937 /** 1938 * process_one_work - process single work 1939 * @worker: self 1940 * @work: work to process 1941 * 1942 * Process @work. This function contains all the logics necessary to 1943 * process a single work including synchronization against and 1944 * interaction with other workers on the same cpu, queueing and 1945 * flushing. As long as context requirement is met, any worker can 1946 * call this function to process a work. 1947 * 1948 * CONTEXT: 1949 * spin_lock_irq(pool->lock) which is released and regrabbed. 1950 */ 1951 static void process_one_work(struct worker *worker, struct work_struct *work) 1952 __releases(&pool->lock) 1953 __acquires(&pool->lock) 1954 { 1955 struct pool_workqueue *pwq = get_work_pwq(work); 1956 struct worker_pool *pool = worker->pool; 1957 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE; 1958 int work_color; 1959 struct worker *collision; 1960 #ifdef CONFIG_LOCKDEP 1961 /* 1962 * It is permissible to free the struct work_struct from 1963 * inside the function that is called from it, this we need to 1964 * take into account for lockdep too. To avoid bogus "held 1965 * lock freed" warnings as well as problems when looking into 1966 * work->lockdep_map, make a copy and use that here. 1967 */ 1968 struct lockdep_map lockdep_map; 1969 1970 lockdep_copy_map(&lockdep_map, &work->lockdep_map); 1971 #endif 1972 /* ensure we're on the correct CPU */ 1973 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && 1974 raw_smp_processor_id() != pool->cpu); 1975 1976 /* 1977 * A single work shouldn't be executed concurrently by 1978 * multiple workers on a single cpu. Check whether anyone is 1979 * already processing the work. If so, defer the work to the 1980 * currently executing one. 1981 */ 1982 collision = find_worker_executing_work(pool, work); 1983 if (unlikely(collision)) { 1984 move_linked_works(work, &collision->scheduled, NULL); 1985 return; 1986 } 1987 1988 /* claim and dequeue */ 1989 debug_work_deactivate(work); 1990 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); 1991 worker->current_work = work; 1992 worker->current_func = work->func; 1993 worker->current_pwq = pwq; 1994 work_color = get_work_color(work); 1995 1996 list_del_init(&work->entry); 1997 1998 /* 1999 * CPU intensive works don't participate in concurrency management. 2000 * They're the scheduler's responsibility. This takes @worker out 2001 * of concurrency management and the next code block will chain 2002 * execution of the pending work items. 
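 *
 * (Such work items come from workqueues created with %WQ_CPU_INTENSIVE,
 * e.g. a hypothetical alloc_workqueue("heavy", WQ_CPU_INTENSIVE, 0);
 * see how @cpu_intensive is derived from pwq->wq->flags above.)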
2003 */ 2004 if (unlikely(cpu_intensive)) 2005 worker_set_flags(worker, WORKER_CPU_INTENSIVE); 2006 2007 /* 2008 * Wake up another worker if necessary. The condition is always 2009 * false for normal per-cpu workers since nr_running would always 2010 * be >= 1 at this point. This is used to chain execution of the 2011 * pending work items for WORKER_NOT_RUNNING workers such as the 2012 * UNBOUND and CPU_INTENSIVE ones. 2013 */ 2014 if (need_more_worker(pool)) 2015 wake_up_worker(pool); 2016 2017 /* 2018 * Record the last pool and clear PENDING which should be the last 2019 * update to @work. Also, do this inside @pool->lock so that 2020 * PENDING and queued state changes happen together while IRQ is 2021 * disabled. 2022 */ 2023 set_work_pool_and_clear_pending(work, pool->id); 2024 2025 spin_unlock_irq(&pool->lock); 2026 2027 lock_map_acquire_read(&pwq->wq->lockdep_map); 2028 lock_map_acquire(&lockdep_map); 2029 trace_workqueue_execute_start(work); 2030 worker->current_func(work); 2031 /* 2032 * While we must be careful to not use "work" after this, the trace 2033 * point will only record its address. 2034 */ 2035 trace_workqueue_execute_end(work); 2036 lock_map_release(&lockdep_map); 2037 lock_map_release(&pwq->wq->lockdep_map); 2038 2039 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 2040 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" 2041 " last function: %pf\n", 2042 current->comm, preempt_count(), task_pid_nr(current), 2043 worker->current_func); 2044 debug_show_held_locks(current); 2045 dump_stack(); 2046 } 2047 2048 /* 2049 * The following prevents a kworker from hogging CPU on !PREEMPT 2050 * kernels, where a requeueing work item waiting for something to 2051 * happen could deadlock with stop_machine as such work item could 2052 * indefinitely requeue itself while all other CPUs are trapped in 2053 * stop_machine. At the same time, report a quiescent RCU state so 2054 * the same condition doesn't freeze RCU. 2055 */ 2056 cond_resched_rcu_qs(); 2057 2058 spin_lock_irq(&pool->lock); 2059 2060 /* clear cpu intensive status */ 2061 if (unlikely(cpu_intensive)) 2062 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 2063 2064 /* we're done with it, release */ 2065 hash_del(&worker->hentry); 2066 worker->current_work = NULL; 2067 worker->current_func = NULL; 2068 worker->current_pwq = NULL; 2069 worker->desc_valid = false; 2070 pwq_dec_nr_in_flight(pwq, work_color); 2071 } 2072 2073 /** 2074 * process_scheduled_works - process scheduled works 2075 * @worker: self 2076 * 2077 * Process all scheduled works. Please note that the scheduled list 2078 * may change while processing a work, so this function repeatedly 2079 * fetches a work from the top and executes it. 2080 * 2081 * CONTEXT: 2082 * spin_lock_irq(pool->lock) which may be released and regrabbed 2083 * multiple times. 2084 */ 2085 static void process_scheduled_works(struct worker *worker) 2086 { 2087 while (!list_empty(&worker->scheduled)) { 2088 struct work_struct *work = list_first_entry(&worker->scheduled, 2089 struct work_struct, entry); 2090 process_one_work(worker, work); 2091 } 2092 } 2093 2094 /** 2095 * worker_thread - the worker thread function 2096 * @__worker: self 2097 * 2098 * The worker thread function. All workers belong to a worker_pool - 2099 * either a per-cpu one or dynamic unbound one. These workers process all 2100 * work items regardless of their specific target workqueue. 
The only 2101 * exception is work items which belong to workqueues with a rescuer which 2102 * will be explained in rescuer_thread(). 2103 * 2104 * Return: 0 2105 */ 2106 static int worker_thread(void *__worker) 2107 { 2108 struct worker *worker = __worker; 2109 struct worker_pool *pool = worker->pool; 2110 2111 /* tell the scheduler that this is a workqueue worker */ 2112 worker->task->flags |= PF_WQ_WORKER; 2113 woke_up: 2114 spin_lock_irq(&pool->lock); 2115 2116 /* am I supposed to die? */ 2117 if (unlikely(worker->flags & WORKER_DIE)) { 2118 spin_unlock_irq(&pool->lock); 2119 WARN_ON_ONCE(!list_empty(&worker->entry)); 2120 worker->task->flags &= ~PF_WQ_WORKER; 2121 2122 set_task_comm(worker->task, "kworker/dying"); 2123 ida_simple_remove(&pool->worker_ida, worker->id); 2124 worker_detach_from_pool(worker, pool); 2125 kfree(worker); 2126 return 0; 2127 } 2128 2129 worker_leave_idle(worker); 2130 recheck: 2131 /* no more worker necessary? */ 2132 if (!need_more_worker(pool)) 2133 goto sleep; 2134 2135 /* do we need to manage? */ 2136 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) 2137 goto recheck; 2138 2139 /* 2140 * ->scheduled list can only be filled while a worker is 2141 * preparing to process a work or actually processing it. 2142 * Make sure nobody diddled with it while I was sleeping. 2143 */ 2144 WARN_ON_ONCE(!list_empty(&worker->scheduled)); 2145 2146 /* 2147 * Finish PREP stage. We're guaranteed to have at least one idle 2148 * worker or that someone else has already assumed the manager 2149 * role. This is where @worker starts participating in concurrency 2150 * management if applicable and concurrency management is restored 2151 * after being rebound. See rebind_workers() for details. 2152 */ 2153 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); 2154 2155 do { 2156 struct work_struct *work = 2157 list_first_entry(&pool->worklist, 2158 struct work_struct, entry); 2159 2160 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { 2161 /* optimization path, not strictly necessary */ 2162 process_one_work(worker, work); 2163 if (unlikely(!list_empty(&worker->scheduled))) 2164 process_scheduled_works(worker); 2165 } else { 2166 move_linked_works(work, &worker->scheduled, NULL); 2167 process_scheduled_works(worker); 2168 } 2169 } while (keep_working(pool)); 2170 2171 worker_set_flags(worker, WORKER_PREP); 2172 sleep: 2173 /* 2174 * pool->lock is held and there's no work to process and no need to 2175 * manage, sleep. Workers are woken up only while holding 2176 * pool->lock or from local cpu, so setting the current state 2177 * before releasing pool->lock is enough to prevent losing any 2178 * event. 2179 */ 2180 worker_enter_idle(worker); 2181 __set_current_state(TASK_INTERRUPTIBLE); 2182 spin_unlock_irq(&pool->lock); 2183 schedule(); 2184 goto woke_up; 2185 } 2186 2187 /** 2188 * rescuer_thread - the rescuer thread function 2189 * @__rescuer: self 2190 * 2191 * Workqueue rescuer thread function. There's one rescuer for each 2192 * workqueue which has WQ_MEM_RECLAIM set. 2193 * 2194 * Regular work processing on a pool may block trying to create a new 2195 * worker which uses GFP_KERNEL allocation which has slight chance of 2196 * developing into deadlock if some works currently on the same queue 2197 * need to be processed to satisfy the GFP_KERNEL allocation. This is 2198 * the problem rescuer solves. 
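 *
 * (A workqueue opts into having a rescuer by passing %WQ_MEM_RECLAIM to
 * alloc_workqueue(), e.g. a hypothetical
 * alloc_workqueue("myfs_writeback", WQ_MEM_RECLAIM, 0), trading one
 * dedicated kthread per workqueue for guaranteed forward progress under
 * memory pressure.)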
2199 * 2200 * When such condition is possible, the pool summons rescuers of all 2201 * workqueues which have works queued on the pool and let them process 2202 * those works so that forward progress can be guaranteed. 2203 * 2204 * This should happen rarely. 2205 * 2206 * Return: 0 2207 */ 2208 static int rescuer_thread(void *__rescuer) 2209 { 2210 struct worker *rescuer = __rescuer; 2211 struct workqueue_struct *wq = rescuer->rescue_wq; 2212 struct list_head *scheduled = &rescuer->scheduled; 2213 bool should_stop; 2214 2215 set_user_nice(current, RESCUER_NICE_LEVEL); 2216 2217 /* 2218 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it 2219 * doesn't participate in concurrency management. 2220 */ 2221 rescuer->task->flags |= PF_WQ_WORKER; 2222 repeat: 2223 set_current_state(TASK_INTERRUPTIBLE); 2224 2225 /* 2226 * By the time the rescuer is requested to stop, the workqueue 2227 * shouldn't have any work pending, but @wq->maydays may still have 2228 * pwq(s) queued. This can happen by non-rescuer workers consuming 2229 * all the work items before the rescuer got to them. Go through 2230 * @wq->maydays processing before acting on should_stop so that the 2231 * list is always empty on exit. 2232 */ 2233 should_stop = kthread_should_stop(); 2234 2235 /* see whether any pwq is asking for help */ 2236 spin_lock_irq(&wq_mayday_lock); 2237 2238 while (!list_empty(&wq->maydays)) { 2239 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, 2240 struct pool_workqueue, mayday_node); 2241 struct worker_pool *pool = pwq->pool; 2242 struct work_struct *work, *n; 2243 2244 __set_current_state(TASK_RUNNING); 2245 list_del_init(&pwq->mayday_node); 2246 2247 spin_unlock_irq(&wq_mayday_lock); 2248 2249 worker_attach_to_pool(rescuer, pool); 2250 2251 spin_lock_irq(&pool->lock); 2252 rescuer->pool = pool; 2253 2254 /* 2255 * Slurp in all works issued via this workqueue and 2256 * process'em. 2257 */ 2258 WARN_ON_ONCE(!list_empty(scheduled)); 2259 list_for_each_entry_safe(work, n, &pool->worklist, entry) 2260 if (get_work_pwq(work) == pwq) 2261 move_linked_works(work, scheduled, &n); 2262 2263 if (!list_empty(scheduled)) { 2264 process_scheduled_works(rescuer); 2265 2266 /* 2267 * The above execution of rescued work items could 2268 * have created more to rescue through 2269 * pwq_activate_first_delayed() or chained 2270 * queueing. Let's put @pwq back on mayday list so 2271 * that such back-to-back work items, which may be 2272 * being used to relieve memory pressure, don't 2273 * incur MAYDAY_INTERVAL delay inbetween. 2274 */ 2275 if (need_to_create_worker(pool)) { 2276 spin_lock(&wq_mayday_lock); 2277 get_pwq(pwq); 2278 list_move_tail(&pwq->mayday_node, &wq->maydays); 2279 spin_unlock(&wq_mayday_lock); 2280 } 2281 } 2282 2283 /* 2284 * Put the reference grabbed by send_mayday(). @pool won't 2285 * go away while we're still attached to it. 2286 */ 2287 put_pwq(pwq); 2288 2289 /* 2290 * Leave this pool. If need_more_worker() is %true, notify a 2291 * regular worker; otherwise, we end up with 0 concurrency 2292 * and stalling the execution. 
2293 */ 2294 if (need_more_worker(pool)) 2295 wake_up_worker(pool); 2296 2297 rescuer->pool = NULL; 2298 spin_unlock_irq(&pool->lock); 2299 2300 worker_detach_from_pool(rescuer, pool); 2301 2302 spin_lock_irq(&wq_mayday_lock); 2303 } 2304 2305 spin_unlock_irq(&wq_mayday_lock); 2306 2307 if (should_stop) { 2308 __set_current_state(TASK_RUNNING); 2309 rescuer->task->flags &= ~PF_WQ_WORKER; 2310 return 0; 2311 } 2312 2313 /* rescuers should never participate in concurrency management */ 2314 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); 2315 schedule(); 2316 goto repeat; 2317 } 2318 2319 struct wq_barrier { 2320 struct work_struct work; 2321 struct completion done; 2322 struct task_struct *task; /* purely informational */ 2323 }; 2324 2325 static void wq_barrier_func(struct work_struct *work) 2326 { 2327 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); 2328 complete(&barr->done); 2329 } 2330 2331 /** 2332 * insert_wq_barrier - insert a barrier work 2333 * @pwq: pwq to insert barrier into 2334 * @barr: wq_barrier to insert 2335 * @target: target work to attach @barr to 2336 * @worker: worker currently executing @target, NULL if @target is not executing 2337 * 2338 * @barr is linked to @target such that @barr is completed only after 2339 * @target finishes execution. Please note that the ordering 2340 * guarantee is observed only with respect to @target and on the local 2341 * cpu. 2342 * 2343 * Currently, a queued barrier can't be canceled. This is because 2344 * try_to_grab_pending() can't determine whether the work to be 2345 * grabbed is at the head of the queue and thus can't clear LINKED 2346 * flag of the previous work while there must be a valid next work 2347 * after a work with LINKED flag set. 2348 * 2349 * Note that when @worker is non-NULL, @target may be modified 2350 * underneath us, so we can't reliably determine pwq from @target. 2351 * 2352 * CONTEXT: 2353 * spin_lock_irq(pool->lock). 2354 */ 2355 static void insert_wq_barrier(struct pool_workqueue *pwq, 2356 struct wq_barrier *barr, 2357 struct work_struct *target, struct worker *worker) 2358 { 2359 struct list_head *head; 2360 unsigned int linked = 0; 2361 2362 /* 2363 * debugobject calls are safe here even with pool->lock locked 2364 * as we know for sure that this will not trigger any of the 2365 * checks and call back into the fixup functions where we 2366 * might deadlock. 2367 */ 2368 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); 2369 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); 2370 init_completion(&barr->done); 2371 barr->task = current; 2372 2373 /* 2374 * If @target is currently being executed, schedule the 2375 * barrier to the worker; otherwise, put it after @target. 2376 */ 2377 if (worker) 2378 head = worker->scheduled.next; 2379 else { 2380 unsigned long *bits = work_data_bits(target); 2381 2382 head = target->entry.next; 2383 /* there can already be other linked works, inherit and set */ 2384 linked = *bits & WORK_STRUCT_LINKED; 2385 __set_bit(WORK_STRUCT_LINKED_BIT, bits); 2386 } 2387 2388 debug_work_activate(&barr->work); 2389 insert_work(pwq, &barr->work, head, 2390 work_color_to_flags(WORK_NO_COLOR) | linked); 2391 } 2392 2393 /** 2394 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing 2395 * @wq: workqueue being flushed 2396 * @flush_color: new flush color, < 0 for no-op 2397 * @work_color: new work color, < 0 for no-op 2398 * 2399 * Prepare pwqs for workqueue flushing. 
2400 * 2401 * If @flush_color is non-negative, flush_color on all pwqs should be 2402 * -1. If no pwq has in-flight commands at the specified color, all 2403 * pwq->flush_color's stay at -1 and %false is returned. If any pwq 2404 * has in flight commands, its pwq->flush_color is set to 2405 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq 2406 * wakeup logic is armed and %true is returned. 2407 * 2408 * The caller should have initialized @wq->first_flusher prior to 2409 * calling this function with non-negative @flush_color. If 2410 * @flush_color is negative, no flush color update is done and %false 2411 * is returned. 2412 * 2413 * If @work_color is non-negative, all pwqs should have the same 2414 * work_color which is previous to @work_color and all will be 2415 * advanced to @work_color. 2416 * 2417 * CONTEXT: 2418 * mutex_lock(wq->mutex). 2419 * 2420 * Return: 2421 * %true if @flush_color >= 0 and there's something to flush. %false 2422 * otherwise. 2423 */ 2424 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, 2425 int flush_color, int work_color) 2426 { 2427 bool wait = false; 2428 struct pool_workqueue *pwq; 2429 2430 if (flush_color >= 0) { 2431 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); 2432 atomic_set(&wq->nr_pwqs_to_flush, 1); 2433 } 2434 2435 for_each_pwq(pwq, wq) { 2436 struct worker_pool *pool = pwq->pool; 2437 2438 spin_lock_irq(&pool->lock); 2439 2440 if (flush_color >= 0) { 2441 WARN_ON_ONCE(pwq->flush_color != -1); 2442 2443 if (pwq->nr_in_flight[flush_color]) { 2444 pwq->flush_color = flush_color; 2445 atomic_inc(&wq->nr_pwqs_to_flush); 2446 wait = true; 2447 } 2448 } 2449 2450 if (work_color >= 0) { 2451 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); 2452 pwq->work_color = work_color; 2453 } 2454 2455 spin_unlock_irq(&pool->lock); 2456 } 2457 2458 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) 2459 complete(&wq->first_flusher->done); 2460 2461 return wait; 2462 } 2463 2464 /** 2465 * flush_workqueue - ensure that any scheduled work has run to completion. 2466 * @wq: workqueue to flush 2467 * 2468 * This function sleeps until all work items which were queued on entry 2469 * have finished execution, but it is not livelocked by new incoming ones. 2470 */ 2471 void flush_workqueue(struct workqueue_struct *wq) 2472 { 2473 struct wq_flusher this_flusher = { 2474 .list = LIST_HEAD_INIT(this_flusher.list), 2475 .flush_color = -1, 2476 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done), 2477 }; 2478 int next_color; 2479 2480 lock_map_acquire(&wq->lockdep_map); 2481 lock_map_release(&wq->lockdep_map); 2482 2483 mutex_lock(&wq->mutex); 2484 2485 /* 2486 * Start-to-wait phase 2487 */ 2488 next_color = work_next_color(wq->work_color); 2489 2490 if (next_color != wq->flush_color) { 2491 /* 2492 * Color space is not full. The current work_color 2493 * becomes our flush_color and work_color is advanced 2494 * by one. 
2495 */ 2496 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); 2497 this_flusher.flush_color = wq->work_color; 2498 wq->work_color = next_color; 2499 2500 if (!wq->first_flusher) { 2501 /* no flush in progress, become the first flusher */ 2502 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 2503 2504 wq->first_flusher = &this_flusher; 2505 2506 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, 2507 wq->work_color)) { 2508 /* nothing to flush, done */ 2509 wq->flush_color = next_color; 2510 wq->first_flusher = NULL; 2511 goto out_unlock; 2512 } 2513 } else { 2514 /* wait in queue */ 2515 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); 2516 list_add_tail(&this_flusher.list, &wq->flusher_queue); 2517 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 2518 } 2519 } else { 2520 /* 2521 * Oops, color space is full, wait on overflow queue. 2522 * The next flush completion will assign us 2523 * flush_color and transfer to flusher_queue. 2524 */ 2525 list_add_tail(&this_flusher.list, &wq->flusher_overflow); 2526 } 2527 2528 mutex_unlock(&wq->mutex); 2529 2530 wait_for_completion(&this_flusher.done); 2531 2532 /* 2533 * Wake-up-and-cascade phase 2534 * 2535 * First flushers are responsible for cascading flushes and 2536 * handling overflow. Non-first flushers can simply return. 2537 */ 2538 if (wq->first_flusher != &this_flusher) 2539 return; 2540 2541 mutex_lock(&wq->mutex); 2542 2543 /* we might have raced, check again with mutex held */ 2544 if (wq->first_flusher != &this_flusher) 2545 goto out_unlock; 2546 2547 wq->first_flusher = NULL; 2548 2549 WARN_ON_ONCE(!list_empty(&this_flusher.list)); 2550 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 2551 2552 while (true) { 2553 struct wq_flusher *next, *tmp; 2554 2555 /* complete all the flushers sharing the current flush color */ 2556 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 2557 if (next->flush_color != wq->flush_color) 2558 break; 2559 list_del_init(&next->list); 2560 complete(&next->done); 2561 } 2562 2563 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && 2564 wq->flush_color != work_next_color(wq->work_color)); 2565 2566 /* this flush_color is finished, advance by one */ 2567 wq->flush_color = work_next_color(wq->flush_color); 2568 2569 /* one color has been freed, handle overflow queue */ 2570 if (!list_empty(&wq->flusher_overflow)) { 2571 /* 2572 * Assign the same color to all overflowed 2573 * flushers, advance work_color and append to 2574 * flusher_queue. This is the start-to-wait 2575 * phase for these overflowed flushers. 2576 */ 2577 list_for_each_entry(tmp, &wq->flusher_overflow, list) 2578 tmp->flush_color = wq->work_color; 2579 2580 wq->work_color = work_next_color(wq->work_color); 2581 2582 list_splice_tail_init(&wq->flusher_overflow, 2583 &wq->flusher_queue); 2584 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 2585 } 2586 2587 if (list_empty(&wq->flusher_queue)) { 2588 WARN_ON_ONCE(wq->flush_color != wq->work_color); 2589 break; 2590 } 2591 2592 /* 2593 * Need to flush more colors. Make the next flusher 2594 * the new first flusher and arm pwqs. 2595 */ 2596 WARN_ON_ONCE(wq->flush_color == wq->work_color); 2597 WARN_ON_ONCE(wq->flush_color != next->flush_color); 2598 2599 list_del_init(&next->list); 2600 wq->first_flusher = next; 2601 2602 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) 2603 break; 2604 2605 /* 2606 * Meh... this color is already done, clear first 2607 * flusher and repeat cascading. 
2608 */ 2609 wq->first_flusher = NULL; 2610 } 2611 2612 out_unlock: 2613 mutex_unlock(&wq->mutex); 2614 } 2615 EXPORT_SYMBOL(flush_workqueue); 2616 2617 /** 2618 * drain_workqueue - drain a workqueue 2619 * @wq: workqueue to drain 2620 * 2621 * Wait until the workqueue becomes empty. While draining is in progress, 2622 * only chain queueing is allowed. IOW, only currently pending or running 2623 * work items on @wq can queue further work items on it. @wq is flushed 2624 * repeatedly until it becomes empty. The number of flushing is determined 2625 * by the depth of chaining and should be relatively short. Whine if it 2626 * takes too long. 2627 */ 2628 void drain_workqueue(struct workqueue_struct *wq) 2629 { 2630 unsigned int flush_cnt = 0; 2631 struct pool_workqueue *pwq; 2632 2633 /* 2634 * __queue_work() needs to test whether there are drainers, is much 2635 * hotter than drain_workqueue() and already looks at @wq->flags. 2636 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers. 2637 */ 2638 mutex_lock(&wq->mutex); 2639 if (!wq->nr_drainers++) 2640 wq->flags |= __WQ_DRAINING; 2641 mutex_unlock(&wq->mutex); 2642 reflush: 2643 flush_workqueue(wq); 2644 2645 mutex_lock(&wq->mutex); 2646 2647 for_each_pwq(pwq, wq) { 2648 bool drained; 2649 2650 spin_lock_irq(&pwq->pool->lock); 2651 drained = !pwq->nr_active && list_empty(&pwq->delayed_works); 2652 spin_unlock_irq(&pwq->pool->lock); 2653 2654 if (drained) 2655 continue; 2656 2657 if (++flush_cnt == 10 || 2658 (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 2659 pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n", 2660 wq->name, flush_cnt); 2661 2662 mutex_unlock(&wq->mutex); 2663 goto reflush; 2664 } 2665 2666 if (!--wq->nr_drainers) 2667 wq->flags &= ~__WQ_DRAINING; 2668 mutex_unlock(&wq->mutex); 2669 } 2670 EXPORT_SYMBOL_GPL(drain_workqueue); 2671 2672 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) 2673 { 2674 struct worker *worker = NULL; 2675 struct worker_pool *pool; 2676 struct pool_workqueue *pwq; 2677 2678 might_sleep(); 2679 2680 local_irq_disable(); 2681 pool = get_work_pool(work); 2682 if (!pool) { 2683 local_irq_enable(); 2684 return false; 2685 } 2686 2687 spin_lock(&pool->lock); 2688 /* see the comment in try_to_grab_pending() with the same code */ 2689 pwq = get_work_pwq(work); 2690 if (pwq) { 2691 if (unlikely(pwq->pool != pool)) 2692 goto already_gone; 2693 } else { 2694 worker = find_worker_executing_work(pool, work); 2695 if (!worker) 2696 goto already_gone; 2697 pwq = worker->current_pwq; 2698 } 2699 2700 insert_wq_barrier(pwq, barr, work, worker); 2701 spin_unlock_irq(&pool->lock); 2702 2703 /* 2704 * If @max_active is 1 or rescuer is in use, flushing another work 2705 * item on the same workqueue may lead to deadlock. Make sure the 2706 * flusher is not running on the same workqueue by verifying write 2707 * access. 2708 */ 2709 if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) 2710 lock_map_acquire(&pwq->wq->lockdep_map); 2711 else 2712 lock_map_acquire_read(&pwq->wq->lockdep_map); 2713 lock_map_release(&pwq->wq->lockdep_map); 2714 2715 return true; 2716 already_gone: 2717 spin_unlock_irq(&pool->lock); 2718 return false; 2719 } 2720 2721 /** 2722 * flush_work - wait for a work to finish executing the last queueing instance 2723 * @work: the work to flush 2724 * 2725 * Wait until @work has finished execution. @work is guaranteed to be idle 2726 * on return if it hasn't been requeued since flush started. 
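 *
 * A minimal usage sketch (hypothetical work item and names, not part of
 * this file), assuming process context:
 *
 *	static void frob_fn(struct work_struct *work)
 *	{
 *		pr_info("frobbing\n");
 *	}
 *	static DECLARE_WORK(frob_work, frob_fn);
 *
 *	schedule_work(&frob_work);
 *	flush_work(&frob_work);
 *
 * Once flush_work() returns, the frob_fn() invocation queued above is
 * guaranteed to have finished.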
2727 * 2728 * Return: 2729 * %true if flush_work() waited for the work to finish execution, 2730 * %false if it was already idle. 2731 */ 2732 bool flush_work(struct work_struct *work) 2733 { 2734 struct wq_barrier barr; 2735 2736 lock_map_acquire(&work->lockdep_map); 2737 lock_map_release(&work->lockdep_map); 2738 2739 if (start_flush_work(work, &barr)) { 2740 wait_for_completion(&barr.done); 2741 destroy_work_on_stack(&barr.work); 2742 return true; 2743 } else { 2744 return false; 2745 } 2746 } 2747 EXPORT_SYMBOL_GPL(flush_work); 2748 2749 struct cwt_wait { 2750 wait_queue_t wait; 2751 struct work_struct *work; 2752 }; 2753 2754 static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key) 2755 { 2756 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); 2757 2758 if (cwait->work != key) 2759 return 0; 2760 return autoremove_wake_function(wait, mode, sync, key); 2761 } 2762 2763 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) 2764 { 2765 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); 2766 unsigned long flags; 2767 int ret; 2768 2769 do { 2770 ret = try_to_grab_pending(work, is_dwork, &flags); 2771 /* 2772 * If someone else is already canceling, wait for it to 2773 * finish. flush_work() doesn't work for PREEMPT_NONE 2774 * because we may get scheduled between @work's completion 2775 * and the other canceling task resuming and clearing 2776 * CANCELING - flush_work() will return false immediately 2777 * as @work is no longer busy, try_to_grab_pending() will 2778 * return -ENOENT as @work is still being canceled and the 2779 * other canceling task won't be able to clear CANCELING as 2780 * we're hogging the CPU. 2781 * 2782 * Let's wait for completion using a waitqueue. As this 2783 * may lead to the thundering herd problem, use a custom 2784 * wake function which matches @work along with exclusive 2785 * wait and wakeup. 2786 */ 2787 if (unlikely(ret == -ENOENT)) { 2788 struct cwt_wait cwait; 2789 2790 init_wait(&cwait.wait); 2791 cwait.wait.func = cwt_wakefn; 2792 cwait.work = work; 2793 2794 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, 2795 TASK_UNINTERRUPTIBLE); 2796 if (work_is_canceling(work)) 2797 schedule(); 2798 finish_wait(&cancel_waitq, &cwait.wait); 2799 } 2800 } while (unlikely(ret < 0)); 2801 2802 /* tell other tasks trying to grab @work to back off */ 2803 mark_work_canceling(work); 2804 local_irq_restore(flags); 2805 2806 flush_work(work); 2807 clear_work_data(work); 2808 2809 /* 2810 * Paired with prepare_to_wait() above so that either 2811 * waitqueue_active() is visible here or !work_is_canceling() is 2812 * visible there. 2813 */ 2814 smp_mb(); 2815 if (waitqueue_active(&cancel_waitq)) 2816 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); 2817 2818 return ret; 2819 } 2820 2821 /** 2822 * cancel_work_sync - cancel a work and wait for it to finish 2823 * @work: the work to cancel 2824 * 2825 * Cancel @work and wait for its execution to finish. This function 2826 * can be used even if the work re-queues itself or migrates to 2827 * another workqueue. On return from this function, @work is 2828 * guaranteed to be not pending or executing on any CPU. 2829 * 2830 * cancel_work_sync(&delayed_work->work) must not be used for 2831 * delayed_work's. Use cancel_delayed_work_sync() instead. 2832 * 2833 * The caller must ensure that the workqueue on which @work was last 2834 * queued can't be destroyed before this function returns. 2835 * 2836 * Return: 2837 * %true if @work was pending, %false otherwise. 
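 *
 * A common teardown sketch (hypothetical structure and names, not part of
 * this file) - cancel the work before freeing the object embedding it:
 *
 *	struct frob_dev {
 *		struct work_struct work;
 *		void *buf;
 *	};
 *
 *	static void frob_dev_destroy(struct frob_dev *fd)
 *	{
 *		cancel_work_sync(&fd->work);
 *		kfree(fd->buf);
 *		kfree(fd);
 *	}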
2838 */ 2839 bool cancel_work_sync(struct work_struct *work) 2840 { 2841 return __cancel_work_timer(work, false); 2842 } 2843 EXPORT_SYMBOL_GPL(cancel_work_sync); 2844 2845 /** 2846 * flush_delayed_work - wait for a dwork to finish executing the last queueing 2847 * @dwork: the delayed work to flush 2848 * 2849 * Delayed timer is cancelled and the pending work is queued for 2850 * immediate execution. Like flush_work(), this function only 2851 * considers the last queueing instance of @dwork. 2852 * 2853 * Return: 2854 * %true if flush_work() waited for the work to finish execution, 2855 * %false if it was already idle. 2856 */ 2857 bool flush_delayed_work(struct delayed_work *dwork) 2858 { 2859 local_irq_disable(); 2860 if (del_timer_sync(&dwork->timer)) 2861 __queue_work(dwork->cpu, dwork->wq, &dwork->work); 2862 local_irq_enable(); 2863 return flush_work(&dwork->work); 2864 } 2865 EXPORT_SYMBOL(flush_delayed_work); 2866 2867 /** 2868 * cancel_delayed_work - cancel a delayed work 2869 * @dwork: delayed_work to cancel 2870 * 2871 * Kill off a pending delayed_work. 2872 * 2873 * Return: %true if @dwork was pending and canceled; %false if it wasn't 2874 * pending. 2875 * 2876 * Note: 2877 * The work callback function may still be running on return, unless 2878 * it returns %true and the work doesn't re-arm itself. Explicitly flush or 2879 * use cancel_delayed_work_sync() to wait on it. 2880 * 2881 * This function is safe to call from any context including IRQ handler. 2882 */ 2883 bool cancel_delayed_work(struct delayed_work *dwork) 2884 { 2885 unsigned long flags; 2886 int ret; 2887 2888 do { 2889 ret = try_to_grab_pending(&dwork->work, true, &flags); 2890 } while (unlikely(ret == -EAGAIN)); 2891 2892 if (unlikely(ret < 0)) 2893 return false; 2894 2895 set_work_pool_and_clear_pending(&dwork->work, 2896 get_work_pool_id(&dwork->work)); 2897 local_irq_restore(flags); 2898 return ret; 2899 } 2900 EXPORT_SYMBOL(cancel_delayed_work); 2901 2902 /** 2903 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish 2904 * @dwork: the delayed work cancel 2905 * 2906 * This is cancel_work_sync() for delayed works. 2907 * 2908 * Return: 2909 * %true if @dwork was pending, %false otherwise. 2910 */ 2911 bool cancel_delayed_work_sync(struct delayed_work *dwork) 2912 { 2913 return __cancel_work_timer(&dwork->work, true); 2914 } 2915 EXPORT_SYMBOL(cancel_delayed_work_sync); 2916 2917 /** 2918 * schedule_on_each_cpu - execute a function synchronously on each online CPU 2919 * @func: the function to call 2920 * 2921 * schedule_on_each_cpu() executes @func on each online CPU using the 2922 * system workqueue and blocks until all CPUs have completed. 2923 * schedule_on_each_cpu() is very slow. 2924 * 2925 * Return: 2926 * 0 on success, -errno on failure. 
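 *
 * A minimal usage sketch (hypothetical function and counter, not part of
 * this file):
 *
 *	static DEFINE_PER_CPU(unsigned long, frob_count);
 *
 *	static void frob_on_each_cpu(struct work_struct *work)
 *	{
 *		this_cpu_inc(frob_count);
 *	}
 *
 *	int ret = schedule_on_each_cpu(frob_on_each_cpu);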
2927 */ 2928 int schedule_on_each_cpu(work_func_t func) 2929 { 2930 int cpu; 2931 struct work_struct __percpu *works; 2932 2933 works = alloc_percpu(struct work_struct); 2934 if (!works) 2935 return -ENOMEM; 2936 2937 get_online_cpus(); 2938 2939 for_each_online_cpu(cpu) { 2940 struct work_struct *work = per_cpu_ptr(works, cpu); 2941 2942 INIT_WORK(work, func); 2943 schedule_work_on(cpu, work); 2944 } 2945 2946 for_each_online_cpu(cpu) 2947 flush_work(per_cpu_ptr(works, cpu)); 2948 2949 put_online_cpus(); 2950 free_percpu(works); 2951 return 0; 2952 } 2953 2954 /** 2955 * execute_in_process_context - reliably execute the routine with user context 2956 * @fn: the function to execute 2957 * @ew: guaranteed storage for the execute work structure (must 2958 * be available when the work executes) 2959 * 2960 * Executes the function immediately if process context is available, 2961 * otherwise schedules the function for delayed execution. 2962 * 2963 * Return: 0 - function was executed 2964 * 1 - function was scheduled for execution 2965 */ 2966 int execute_in_process_context(work_func_t fn, struct execute_work *ew) 2967 { 2968 if (!in_interrupt()) { 2969 fn(&ew->work); 2970 return 0; 2971 } 2972 2973 INIT_WORK(&ew->work, fn); 2974 schedule_work(&ew->work); 2975 2976 return 1; 2977 } 2978 EXPORT_SYMBOL_GPL(execute_in_process_context); 2979 2980 /** 2981 * free_workqueue_attrs - free a workqueue_attrs 2982 * @attrs: workqueue_attrs to free 2983 * 2984 * Undo alloc_workqueue_attrs(). 2985 */ 2986 void free_workqueue_attrs(struct workqueue_attrs *attrs) 2987 { 2988 if (attrs) { 2989 free_cpumask_var(attrs->cpumask); 2990 kfree(attrs); 2991 } 2992 } 2993 2994 /** 2995 * alloc_workqueue_attrs - allocate a workqueue_attrs 2996 * @gfp_mask: allocation mask to use 2997 * 2998 * Allocate a new workqueue_attrs, initialize with default settings and 2999 * return it. 3000 * 3001 * Return: The allocated new workqueue_attr on success. %NULL on failure. 3002 */ 3003 struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask) 3004 { 3005 struct workqueue_attrs *attrs; 3006 3007 attrs = kzalloc(sizeof(*attrs), gfp_mask); 3008 if (!attrs) 3009 goto fail; 3010 if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask)) 3011 goto fail; 3012 3013 cpumask_copy(attrs->cpumask, cpu_possible_mask); 3014 return attrs; 3015 fail: 3016 free_workqueue_attrs(attrs); 3017 return NULL; 3018 } 3019 3020 static void copy_workqueue_attrs(struct workqueue_attrs *to, 3021 const struct workqueue_attrs *from) 3022 { 3023 to->nice = from->nice; 3024 cpumask_copy(to->cpumask, from->cpumask); 3025 /* 3026 * Unlike hash and equality test, this function doesn't ignore 3027 * ->no_numa as it is used for both pool and wq attrs. Instead, 3028 * get_unbound_pool() explicitly clears ->no_numa after copying. 
3029 */ 3030 to->no_numa = from->no_numa; 3031 } 3032 3033 /* hash value of the content of @attr */ 3034 static u32 wqattrs_hash(const struct workqueue_attrs *attrs) 3035 { 3036 u32 hash = 0; 3037 3038 hash = jhash_1word(attrs->nice, hash); 3039 hash = jhash(cpumask_bits(attrs->cpumask), 3040 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 3041 return hash; 3042 } 3043 3044 /* content equality test */ 3045 static bool wqattrs_equal(const struct workqueue_attrs *a, 3046 const struct workqueue_attrs *b) 3047 { 3048 if (a->nice != b->nice) 3049 return false; 3050 if (!cpumask_equal(a->cpumask, b->cpumask)) 3051 return false; 3052 return true; 3053 } 3054 3055 /** 3056 * init_worker_pool - initialize a newly zalloc'd worker_pool 3057 * @pool: worker_pool to initialize 3058 * 3059 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs. 3060 * 3061 * Return: 0 on success, -errno on failure. Even on failure, all fields 3062 * inside @pool proper are initialized and put_unbound_pool() can be called 3063 * on @pool safely to release it. 3064 */ 3065 static int init_worker_pool(struct worker_pool *pool) 3066 { 3067 spin_lock_init(&pool->lock); 3068 pool->id = -1; 3069 pool->cpu = -1; 3070 pool->node = NUMA_NO_NODE; 3071 pool->flags |= POOL_DISASSOCIATED; 3072 INIT_LIST_HEAD(&pool->worklist); 3073 INIT_LIST_HEAD(&pool->idle_list); 3074 hash_init(pool->busy_hash); 3075 3076 init_timer_deferrable(&pool->idle_timer); 3077 pool->idle_timer.function = idle_worker_timeout; 3078 pool->idle_timer.data = (unsigned long)pool; 3079 3080 setup_timer(&pool->mayday_timer, pool_mayday_timeout, 3081 (unsigned long)pool); 3082 3083 mutex_init(&pool->manager_arb); 3084 mutex_init(&pool->attach_mutex); 3085 INIT_LIST_HEAD(&pool->workers); 3086 3087 ida_init(&pool->worker_ida); 3088 INIT_HLIST_NODE(&pool->hash_node); 3089 pool->refcnt = 1; 3090 3091 /* shouldn't fail above this point */ 3092 pool->attrs = alloc_workqueue_attrs(GFP_KERNEL); 3093 if (!pool->attrs) 3094 return -ENOMEM; 3095 return 0; 3096 } 3097 3098 static void rcu_free_wq(struct rcu_head *rcu) 3099 { 3100 struct workqueue_struct *wq = 3101 container_of(rcu, struct workqueue_struct, rcu); 3102 3103 if (!(wq->flags & WQ_UNBOUND)) 3104 free_percpu(wq->cpu_pwqs); 3105 else 3106 free_workqueue_attrs(wq->unbound_attrs); 3107 3108 kfree(wq->rescuer); 3109 kfree(wq); 3110 } 3111 3112 static void rcu_free_pool(struct rcu_head *rcu) 3113 { 3114 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); 3115 3116 ida_destroy(&pool->worker_ida); 3117 free_workqueue_attrs(pool->attrs); 3118 kfree(pool); 3119 } 3120 3121 /** 3122 * put_unbound_pool - put a worker_pool 3123 * @pool: worker_pool to put 3124 * 3125 * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU 3126 * safe manner. get_unbound_pool() calls this function on its failure path 3127 * and this function should be able to release pools which went through, 3128 * successfully or not, init_worker_pool(). 3129 * 3130 * Should be called with wq_pool_mutex held. 
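 * (enforced with lockdep_assert_held() in the function body below).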
3131 */ 3132 static void put_unbound_pool(struct worker_pool *pool) 3133 { 3134 DECLARE_COMPLETION_ONSTACK(detach_completion); 3135 struct worker *worker; 3136 3137 lockdep_assert_held(&wq_pool_mutex); 3138 3139 if (--pool->refcnt) 3140 return; 3141 3142 /* sanity checks */ 3143 if (WARN_ON(!(pool->cpu < 0)) || 3144 WARN_ON(!list_empty(&pool->worklist))) 3145 return; 3146 3147 /* release id and unhash */ 3148 if (pool->id >= 0) 3149 idr_remove(&worker_pool_idr, pool->id); 3150 hash_del(&pool->hash_node); 3151 3152 /* 3153 * Become the manager and destroy all workers. Grabbing 3154 * manager_arb prevents @pool's workers from blocking on 3155 * attach_mutex. 3156 */ 3157 mutex_lock(&pool->manager_arb); 3158 3159 spin_lock_irq(&pool->lock); 3160 while ((worker = first_idle_worker(pool))) 3161 destroy_worker(worker); 3162 WARN_ON(pool->nr_workers || pool->nr_idle); 3163 spin_unlock_irq(&pool->lock); 3164 3165 mutex_lock(&pool->attach_mutex); 3166 if (!list_empty(&pool->workers)) 3167 pool->detach_completion = &detach_completion; 3168 mutex_unlock(&pool->attach_mutex); 3169 3170 if (pool->detach_completion) 3171 wait_for_completion(pool->detach_completion); 3172 3173 mutex_unlock(&pool->manager_arb); 3174 3175 /* shut down the timers */ 3176 del_timer_sync(&pool->idle_timer); 3177 del_timer_sync(&pool->mayday_timer); 3178 3179 /* sched-RCU protected to allow dereferences from get_work_pool() */ 3180 call_rcu_sched(&pool->rcu, rcu_free_pool); 3181 } 3182 3183 /** 3184 * get_unbound_pool - get a worker_pool with the specified attributes 3185 * @attrs: the attributes of the worker_pool to get 3186 * 3187 * Obtain a worker_pool which has the same attributes as @attrs, bump the 3188 * reference count and return it. If there already is a matching 3189 * worker_pool, it will be used; otherwise, this function attempts to 3190 * create a new one. 3191 * 3192 * Should be called with wq_pool_mutex held. 3193 * 3194 * Return: On success, a worker_pool with the same attributes as @attrs. 3195 * On failure, %NULL. 3196 */ 3197 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) 3198 { 3199 u32 hash = wqattrs_hash(attrs); 3200 struct worker_pool *pool; 3201 int node; 3202 int target_node = NUMA_NO_NODE; 3203 3204 lockdep_assert_held(&wq_pool_mutex); 3205 3206 /* do we already have a matching pool? */ 3207 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { 3208 if (wqattrs_equal(pool->attrs, attrs)) { 3209 pool->refcnt++; 3210 return pool; 3211 } 3212 } 3213 3214 /* if cpumask is contained inside a NUMA node, we belong to that node */ 3215 if (wq_numa_enabled) { 3216 for_each_node(node) { 3217 if (cpumask_subset(attrs->cpumask, 3218 wq_numa_possible_cpumask[node])) { 3219 target_node = node; 3220 break; 3221 } 3222 } 3223 } 3224 3225 /* nope, create a new one */ 3226 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node); 3227 if (!pool || init_worker_pool(pool) < 0) 3228 goto fail; 3229 3230 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */ 3231 copy_workqueue_attrs(pool->attrs, attrs); 3232 pool->node = target_node; 3233 3234 /* 3235 * no_numa isn't a worker_pool attribute, always clear it. See 3236 * 'struct workqueue_attrs' comments for detail. 
3237 */ 3238 pool->attrs->no_numa = false; 3239 3240 if (worker_pool_assign_id(pool) < 0) 3241 goto fail; 3242 3243 /* create and start the initial worker */ 3244 if (!create_worker(pool)) 3245 goto fail; 3246 3247 /* install */ 3248 hash_add(unbound_pool_hash, &pool->hash_node, hash); 3249 3250 return pool; 3251 fail: 3252 if (pool) 3253 put_unbound_pool(pool); 3254 return NULL; 3255 } 3256 3257 static void rcu_free_pwq(struct rcu_head *rcu) 3258 { 3259 kmem_cache_free(pwq_cache, 3260 container_of(rcu, struct pool_workqueue, rcu)); 3261 } 3262 3263 /* 3264 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt 3265 * and needs to be destroyed. 3266 */ 3267 static void pwq_unbound_release_workfn(struct work_struct *work) 3268 { 3269 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, 3270 unbound_release_work); 3271 struct workqueue_struct *wq = pwq->wq; 3272 struct worker_pool *pool = pwq->pool; 3273 bool is_last; 3274 3275 if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND))) 3276 return; 3277 3278 mutex_lock(&wq->mutex); 3279 list_del_rcu(&pwq->pwqs_node); 3280 is_last = list_empty(&wq->pwqs); 3281 mutex_unlock(&wq->mutex); 3282 3283 mutex_lock(&wq_pool_mutex); 3284 put_unbound_pool(pool); 3285 mutex_unlock(&wq_pool_mutex); 3286 3287 call_rcu_sched(&pwq->rcu, rcu_free_pwq); 3288 3289 /* 3290 * If we're the last pwq going away, @wq is already dead and no one 3291 * is gonna access it anymore. Schedule RCU free. 3292 */ 3293 if (is_last) 3294 call_rcu_sched(&wq->rcu, rcu_free_wq); 3295 } 3296 3297 /** 3298 * pwq_adjust_max_active - update a pwq's max_active to the current setting 3299 * @pwq: target pool_workqueue 3300 * 3301 * If @pwq isn't freezing, set @pwq->max_active to the associated 3302 * workqueue's saved_max_active and activate delayed work items 3303 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero. 3304 */ 3305 static void pwq_adjust_max_active(struct pool_workqueue *pwq) 3306 { 3307 struct workqueue_struct *wq = pwq->wq; 3308 bool freezable = wq->flags & WQ_FREEZABLE; 3309 3310 /* for @wq->saved_max_active */ 3311 lockdep_assert_held(&wq->mutex); 3312 3313 /* fast exit for non-freezable wqs */ 3314 if (!freezable && pwq->max_active == wq->saved_max_active) 3315 return; 3316 3317 spin_lock_irq(&pwq->pool->lock); 3318 3319 /* 3320 * During [un]freezing, the caller is responsible for ensuring that 3321 * this function is called at least once after @workqueue_freezing 3322 * is updated and visible. 3323 */ 3324 if (!freezable || !workqueue_freezing) { 3325 pwq->max_active = wq->saved_max_active; 3326 3327 while (!list_empty(&pwq->delayed_works) && 3328 pwq->nr_active < pwq->max_active) 3329 pwq_activate_first_delayed(pwq); 3330 3331 /* 3332 * Need to kick a worker after thawed or an unbound wq's 3333 * max_active is bumped. It's a slow path. Do it always. 
3334 */ 3335 wake_up_worker(pwq->pool); 3336 } else { 3337 pwq->max_active = 0; 3338 } 3339 3340 spin_unlock_irq(&pwq->pool->lock); 3341 } 3342 3343 /* initialize newly alloced @pwq which is associated with @wq and @pool */ 3344 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, 3345 struct worker_pool *pool) 3346 { 3347 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); 3348 3349 memset(pwq, 0, sizeof(*pwq)); 3350 3351 pwq->pool = pool; 3352 pwq->wq = wq; 3353 pwq->flush_color = -1; 3354 pwq->refcnt = 1; 3355 INIT_LIST_HEAD(&pwq->delayed_works); 3356 INIT_LIST_HEAD(&pwq->pwqs_node); 3357 INIT_LIST_HEAD(&pwq->mayday_node); 3358 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn); 3359 } 3360 3361 /* sync @pwq with the current state of its associated wq and link it */ 3362 static void link_pwq(struct pool_workqueue *pwq) 3363 { 3364 struct workqueue_struct *wq = pwq->wq; 3365 3366 lockdep_assert_held(&wq->mutex); 3367 3368 /* may be called multiple times, ignore if already linked */ 3369 if (!list_empty(&pwq->pwqs_node)) 3370 return; 3371 3372 /* set the matching work_color */ 3373 pwq->work_color = wq->work_color; 3374 3375 /* sync max_active to the current setting */ 3376 pwq_adjust_max_active(pwq); 3377 3378 /* link in @pwq */ 3379 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); 3380 } 3381 3382 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */ 3383 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, 3384 const struct workqueue_attrs *attrs) 3385 { 3386 struct worker_pool *pool; 3387 struct pool_workqueue *pwq; 3388 3389 lockdep_assert_held(&wq_pool_mutex); 3390 3391 pool = get_unbound_pool(attrs); 3392 if (!pool) 3393 return NULL; 3394 3395 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); 3396 if (!pwq) { 3397 put_unbound_pool(pool); 3398 return NULL; 3399 } 3400 3401 init_pwq(pwq, wq, pool); 3402 return pwq; 3403 } 3404 3405 /** 3406 * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node 3407 * @attrs: the wq_attrs of the default pwq of the target workqueue 3408 * @node: the target NUMA node 3409 * @cpu_going_down: if >= 0, the CPU to consider as offline 3410 * @cpumask: outarg, the resulting cpumask 3411 * 3412 * Calculate the cpumask a workqueue with @attrs should use on @node. If 3413 * @cpu_going_down is >= 0, that cpu is considered offline during 3414 * calculation. The result is stored in @cpumask. 3415 * 3416 * If NUMA affinity is not enabled, @attrs->cpumask is always used. If 3417 * enabled and @node has online CPUs requested by @attrs, the returned 3418 * cpumask is the intersection of the possible CPUs of @node and 3419 * @attrs->cpumask. 3420 * 3421 * The caller is responsible for ensuring that the cpumask of @node stays 3422 * stable. 3423 * 3424 * Return: %true if the resulting @cpumask is different from @attrs->cpumask, 3425 * %false if equal. 3426 */ 3427 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node, 3428 int cpu_going_down, cpumask_t *cpumask) 3429 { 3430 if (!wq_numa_enabled || attrs->no_numa) 3431 goto use_dfl; 3432 3433 /* does @node have any online CPUs @attrs wants? 
*/ 3434 cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask); 3435 if (cpu_going_down >= 0) 3436 cpumask_clear_cpu(cpu_going_down, cpumask); 3437 3438 if (cpumask_empty(cpumask)) 3439 goto use_dfl; 3440 3441 /* yeap, return possible CPUs in @node that @attrs wants */ 3442 cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]); 3443 return !cpumask_equal(cpumask, attrs->cpumask); 3444 3445 use_dfl: 3446 cpumask_copy(cpumask, attrs->cpumask); 3447 return false; 3448 } 3449 3450 /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */ 3451 static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq, 3452 int node, 3453 struct pool_workqueue *pwq) 3454 { 3455 struct pool_workqueue *old_pwq; 3456 3457 lockdep_assert_held(&wq_pool_mutex); 3458 lockdep_assert_held(&wq->mutex); 3459 3460 /* link_pwq() can handle duplicate calls */ 3461 link_pwq(pwq); 3462 3463 old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); 3464 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq); 3465 return old_pwq; 3466 } 3467 3468 /* context to store the prepared attrs & pwqs before applying */ 3469 struct apply_wqattrs_ctx { 3470 struct workqueue_struct *wq; /* target workqueue */ 3471 struct workqueue_attrs *attrs; /* attrs to apply */ 3472 struct list_head list; /* queued for batching commit */ 3473 struct pool_workqueue *dfl_pwq; 3474 struct pool_workqueue *pwq_tbl[]; 3475 }; 3476 3477 /* free the resources after success or abort */ 3478 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx) 3479 { 3480 if (ctx) { 3481 int node; 3482 3483 for_each_node(node) 3484 put_pwq_unlocked(ctx->pwq_tbl[node]); 3485 put_pwq_unlocked(ctx->dfl_pwq); 3486 3487 free_workqueue_attrs(ctx->attrs); 3488 3489 kfree(ctx); 3490 } 3491 } 3492 3493 /* allocate the attrs and pwqs for later installation */ 3494 static struct apply_wqattrs_ctx * 3495 apply_wqattrs_prepare(struct workqueue_struct *wq, 3496 const struct workqueue_attrs *attrs) 3497 { 3498 struct apply_wqattrs_ctx *ctx; 3499 struct workqueue_attrs *new_attrs, *tmp_attrs; 3500 int node; 3501 3502 lockdep_assert_held(&wq_pool_mutex); 3503 3504 ctx = kzalloc(sizeof(*ctx) + nr_node_ids * sizeof(ctx->pwq_tbl[0]), 3505 GFP_KERNEL); 3506 3507 new_attrs = alloc_workqueue_attrs(GFP_KERNEL); 3508 tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL); 3509 if (!ctx || !new_attrs || !tmp_attrs) 3510 goto out_free; 3511 3512 /* 3513 * Calculate the attrs of the default pwq. 3514 * If the user configured cpumask doesn't overlap with the 3515 * wq_unbound_cpumask, we fallback to the wq_unbound_cpumask. 3516 */ 3517 copy_workqueue_attrs(new_attrs, attrs); 3518 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask); 3519 if (unlikely(cpumask_empty(new_attrs->cpumask))) 3520 cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask); 3521 3522 /* 3523 * We may create multiple pwqs with differing cpumasks. Make a 3524 * copy of @new_attrs which will be modified and used to obtain 3525 * pools. 3526 */ 3527 copy_workqueue_attrs(tmp_attrs, new_attrs); 3528 3529 /* 3530 * If something goes wrong during CPU up/down, we'll fall back to 3531 * the default pwq covering whole @attrs->cpumask. Always create 3532 * it even if we don't use it immediately. 
3533 */ 3534 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); 3535 if (!ctx->dfl_pwq) 3536 goto out_free; 3537 3538 for_each_node(node) { 3539 if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) { 3540 ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs); 3541 if (!ctx->pwq_tbl[node]) 3542 goto out_free; 3543 } else { 3544 ctx->dfl_pwq->refcnt++; 3545 ctx->pwq_tbl[node] = ctx->dfl_pwq; 3546 } 3547 } 3548 3549 /* save the user configured attrs and sanitize it. */ 3550 copy_workqueue_attrs(new_attrs, attrs); 3551 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask); 3552 ctx->attrs = new_attrs; 3553 3554 ctx->wq = wq; 3555 free_workqueue_attrs(tmp_attrs); 3556 return ctx; 3557 3558 out_free: 3559 free_workqueue_attrs(tmp_attrs); 3560 free_workqueue_attrs(new_attrs); 3561 apply_wqattrs_cleanup(ctx); 3562 return NULL; 3563 } 3564 3565 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */ 3566 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx) 3567 { 3568 int node; 3569 3570 /* all pwqs have been created successfully, let's install'em */ 3571 mutex_lock(&ctx->wq->mutex); 3572 3573 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); 3574 3575 /* save the previous pwq and install the new one */ 3576 for_each_node(node) 3577 ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node, 3578 ctx->pwq_tbl[node]); 3579 3580 /* @dfl_pwq might not have been used, ensure it's linked */ 3581 link_pwq(ctx->dfl_pwq); 3582 swap(ctx->wq->dfl_pwq, ctx->dfl_pwq); 3583 3584 mutex_unlock(&ctx->wq->mutex); 3585 } 3586 3587 static void apply_wqattrs_lock(void) 3588 { 3589 /* CPUs should stay stable across pwq creations and installations */ 3590 get_online_cpus(); 3591 mutex_lock(&wq_pool_mutex); 3592 } 3593 3594 static void apply_wqattrs_unlock(void) 3595 { 3596 mutex_unlock(&wq_pool_mutex); 3597 put_online_cpus(); 3598 } 3599 3600 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, 3601 const struct workqueue_attrs *attrs) 3602 { 3603 struct apply_wqattrs_ctx *ctx; 3604 int ret = -ENOMEM; 3605 3606 /* only unbound workqueues can change attributes */ 3607 if (WARN_ON(!(wq->flags & WQ_UNBOUND))) 3608 return -EINVAL; 3609 3610 /* creating multiple pwqs breaks ordering guarantee */ 3611 if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))) 3612 return -EINVAL; 3613 3614 ctx = apply_wqattrs_prepare(wq, attrs); 3615 3616 /* the ctx has been prepared successfully, let's commit it */ 3617 if (ctx) { 3618 apply_wqattrs_commit(ctx); 3619 ret = 0; 3620 } 3621 3622 apply_wqattrs_cleanup(ctx); 3623 3624 return ret; 3625 } 3626 3627 /** 3628 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue 3629 * @wq: the target workqueue 3630 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs() 3631 * 3632 * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA 3633 * machines, this function maps a separate pwq to each NUMA node with 3634 * possibles CPUs in @attrs->cpumask so that work items are affine to the 3635 * NUMA node it was issued on. Older pwqs are released as in-flight work 3636 * items finish. Note that a work item which repeatedly requeues itself 3637 * back-to-back will stay on its current pwq. 3638 * 3639 * Performs GFP_KERNEL allocations. 3640 * 3641 * Return: 0 on success and -errno on failure. 
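 *
 * A minimal usage sketch (hypothetical workqueue and values, not part of
 * this file) - re-nice an unbound workqueue and restrict it to one CPU:
 *
 *	struct workqueue_attrs *attrs;
 *	int ret;
 *
 *	attrs = alloc_workqueue_attrs(GFP_KERNEL);
 *	if (!attrs)
 *		return -ENOMEM;
 *	attrs->nice = -10;
 *	cpumask_copy(attrs->cpumask, cpumask_of(2));
 *	ret = apply_workqueue_attrs(my_unbound_wq, attrs);
 *	free_workqueue_attrs(attrs);
 *
 * where my_unbound_wq was created with %WQ_UNBOUND set.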
3642 */ 3643 int apply_workqueue_attrs(struct workqueue_struct *wq, 3644 const struct workqueue_attrs *attrs) 3645 { 3646 int ret; 3647 3648 apply_wqattrs_lock(); 3649 ret = apply_workqueue_attrs_locked(wq, attrs); 3650 apply_wqattrs_unlock(); 3651 3652 return ret; 3653 } 3654 3655 /** 3656 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug 3657 * @wq: the target workqueue 3658 * @cpu: the CPU coming up or going down 3659 * @online: whether @cpu is coming up or going down 3660 * 3661 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and 3662 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update NUMA affinity of 3663 * @wq accordingly. 3664 * 3665 * If NUMA affinity can't be adjusted due to memory allocation failure, it 3666 * falls back to @wq->dfl_pwq which may not be optimal but is always 3667 * correct. 3668 * 3669 * Note that when the last allowed CPU of a NUMA node goes offline for a 3670 * workqueue with a cpumask spanning multiple nodes, the workers which were 3671 * already executing the work items for the workqueue will lose their CPU 3672 * affinity and may execute on any CPU. This is similar to how per-cpu 3673 * workqueues behave on CPU_DOWN. If a workqueue user wants strict 3674 * affinity, it's the user's responsibility to flush the work item from 3675 * CPU_DOWN_PREPARE. 3676 */ 3677 static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu, 3678 bool online) 3679 { 3680 int node = cpu_to_node(cpu); 3681 int cpu_off = online ? -1 : cpu; 3682 struct pool_workqueue *old_pwq = NULL, *pwq; 3683 struct workqueue_attrs *target_attrs; 3684 cpumask_t *cpumask; 3685 3686 lockdep_assert_held(&wq_pool_mutex); 3687 3688 if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) || 3689 wq->unbound_attrs->no_numa) 3690 return; 3691 3692 /* 3693 * We don't wanna alloc/free wq_attrs for each wq for each CPU. 3694 * Let's use a preallocated one. The following buf is protected by 3695 * CPU hotplug exclusion. 3696 */ 3697 target_attrs = wq_update_unbound_numa_attrs_buf; 3698 cpumask = target_attrs->cpumask; 3699 3700 copy_workqueue_attrs(target_attrs, wq->unbound_attrs); 3701 pwq = unbound_pwq_by_node(wq, node); 3702 3703 /* 3704 * Let's determine what needs to be done. If the target cpumask is 3705 * different from the default pwq's, we need to compare it to @pwq's 3706 * and create a new one if they don't match. If the target cpumask 3707 * equals the default pwq's, the default pwq should be used. 3708 */ 3709 if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) { 3710 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) 3711 return; 3712 } else { 3713 goto use_dfl_pwq; 3714 } 3715 3716 /* create a new pwq */ 3717 pwq = alloc_unbound_pwq(wq, target_attrs); 3718 if (!pwq) { 3719 pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n", 3720 wq->name); 3721 goto use_dfl_pwq; 3722 } 3723 3724 /* Install the new pwq. 
*/ 3725 mutex_lock(&wq->mutex); 3726 old_pwq = numa_pwq_tbl_install(wq, node, pwq); 3727 goto out_unlock; 3728 3729 use_dfl_pwq: 3730 mutex_lock(&wq->mutex); 3731 spin_lock_irq(&wq->dfl_pwq->pool->lock); 3732 get_pwq(wq->dfl_pwq); 3733 spin_unlock_irq(&wq->dfl_pwq->pool->lock); 3734 old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq); 3735 out_unlock: 3736 mutex_unlock(&wq->mutex); 3737 put_pwq_unlocked(old_pwq); 3738 } 3739 3740 static int alloc_and_link_pwqs(struct workqueue_struct *wq) 3741 { 3742 bool highpri = wq->flags & WQ_HIGHPRI; 3743 int cpu, ret; 3744 3745 if (!(wq->flags & WQ_UNBOUND)) { 3746 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue); 3747 if (!wq->cpu_pwqs) 3748 return -ENOMEM; 3749 3750 for_each_possible_cpu(cpu) { 3751 struct pool_workqueue *pwq = 3752 per_cpu_ptr(wq->cpu_pwqs, cpu); 3753 struct worker_pool *cpu_pools = 3754 per_cpu(cpu_worker_pools, cpu); 3755 3756 init_pwq(pwq, wq, &cpu_pools[highpri]); 3757 3758 mutex_lock(&wq->mutex); 3759 link_pwq(pwq); 3760 mutex_unlock(&wq->mutex); 3761 } 3762 return 0; 3763 } else if (wq->flags & __WQ_ORDERED) { 3764 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); 3765 /* there should only be single pwq for ordering guarantee */ 3766 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node || 3767 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node), 3768 "ordering guarantee broken for workqueue %s\n", wq->name); 3769 return ret; 3770 } else { 3771 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); 3772 } 3773 } 3774 3775 static int wq_clamp_max_active(int max_active, unsigned int flags, 3776 const char *name) 3777 { 3778 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; 3779 3780 if (max_active < 1 || max_active > lim) 3781 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n", 3782 max_active, name, 1, lim); 3783 3784 return clamp_val(max_active, 1, lim); 3785 } 3786 3787 struct workqueue_struct *__alloc_workqueue_key(const char *fmt, 3788 unsigned int flags, 3789 int max_active, 3790 struct lock_class_key *key, 3791 const char *lock_name, ...) 
3792 { 3793 size_t tbl_size = 0; 3794 va_list args; 3795 struct workqueue_struct *wq; 3796 struct pool_workqueue *pwq; 3797 3798 /* see the comment above the definition of WQ_POWER_EFFICIENT */ 3799 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) 3800 flags |= WQ_UNBOUND; 3801 3802 /* allocate wq and format name */ 3803 if (flags & WQ_UNBOUND) 3804 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]); 3805 3806 wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL); 3807 if (!wq) 3808 return NULL; 3809 3810 if (flags & WQ_UNBOUND) { 3811 wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL); 3812 if (!wq->unbound_attrs) 3813 goto err_free_wq; 3814 } 3815 3816 va_start(args, lock_name); 3817 vsnprintf(wq->name, sizeof(wq->name), fmt, args); 3818 va_end(args); 3819 3820 max_active = max_active ?: WQ_DFL_ACTIVE; 3821 max_active = wq_clamp_max_active(max_active, flags, wq->name); 3822 3823 /* init wq */ 3824 wq->flags = flags; 3825 wq->saved_max_active = max_active; 3826 mutex_init(&wq->mutex); 3827 atomic_set(&wq->nr_pwqs_to_flush, 0); 3828 INIT_LIST_HEAD(&wq->pwqs); 3829 INIT_LIST_HEAD(&wq->flusher_queue); 3830 INIT_LIST_HEAD(&wq->flusher_overflow); 3831 INIT_LIST_HEAD(&wq->maydays); 3832 3833 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); 3834 INIT_LIST_HEAD(&wq->list); 3835 3836 if (alloc_and_link_pwqs(wq) < 0) 3837 goto err_free_wq; 3838 3839 /* 3840 * Workqueues which may be used during memory reclaim should 3841 * have a rescuer to guarantee forward progress. 3842 */ 3843 if (flags & WQ_MEM_RECLAIM) { 3844 struct worker *rescuer; 3845 3846 rescuer = alloc_worker(NUMA_NO_NODE); 3847 if (!rescuer) 3848 goto err_destroy; 3849 3850 rescuer->rescue_wq = wq; 3851 rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", 3852 wq->name); 3853 if (IS_ERR(rescuer->task)) { 3854 kfree(rescuer); 3855 goto err_destroy; 3856 } 3857 3858 wq->rescuer = rescuer; 3859 kthread_bind_mask(rescuer->task, cpu_possible_mask); 3860 wake_up_process(rescuer->task); 3861 } 3862 3863 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) 3864 goto err_destroy; 3865 3866 /* 3867 * wq_pool_mutex protects global freeze state and workqueues list. 3868 * Grab it, adjust max_active and add the new @wq to workqueues 3869 * list. 3870 */ 3871 mutex_lock(&wq_pool_mutex); 3872 3873 mutex_lock(&wq->mutex); 3874 for_each_pwq(pwq, wq) 3875 pwq_adjust_max_active(pwq); 3876 mutex_unlock(&wq->mutex); 3877 3878 list_add_tail_rcu(&wq->list, &workqueues); 3879 3880 mutex_unlock(&wq_pool_mutex); 3881 3882 return wq; 3883 3884 err_free_wq: 3885 free_workqueue_attrs(wq->unbound_attrs); 3886 kfree(wq); 3887 return NULL; 3888 err_destroy: 3889 destroy_workqueue(wq); 3890 return NULL; 3891 } 3892 EXPORT_SYMBOL_GPL(__alloc_workqueue_key); 3893 3894 /** 3895 * destroy_workqueue - safely terminate a workqueue 3896 * @wq: target workqueue 3897 * 3898 * Safely destroy a workqueue. All work currently pending will be done first. 
3899 */ 3900 void destroy_workqueue(struct workqueue_struct *wq) 3901 { 3902 struct pool_workqueue *pwq; 3903 int node; 3904 3905 /* drain it before proceeding with destruction */ 3906 drain_workqueue(wq); 3907 3908 /* sanity checks */ 3909 mutex_lock(&wq->mutex); 3910 for_each_pwq(pwq, wq) { 3911 int i; 3912 3913 for (i = 0; i < WORK_NR_COLORS; i++) { 3914 if (WARN_ON(pwq->nr_in_flight[i])) { 3915 mutex_unlock(&wq->mutex); 3916 return; 3917 } 3918 } 3919 3920 if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) || 3921 WARN_ON(pwq->nr_active) || 3922 WARN_ON(!list_empty(&pwq->delayed_works))) { 3923 mutex_unlock(&wq->mutex); 3924 return; 3925 } 3926 } 3927 mutex_unlock(&wq->mutex); 3928 3929 /* 3930 * wq list is used to freeze wq, remove from list after 3931 * flushing is complete in case freeze races us. 3932 */ 3933 mutex_lock(&wq_pool_mutex); 3934 list_del_rcu(&wq->list); 3935 mutex_unlock(&wq_pool_mutex); 3936 3937 workqueue_sysfs_unregister(wq); 3938 3939 if (wq->rescuer) 3940 kthread_stop(wq->rescuer->task); 3941 3942 if (!(wq->flags & WQ_UNBOUND)) { 3943 /* 3944 * The base ref is never dropped on per-cpu pwqs. Directly 3945 * schedule RCU free. 3946 */ 3947 call_rcu_sched(&wq->rcu, rcu_free_wq); 3948 } else { 3949 /* 3950 * We're the sole accessor of @wq at this point. Directly 3951 * access numa_pwq_tbl[] and dfl_pwq to put the base refs. 3952 * @wq will be freed when the last pwq is released. 3953 */ 3954 for_each_node(node) { 3955 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); 3956 RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL); 3957 put_pwq_unlocked(pwq); 3958 } 3959 3960 /* 3961 * Put dfl_pwq. @wq may be freed any time after dfl_pwq is 3962 * put. Don't access it afterwards. 3963 */ 3964 pwq = wq->dfl_pwq; 3965 wq->dfl_pwq = NULL; 3966 put_pwq_unlocked(pwq); 3967 } 3968 } 3969 EXPORT_SYMBOL_GPL(destroy_workqueue); 3970 3971 /** 3972 * workqueue_set_max_active - adjust max_active of a workqueue 3973 * @wq: target workqueue 3974 * @max_active: new max_active value. 3975 * 3976 * Set max_active of @wq to @max_active. 3977 * 3978 * CONTEXT: 3979 * Don't call from IRQ context. 3980 */ 3981 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) 3982 { 3983 struct pool_workqueue *pwq; 3984 3985 /* disallow meddling with max_active for ordered workqueues */ 3986 if (WARN_ON(wq->flags & __WQ_ORDERED)) 3987 return; 3988 3989 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 3990 3991 mutex_lock(&wq->mutex); 3992 3993 wq->saved_max_active = max_active; 3994 3995 for_each_pwq(pwq, wq) 3996 pwq_adjust_max_active(pwq); 3997 3998 mutex_unlock(&wq->mutex); 3999 } 4000 EXPORT_SYMBOL_GPL(workqueue_set_max_active); 4001 4002 /** 4003 * current_is_workqueue_rescuer - is %current workqueue rescuer? 4004 * 4005 * Determine whether %current is a workqueue rescuer. Can be used from 4006 * work functions to determine whether it's being run off the rescuer task. 4007 * 4008 * Return: %true if %current is a workqueue rescuer. %false otherwise. 4009 */ 4010 bool current_is_workqueue_rescuer(void) 4011 { 4012 struct worker *worker = current_wq_worker(); 4013 4014 return worker && worker->rescue_wq; 4015 } 4016 4017 /** 4018 * workqueue_congested - test whether a workqueue is congested 4019 * @cpu: CPU in question 4020 * @wq: target workqueue 4021 * 4022 * Test whether @wq's cpu workqueue for @cpu is congested. There is 4023 * no synchronization around this function and the test result is 4024 * unreliable and only useful as advisory hints or for debugging. 
4025 * 4026 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU. 4027 * Note that both per-cpu and unbound workqueues may be associated with 4028 * multiple pool_workqueues which have separate congested states. A 4029 * workqueue being congested on one CPU doesn't mean the workqueue is also 4030 * contested on other CPUs / NUMA nodes. 4031 * 4032 * Return: 4033 * %true if congested, %false otherwise. 4034 */ 4035 bool workqueue_congested(int cpu, struct workqueue_struct *wq) 4036 { 4037 struct pool_workqueue *pwq; 4038 bool ret; 4039 4040 rcu_read_lock_sched(); 4041 4042 if (cpu == WORK_CPU_UNBOUND) 4043 cpu = smp_processor_id(); 4044 4045 if (!(wq->flags & WQ_UNBOUND)) 4046 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); 4047 else 4048 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); 4049 4050 ret = !list_empty(&pwq->delayed_works); 4051 rcu_read_unlock_sched(); 4052 4053 return ret; 4054 } 4055 EXPORT_SYMBOL_GPL(workqueue_congested); 4056 4057 /** 4058 * work_busy - test whether a work is currently pending or running 4059 * @work: the work to be tested 4060 * 4061 * Test whether @work is currently pending or running. There is no 4062 * synchronization around this function and the test result is 4063 * unreliable and only useful as advisory hints or for debugging. 4064 * 4065 * Return: 4066 * OR'd bitmask of WORK_BUSY_* bits. 4067 */ 4068 unsigned int work_busy(struct work_struct *work) 4069 { 4070 struct worker_pool *pool; 4071 unsigned long flags; 4072 unsigned int ret = 0; 4073 4074 if (work_pending(work)) 4075 ret |= WORK_BUSY_PENDING; 4076 4077 local_irq_save(flags); 4078 pool = get_work_pool(work); 4079 if (pool) { 4080 spin_lock(&pool->lock); 4081 if (find_worker_executing_work(pool, work)) 4082 ret |= WORK_BUSY_RUNNING; 4083 spin_unlock(&pool->lock); 4084 } 4085 local_irq_restore(flags); 4086 4087 return ret; 4088 } 4089 EXPORT_SYMBOL_GPL(work_busy); 4090 4091 /** 4092 * set_worker_desc - set description for the current work item 4093 * @fmt: printf-style format string 4094 * @...: arguments for the format string 4095 * 4096 * This function can be called by a running work function to describe what 4097 * the work item is about. If the worker task gets dumped, this 4098 * information will be printed out together to help debugging. The 4099 * description can be at most WORKER_DESC_LEN including the trailing '\0'. 4100 */ 4101 void set_worker_desc(const char *fmt, ...) 4102 { 4103 struct worker *worker = current_wq_worker(); 4104 va_list args; 4105 4106 if (worker) { 4107 va_start(args, fmt); 4108 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); 4109 va_end(args); 4110 worker->desc_valid = true; 4111 } 4112 } 4113 4114 /** 4115 * print_worker_info - print out worker information and description 4116 * @log_lvl: the log level to use when printing 4117 * @task: target task 4118 * 4119 * If @task is a worker and currently executing a work item, print out the 4120 * name of the workqueue being serviced and worker description set with 4121 * set_worker_desc() by the currently executing work item. 4122 * 4123 * This function can be safely called on any task as long as the 4124 * task_struct itself is accessible. While safe, this function isn't 4125 * synchronized and may print out mixups or garbages of limited length. 
4126 */ 4127 void print_worker_info(const char *log_lvl, struct task_struct *task) 4128 { 4129 work_func_t *fn = NULL; 4130 char name[WQ_NAME_LEN] = { }; 4131 char desc[WORKER_DESC_LEN] = { }; 4132 struct pool_workqueue *pwq = NULL; 4133 struct workqueue_struct *wq = NULL; 4134 bool desc_valid = false; 4135 struct worker *worker; 4136 4137 if (!(task->flags & PF_WQ_WORKER)) 4138 return; 4139 4140 /* 4141 * This function is called without any synchronization and @task 4142 * could be in any state. Be careful with dereferences. 4143 */ 4144 worker = probe_kthread_data(task); 4145 4146 /* 4147 * Carefully copy the associated workqueue's workfn and name. Keep 4148 * the original last '\0' in case the original contains garbage. 4149 */ 4150 probe_kernel_read(&fn, &worker->current_func, sizeof(fn)); 4151 probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq)); 4152 probe_kernel_read(&wq, &pwq->wq, sizeof(wq)); 4153 probe_kernel_read(name, wq->name, sizeof(name) - 1); 4154 4155 /* copy worker description */ 4156 probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid)); 4157 if (desc_valid) 4158 probe_kernel_read(desc, worker->desc, sizeof(desc) - 1); 4159 4160 if (fn || name[0] || desc[0]) { 4161 printk("%sWorkqueue: %s %pf", log_lvl, name, fn); 4162 if (desc[0]) 4163 pr_cont(" (%s)", desc); 4164 pr_cont("\n"); 4165 } 4166 } 4167 4168 static void pr_cont_pool_info(struct worker_pool *pool) 4169 { 4170 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); 4171 if (pool->node != NUMA_NO_NODE) 4172 pr_cont(" node=%d", pool->node); 4173 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); 4174 } 4175 4176 static void pr_cont_work(bool comma, struct work_struct *work) 4177 { 4178 if (work->func == wq_barrier_func) { 4179 struct wq_barrier *barr; 4180 4181 barr = container_of(work, struct wq_barrier, work); 4182 4183 pr_cont("%s BAR(%d)", comma ? "," : "", 4184 task_pid_nr(barr->task)); 4185 } else { 4186 pr_cont("%s %pf", comma ? "," : "", work->func); 4187 } 4188 } 4189 4190 static void show_pwq(struct pool_workqueue *pwq) 4191 { 4192 struct worker_pool *pool = pwq->pool; 4193 struct work_struct *work; 4194 struct worker *worker; 4195 bool has_in_flight = false, has_pending = false; 4196 int bkt; 4197 4198 pr_info(" pwq %d:", pool->id); 4199 pr_cont_pool_info(pool); 4200 4201 pr_cont(" active=%d/%d%s\n", pwq->nr_active, pwq->max_active, 4202 !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); 4203 4204 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 4205 if (worker->current_pwq == pwq) { 4206 has_in_flight = true; 4207 break; 4208 } 4209 } 4210 if (has_in_flight) { 4211 bool comma = false; 4212 4213 pr_info(" in-flight:"); 4214 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 4215 if (worker->current_pwq != pwq) 4216 continue; 4217 4218 pr_cont("%s %d%s:%pf", comma ? "," : "", 4219 task_pid_nr(worker->task), 4220 worker == pwq->wq->rescuer ? 
"(RESCUER)" : "", 4221 worker->current_func); 4222 list_for_each_entry(work, &worker->scheduled, entry) 4223 pr_cont_work(false, work); 4224 comma = true; 4225 } 4226 pr_cont("\n"); 4227 } 4228 4229 list_for_each_entry(work, &pool->worklist, entry) { 4230 if (get_work_pwq(work) == pwq) { 4231 has_pending = true; 4232 break; 4233 } 4234 } 4235 if (has_pending) { 4236 bool comma = false; 4237 4238 pr_info(" pending:"); 4239 list_for_each_entry(work, &pool->worklist, entry) { 4240 if (get_work_pwq(work) != pwq) 4241 continue; 4242 4243 pr_cont_work(comma, work); 4244 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 4245 } 4246 pr_cont("\n"); 4247 } 4248 4249 if (!list_empty(&pwq->delayed_works)) { 4250 bool comma = false; 4251 4252 pr_info(" delayed:"); 4253 list_for_each_entry(work, &pwq->delayed_works, entry) { 4254 pr_cont_work(comma, work); 4255 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 4256 } 4257 pr_cont("\n"); 4258 } 4259 } 4260 4261 /** 4262 * show_workqueue_state - dump workqueue state 4263 * 4264 * Called from a sysrq handler and prints out all busy workqueues and 4265 * pools. 4266 */ 4267 void show_workqueue_state(void) 4268 { 4269 struct workqueue_struct *wq; 4270 struct worker_pool *pool; 4271 unsigned long flags; 4272 int pi; 4273 4274 rcu_read_lock_sched(); 4275 4276 pr_info("Showing busy workqueues and worker pools:\n"); 4277 4278 list_for_each_entry_rcu(wq, &workqueues, list) { 4279 struct pool_workqueue *pwq; 4280 bool idle = true; 4281 4282 for_each_pwq(pwq, wq) { 4283 if (pwq->nr_active || !list_empty(&pwq->delayed_works)) { 4284 idle = false; 4285 break; 4286 } 4287 } 4288 if (idle) 4289 continue; 4290 4291 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); 4292 4293 for_each_pwq(pwq, wq) { 4294 spin_lock_irqsave(&pwq->pool->lock, flags); 4295 if (pwq->nr_active || !list_empty(&pwq->delayed_works)) 4296 show_pwq(pwq); 4297 spin_unlock_irqrestore(&pwq->pool->lock, flags); 4298 } 4299 } 4300 4301 for_each_pool(pool, pi) { 4302 struct worker *worker; 4303 bool first = true; 4304 4305 spin_lock_irqsave(&pool->lock, flags); 4306 if (pool->nr_workers == pool->nr_idle) 4307 goto next_pool; 4308 4309 pr_info("pool %d:", pool->id); 4310 pr_cont_pool_info(pool); 4311 pr_cont(" workers=%d", pool->nr_workers); 4312 if (pool->manager) 4313 pr_cont(" manager: %d", 4314 task_pid_nr(pool->manager->task)); 4315 list_for_each_entry(worker, &pool->idle_list, entry) { 4316 pr_cont(" %s%d", first ? "idle: " : "", 4317 task_pid_nr(worker->task)); 4318 first = false; 4319 } 4320 pr_cont("\n"); 4321 next_pool: 4322 spin_unlock_irqrestore(&pool->lock, flags); 4323 } 4324 4325 rcu_read_unlock_sched(); 4326 } 4327 4328 /* 4329 * CPU hotplug. 4330 * 4331 * There are two challenges in supporting CPU hotplug. Firstly, there 4332 * are a lot of assumptions on strong associations among work, pwq and 4333 * pool which make migrating pending and scheduled works very 4334 * difficult to implement without impacting hot paths. Secondly, 4335 * worker pools serve mix of short, long and very long running works making 4336 * blocked draining impractical. 4337 * 4338 * This is solved by allowing the pools to be disassociated from the CPU 4339 * running as an unbound one and allowing it to be reattached later if the 4340 * cpu comes back online. 
4341 */ 4342 4343 static void wq_unbind_fn(struct work_struct *work) 4344 { 4345 int cpu = smp_processor_id(); 4346 struct worker_pool *pool; 4347 struct worker *worker; 4348 4349 for_each_cpu_worker_pool(pool, cpu) { 4350 mutex_lock(&pool->attach_mutex); 4351 spin_lock_irq(&pool->lock); 4352 4353 /* 4354 * We've blocked all attach/detach operations. Make all workers 4355 * unbound and set DISASSOCIATED. Before this, all workers 4356 * except for the ones which are still executing works from 4357 * before the last CPU down must be on the cpu. After 4358 * this, they may become diasporas. 4359 */ 4360 for_each_pool_worker(worker, pool) 4361 worker->flags |= WORKER_UNBOUND; 4362 4363 pool->flags |= POOL_DISASSOCIATED; 4364 4365 spin_unlock_irq(&pool->lock); 4366 mutex_unlock(&pool->attach_mutex); 4367 4368 /* 4369 * Call schedule() so that we cross rq->lock and thus can 4370 * guarantee sched callbacks see the %WORKER_UNBOUND flag. 4371 * This is necessary as scheduler callbacks may be invoked 4372 * from other cpus. 4373 */ 4374 schedule(); 4375 4376 /* 4377 * Sched callbacks are disabled now. Zap nr_running. 4378 * After this, nr_running stays zero and need_more_worker() 4379 * and keep_working() are always true as long as the 4380 * worklist is not empty. This pool now behaves as an 4381 * unbound (in terms of concurrency management) pool which 4382 * are served by workers tied to the pool. 4383 */ 4384 atomic_set(&pool->nr_running, 0); 4385 4386 /* 4387 * With concurrency management just turned off, a busy 4388 * worker blocking could lead to lengthy stalls. Kick off 4389 * unbound chain execution of currently pending work items. 4390 */ 4391 spin_lock_irq(&pool->lock); 4392 wake_up_worker(pool); 4393 spin_unlock_irq(&pool->lock); 4394 } 4395 } 4396 4397 /** 4398 * rebind_workers - rebind all workers of a pool to the associated CPU 4399 * @pool: pool of interest 4400 * 4401 * @pool->cpu is coming online. Rebind all workers to the CPU. 4402 */ 4403 static void rebind_workers(struct worker_pool *pool) 4404 { 4405 struct worker *worker; 4406 4407 lockdep_assert_held(&pool->attach_mutex); 4408 4409 /* 4410 * Restore CPU affinity of all workers. As all idle workers should 4411 * be on the run-queue of the associated CPU before any local 4412 * wake-ups for concurrency management happen, restore CPU affinity 4413 * of all workers first and then clear UNBOUND. As we're called 4414 * from CPU_ONLINE, the following shouldn't fail. 4415 */ 4416 for_each_pool_worker(worker, pool) 4417 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, 4418 pool->attrs->cpumask) < 0); 4419 4420 spin_lock_irq(&pool->lock); 4421 pool->flags &= ~POOL_DISASSOCIATED; 4422 4423 for_each_pool_worker(worker, pool) { 4424 unsigned int worker_flags = worker->flags; 4425 4426 /* 4427 * A bound idle worker should actually be on the runqueue 4428 * of the associated CPU for local wake-ups targeting it to 4429 * work. Kick all idle workers so that they migrate to the 4430 * associated CPU. Doing this in the same loop as 4431 * replacing UNBOUND with REBOUND is safe as no worker will 4432 * be bound before @pool->lock is released. 4433 */ 4434 if (worker_flags & WORKER_IDLE) 4435 wake_up_process(worker->task); 4436 4437 /* 4438 * We want to clear UNBOUND but can't directly call 4439 * worker_clr_flags() or adjust nr_running. Atomically 4440 * replace UNBOUND with another NOT_RUNNING flag REBOUND. 
4441 * @worker will clear REBOUND using worker_clr_flags() when 4442 * it initiates the next execution cycle thus restoring 4443 * concurrency management. Note that when or whether 4444 * @worker clears REBOUND doesn't affect correctness. 4445 * 4446 * ACCESS_ONCE() is necessary because @worker->flags may be 4447 * tested without holding any lock in 4448 * wq_worker_waking_up(). Without it, NOT_RUNNING test may 4449 * fail incorrectly leading to premature concurrency 4450 * management operations. 4451 */ 4452 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); 4453 worker_flags |= WORKER_REBOUND; 4454 worker_flags &= ~WORKER_UNBOUND; 4455 ACCESS_ONCE(worker->flags) = worker_flags; 4456 } 4457 4458 spin_unlock_irq(&pool->lock); 4459 } 4460 4461 /** 4462 * restore_unbound_workers_cpumask - restore cpumask of unbound workers 4463 * @pool: unbound pool of interest 4464 * @cpu: the CPU which is coming up 4465 * 4466 * An unbound pool may end up with a cpumask which doesn't have any online 4467 * CPUs. When a worker of such pool get scheduled, the scheduler resets 4468 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any 4469 * online CPU before, cpus_allowed of all its workers should be restored. 4470 */ 4471 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) 4472 { 4473 static cpumask_t cpumask; 4474 struct worker *worker; 4475 4476 lockdep_assert_held(&pool->attach_mutex); 4477 4478 /* is @cpu allowed for @pool? */ 4479 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) 4480 return; 4481 4482 /* is @cpu the only online CPU? */ 4483 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); 4484 if (cpumask_weight(&cpumask) != 1) 4485 return; 4486 4487 /* as we're called from CPU_ONLINE, the following shouldn't fail */ 4488 for_each_pool_worker(worker, pool) 4489 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, 4490 pool->attrs->cpumask) < 0); 4491 } 4492 4493 /* 4494 * Workqueues should be brought up before normal priority CPU notifiers. 4495 * This will be registered high priority CPU notifier. 4496 */ 4497 static int workqueue_cpu_up_callback(struct notifier_block *nfb, 4498 unsigned long action, 4499 void *hcpu) 4500 { 4501 int cpu = (unsigned long)hcpu; 4502 struct worker_pool *pool; 4503 struct workqueue_struct *wq; 4504 int pi; 4505 4506 switch (action & ~CPU_TASKS_FROZEN) { 4507 case CPU_UP_PREPARE: 4508 for_each_cpu_worker_pool(pool, cpu) { 4509 if (pool->nr_workers) 4510 continue; 4511 if (!create_worker(pool)) 4512 return NOTIFY_BAD; 4513 } 4514 break; 4515 4516 case CPU_DOWN_FAILED: 4517 case CPU_ONLINE: 4518 mutex_lock(&wq_pool_mutex); 4519 4520 for_each_pool(pool, pi) { 4521 mutex_lock(&pool->attach_mutex); 4522 4523 if (pool->cpu == cpu) 4524 rebind_workers(pool); 4525 else if (pool->cpu < 0) 4526 restore_unbound_workers_cpumask(pool, cpu); 4527 4528 mutex_unlock(&pool->attach_mutex); 4529 } 4530 4531 /* update NUMA affinity of unbound workqueues */ 4532 list_for_each_entry(wq, &workqueues, list) 4533 wq_update_unbound_numa(wq, cpu, true); 4534 4535 mutex_unlock(&wq_pool_mutex); 4536 break; 4537 } 4538 return NOTIFY_OK; 4539 } 4540 4541 /* 4542 * Workqueues should be brought down after normal priority CPU notifiers. 4543 * This will be registered as low priority CPU notifier. 
4544 */ 4545 static int workqueue_cpu_down_callback(struct notifier_block *nfb, 4546 unsigned long action, 4547 void *hcpu) 4548 { 4549 int cpu = (unsigned long)hcpu; 4550 struct work_struct unbind_work; 4551 struct workqueue_struct *wq; 4552 4553 switch (action & ~CPU_TASKS_FROZEN) { 4554 case CPU_DOWN_PREPARE: 4555 /* unbinding per-cpu workers should happen on the local CPU */ 4556 INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); 4557 queue_work_on(cpu, system_highpri_wq, &unbind_work); 4558 4559 /* update NUMA affinity of unbound workqueues */ 4560 mutex_lock(&wq_pool_mutex); 4561 list_for_each_entry(wq, &workqueues, list) 4562 wq_update_unbound_numa(wq, cpu, false); 4563 mutex_unlock(&wq_pool_mutex); 4564 4565 /* wait for per-cpu unbinding to finish */ 4566 flush_work(&unbind_work); 4567 destroy_work_on_stack(&unbind_work); 4568 break; 4569 } 4570 return NOTIFY_OK; 4571 } 4572 4573 #ifdef CONFIG_SMP 4574 4575 struct work_for_cpu { 4576 struct work_struct work; 4577 long (*fn)(void *); 4578 void *arg; 4579 long ret; 4580 }; 4581 4582 static void work_for_cpu_fn(struct work_struct *work) 4583 { 4584 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); 4585 4586 wfc->ret = wfc->fn(wfc->arg); 4587 } 4588 4589 /** 4590 * work_on_cpu - run a function in user context on a particular cpu 4591 * @cpu: the cpu to run on 4592 * @fn: the function to run 4593 * @arg: the function arg 4594 * 4595 * It is up to the caller to ensure that the cpu doesn't go offline. 4596 * The caller must not hold any locks which would prevent @fn from completing. 4597 * 4598 * Return: The value @fn returns. 4599 */ 4600 long work_on_cpu(int cpu, long (*fn)(void *), void *arg) 4601 { 4602 struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 4603 4604 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); 4605 schedule_work_on(cpu, &wfc.work); 4606 flush_work(&wfc.work); 4607 destroy_work_on_stack(&wfc.work); 4608 return wfc.ret; 4609 } 4610 EXPORT_SYMBOL_GPL(work_on_cpu); 4611 #endif /* CONFIG_SMP */ 4612 4613 #ifdef CONFIG_FREEZER 4614 4615 /** 4616 * freeze_workqueues_begin - begin freezing workqueues 4617 * 4618 * Start freezing workqueues. After this function returns, all freezable 4619 * workqueues will queue new works to their delayed_works list instead of 4620 * pool->worklist. 4621 * 4622 * CONTEXT: 4623 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 4624 */ 4625 void freeze_workqueues_begin(void) 4626 { 4627 struct workqueue_struct *wq; 4628 struct pool_workqueue *pwq; 4629 4630 mutex_lock(&wq_pool_mutex); 4631 4632 WARN_ON_ONCE(workqueue_freezing); 4633 workqueue_freezing = true; 4634 4635 list_for_each_entry(wq, &workqueues, list) { 4636 mutex_lock(&wq->mutex); 4637 for_each_pwq(pwq, wq) 4638 pwq_adjust_max_active(pwq); 4639 mutex_unlock(&wq->mutex); 4640 } 4641 4642 mutex_unlock(&wq_pool_mutex); 4643 } 4644 4645 /** 4646 * freeze_workqueues_busy - are freezable workqueues still busy? 4647 * 4648 * Check whether freezing is complete. This function must be called 4649 * between freeze_workqueues_begin() and thaw_workqueues(). 4650 * 4651 * CONTEXT: 4652 * Grabs and releases wq_pool_mutex. 4653 * 4654 * Return: 4655 * %true if some freezable workqueues are still busy. %false if freezing 4656 * is complete. 
4657 */ 4658 bool freeze_workqueues_busy(void) 4659 { 4660 bool busy = false; 4661 struct workqueue_struct *wq; 4662 struct pool_workqueue *pwq; 4663 4664 mutex_lock(&wq_pool_mutex); 4665 4666 WARN_ON_ONCE(!workqueue_freezing); 4667 4668 list_for_each_entry(wq, &workqueues, list) { 4669 if (!(wq->flags & WQ_FREEZABLE)) 4670 continue; 4671 /* 4672 * nr_active is monotonically decreasing. It's safe 4673 * to peek without lock. 4674 */ 4675 rcu_read_lock_sched(); 4676 for_each_pwq(pwq, wq) { 4677 WARN_ON_ONCE(pwq->nr_active < 0); 4678 if (pwq->nr_active) { 4679 busy = true; 4680 rcu_read_unlock_sched(); 4681 goto out_unlock; 4682 } 4683 } 4684 rcu_read_unlock_sched(); 4685 } 4686 out_unlock: 4687 mutex_unlock(&wq_pool_mutex); 4688 return busy; 4689 } 4690 4691 /** 4692 * thaw_workqueues - thaw workqueues 4693 * 4694 * Thaw workqueues. Normal queueing is restored and all collected 4695 * frozen works are transferred to their respective pool worklists. 4696 * 4697 * CONTEXT: 4698 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 4699 */ 4700 void thaw_workqueues(void) 4701 { 4702 struct workqueue_struct *wq; 4703 struct pool_workqueue *pwq; 4704 4705 mutex_lock(&wq_pool_mutex); 4706 4707 if (!workqueue_freezing) 4708 goto out_unlock; 4709 4710 workqueue_freezing = false; 4711 4712 /* restore max_active and repopulate worklist */ 4713 list_for_each_entry(wq, &workqueues, list) { 4714 mutex_lock(&wq->mutex); 4715 for_each_pwq(pwq, wq) 4716 pwq_adjust_max_active(pwq); 4717 mutex_unlock(&wq->mutex); 4718 } 4719 4720 out_unlock: 4721 mutex_unlock(&wq_pool_mutex); 4722 } 4723 #endif /* CONFIG_FREEZER */ 4724 4725 static int workqueue_apply_unbound_cpumask(void) 4726 { 4727 LIST_HEAD(ctxs); 4728 int ret = 0; 4729 struct workqueue_struct *wq; 4730 struct apply_wqattrs_ctx *ctx, *n; 4731 4732 lockdep_assert_held(&wq_pool_mutex); 4733 4734 list_for_each_entry(wq, &workqueues, list) { 4735 if (!(wq->flags & WQ_UNBOUND)) 4736 continue; 4737 /* creating multiple pwqs breaks ordering guarantee */ 4738 if (wq->flags & __WQ_ORDERED) 4739 continue; 4740 4741 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs); 4742 if (!ctx) { 4743 ret = -ENOMEM; 4744 break; 4745 } 4746 4747 list_add_tail(&ctx->list, &ctxs); 4748 } 4749 4750 list_for_each_entry_safe(ctx, n, &ctxs, list) { 4751 if (!ret) 4752 apply_wqattrs_commit(ctx); 4753 apply_wqattrs_cleanup(ctx); 4754 } 4755 4756 return ret; 4757 } 4758 4759 /** 4760 * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask 4761 * @cpumask: the cpumask to set 4762 * 4763 * The low-level workqueues cpumask is a global cpumask that limits 4764 * the affinity of all unbound workqueues. This function check the @cpumask 4765 * and apply it to all unbound workqueues and updates all pwqs of them. 4766 * 4767 * Retun: 0 - Success 4768 * -EINVAL - Invalid @cpumask 4769 * -ENOMEM - Failed to allocate memory for attrs or pwqs. 4770 */ 4771 int workqueue_set_unbound_cpumask(cpumask_var_t cpumask) 4772 { 4773 int ret = -EINVAL; 4774 cpumask_var_t saved_cpumask; 4775 4776 if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) 4777 return -ENOMEM; 4778 4779 cpumask_and(cpumask, cpumask, cpu_possible_mask); 4780 if (!cpumask_empty(cpumask)) { 4781 apply_wqattrs_lock(); 4782 4783 /* save the old wq_unbound_cpumask. */ 4784 cpumask_copy(saved_cpumask, wq_unbound_cpumask); 4785 4786 /* update wq_unbound_cpumask at first and apply it to wqs. 
*/ 4787 cpumask_copy(wq_unbound_cpumask, cpumask); 4788 ret = workqueue_apply_unbound_cpumask(); 4789 4790 /* restore the wq_unbound_cpumask when failed. */ 4791 if (ret < 0) 4792 cpumask_copy(wq_unbound_cpumask, saved_cpumask); 4793 4794 apply_wqattrs_unlock(); 4795 } 4796 4797 free_cpumask_var(saved_cpumask); 4798 return ret; 4799 } 4800 4801 #ifdef CONFIG_SYSFS 4802 /* 4803 * Workqueues with WQ_SYSFS flag set is visible to userland via 4804 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the 4805 * following attributes. 4806 * 4807 * per_cpu RO bool : whether the workqueue is per-cpu or unbound 4808 * max_active RW int : maximum number of in-flight work items 4809 * 4810 * Unbound workqueues have the following extra attributes. 4811 * 4812 * id RO int : the associated pool ID 4813 * nice RW int : nice value of the workers 4814 * cpumask RW mask : bitmask of allowed CPUs for the workers 4815 */ 4816 struct wq_device { 4817 struct workqueue_struct *wq; 4818 struct device dev; 4819 }; 4820 4821 static struct workqueue_struct *dev_to_wq(struct device *dev) 4822 { 4823 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 4824 4825 return wq_dev->wq; 4826 } 4827 4828 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr, 4829 char *buf) 4830 { 4831 struct workqueue_struct *wq = dev_to_wq(dev); 4832 4833 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); 4834 } 4835 static DEVICE_ATTR_RO(per_cpu); 4836 4837 static ssize_t max_active_show(struct device *dev, 4838 struct device_attribute *attr, char *buf) 4839 { 4840 struct workqueue_struct *wq = dev_to_wq(dev); 4841 4842 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); 4843 } 4844 4845 static ssize_t max_active_store(struct device *dev, 4846 struct device_attribute *attr, const char *buf, 4847 size_t count) 4848 { 4849 struct workqueue_struct *wq = dev_to_wq(dev); 4850 int val; 4851 4852 if (sscanf(buf, "%d", &val) != 1 || val <= 0) 4853 return -EINVAL; 4854 4855 workqueue_set_max_active(wq, val); 4856 return count; 4857 } 4858 static DEVICE_ATTR_RW(max_active); 4859 4860 static struct attribute *wq_sysfs_attrs[] = { 4861 &dev_attr_per_cpu.attr, 4862 &dev_attr_max_active.attr, 4863 NULL, 4864 }; 4865 ATTRIBUTE_GROUPS(wq_sysfs); 4866 4867 static ssize_t wq_pool_ids_show(struct device *dev, 4868 struct device_attribute *attr, char *buf) 4869 { 4870 struct workqueue_struct *wq = dev_to_wq(dev); 4871 const char *delim = ""; 4872 int node, written = 0; 4873 4874 rcu_read_lock_sched(); 4875 for_each_node(node) { 4876 written += scnprintf(buf + written, PAGE_SIZE - written, 4877 "%s%d:%d", delim, node, 4878 unbound_pwq_by_node(wq, node)->pool->id); 4879 delim = " "; 4880 } 4881 written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); 4882 rcu_read_unlock_sched(); 4883 4884 return written; 4885 } 4886 4887 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr, 4888 char *buf) 4889 { 4890 struct workqueue_struct *wq = dev_to_wq(dev); 4891 int written; 4892 4893 mutex_lock(&wq->mutex); 4894 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); 4895 mutex_unlock(&wq->mutex); 4896 4897 return written; 4898 } 4899 4900 /* prepare workqueue_attrs for sysfs store operations */ 4901 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) 4902 { 4903 struct workqueue_attrs *attrs; 4904 4905 lockdep_assert_held(&wq_pool_mutex); 4906 4907 attrs = alloc_workqueue_attrs(GFP_KERNEL); 4908 if (!attrs) 
4909 return NULL; 4910 4911 copy_workqueue_attrs(attrs, wq->unbound_attrs); 4912 return attrs; 4913 } 4914 4915 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr, 4916 const char *buf, size_t count) 4917 { 4918 struct workqueue_struct *wq = dev_to_wq(dev); 4919 struct workqueue_attrs *attrs; 4920 int ret = -ENOMEM; 4921 4922 apply_wqattrs_lock(); 4923 4924 attrs = wq_sysfs_prep_attrs(wq); 4925 if (!attrs) 4926 goto out_unlock; 4927 4928 if (sscanf(buf, "%d", &attrs->nice) == 1 && 4929 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE) 4930 ret = apply_workqueue_attrs_locked(wq, attrs); 4931 else 4932 ret = -EINVAL; 4933 4934 out_unlock: 4935 apply_wqattrs_unlock(); 4936 free_workqueue_attrs(attrs); 4937 return ret ?: count; 4938 } 4939 4940 static ssize_t wq_cpumask_show(struct device *dev, 4941 struct device_attribute *attr, char *buf) 4942 { 4943 struct workqueue_struct *wq = dev_to_wq(dev); 4944 int written; 4945 4946 mutex_lock(&wq->mutex); 4947 written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 4948 cpumask_pr_args(wq->unbound_attrs->cpumask)); 4949 mutex_unlock(&wq->mutex); 4950 return written; 4951 } 4952 4953 static ssize_t wq_cpumask_store(struct device *dev, 4954 struct device_attribute *attr, 4955 const char *buf, size_t count) 4956 { 4957 struct workqueue_struct *wq = dev_to_wq(dev); 4958 struct workqueue_attrs *attrs; 4959 int ret = -ENOMEM; 4960 4961 apply_wqattrs_lock(); 4962 4963 attrs = wq_sysfs_prep_attrs(wq); 4964 if (!attrs) 4965 goto out_unlock; 4966 4967 ret = cpumask_parse(buf, attrs->cpumask); 4968 if (!ret) 4969 ret = apply_workqueue_attrs_locked(wq, attrs); 4970 4971 out_unlock: 4972 apply_wqattrs_unlock(); 4973 free_workqueue_attrs(attrs); 4974 return ret ?: count; 4975 } 4976 4977 static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr, 4978 char *buf) 4979 { 4980 struct workqueue_struct *wq = dev_to_wq(dev); 4981 int written; 4982 4983 mutex_lock(&wq->mutex); 4984 written = scnprintf(buf, PAGE_SIZE, "%d\n", 4985 !wq->unbound_attrs->no_numa); 4986 mutex_unlock(&wq->mutex); 4987 4988 return written; 4989 } 4990 4991 static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr, 4992 const char *buf, size_t count) 4993 { 4994 struct workqueue_struct *wq = dev_to_wq(dev); 4995 struct workqueue_attrs *attrs; 4996 int v, ret = -ENOMEM; 4997 4998 apply_wqattrs_lock(); 4999 5000 attrs = wq_sysfs_prep_attrs(wq); 5001 if (!attrs) 5002 goto out_unlock; 5003 5004 ret = -EINVAL; 5005 if (sscanf(buf, "%d", &v) == 1) { 5006 attrs->no_numa = !v; 5007 ret = apply_workqueue_attrs_locked(wq, attrs); 5008 } 5009 5010 out_unlock: 5011 apply_wqattrs_unlock(); 5012 free_workqueue_attrs(attrs); 5013 return ret ?: count; 5014 } 5015 5016 static struct device_attribute wq_sysfs_unbound_attrs[] = { 5017 __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL), 5018 __ATTR(nice, 0644, wq_nice_show, wq_nice_store), 5019 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store), 5020 __ATTR(numa, 0644, wq_numa_show, wq_numa_store), 5021 __ATTR_NULL, 5022 }; 5023 5024 static struct bus_type wq_subsys = { 5025 .name = "workqueue", 5026 .dev_groups = wq_sysfs_groups, 5027 }; 5028 5029 static ssize_t wq_unbound_cpumask_show(struct device *dev, 5030 struct device_attribute *attr, char *buf) 5031 { 5032 int written; 5033 5034 mutex_lock(&wq_pool_mutex); 5035 written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 5036 cpumask_pr_args(wq_unbound_cpumask)); 5037 mutex_unlock(&wq_pool_mutex); 5038 5039 return written; 5040 } 5041 5042 static ssize_t 
wq_unbound_cpumask_store(struct device *dev, 5043 struct device_attribute *attr, const char *buf, size_t count) 5044 { 5045 cpumask_var_t cpumask; 5046 int ret; 5047 5048 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL)) 5049 return -ENOMEM; 5050 5051 ret = cpumask_parse(buf, cpumask); 5052 if (!ret) 5053 ret = workqueue_set_unbound_cpumask(cpumask); 5054 5055 free_cpumask_var(cpumask); 5056 return ret ? ret : count; 5057 } 5058 5059 static struct device_attribute wq_sysfs_cpumask_attr = 5060 __ATTR(cpumask, 0644, wq_unbound_cpumask_show, 5061 wq_unbound_cpumask_store); 5062 5063 static int __init wq_sysfs_init(void) 5064 { 5065 int err; 5066 5067 err = subsys_virtual_register(&wq_subsys, NULL); 5068 if (err) 5069 return err; 5070 5071 return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr); 5072 } 5073 core_initcall(wq_sysfs_init); 5074 5075 static void wq_device_release(struct device *dev) 5076 { 5077 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 5078 5079 kfree(wq_dev); 5080 } 5081 5082 /** 5083 * workqueue_sysfs_register - make a workqueue visible in sysfs 5084 * @wq: the workqueue to register 5085 * 5086 * Expose @wq in sysfs under /sys/bus/workqueue/devices. 5087 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set 5088 * which is the preferred method. 5089 * 5090 * Workqueue user should use this function directly iff it wants to apply 5091 * workqueue_attrs before making the workqueue visible in sysfs; otherwise, 5092 * apply_workqueue_attrs() may race against userland updating the 5093 * attributes. 5094 * 5095 * Return: 0 on success, -errno on failure. 5096 */ 5097 int workqueue_sysfs_register(struct workqueue_struct *wq) 5098 { 5099 struct wq_device *wq_dev; 5100 int ret; 5101 5102 /* 5103 * Adjusting max_active or creating new pwqs by applying 5104 * attributes breaks ordering guarantee. Disallow exposing ordered 5105 * workqueues. 5106 */ 5107 if (WARN_ON(wq->flags & __WQ_ORDERED)) 5108 return -EINVAL; 5109 5110 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); 5111 if (!wq_dev) 5112 return -ENOMEM; 5113 5114 wq_dev->wq = wq; 5115 wq_dev->dev.bus = &wq_subsys; 5116 wq_dev->dev.init_name = wq->name; 5117 wq_dev->dev.release = wq_device_release; 5118 5119 /* 5120 * unbound_attrs are created separately. Suppress uevent until 5121 * everything is ready. 5122 */ 5123 dev_set_uevent_suppress(&wq_dev->dev, true); 5124 5125 ret = device_register(&wq_dev->dev); 5126 if (ret) { 5127 kfree(wq_dev); 5128 wq->wq_dev = NULL; 5129 return ret; 5130 } 5131 5132 if (wq->flags & WQ_UNBOUND) { 5133 struct device_attribute *attr; 5134 5135 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) { 5136 ret = device_create_file(&wq_dev->dev, attr); 5137 if (ret) { 5138 device_unregister(&wq_dev->dev); 5139 wq->wq_dev = NULL; 5140 return ret; 5141 } 5142 } 5143 } 5144 5145 dev_set_uevent_suppress(&wq_dev->dev, false); 5146 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); 5147 return 0; 5148 } 5149 5150 /** 5151 * workqueue_sysfs_unregister - undo workqueue_sysfs_register() 5152 * @wq: the workqueue to unregister 5153 * 5154 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister. 
5155 */ 5156 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) 5157 { 5158 struct wq_device *wq_dev = wq->wq_dev; 5159 5160 if (!wq->wq_dev) 5161 return; 5162 5163 wq->wq_dev = NULL; 5164 device_unregister(&wq_dev->dev); 5165 } 5166 #else /* CONFIG_SYSFS */ 5167 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { } 5168 #endif /* CONFIG_SYSFS */ 5169 5170 static void __init wq_numa_init(void) 5171 { 5172 cpumask_var_t *tbl; 5173 int node, cpu; 5174 5175 if (num_possible_nodes() <= 1) 5176 return; 5177 5178 if (wq_disable_numa) { 5179 pr_info("workqueue: NUMA affinity support disabled\n"); 5180 return; 5181 } 5182 5183 wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL); 5184 BUG_ON(!wq_update_unbound_numa_attrs_buf); 5185 5186 /* 5187 * We want masks of possible CPUs of each node which isn't readily 5188 * available. Build one from cpu_to_node() which should have been 5189 * fully initialized by now. 5190 */ 5191 tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL); 5192 BUG_ON(!tbl); 5193 5194 for_each_node(node) 5195 BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL, 5196 node_online(node) ? node : NUMA_NO_NODE)); 5197 5198 for_each_possible_cpu(cpu) { 5199 node = cpu_to_node(cpu); 5200 if (WARN_ON(node == NUMA_NO_NODE)) { 5201 pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu); 5202 /* happens iff arch is bonkers, let's just proceed */ 5203 return; 5204 } 5205 cpumask_set_cpu(cpu, tbl[node]); 5206 } 5207 5208 wq_numa_possible_cpumask = tbl; 5209 wq_numa_enabled = true; 5210 } 5211 5212 static int __init init_workqueues(void) 5213 { 5214 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; 5215 int i, cpu; 5216 5217 WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 5218 5219 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL)); 5220 cpumask_copy(wq_unbound_cpumask, cpu_possible_mask); 5221 5222 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 5223 5224 cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP); 5225 hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN); 5226 5227 wq_numa_init(); 5228 5229 /* initialize CPU pools */ 5230 for_each_possible_cpu(cpu) { 5231 struct worker_pool *pool; 5232 5233 i = 0; 5234 for_each_cpu_worker_pool(pool, cpu) { 5235 BUG_ON(init_worker_pool(pool)); 5236 pool->cpu = cpu; 5237 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); 5238 pool->attrs->nice = std_nice[i++]; 5239 pool->node = cpu_to_node(cpu); 5240 5241 /* alloc pool ID */ 5242 mutex_lock(&wq_pool_mutex); 5243 BUG_ON(worker_pool_assign_id(pool)); 5244 mutex_unlock(&wq_pool_mutex); 5245 } 5246 } 5247 5248 /* create the initial worker */ 5249 for_each_online_cpu(cpu) { 5250 struct worker_pool *pool; 5251 5252 for_each_cpu_worker_pool(pool, cpu) { 5253 pool->flags &= ~POOL_DISASSOCIATED; 5254 BUG_ON(!create_worker(pool)); 5255 } 5256 } 5257 5258 /* create default unbound and ordered wq attrs */ 5259 for (i = 0; i < NR_STD_WORKER_POOLS; i++) { 5260 struct workqueue_attrs *attrs; 5261 5262 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); 5263 attrs->nice = std_nice[i]; 5264 unbound_std_wq_attrs[i] = attrs; 5265 5266 /* 5267 * An ordered wq should have only one pwq as ordering is 5268 * guaranteed by max_active which is enforced by pwqs. 5269 * Turn off NUMA so that dfl_pwq is used for all nodes. 
5270 */ 5271 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); 5272 attrs->nice = std_nice[i]; 5273 attrs->no_numa = true; 5274 ordered_wq_attrs[i] = attrs; 5275 } 5276 5277 system_wq = alloc_workqueue("events", 0, 0); 5278 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0); 5279 system_long_wq = alloc_workqueue("events_long", 0, 0); 5280 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 5281 WQ_UNBOUND_MAX_ACTIVE); 5282 system_freezable_wq = alloc_workqueue("events_freezable", 5283 WQ_FREEZABLE, 0); 5284 system_power_efficient_wq = alloc_workqueue("events_power_efficient", 5285 WQ_POWER_EFFICIENT, 0); 5286 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient", 5287 WQ_FREEZABLE | WQ_POWER_EFFICIENT, 5288 0); 5289 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || 5290 !system_unbound_wq || !system_freezable_wq || 5291 !system_power_efficient_wq || 5292 !system_freezable_power_efficient_wq); 5293 return 0; 5294 } 5295 early_initcall(init_workqueues); 5296