/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 * Derived from the taskqueue/keventd code by:
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There are two worker pools for each CPU (one for
 * normal work items and the other for high priority ones) and some extra
 * pools for workqueues which are not bound to any specific CPU - the
 * number of these backing pools is dynamic.
 *
 * Please read Documentation/workqueue.txt for details.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>

#include "workqueue_internal.h"

enum {
	/*
	 * worker_pool flags
	 *
	 * A bound pool is either associated or disassociated with its CPU.
	 * While associated (!DISASSOCIATED), all workers are bound to the
	 * CPU and none has %WORKER_UNBOUND set and concurrency management
	 * is in effect.
	 *
	 * While DISASSOCIATED, the cpu may be offline and all workers have
	 * %WORKER_UNBOUND set and concurrency management disabled, and may
	 * be executing on any CPU.  The pool behaves as an unbound one.
	 *
	 * Note that DISASSOCIATED should be flipped only while holding
	 * attach_mutex to avoid changing binding state while
	 * worker_attach_to_pool() is in progress.
	 */
	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */

	/* worker flags */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
				  WORKER_UNBOUND | WORKER_REBOUND,

	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */

	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */

	/*
	 * Rescue workers are used only on emergencies and shared by
	 * all cpus.  Give MIN_NICE.
	 */
	RESCUER_NICE_LEVEL	= MIN_NICE,
	HIGHPRI_NICE_LEVEL	= MIN_NICE,

	WQ_NAME_LEN		= 24,
};
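
/*
 * Editorial worked example (not part of the original file): with
 * CONFIG_HZ=1000, MAYDAY_INITIAL_TIMEOUT above is 1000/100 = 10 ticks
 * (10ms); with CONFIG_HZ=100, HZ / 100 == 1 falls below the two-tick
 * minimum and becomes 2 ticks (20ms).  Similarly, MAX_IDLE_WORKERS_RATIO
 * feeds too_many_workers() later in this file: with 16 busy workers, a
 * pool with 5 idle workers is fine ((5 - 2) * 4 = 12 < 16) while a 6th
 * idle worker tips it over ((6 - 2) * 4 = 16 >= 16), so the idle timer
 * starts reaping excess workers after IDLE_WORKER_TIMEOUT.
 */
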
/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected.  Access with pool->lock held.
 *
 * X: During normal operation, modification requires pool->lock and should
 *    be done only from local cpu.  Either disabling preemption on local
 *    cpu or grabbing pool->lock is enough for read access.  If
 *    POOL_DISASSOCIATED is set, it's identical to L.
 *
 * A: pool->attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes.  Sched-RCU protected for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes.  Sched-RCU protected for reads.
 *
 * MD: wq_mayday_lock protected.
 */

/* struct worker is defined in workqueue_internal.h */

struct worker_pool {
	spinlock_t		lock;		/* the pool lock */
	int			cpu;		/* I: the associated cpu */
	int			node;		/* I: the associated node ID */
	int			id;		/* I: pool ID */
	unsigned int		flags;		/* X: flags */

	struct list_head	worklist;	/* L: list of pending works */
	int			nr_workers;	/* L: total number of workers */

	/* nr_idle includes the ones off idle_list for rebinding */
	int			nr_idle;	/* L: currently idle ones */

	struct list_head	idle_list;	/* X: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	/* a worker is either on busy_hash or idle_list, or the manager */
	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
						/* L: hash of busy workers */

	/* see manage_workers() for details on the two manager mutexes */
	struct mutex		manager_arb;	/* manager arbitration */
	struct mutex		attach_mutex;	/* attach/detach exclusion */
	struct list_head	workers;	/* A: attached workers */
	struct completion	*detach_completion; /* all workers detached */

	struct ida		worker_ida;	/* worker IDs for task name */

	struct workqueue_attrs	*attrs;		/* I: worker attributes */
	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
	int			refcnt;		/* PL: refcnt for unbound pools */

	/*
	 * The current concurrency level.  As it's likely to be accessed
	 * from other CPUs during try_to_wake_up(), put it in a separate
	 * cacheline.
	 */
	atomic_t		nr_running ____cacheline_aligned_in_smp;

	/*
	 * Destruction of pool is sched-RCU protected to allow dereferences
	 * from get_work_pool().
	 */
	struct rcu_head		rcu;
} ____cacheline_aligned_in_smp;

/*
 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 * number of flag bits.
 */
struct pool_workqueue {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			refcnt;		/* L: reference count */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
	struct list_head	mayday_node;	/* MD: node on wq->maydays */

	/*
	 * Release of unbound pwq is punted to system_wq.  See put_pwq()
	 * and pwq_unbound_release_workfn() for details.  pool_workqueue
	 * itself is also sched-RCU protected so that the first pwq can be
	 * determined without grabbing wq->mutex.
	 */
	struct work_struct	unbound_release_work;
	struct rcu_head		rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);
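
/*
 * Editorial sketch (not part of the original file): because a
 * pool_workqueue is aligned to 1 << WORK_STRUCT_FLAG_BITS, its address has
 * zeros in the low WORK_STRUCT_FLAG_BITS bits, so a queued work item can
 * pack the pwq pointer and the WORK_STRUCT_* flags into the single
 * work->data word, roughly:
 *
 *	data = (unsigned long)pwq | WORK_STRUCT_PENDING | WORK_STRUCT_PWQ;
 *	pwq  = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *
 * set_work_pwq() and get_work_pwq() below implement this packing and
 * unpacking.
 */
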
/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* WQ: list of flushers */
	int			flush_color;	/* WQ: flush color waiting for */
	struct completion	done;		/* flush completion */
};

struct wq_device;

/*
 * The externally visible workqueue.  It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
 */
struct workqueue_struct {
	struct list_head	pwqs;		/* WR: all pwqs of this wq */
	struct list_head	list;		/* PL: list of all workqueues */

	struct mutex		mutex;		/* protects this wq */
	int			work_color;	/* WQ: current work color */
	int			flush_color;	/* WQ: current flush color */
	atomic_t		nr_pwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
	struct list_head	flusher_queue;	/* WQ: flush waiters */
	struct list_head	flusher_overflow; /* WQ: flush overflow list */

	struct list_head	maydays;	/* MD: pwqs requesting rescue */
	struct worker		*rescuer;	/* I: rescue worker */

	int			nr_drainers;	/* WQ: drain in progress */
	int			saved_max_active; /* WQ: saved pwq max_active */

	struct workqueue_attrs	*unbound_attrs;	/* WQ: only for unbound wqs */
	struct pool_workqueue	*dfl_pwq;	/* WQ: only for unbound wqs */

#ifdef CONFIG_SYSFS
	struct wq_device	*wq_dev;	/* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
	char			name[WQ_NAME_LEN]; /* I: workqueue name */

	/* hot fields used during command issue, aligned to cacheline */
	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
	struct pool_workqueue __rcu *numa_pwq_tbl[]; /* FR: unbound pwqs indexed by node */
};

static struct kmem_cache *pwq_cache;

static cpumask_var_t *wq_numa_possible_cpumask;
					/* possible CPUs of each node */

static bool wq_disable_numa;
module_param_named(disable_numa, wq_disable_numa, bool, 0444);

/* see the comment above the definition of WQ_POWER_EFFICIENT */
#ifdef CONFIG_WQ_POWER_EFFICIENT_DEFAULT
static bool wq_power_efficient = true;
#else
static bool wq_power_efficient;
#endif

module_param_named(power_efficient, wq_power_efficient, bool, 0444);

static bool wq_numa_enabled;		/* unbound NUMA affinity enabled */

/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */

static LIST_HEAD(workqueues);		/* PL: list of all workqueues */
static bool workqueue_freezing;		/* PL: have wqs started freezing? */

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
				     cpu_worker_pools);

static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */

/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];

struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_wq);
struct workqueue_struct *system_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);

static int worker_thread(void *__worker);
static void copy_workqueue_attrs(struct workqueue_attrs *to,
				 const struct workqueue_attrs *from);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define assert_rcu_or_pool_mutex()					\
	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
			   lockdep_is_held(&wq_pool_mutex),		\
			   "sched RCU or wq_pool_mutex should be held")

#define assert_rcu_or_wq_mutex(wq)					\
	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
			   lockdep_is_held(&wq->mutex),			\
			   "sched RCU or wq->mutex should be held")

#define for_each_cpu_worker_pool(pool, cpu)				\
	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
	     (pool)++)

/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with wq_pool_mutex held or sched RCU read
 * locked.  If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool(pool, pi)						\
	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
		if (({ assert_rcu_or_pool_mutex(); false; })) { }	\
		else

/**
 * for_each_pool_worker - iterate through all workers of a worker_pool
 * @worker: iteration cursor
 * @pool: worker_pool to iterate workers of
 *
 * This must be called with @pool->attach_mutex.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool_worker(worker, pool)				\
	list_for_each_entry((worker), &(pool)->workers, node)		\
		if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
		else
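
/*
 * Editorial note (not part of the original file): the
 * "if (({ ...; false; })) { } else" construct in the iteration macros above
 * exists so that the lockdep assertion runs on each iteration while the
 * caller-supplied loop body still binds to the macro as usual.  A
 * hypothetical expansion (do_something() is made up):
 *
 *	for_each_pool(pool, pi)
 *		do_something(pool);
 *
 * becomes
 *
 *	idr_for_each_entry(&worker_pool_idr, pool, pi)
 *		if (({ assert_rcu_or_pool_mutex(); false; })) { }
 *		else
 *			do_something(pool);
 *
 * The statement expression always evaluates to false, so the else branch,
 * i.e. the caller's body, is what actually executes.
 */
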
/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with wq->mutex held or sched RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pwq(pwq, wq)						\
	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)		\
		if (({ assert_rcu_or_wq_mutex(wq); false; })) { }	\
		else

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
	return ((struct work_struct *) addr)->func;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

void destroy_delayed_work_on_stack(struct delayed_work *work)
{
	destroy_timer_on_stack(&work->timer);
	debug_object_free(&work->work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/**
 * worker_pool_assign_id - allocate ID and assign it to @pool
 * @pool: the pool pointer of interest
 *
 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 * successfully, -errno on failure.
 */
static int worker_pool_assign_id(struct worker_pool *pool)
{
	int ret;

	lockdep_assert_held(&wq_pool_mutex);

	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
			GFP_KERNEL);
	if (ret >= 0) {
		pool->id = ret;
		return 0;
	}
	return ret;
}

/**
 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
 * @wq: the target workqueue
 * @node: the node ID
 *
 * This must be called either with pwq_lock held or sched RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * Return: The unbound pool_workqueue for @node.
 */
static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
						  int node)
{
	assert_rcu_or_wq_mutex(wq);
	return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

/*
 * While queued, %WORK_STRUCT_PWQ is set and the non-flag bits of a work's
 * data contain the pointer to the queued pwq.  Once execution starts, the
 * flag is cleared and the high bits contain OFFQ flags and pool ID.
 *
 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 * and clear_work_data() can be used to set the pwq, pool or clear
 * work->data.  These functions should only be called while the work is
 * owned - ie. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work.  Pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled.  pwq is
 * available only while the work item is queued.
 *
 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 * canceled.  While being canceled, a work item may have its PENDING set
 * but stay off timer and worklist for arbitrarily long and nobody should
 * try to steal the PENDING bit.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	WARN_ON_ONCE(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)pwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
}

static void set_work_pool_and_keep_pending(struct work_struct *work,
					   int pool_id)
{
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
		      WORK_STRUCT_PENDING);
}

static void set_work_pool_and_clear_pending(struct work_struct *work,
					    int pool_id)
{
	/*
	 * The following wmb is paired with the implied mb in
	 * test_and_set_bit(PENDING) and ensures all updates to @work made
	 * here are visible to and precede any updates by the next PENDING
	 * owner.
	 */
	smp_wmb();
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
}

static void clear_work_data(struct work_struct *work)
{
	smp_wmb();	/* see set_work_pool_and_clear_pending() */
	set_work_data(work, WORK_STRUCT_NO_POOL, 0);
}

static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
	else
		return NULL;
}

/**
 * get_work_pool - return the worker_pool a given work was associated with
 * @work: the work item of interest
 *
 * Pools are created and destroyed under wq_pool_mutex, and allow read
 * access under sched-RCU read lock.  As such, this function should be
 * called under wq_pool_mutex or with preemption disabled.
 *
 * All fields of the returned pool are accessible as long as the above
 * mentioned locking is in effect.  If the returned pool needs to be used
 * beyond the critical section, the caller is responsible for ensuring the
 * returned pool is and stays online.
 *
 * Return: The worker_pool @work was last associated with.  %NULL if none.
 */
static struct worker_pool *get_work_pool(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	int pool_id;

	assert_rcu_or_pool_mutex();

	if (data & WORK_STRUCT_PWQ)
		return ((struct pool_workqueue *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->pool;

	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
	if (pool_id == WORK_OFFQ_POOL_NONE)
		return NULL;

	return idr_find(&worker_pool_idr, pool_id);
}

/**
 * get_work_pool_id - return the worker pool ID a given work is associated with
 * @work: the work item of interest
 *
 * Return: The worker_pool ID @work was last associated with.
 * %WORK_OFFQ_POOL_NONE if none.
 */
static int get_work_pool_id(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return ((struct pool_workqueue *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;

	return data >> WORK_OFFQ_POOL_SHIFT;
}

static void mark_work_canceling(struct work_struct *work)
{
	unsigned long pool_id = get_work_pool_id(work);

	pool_id <<= WORK_OFFQ_POOL_SHIFT;
	set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
}

static bool work_is_canceling(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
}

/*
 * Policy functions.  These define the policies on how the global worker
 * pools are managed.  Unless noted otherwise, these functions assume that
 * they're being called with pool->lock held.
 */

static bool __need_more_worker(struct worker_pool *pool)
{
	return !atomic_read(&pool->nr_running);
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound pools as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && __need_more_worker(pool);
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
	return pool->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) &&
		atomic_read(&pool->nr_running) <= 1;
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
	return need_more_worker(pool) && !may_start_working(pool);
}
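
/*
 * Editorial sketch (not part of the original file) of how the policy
 * functions above drive concurrency management on a bound pool: with one
 * item on pool->worklist and nr_running == 0, need_more_worker() is true
 * and an idle worker is woken; once that worker is running a work item,
 * nr_running == 1, so keep_working() lets it keep draining the worklist
 * without waking further workers, and need_more_worker() stays false until
 * the running worker blocks and wq_worker_sleeping() drops nr_running back
 * to 0, at which point another idle worker is woken.
 */
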
/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
	bool managing = mutex_is_locked(&pool->manager_arb);
	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
	int nr_busy = pool->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}

/*
 * Wake up functions.
 */

/* Return the first idle worker.  Safe with preemption disabled */
static struct worker *first_idle_worker(struct worker_pool *pool)
{
	if (unlikely(list_empty(&pool->idle_list)))
		return NULL;

	return list_first_entry(&pool->idle_list, struct worker, entry);
}

/**
 * wake_up_worker - wake up an idle worker
 * @pool: worker pool to wake worker from
 *
 * Wake up the first idle worker of @pool.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void wake_up_worker(struct worker_pool *pool)
{
	struct worker *worker = first_idle_worker(pool);

	if (likely(worker))
		wake_up_process(worker->task);
}

/**
 * wq_worker_waking_up - a worker is waking up
 * @task: task waking up
 * @cpu: CPU @task is waking up to
 *
 * This function is called during try_to_wake_up() when a worker is
 * being awoken.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 */
void wq_worker_waking_up(struct task_struct *task, int cpu)
{
	struct worker *worker = kthread_data(task);

	if (!(worker->flags & WORKER_NOT_RUNNING)) {
		WARN_ON_ONCE(worker->pool->cpu != cpu);
		atomic_inc(&worker->pool->nr_running);
	}
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 * @cpu: CPU in question, must be the current CPU number
 *
 * This function is called during schedule() when a busy worker is
 * going to sleep.  Another worker on the same cpu can be woken up by
 * returning a pointer to its task.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 *
 * Return:
 * Worker task on @cpu to wake up, %NULL if none.
 */
struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
{
	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
	struct worker_pool *pool;

	/*
	 * Rescuers, which may not have all the fields set up like normal
	 * workers, also reach here, let's not access anything before
	 * checking NOT_RUNNING.
	 */
	if (worker->flags & WORKER_NOT_RUNNING)
		return NULL;

	pool = worker->pool;

	/* this can only happen on the local cpu */
	if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu))
		return NULL;

	/*
	 * The counterpart of the following dec_and_test, implied mb,
	 * worklist not empty test sequence is in insert_work().
	 * Please read comment there.
	 *
	 * NOT_RUNNING is clear.  This means that we're bound to and
	 * running on the local cpu w/ rq lock held and preemption
	 * disabled, which in turn means that no one else could be
	 * manipulating idle_list, so dereferencing idle_list without pool
	 * lock is safe.
	 */
	if (atomic_dec_and_test(&pool->nr_running) &&
	    !list_empty(&pool->worklist))
		to_wakeup = first_idle_worker(pool);
	return to_wakeup ? to_wakeup->task : NULL;
}

/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock)
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
{
	struct worker_pool *pool = worker->pool;

	WARN_ON_ONCE(worker->task != current);

	/* If transitioning into NOT_RUNNING, adjust nr_running. */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		atomic_dec(&pool->nr_running);
	}

	worker->flags |= flags;
}
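
/*
 * Editorial timeline (not part of the original file) of the scheduler
 * hooks and flag helpers above, for a bound worker executing one item:
 *
 *	worker picks up a work item	nr_running == 1
 *	work fn blocks on I/O		schedule() -> wq_worker_sleeping():
 *					nr_running drops to 0 and, if the
 *					worklist is non-empty, an idle
 *					worker's task is returned and woken
 *	work fn is woken again		try_to_wake_up() -> wq_worker_waking_up():
 *					nr_running goes back to 1
 *
 * worker_set_flags()/worker_clr_flags() keep the same counter consistent
 * when a worker enters or leaves a WORKER_NOT_RUNNING state (e.g.
 * WORKER_CPU_INTENSIVE) outside the scheduler path.
 */
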
/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock)
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct worker_pool *pool = worker->pool;
	unsigned int oflags = worker->flags;

	WARN_ON_ONCE(worker->task != current);

	worker->flags &= ~flags;

	/*
	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
	 * of multiple flags, not a single flag.
	 */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
			atomic_inc(&pool->nr_running);
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @pool: pool of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @pool by searching
 * @pool->busy_hash which is keyed by the address of @work.  For a worker
 * to match, its current execution should match the address of @work and
 * its work function.  This is to avoid unwanted dependency between
 * unrelated work executions through a work item being recycled while still
 * being executed.
 *
 * This is a bit tricky.  A work item may be freed once its execution
 * starts and nothing prevents the freed area from being recycled for
 * another work item.  If the same work item address ends up being reused
 * before the original execution finishes, workqueue will identify the
 * recycled work item as currently executing and make it wait until the
 * current execution finishes, introducing an unwanted dependency.
 *
 * This function checks the work item address and work function to avoid
 * false positives.  Note that this isn't complete as one may construct a
 * work function which can introduce dependency onto itself through a
 * recycled work item.  Well, if somebody wants to shoot oneself in the
 * foot that badly, there's only so much we can do, and if such deadlock
 * actually occurs, it should be easy to locate the culprit work function.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 *
 * Return:
 * Pointer to worker which is executing @work if found, %NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct worker_pool *pool,
						 struct work_struct *work)
{
	struct worker *worker;

	hash_for_each_possible(pool->busy_hash, worker, hentry,
			       (unsigned long)work)
		if (worker->current_work == work &&
		    worker->current_func == work->func)
			return worker;

	return NULL;
}

/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

/**
 * get_pwq - get an extra reference on the specified pool_workqueue
 * @pwq: pool_workqueue to get
 *
 * Obtain an extra reference on @pwq.  The caller should guarantee that
 * @pwq has positive refcnt and be holding the matching pool->lock.
 */
static void get_pwq(struct pool_workqueue *pwq)
{
	lockdep_assert_held(&pwq->pool->lock);
	WARN_ON_ONCE(pwq->refcnt <= 0);
	pwq->refcnt++;
}

/**
 * put_pwq - put a pool_workqueue reference
 * @pwq: pool_workqueue to put
 *
 * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
 * destruction.  The caller should be holding the matching pool->lock.
 */
static void put_pwq(struct pool_workqueue *pwq)
{
	lockdep_assert_held(&pwq->pool->lock);
	if (likely(--pwq->refcnt))
		return;
	if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
		return;
	/*
	 * @pwq can't be released under pool->lock, bounce to
	 * pwq_unbound_release_workfn().  This never recurses on the same
	 * pool->lock as this path is taken only for unbound workqueues and
	 * the release work item is scheduled on a per-cpu workqueue.  To
	 * avoid lockdep warning, unbound pool->locks are given lockdep
	 * subclass of 1 in get_unbound_pool().
	 */
	schedule_work(&pwq->unbound_release_work);
}

/**
 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
 * @pwq: pool_workqueue to put (can be %NULL)
 *
 * put_pwq() with locking.  This function also allows %NULL @pwq.
 */
static void put_pwq_unlocked(struct pool_workqueue *pwq)
{
	if (pwq) {
		/*
		 * As both pwqs and pools are sched-RCU protected, the
		 * following lock operations are safe.
		 */
		spin_lock_irq(&pwq->pool->lock);
		put_pwq(pwq);
		spin_unlock_irq(&pwq->pool->lock);
	}
}
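
/*
 * Editorial note (not part of the original file): the typical pwq refcount
 * flow is that insert_work() takes a reference via get_pwq() when a work
 * item is queued, and pwq_dec_nr_in_flight() drops it via put_pwq() once
 * the item has finished or been removed, so a pool_workqueue cannot be
 * released while it still has queued or in-flight work.
 */
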
static void pwq_activate_delayed_work(struct work_struct *work)
{
	struct pool_workqueue *pwq = get_work_pwq(work);

	trace_workqueue_activate_work(work);
	move_linked_works(work, &pwq->pool->worklist, NULL);
	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
	pwq->nr_active++;
}

static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
{
	struct work_struct *work = list_first_entry(&pwq->delayed_works,
						    struct work_struct, entry);

	pwq_activate_delayed_work(work);
}

/**
 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
 * @pwq: pwq of interest
 * @color: color of work which left the queue
 *
 * A work item has either completed or been removed from the pending queue;
 * decrement nr_in_flight of its pwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
{
	/* uncolored work items don't participate in flushing or nr_active */
	if (color == WORK_NO_COLOR)
		goto out_put;

	pwq->nr_in_flight[color]--;

	pwq->nr_active--;
	if (!list_empty(&pwq->delayed_works)) {
		/* one down, submit a delayed one */
		if (pwq->nr_active < pwq->max_active)
			pwq_activate_first_delayed(pwq);
	}

	/* is flush in progress and are we at the flushing tip? */
	if (likely(pwq->flush_color != color))
		goto out_put;

	/* are there still in-flight works? */
	if (pwq->nr_in_flight[color])
		goto out_put;

	/* this pwq is done, clear flush_color */
	pwq->flush_color = -1;

	/*
	 * If this was the last pwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
		complete(&pwq->wq->first_flusher->done);
out_put:
	put_pwq(pwq);
}

/**
 * try_to_grab_pending - steal work item from worklist and disable irq
 * @work: work item to steal
 * @is_dwork: @work is a delayed_work
 * @flags: place to store irq state
 *
 * Try to grab PENDING bit of @work.  This function can handle @work in any
 * stable state - idle, on timer or on worklist.
 *
 * Return:
 *  1		if @work was pending and we successfully stole PENDING
 *  0		if @work was idle and we claimed PENDING
 *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
 *  -ENOENT	if someone else is canceling @work, this state may persist
 *		for arbitrarily long
 *
 * Note:
 * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
 * interrupted while holding PENDING and @work off queue, irq must be
 * disabled on entry.  This, combined with delayed_work->timer being
 * irqsafe, ensures that we return -EAGAIN for finite short period of time.
 *
 * On successful return, >= 0, irq is disabled and the caller is
 * responsible for releasing it using local_irq_restore(*@flags).
 *
 * This function is safe to call from any context including IRQ handler.
 */
static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
			       unsigned long *flags)
{
	struct worker_pool *pool;
	struct pool_workqueue *pwq;

	local_irq_save(*flags);

	/* try to steal the timer if it exists */
	if (is_dwork) {
		struct delayed_work *dwork = to_delayed_work(work);

		/*
		 * dwork->timer is irqsafe.  If del_timer() fails, it's
		 * guaranteed that the timer is not queued anywhere and not
		 * running on the local CPU.
		 */
		if (likely(del_timer(&dwork->timer)))
			return 1;
	}

	/* try to claim PENDING the normal way */
	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	pool = get_work_pool(work);
	if (!pool)
		goto fail;

	spin_lock(&pool->lock);
	/*
	 * work->data is guaranteed to point to pwq only while the work
	 * item is queued on pwq->wq, and both updating work->data to point
	 * to pwq on queueing and to pool on dequeueing are done under
	 * pwq->pool->lock.  This in turn guarantees that, if work->data
	 * points to pwq which is associated with a locked pool, the work
	 * item is currently queued on that pool.
	 */
	pwq = get_work_pwq(work);
	if (pwq && pwq->pool == pool) {
		debug_work_deactivate(work);

		/*
		 * A delayed work item cannot be grabbed directly because
		 * it might have linked NO_COLOR work items which, if left
		 * on the delayed_list, will confuse pwq->nr_active
		 * management later on and cause stall.  Make sure the work
		 * item is activated before grabbing.
		 */
		if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
			pwq_activate_delayed_work(work);

		list_del_init(&work->entry);
		pwq_dec_nr_in_flight(pwq, get_work_color(work));

		/* work->data points to pwq iff queued, point to pool */
		set_work_pool_and_keep_pending(work, pool->id);

		spin_unlock(&pool->lock);
		return 1;
	}
	spin_unlock(&pool->lock);
fail:
	local_irq_restore(*flags);
	if (work_is_canceling(work))
		return -ENOENT;
	cpu_relax();
	return -EAGAIN;
}

/**
 * insert_work - insert a work into a pool
 * @pwq: pwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
 * work_struct flags.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
			struct list_head *head, unsigned int extra_flags)
{
	struct worker_pool *pool = pwq->pool;

	/* we own @work, set data and link */
	set_work_pwq(work, pwq, extra_flags);
	list_add_tail(&work->entry, head);
	get_pwq(pwq);

	/*
	 * Ensure either wq_worker_sleeping() sees the above
	 * list_add_tail() or we see zero nr_running to avoid workers lying
	 * around lazily while there are works to be processed.
	 */
	smp_mb();

	if (__need_more_worker(pool))
		wake_up_worker(pool);
}
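
/*
 * Editorial sketch (not part of the original file): callers that need to
 * take over a work item, such as mod_delayed_work_on() below, typically
 * wrap try_to_grab_pending() in a busy-retry loop and restore interrupts
 * once they own PENDING:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	do {
 *		ret = try_to_grab_pending(&dwork->work, true, &flags);
 *	} while (unlikely(ret == -EAGAIN));
 *
 *	if (likely(ret >= 0)) {
 *		// requeue or otherwise modify the now-owned item
 *		local_irq_restore(flags);
 *	}
 */
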
/*
 * Test whether @work is being queued from another work executing on the
 * same workqueue.
 */
static bool is_chained_work(struct workqueue_struct *wq)
{
	struct worker *worker;

	worker = current_wq_worker();
	/*
	 * Return %true iff I'm a worker executing a work item on @wq.  If
	 * I'm @worker, it's safe to dereference it without locking.
	 */
	return worker && worker->current_pwq->wq == wq;
}

static void __queue_work(int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct pool_workqueue *pwq;
	struct worker_pool *last_pool;
	struct list_head *worklist;
	unsigned int work_flags;
	unsigned int req_cpu = cpu;

	/*
	 * While a work item is PENDING && off queue, a task trying to
	 * steal the PENDING will busy-loop waiting for it to either get
	 * queued or lose PENDING.  Grabbing PENDING and queueing should
	 * happen with IRQ disabled.
	 */
	WARN_ON_ONCE(!irqs_disabled());

	debug_work_activate(work);

	/* if draining, only works from the same workqueue are allowed */
	if (unlikely(wq->flags & __WQ_DRAINING) &&
	    WARN_ON_ONCE(!is_chained_work(wq)))
		return;
retry:
	if (req_cpu == WORK_CPU_UNBOUND)
		cpu = raw_smp_processor_id();

	/* pwq which will be used unless @work is executing elsewhere */
	if (!(wq->flags & WQ_UNBOUND))
		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
	else
		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));

	/*
	 * If @work was previously on a different pool, it might still be
	 * running there, in which case the work needs to be queued on that
	 * pool to guarantee non-reentrancy.
	 */
	last_pool = get_work_pool(work);
	if (last_pool && last_pool != pwq->pool) {
		struct worker *worker;

		spin_lock(&last_pool->lock);

		worker = find_worker_executing_work(last_pool, work);

		if (worker && worker->current_pwq->wq == wq) {
			pwq = worker->current_pwq;
		} else {
			/* meh... not running there, queue here */
			spin_unlock(&last_pool->lock);
			spin_lock(&pwq->pool->lock);
		}
	} else {
		spin_lock(&pwq->pool->lock);
	}

	/*
	 * pwq is determined and locked.  For unbound pools, we could have
	 * raced with pwq release and it could already be dead.  If its
	 * refcnt is zero, repeat pwq selection.  Note that pwqs never die
	 * without another pwq replacing it in the numa_pwq_tbl or while
	 * work items are executing on it, so the retrying is guaranteed to
	 * make forward-progress.
	 */
	if (unlikely(!pwq->refcnt)) {
		if (wq->flags & WQ_UNBOUND) {
			spin_unlock(&pwq->pool->lock);
			cpu_relax();
			goto retry;
		}
		/* oops */
		WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
			  wq->name, cpu);
	}

	/* pwq determined, queue */
	trace_workqueue_queue_work(req_cpu, pwq, work);

	if (WARN_ON(!list_empty(&work->entry))) {
		spin_unlock(&pwq->pool->lock);
		return;
	}

	pwq->nr_in_flight[pwq->work_color]++;
	work_flags = work_color_to_flags(pwq->work_color);

	if (likely(pwq->nr_active < pwq->max_active)) {
		trace_workqueue_activate_work(work);
		pwq->nr_active++;
		worklist = &pwq->pool->worklist;
	} else {
		work_flags |= WORK_STRUCT_DELAYED;
		worklist = &pwq->delayed_works;
	}

	insert_work(pwq, work, worklist, work_flags);

	spin_unlock(&pwq->pool->lock);
}

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 *
 * Return: %false if @work was already on a queue, %true otherwise.
 */
bool queue_work_on(int cpu, struct workqueue_struct *wq,
		   struct work_struct *work)
{
	bool ret = false;
	unsigned long flags;

	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = true;
	}

	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL(queue_work_on);

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;

	/* should have been called from irqsafe timer with irq already off */
	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
}
EXPORT_SYMBOL(delayed_work_timer_fn);

static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
				 struct delayed_work *dwork, unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
		     timer->data != (unsigned long)dwork);
	WARN_ON_ONCE(timer_pending(timer));
	WARN_ON_ONCE(!list_empty(&work->entry));

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on there being no such delay when @delay is 0.
	 */
	if (!delay) {
		__queue_work(cpu, wq, &dwork->work);
		return;
	}

	timer_stats_timer_set_start_info(&dwork->timer);

	dwork->wq = wq;
	dwork->cpu = cpu;
	timer->expires = jiffies + delay;

	if (unlikely(cpu != WORK_CPU_UNBOUND))
		add_timer_on(timer, cpu);
	else
		add_timer(timer);
}

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Return: %false if @work was already on a queue, %true otherwise.  If
 * @delay is zero and @dwork is idle, it will be scheduled for immediate
 * execution.
 */
bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			   struct delayed_work *dwork, unsigned long delay)
{
	struct work_struct *work = &dwork->work;
	bool ret = false;
	unsigned long flags;

	/* read the comment in __queue_work() */
	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_delayed_work(cpu, wq, dwork, delay);
		ret = true;
	}

	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);
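
/*
 * Editorial usage sketch (not part of the original file); the driver-side
 * names (my_dev, my_timeout_fn) are hypothetical:
 *
 *	struct my_dev {
 *		struct delayed_work	timeout_work;
 *	};
 *
 *	static void my_timeout_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(to_delayed_work(work),
 *						  struct my_dev, timeout_work);
 *		// handle the timeout in process context
 *	}
 *
 *	INIT_DELAYED_WORK(&dev->timeout_work, my_timeout_fn);
 *	queue_delayed_work(system_wq, &dev->timeout_work,
 *			   msecs_to_jiffies(100));
 *
 * queue_delayed_work() is the convenience wrapper that calls
 * queue_delayed_work_on() with WORK_CPU_UNBOUND.
 */
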
/**
 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
 * modify @dwork's timer so that it expires after @delay.  If @delay is
 * zero, @work is guaranteed to be scheduled immediately regardless of its
 * current state.
 *
 * Return: %false if @dwork was idle and queued, %true if @dwork was
 * pending and its timer was modified.
 *
 * This function is safe to call from any context including IRQ handler.
 * See try_to_grab_pending() for details.
 */
bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			 struct delayed_work *dwork, unsigned long delay)
{
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(&dwork->work, true, &flags);
	} while (unlikely(ret == -EAGAIN));

	if (likely(ret >= 0)) {
		__queue_delayed_work(cpu, wq, dwork, delay);
		local_irq_restore(flags);
	}

	/* -ENOENT from try_to_grab_pending() becomes %true */
	return ret;
}
EXPORT_SYMBOL_GPL(mod_delayed_work_on);

/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state.  Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * spin_lock_irq(pool->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
	    WARN_ON_ONCE(!list_empty(&worker->entry) &&
			 (worker->hentry.next || worker->hentry.pprev)))
		return;

	/* can't use worker_set_flags(), also called from create_worker() */
	worker->flags |= WORKER_IDLE;
	pool->nr_idle++;
	worker->last_active = jiffies;

	/* idle_list is LIFO */
	list_add(&worker->entry, &pool->idle_list);

	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);

	/*
	 * Sanity check nr_running.  Because wq_unbind_fn() releases
	 * pool->lock between setting %WORKER_UNBOUND and zapping
	 * nr_running, the warning may trigger spuriously.  Check iff
	 * unbind is not in progress.
	 */
	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
		     pool->nr_workers == pool->nr_idle &&
		     atomic_read(&pool->nr_running));
}

/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * LOCKING:
 * spin_lock_irq(pool->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
		return;
	worker_clr_flags(worker, WORKER_IDLE);
	pool->nr_idle--;
	list_del_init(&worker->entry);
}
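
/*
 * Editorial usage sketch (not part of the original file): because
 * mod_delayed_work_on() above either queues an idle dwork or pushes back
 * the timer of a pending one, it is handy for "restart the timeout"
 * patterns; dev->idle_work is a hypothetical field:
 *
 *	// (re)arm an inactivity timeout whether or not it is already pending
 *	mod_delayed_work(system_wq, &dev->idle_work, msecs_to_jiffies(500));
 *
 * mod_delayed_work() is the convenience wrapper that calls
 * mod_delayed_work_on() with WORK_CPU_UNBOUND.
 */
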
static struct worker *alloc_worker(int node)
{
	struct worker *worker;

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
	if (worker) {
		INIT_LIST_HEAD(&worker->entry);
		INIT_LIST_HEAD(&worker->scheduled);
		INIT_LIST_HEAD(&worker->node);
		/* on creation a worker is in !idle && prep state */
		worker->flags = WORKER_PREP;
	}
	return worker;
}

/**
 * worker_attach_to_pool() - attach a worker to a pool
 * @worker: worker to be attached
 * @pool: the target pool
 *
 * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
 * cpu-binding of @worker are kept coordinated with the pool across
 * cpu-[un]hotplugs.
 */
static void worker_attach_to_pool(struct worker *worker,
				  struct worker_pool *pool)
{
	mutex_lock(&pool->attach_mutex);

	/*
	 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
	 * online CPUs.  It'll be re-applied when any of the CPUs come up.
	 */
	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);

	/*
	 * The pool->attach_mutex ensures %POOL_DISASSOCIATED remains
	 * stable across this function.  See the comments above the
	 * flag definition for details.
	 */
	if (pool->flags & POOL_DISASSOCIATED)
		worker->flags |= WORKER_UNBOUND;

	list_add_tail(&worker->node, &pool->workers);

	mutex_unlock(&pool->attach_mutex);
}

/**
 * worker_detach_from_pool() - detach a worker from its pool
 * @worker: worker which is attached to its pool
 * @pool: the pool @worker is attached to
 *
 * Undo the attaching which had been done in worker_attach_to_pool().  The
 * caller worker shouldn't access the pool after detaching unless it has
 * other references to the pool.
 */
static void worker_detach_from_pool(struct worker *worker,
				    struct worker_pool *pool)
{
	struct completion *detach_completion = NULL;

	mutex_lock(&pool->attach_mutex);
	list_del(&worker->node);
	if (list_empty(&pool->workers))
		detach_completion = pool->detach_completion;
	mutex_unlock(&pool->attach_mutex);

	/* clear leftover flags without pool->lock after it is detached */
	worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);

	if (detach_completion)
		complete(detach_completion);
}

/**
 * create_worker - create a new workqueue worker
 * @pool: pool the new worker will belong to
 *
 * Create and start a new worker which is attached to @pool.
 *
 * CONTEXT:
 * Might sleep.  Does GFP_KERNEL allocations.
 *
 * Return:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct worker_pool *pool)
{
	struct worker *worker = NULL;
	int id = -1;
	char id_buf[16];

	/* ID is needed to determine kthread name */
	id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
	if (id < 0)
		goto fail;

	worker = alloc_worker(pool->node);
	if (!worker)
		goto fail;

	worker->pool = pool;
	worker->id = id;

	if (pool->cpu >= 0)
		snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
			 pool->attrs->nice < 0 ? "H" : "");
	else
		snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);

	worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
					      "kworker/%s", id_buf);
	if (IS_ERR(worker->task))
		goto fail;

	set_user_nice(worker->task, pool->attrs->nice);

	/* prevent userland from meddling with cpumask of workqueue workers */
	worker->task->flags |= PF_NO_SETAFFINITY;

	/* successful, attach the worker to the pool */
	worker_attach_to_pool(worker, pool);

	/* start the newly created worker */
	spin_lock_irq(&pool->lock);
	worker->pool->nr_workers++;
	worker_enter_idle(worker);
	wake_up_process(worker->task);
	spin_unlock_irq(&pool->lock);

	return worker;

fail:
	if (id >= 0)
		ida_simple_remove(&pool->worker_ida, id);
	kfree(worker);
	return NULL;
}

/**
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
 * Destroy @worker and adjust @pool stats accordingly.  The worker should
 * be idle.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void destroy_worker(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	lockdep_assert_held(&pool->lock);

	/* sanity check frenzy */
	if (WARN_ON(worker->current_work) ||
	    WARN_ON(!list_empty(&worker->scheduled)) ||
	    WARN_ON(!(worker->flags & WORKER_IDLE)))
		return;

	pool->nr_workers--;
	pool->nr_idle--;

	list_del_init(&worker->entry);
	worker->flags |= WORKER_DIE;
	wake_up_process(worker->task);
}

static void idle_worker_timeout(unsigned long __pool)
{
	struct worker_pool *pool = (void *)__pool;

	spin_lock_irq(&pool->lock);

	while (too_many_workers(pool)) {
		struct worker *worker;
		unsigned long expires;

		/* idle_list is kept in LIFO order, check the last one */
		worker = list_entry(pool->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires)) {
			mod_timer(&pool->idle_timer, expires);
			break;
		}

		destroy_worker(worker);
	}

	spin_unlock_irq(&pool->lock);
}

static void send_mayday(struct work_struct *work)
{
	struct pool_workqueue *pwq = get_work_pwq(work);
	struct workqueue_struct *wq = pwq->wq;

	lockdep_assert_held(&wq_mayday_lock);

	if (!wq->rescuer)
		return;

	/* mayday mayday mayday */
	if (list_empty(&pwq->mayday_node)) {
		/*
		 * If @pwq is for an unbound wq, its base ref may be put at
		 * any time due to an attribute change.  Pin @pwq until the
		 * rescuer is done with it.
		 */
		get_pwq(pwq);
		list_add_tail(&pwq->mayday_node, &wq->maydays);
		wake_up_process(wq->rescuer->task);
	}
}
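
/*
 * Editorial note (not part of the original file): the id_buf formats in
 * create_worker() are what give kernel worker threads their familiar
 * names, e.g. "kworker/3:2" for worker id 2 of CPU 3's normal pool,
 * "kworker/3:2H" for the high-priority (nice < 0) pool on the same CPU,
 * and "kworker/u5:0" for worker id 0 of the unbound pool with pool id 5.
 */
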
1795 */ 1796 get_pwq(pwq); 1797 list_add_tail(&pwq->mayday_node, &wq->maydays); 1798 wake_up_process(wq->rescuer->task); 1799 } 1800 } 1801 1802 static void pool_mayday_timeout(unsigned long __pool) 1803 { 1804 struct worker_pool *pool = (void *)__pool; 1805 struct work_struct *work; 1806 1807 spin_lock_irq(&wq_mayday_lock); /* for wq->maydays */ 1808 spin_lock(&pool->lock); 1809 1810 if (need_to_create_worker(pool)) { 1811 /* 1812 * We've been trying to create a new worker but 1813 * haven't been successful. We might be hitting an 1814 * allocation deadlock. Send distress signals to 1815 * rescuers. 1816 */ 1817 list_for_each_entry(work, &pool->worklist, entry) 1818 send_mayday(work); 1819 } 1820 1821 spin_unlock(&pool->lock); 1822 spin_unlock_irq(&wq_mayday_lock); 1823 1824 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 1825 } 1826 1827 /** 1828 * maybe_create_worker - create a new worker if necessary 1829 * @pool: pool to create a new worker for 1830 * 1831 * Create a new worker for @pool if necessary. @pool is guaranteed to 1832 * have at least one idle worker on return from this function. If 1833 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is 1834 * sent to all rescuers with works scheduled on @pool to resolve 1835 * possible allocation deadlock. 1836 * 1837 * On return, need_to_create_worker() is guaranteed to be %false and 1838 * may_start_working() %true. 1839 * 1840 * LOCKING: 1841 * spin_lock_irq(pool->lock) which may be released and regrabbed 1842 * multiple times. Does GFP_KERNEL allocations. Called only from 1843 * manager. 1844 * 1845 * Return: 1846 * %false if no action was taken and pool->lock stayed locked, %true 1847 * otherwise. 1848 */ 1849 static bool maybe_create_worker(struct worker_pool *pool) 1850 __releases(&pool->lock) 1851 __acquires(&pool->lock) 1852 { 1853 if (!need_to_create_worker(pool)) 1854 return false; 1855 restart: 1856 spin_unlock_irq(&pool->lock); 1857 1858 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ 1859 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); 1860 1861 while (true) { 1862 if (create_worker(pool) || !need_to_create_worker(pool)) 1863 break; 1864 1865 schedule_timeout_interruptible(CREATE_COOLDOWN); 1866 1867 if (!need_to_create_worker(pool)) 1868 break; 1869 } 1870 1871 del_timer_sync(&pool->mayday_timer); 1872 spin_lock_irq(&pool->lock); 1873 /* 1874 * This is necessary even after a new worker was just successfully 1875 * created as @pool->lock was dropped and the new worker might have 1876 * already become busy. 1877 */ 1878 if (need_to_create_worker(pool)) 1879 goto restart; 1880 return true; 1881 } 1882 1883 /** 1884 * manage_workers - manage worker pool 1885 * @worker: self 1886 * 1887 * Assume the manager role and manage the worker pool @worker belongs 1888 * to. At any given time, there can be only zero or one manager per 1889 * pool. The exclusion is handled automatically by this function. 1890 * 1891 * The caller can safely start processing works on false return. On 1892 * true return, it's guaranteed that need_to_create_worker() is false 1893 * and may_start_working() is true. 1894 * 1895 * CONTEXT: 1896 * spin_lock_irq(pool->lock) which may be released and regrabbed 1897 * multiple times. Does GFP_KERNEL allocations. 
1898 * 1899 * Return: 1900 * %false if the pool doesn't need management and the caller can safely start 1901 * processing works; %true indicates that the function released pool->lock 1902 * and reacquired it to perform some management function and that the 1903 * conditions that the caller verified while holding the lock before 1904 * calling the function might no longer be true. 1905 */ 1906 static bool manage_workers(struct worker *worker) 1907 { 1908 struct worker_pool *pool = worker->pool; 1909 bool ret = false; 1910 1911 /* 1912 * Anyone who successfully grabs manager_arb wins the arbitration 1913 * and becomes the manager. mutex_trylock() on pool->manager_arb 1914 * failure while holding pool->lock reliably indicates that someone 1915 * else is managing the pool and the worker which failed trylock 1916 * can proceed to executing work items. This means that anyone 1917 * grabbing manager_arb is responsible for actually performing 1918 * manager duties. If manager_arb is grabbed and released without 1919 * actual management, the pool may stall indefinitely. 1920 */ 1921 if (!mutex_trylock(&pool->manager_arb)) 1922 return ret; 1923 1924 ret |= maybe_create_worker(pool); 1925 1926 mutex_unlock(&pool->manager_arb); 1927 return ret; 1928 } 1929 1930 /** 1931 * process_one_work - process single work 1932 * @worker: self 1933 * @work: work to process 1934 * 1935 * Process @work. This function contains all the logic necessary to 1936 * process a single work including synchronization against and 1937 * interaction with other workers on the same cpu, queueing and 1938 * flushing. As long as the context requirement is met, any worker can 1939 * call this function to process a work. 1940 * 1941 * CONTEXT: 1942 * spin_lock_irq(pool->lock) which is released and regrabbed. 1943 */ 1944 static void process_one_work(struct worker *worker, struct work_struct *work) 1945 __releases(&pool->lock) 1946 __acquires(&pool->lock) 1947 { 1948 struct pool_workqueue *pwq = get_work_pwq(work); 1949 struct worker_pool *pool = worker->pool; 1950 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE; 1951 int work_color; 1952 struct worker *collision; 1953 #ifdef CONFIG_LOCKDEP 1954 /* 1955 * It is permissible to free the struct work_struct from 1956 * inside the function that is called from it; we need to 1957 * take this into account for lockdep too. To avoid bogus "held 1958 * lock freed" warnings as well as problems when looking into 1959 * work->lockdep_map, make a copy and use that here. 1960 */ 1961 struct lockdep_map lockdep_map; 1962 1963 lockdep_copy_map(&lockdep_map, &work->lockdep_map); 1964 #endif 1965 /* ensure we're on the correct CPU */ 1966 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && 1967 raw_smp_processor_id() != pool->cpu); 1968 1969 /* 1970 * A single work shouldn't be executed concurrently by 1971 * multiple workers on a single cpu. Check whether anyone is 1972 * already processing the work. If so, defer the work to the 1973 * currently executing one.
1974 */ 1975 collision = find_worker_executing_work(pool, work); 1976 if (unlikely(collision)) { 1977 move_linked_works(work, &collision->scheduled, NULL); 1978 return; 1979 } 1980 1981 /* claim and dequeue */ 1982 debug_work_deactivate(work); 1983 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); 1984 worker->current_work = work; 1985 worker->current_func = work->func; 1986 worker->current_pwq = pwq; 1987 work_color = get_work_color(work); 1988 1989 list_del_init(&work->entry); 1990 1991 /* 1992 * CPU intensive works don't participate in concurrency management. 1993 * They're the scheduler's responsibility. This takes @worker out 1994 * of concurrency management and the next code block will chain 1995 * execution of the pending work items. 1996 */ 1997 if (unlikely(cpu_intensive)) 1998 worker_set_flags(worker, WORKER_CPU_INTENSIVE); 1999 2000 /* 2001 * Wake up another worker if necessary. The condition is always 2002 * false for normal per-cpu workers since nr_running would always 2003 * be >= 1 at this point. This is used to chain execution of the 2004 * pending work items for WORKER_NOT_RUNNING workers such as the 2005 * UNBOUND and CPU_INTENSIVE ones. 2006 */ 2007 if (need_more_worker(pool)) 2008 wake_up_worker(pool); 2009 2010 /* 2011 * Record the last pool and clear PENDING which should be the last 2012 * update to @work. Also, do this inside @pool->lock so that 2013 * PENDING and queued state changes happen together while IRQ is 2014 * disabled. 2015 */ 2016 set_work_pool_and_clear_pending(work, pool->id); 2017 2018 spin_unlock_irq(&pool->lock); 2019 2020 lock_map_acquire_read(&pwq->wq->lockdep_map); 2021 lock_map_acquire(&lockdep_map); 2022 trace_workqueue_execute_start(work); 2023 worker->current_func(work); 2024 /* 2025 * While we must be careful to not use "work" after this, the trace 2026 * point will only record its address. 2027 */ 2028 trace_workqueue_execute_end(work); 2029 lock_map_release(&lockdep_map); 2030 lock_map_release(&pwq->wq->lockdep_map); 2031 2032 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 2033 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" 2034 " last function: %pf\n", 2035 current->comm, preempt_count(), task_pid_nr(current), 2036 worker->current_func); 2037 debug_show_held_locks(current); 2038 dump_stack(); 2039 } 2040 2041 /* 2042 * The following prevents a kworker from hogging CPU on !PREEMPT 2043 * kernels, where a requeueing work item waiting for something to 2044 * happen could deadlock with stop_machine as such work item could 2045 * indefinitely requeue itself while all other CPUs are trapped in 2046 * stop_machine. 2047 */ 2048 cond_resched(); 2049 2050 spin_lock_irq(&pool->lock); 2051 2052 /* clear cpu intensive status */ 2053 if (unlikely(cpu_intensive)) 2054 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 2055 2056 /* we're done with it, release */ 2057 hash_del(&worker->hentry); 2058 worker->current_work = NULL; 2059 worker->current_func = NULL; 2060 worker->current_pwq = NULL; 2061 worker->desc_valid = false; 2062 pwq_dec_nr_in_flight(pwq, work_color); 2063 } 2064 2065 /** 2066 * process_scheduled_works - process scheduled works 2067 * @worker: self 2068 * 2069 * Process all scheduled works. Please note that the scheduled list 2070 * may change while processing a work, so this function repeatedly 2071 * fetches a work from the top and executes it. 2072 * 2073 * CONTEXT: 2074 * spin_lock_irq(pool->lock) which may be released and regrabbed 2075 * multiple times. 
2076 */ 2077 static void process_scheduled_works(struct worker *worker) 2078 { 2079 while (!list_empty(&worker->scheduled)) { 2080 struct work_struct *work = list_first_entry(&worker->scheduled, 2081 struct work_struct, entry); 2082 process_one_work(worker, work); 2083 } 2084 } 2085 2086 /** 2087 * worker_thread - the worker thread function 2088 * @__worker: self 2089 * 2090 * The worker thread function. All workers belong to a worker_pool - 2091 * either a per-cpu one or dynamic unbound one. These workers process all 2092 * work items regardless of their specific target workqueue. The only 2093 * exception is work items which belong to workqueues with a rescuer which 2094 * will be explained in rescuer_thread(). 2095 * 2096 * Return: 0 2097 */ 2098 static int worker_thread(void *__worker) 2099 { 2100 struct worker *worker = __worker; 2101 struct worker_pool *pool = worker->pool; 2102 2103 /* tell the scheduler that this is a workqueue worker */ 2104 worker->task->flags |= PF_WQ_WORKER; 2105 woke_up: 2106 spin_lock_irq(&pool->lock); 2107 2108 /* am I supposed to die? */ 2109 if (unlikely(worker->flags & WORKER_DIE)) { 2110 spin_unlock_irq(&pool->lock); 2111 WARN_ON_ONCE(!list_empty(&worker->entry)); 2112 worker->task->flags &= ~PF_WQ_WORKER; 2113 2114 set_task_comm(worker->task, "kworker/dying"); 2115 ida_simple_remove(&pool->worker_ida, worker->id); 2116 worker_detach_from_pool(worker, pool); 2117 kfree(worker); 2118 return 0; 2119 } 2120 2121 worker_leave_idle(worker); 2122 recheck: 2123 /* no more worker necessary? */ 2124 if (!need_more_worker(pool)) 2125 goto sleep; 2126 2127 /* do we need to manage? */ 2128 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) 2129 goto recheck; 2130 2131 /* 2132 * ->scheduled list can only be filled while a worker is 2133 * preparing to process a work or actually processing it. 2134 * Make sure nobody diddled with it while I was sleeping. 2135 */ 2136 WARN_ON_ONCE(!list_empty(&worker->scheduled)); 2137 2138 /* 2139 * Finish PREP stage. We're guaranteed to have at least one idle 2140 * worker or that someone else has already assumed the manager 2141 * role. This is where @worker starts participating in concurrency 2142 * management if applicable and concurrency management is restored 2143 * after being rebound. See rebind_workers() for details. 2144 */ 2145 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); 2146 2147 do { 2148 struct work_struct *work = 2149 list_first_entry(&pool->worklist, 2150 struct work_struct, entry); 2151 2152 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { 2153 /* optimization path, not strictly necessary */ 2154 process_one_work(worker, work); 2155 if (unlikely(!list_empty(&worker->scheduled))) 2156 process_scheduled_works(worker); 2157 } else { 2158 move_linked_works(work, &worker->scheduled, NULL); 2159 process_scheduled_works(worker); 2160 } 2161 } while (keep_working(pool)); 2162 2163 worker_set_flags(worker, WORKER_PREP); 2164 sleep: 2165 /* 2166 * pool->lock is held and there's no work to process and no need to 2167 * manage, sleep. Workers are woken up only while holding 2168 * pool->lock or from local cpu, so setting the current state 2169 * before releasing pool->lock is enough to prevent losing any 2170 * event. 
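 *
 * Illustrative failure mode avoided here: if the task state were set
 * only after dropping pool->lock, a wake_up_worker() from another CPU
 * could slip in between the unlock and set_current_state(), and the
 * following schedule() would then put the worker to sleep with work
 * already pending.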
2171 */ 2172 worker_enter_idle(worker); 2173 __set_current_state(TASK_INTERRUPTIBLE); 2174 spin_unlock_irq(&pool->lock); 2175 schedule(); 2176 goto woke_up; 2177 } 2178 2179 /** 2180 * rescuer_thread - the rescuer thread function 2181 * @__rescuer: self 2182 * 2183 * Workqueue rescuer thread function. There's one rescuer for each 2184 * workqueue which has WQ_MEM_RECLAIM set. 2185 * 2186 * Regular work processing on a pool may block trying to create a new 2187 * worker, which uses a GFP_KERNEL allocation and has a slight chance of 2188 * developing into a deadlock if some works currently on the same queue 2189 * need to be processed to satisfy the GFP_KERNEL allocation. This is 2190 * the problem the rescuer solves. 2191 * 2192 * When such a condition is possible, the pool summons the rescuers of all 2193 * workqueues which have works queued on the pool and lets them process 2194 * those works so that forward progress can be guaranteed. 2195 * 2196 * This should happen rarely. 2197 * 2198 * Return: 0 2199 */ 2200 static int rescuer_thread(void *__rescuer) 2201 { 2202 struct worker *rescuer = __rescuer; 2203 struct workqueue_struct *wq = rescuer->rescue_wq; 2204 struct list_head *scheduled = &rescuer->scheduled; 2205 bool should_stop; 2206 2207 set_user_nice(current, RESCUER_NICE_LEVEL); 2208 2209 /* 2210 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it 2211 * doesn't participate in concurrency management. 2212 */ 2213 rescuer->task->flags |= PF_WQ_WORKER; 2214 repeat: 2215 set_current_state(TASK_INTERRUPTIBLE); 2216 2217 /* 2218 * By the time the rescuer is requested to stop, the workqueue 2219 * shouldn't have any work pending, but @wq->maydays may still have 2220 * pwq(s) queued. This can happen if non-rescuer workers consume 2221 * all the work items before the rescuer gets to them. Go through 2222 * @wq->maydays processing before acting on should_stop so that the 2223 * list is always empty on exit. 2224 */ 2225 should_stop = kthread_should_stop(); 2226 2227 /* see whether any pwq is asking for help */ 2228 spin_lock_irq(&wq_mayday_lock); 2229 2230 while (!list_empty(&wq->maydays)) { 2231 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, 2232 struct pool_workqueue, mayday_node); 2233 struct worker_pool *pool = pwq->pool; 2234 struct work_struct *work, *n; 2235 2236 __set_current_state(TASK_RUNNING); 2237 list_del_init(&pwq->mayday_node); 2238 2239 spin_unlock_irq(&wq_mayday_lock); 2240 2241 worker_attach_to_pool(rescuer, pool); 2242 2243 spin_lock_irq(&pool->lock); 2244 rescuer->pool = pool; 2245 2246 /* 2247 * Slurp in all works issued via this workqueue and 2248 * process'em. 2249 */ 2250 WARN_ON_ONCE(!list_empty(&rescuer->scheduled)); 2251 list_for_each_entry_safe(work, n, &pool->worklist, entry) 2252 if (get_work_pwq(work) == pwq) 2253 move_linked_works(work, scheduled, &n); 2254 2255 process_scheduled_works(rescuer); 2256 2257 /* 2258 * Put the reference grabbed by send_mayday(). @pool won't 2259 * go away while we're still attached to it. 2260 */ 2261 put_pwq(pwq); 2262 2263 /* 2264 * Leave this pool. If need_more_worker() is %true, notify a 2265 * regular worker; otherwise, we end up with 0 concurrency 2266 * and stall execution.
2267 */ 2268 if (need_more_worker(pool)) 2269 wake_up_worker(pool); 2270 2271 rescuer->pool = NULL; 2272 spin_unlock_irq(&pool->lock); 2273 2274 worker_detach_from_pool(rescuer, pool); 2275 2276 spin_lock_irq(&wq_mayday_lock); 2277 } 2278 2279 spin_unlock_irq(&wq_mayday_lock); 2280 2281 if (should_stop) { 2282 __set_current_state(TASK_RUNNING); 2283 rescuer->task->flags &= ~PF_WQ_WORKER; 2284 return 0; 2285 } 2286 2287 /* rescuers should never participate in concurrency management */ 2288 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); 2289 schedule(); 2290 goto repeat; 2291 } 2292 2293 struct wq_barrier { 2294 struct work_struct work; 2295 struct completion done; 2296 }; 2297 2298 static void wq_barrier_func(struct work_struct *work) 2299 { 2300 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); 2301 complete(&barr->done); 2302 } 2303 2304 /** 2305 * insert_wq_barrier - insert a barrier work 2306 * @pwq: pwq to insert barrier into 2307 * @barr: wq_barrier to insert 2308 * @target: target work to attach @barr to 2309 * @worker: worker currently executing @target, NULL if @target is not executing 2310 * 2311 * @barr is linked to @target such that @barr is completed only after 2312 * @target finishes execution. Please note that the ordering 2313 * guarantee is observed only with respect to @target and on the local 2314 * cpu. 2315 * 2316 * Currently, a queued barrier can't be canceled. This is because 2317 * try_to_grab_pending() can't determine whether the work to be 2318 * grabbed is at the head of the queue and thus can't clear LINKED 2319 * flag of the previous work while there must be a valid next work 2320 * after a work with LINKED flag set. 2321 * 2322 * Note that when @worker is non-NULL, @target may be modified 2323 * underneath us, so we can't reliably determine pwq from @target. 2324 * 2325 * CONTEXT: 2326 * spin_lock_irq(pool->lock). 2327 */ 2328 static void insert_wq_barrier(struct pool_workqueue *pwq, 2329 struct wq_barrier *barr, 2330 struct work_struct *target, struct worker *worker) 2331 { 2332 struct list_head *head; 2333 unsigned int linked = 0; 2334 2335 /* 2336 * debugobject calls are safe here even with pool->lock locked 2337 * as we know for sure that this will not trigger any of the 2338 * checks and call back into the fixup functions where we 2339 * might deadlock. 2340 */ 2341 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); 2342 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); 2343 init_completion(&barr->done); 2344 2345 /* 2346 * If @target is currently being executed, schedule the 2347 * barrier to the worker; otherwise, put it after @target. 2348 */ 2349 if (worker) 2350 head = worker->scheduled.next; 2351 else { 2352 unsigned long *bits = work_data_bits(target); 2353 2354 head = target->entry.next; 2355 /* there can already be other linked works, inherit and set */ 2356 linked = *bits & WORK_STRUCT_LINKED; 2357 __set_bit(WORK_STRUCT_LINKED_BIT, bits); 2358 } 2359 2360 debug_work_activate(&barr->work); 2361 insert_work(pwq, &barr->work, head, 2362 work_color_to_flags(WORK_NO_COLOR) | linked); 2363 } 2364 2365 /** 2366 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing 2367 * @wq: workqueue being flushed 2368 * @flush_color: new flush color, < 0 for no-op 2369 * @work_color: new work color, < 0 for no-op 2370 * 2371 * Prepare pwqs for workqueue flushing. 2372 * 2373 * If @flush_color is non-negative, flush_color on all pwqs should be 2374 * -1. 
If no pwq has in-flight commands at the specified color, all 2375 * pwq->flush_color's stay at -1 and %false is returned. If any pwq 2376 * has in flight commands, its pwq->flush_color is set to 2377 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq 2378 * wakeup logic is armed and %true is returned. 2379 * 2380 * The caller should have initialized @wq->first_flusher prior to 2381 * calling this function with non-negative @flush_color. If 2382 * @flush_color is negative, no flush color update is done and %false 2383 * is returned. 2384 * 2385 * If @work_color is non-negative, all pwqs should have the same 2386 * work_color which is previous to @work_color and all will be 2387 * advanced to @work_color. 2388 * 2389 * CONTEXT: 2390 * mutex_lock(wq->mutex). 2391 * 2392 * Return: 2393 * %true if @flush_color >= 0 and there's something to flush. %false 2394 * otherwise. 2395 */ 2396 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, 2397 int flush_color, int work_color) 2398 { 2399 bool wait = false; 2400 struct pool_workqueue *pwq; 2401 2402 if (flush_color >= 0) { 2403 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); 2404 atomic_set(&wq->nr_pwqs_to_flush, 1); 2405 } 2406 2407 for_each_pwq(pwq, wq) { 2408 struct worker_pool *pool = pwq->pool; 2409 2410 spin_lock_irq(&pool->lock); 2411 2412 if (flush_color >= 0) { 2413 WARN_ON_ONCE(pwq->flush_color != -1); 2414 2415 if (pwq->nr_in_flight[flush_color]) { 2416 pwq->flush_color = flush_color; 2417 atomic_inc(&wq->nr_pwqs_to_flush); 2418 wait = true; 2419 } 2420 } 2421 2422 if (work_color >= 0) { 2423 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); 2424 pwq->work_color = work_color; 2425 } 2426 2427 spin_unlock_irq(&pool->lock); 2428 } 2429 2430 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) 2431 complete(&wq->first_flusher->done); 2432 2433 return wait; 2434 } 2435 2436 /** 2437 * flush_workqueue - ensure that any scheduled work has run to completion. 2438 * @wq: workqueue to flush 2439 * 2440 * This function sleeps until all work items which were queued on entry 2441 * have finished execution, but it is not livelocked by new incoming ones. 2442 */ 2443 void flush_workqueue(struct workqueue_struct *wq) 2444 { 2445 struct wq_flusher this_flusher = { 2446 .list = LIST_HEAD_INIT(this_flusher.list), 2447 .flush_color = -1, 2448 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done), 2449 }; 2450 int next_color; 2451 2452 lock_map_acquire(&wq->lockdep_map); 2453 lock_map_release(&wq->lockdep_map); 2454 2455 mutex_lock(&wq->mutex); 2456 2457 /* 2458 * Start-to-wait phase 2459 */ 2460 next_color = work_next_color(wq->work_color); 2461 2462 if (next_color != wq->flush_color) { 2463 /* 2464 * Color space is not full. The current work_color 2465 * becomes our flush_color and work_color is advanced 2466 * by one. 
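 *
 * For example (illustrative), if wq->work_color is 3 and
 * wq->flush_color is 1, next_color becomes 4: this flusher will wait
 * for work items of color 3 to drain while newly queued work items
 * are assigned color 4.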
2467 */ 2468 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); 2469 this_flusher.flush_color = wq->work_color; 2470 wq->work_color = next_color; 2471 2472 if (!wq->first_flusher) { 2473 /* no flush in progress, become the first flusher */ 2474 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 2475 2476 wq->first_flusher = &this_flusher; 2477 2478 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, 2479 wq->work_color)) { 2480 /* nothing to flush, done */ 2481 wq->flush_color = next_color; 2482 wq->first_flusher = NULL; 2483 goto out_unlock; 2484 } 2485 } else { 2486 /* wait in queue */ 2487 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); 2488 list_add_tail(&this_flusher.list, &wq->flusher_queue); 2489 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 2490 } 2491 } else { 2492 /* 2493 * Oops, color space is full, wait on overflow queue. 2494 * The next flush completion will assign us 2495 * flush_color and transfer to flusher_queue. 2496 */ 2497 list_add_tail(&this_flusher.list, &wq->flusher_overflow); 2498 } 2499 2500 mutex_unlock(&wq->mutex); 2501 2502 wait_for_completion(&this_flusher.done); 2503 2504 /* 2505 * Wake-up-and-cascade phase 2506 * 2507 * First flushers are responsible for cascading flushes and 2508 * handling overflow. Non-first flushers can simply return. 2509 */ 2510 if (wq->first_flusher != &this_flusher) 2511 return; 2512 2513 mutex_lock(&wq->mutex); 2514 2515 /* we might have raced, check again with mutex held */ 2516 if (wq->first_flusher != &this_flusher) 2517 goto out_unlock; 2518 2519 wq->first_flusher = NULL; 2520 2521 WARN_ON_ONCE(!list_empty(&this_flusher.list)); 2522 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 2523 2524 while (true) { 2525 struct wq_flusher *next, *tmp; 2526 2527 /* complete all the flushers sharing the current flush color */ 2528 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 2529 if (next->flush_color != wq->flush_color) 2530 break; 2531 list_del_init(&next->list); 2532 complete(&next->done); 2533 } 2534 2535 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && 2536 wq->flush_color != work_next_color(wq->work_color)); 2537 2538 /* this flush_color is finished, advance by one */ 2539 wq->flush_color = work_next_color(wq->flush_color); 2540 2541 /* one color has been freed, handle overflow queue */ 2542 if (!list_empty(&wq->flusher_overflow)) { 2543 /* 2544 * Assign the same color to all overflowed 2545 * flushers, advance work_color and append to 2546 * flusher_queue. This is the start-to-wait 2547 * phase for these overflowed flushers. 2548 */ 2549 list_for_each_entry(tmp, &wq->flusher_overflow, list) 2550 tmp->flush_color = wq->work_color; 2551 2552 wq->work_color = work_next_color(wq->work_color); 2553 2554 list_splice_tail_init(&wq->flusher_overflow, 2555 &wq->flusher_queue); 2556 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 2557 } 2558 2559 if (list_empty(&wq->flusher_queue)) { 2560 WARN_ON_ONCE(wq->flush_color != wq->work_color); 2561 break; 2562 } 2563 2564 /* 2565 * Need to flush more colors. Make the next flusher 2566 * the new first flusher and arm pwqs. 2567 */ 2568 WARN_ON_ONCE(wq->flush_color == wq->work_color); 2569 WARN_ON_ONCE(wq->flush_color != next->flush_color); 2570 2571 list_del_init(&next->list); 2572 wq->first_flusher = next; 2573 2574 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) 2575 break; 2576 2577 /* 2578 * Meh... this color is already done, clear first 2579 * flusher and repeat cascading. 
2580 */ 2581 wq->first_flusher = NULL; 2582 } 2583 2584 out_unlock: 2585 mutex_unlock(&wq->mutex); 2586 } 2587 EXPORT_SYMBOL_GPL(flush_workqueue); 2588 2589 /** 2590 * drain_workqueue - drain a workqueue 2591 * @wq: workqueue to drain 2592 * 2593 * Wait until the workqueue becomes empty. While draining is in progress, 2594 * only chain queueing is allowed. IOW, only currently pending or running 2595 * work items on @wq can queue further work items on it. @wq is flushed 2596 * repeatedly until it becomes empty. The number of flushes is determined 2597 * by the depth of chaining and should be relatively short. Whine if it 2598 * takes too long. 2599 */ 2600 void drain_workqueue(struct workqueue_struct *wq) 2601 { 2602 unsigned int flush_cnt = 0; 2603 struct pool_workqueue *pwq; 2604 2605 /* 2606 * __queue_work() needs to test whether there are drainers; it is much 2607 * hotter than drain_workqueue() and already looks at @wq->flags. 2608 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers. 2609 */ 2610 mutex_lock(&wq->mutex); 2611 if (!wq->nr_drainers++) 2612 wq->flags |= __WQ_DRAINING; 2613 mutex_unlock(&wq->mutex); 2614 reflush: 2615 flush_workqueue(wq); 2616 2617 mutex_lock(&wq->mutex); 2618 2619 for_each_pwq(pwq, wq) { 2620 bool drained; 2621 2622 spin_lock_irq(&pwq->pool->lock); 2623 drained = !pwq->nr_active && list_empty(&pwq->delayed_works); 2624 spin_unlock_irq(&pwq->pool->lock); 2625 2626 if (drained) 2627 continue; 2628 2629 if (++flush_cnt == 10 || 2630 (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 2631 pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n", 2632 wq->name, flush_cnt); 2633 2634 mutex_unlock(&wq->mutex); 2635 goto reflush; 2636 } 2637 2638 if (!--wq->nr_drainers) 2639 wq->flags &= ~__WQ_DRAINING; 2640 mutex_unlock(&wq->mutex); 2641 } 2642 EXPORT_SYMBOL_GPL(drain_workqueue); 2643 2644 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) 2645 { 2646 struct worker *worker = NULL; 2647 struct worker_pool *pool; 2648 struct pool_workqueue *pwq; 2649 2650 might_sleep(); 2651 2652 local_irq_disable(); 2653 pool = get_work_pool(work); 2654 if (!pool) { 2655 local_irq_enable(); 2656 return false; 2657 } 2658 2659 spin_lock(&pool->lock); 2660 /* see the comment in try_to_grab_pending() with the same code */ 2661 pwq = get_work_pwq(work); 2662 if (pwq) { 2663 if (unlikely(pwq->pool != pool)) 2664 goto already_gone; 2665 } else { 2666 worker = find_worker_executing_work(pool, work); 2667 if (!worker) 2668 goto already_gone; 2669 pwq = worker->current_pwq; 2670 } 2671 2672 insert_wq_barrier(pwq, barr, work, worker); 2673 spin_unlock_irq(&pool->lock); 2674 2675 /* 2676 * If @max_active is 1 or rescuer is in use, flushing another work 2677 * item on the same workqueue may lead to deadlock. Make sure the 2678 * flusher is not running on the same workqueue by verifying write 2679 * access. 2680 */ 2681 if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) 2682 lock_map_acquire(&pwq->wq->lockdep_map); 2683 else 2684 lock_map_acquire_read(&pwq->wq->lockdep_map); 2685 lock_map_release(&pwq->wq->lockdep_map); 2686 2687 return true; 2688 already_gone: 2689 spin_unlock_irq(&pool->lock); 2690 return false; 2691 } 2692 2693 /** 2694 * flush_work - wait for a work to finish executing the last queueing instance 2695 * @work: the work to flush 2696 * 2697 * Wait until @work has finished execution. @work is guaranteed to be idle 2698 * on return if it hasn't been requeued since flush started.
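 *
 * For example (illustrative), a driver about to free an object that
 * embeds a work item might do:
 *
 *	flush_work(&obj->work);
 *	kfree(obj);
 *
 * which is only safe if nothing can requeue &obj->work after the flush;
 * when the work item may still be pending or may requeue itself, use
 * cancel_work_sync() instead.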
2699 * 2700 * Return: 2701 * %true if flush_work() waited for the work to finish execution, 2702 * %false if it was already idle. 2703 */ 2704 bool flush_work(struct work_struct *work) 2705 { 2706 struct wq_barrier barr; 2707 2708 lock_map_acquire(&work->lockdep_map); 2709 lock_map_release(&work->lockdep_map); 2710 2711 if (start_flush_work(work, &barr)) { 2712 wait_for_completion(&barr.done); 2713 destroy_work_on_stack(&barr.work); 2714 return true; 2715 } else { 2716 return false; 2717 } 2718 } 2719 EXPORT_SYMBOL_GPL(flush_work); 2720 2721 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) 2722 { 2723 unsigned long flags; 2724 int ret; 2725 2726 do { 2727 ret = try_to_grab_pending(work, is_dwork, &flags); 2728 /* 2729 * If someone else is canceling, wait for the same event it 2730 * would be waiting for before retrying. 2731 */ 2732 if (unlikely(ret == -ENOENT)) 2733 flush_work(work); 2734 } while (unlikely(ret < 0)); 2735 2736 /* tell other tasks trying to grab @work to back off */ 2737 mark_work_canceling(work); 2738 local_irq_restore(flags); 2739 2740 flush_work(work); 2741 clear_work_data(work); 2742 return ret; 2743 } 2744 2745 /** 2746 * cancel_work_sync - cancel a work and wait for it to finish 2747 * @work: the work to cancel 2748 * 2749 * Cancel @work and wait for its execution to finish. This function 2750 * can be used even if the work re-queues itself or migrates to 2751 * another workqueue. On return from this function, @work is 2752 * guaranteed to be not pending or executing on any CPU. 2753 * 2754 * cancel_work_sync(&delayed_work->work) must not be used for 2755 * delayed_work's. Use cancel_delayed_work_sync() instead. 2756 * 2757 * The caller must ensure that the workqueue on which @work was last 2758 * queued can't be destroyed before this function returns. 2759 * 2760 * Return: 2761 * %true if @work was pending, %false otherwise. 2762 */ 2763 bool cancel_work_sync(struct work_struct *work) 2764 { 2765 return __cancel_work_timer(work, false); 2766 } 2767 EXPORT_SYMBOL_GPL(cancel_work_sync); 2768 2769 /** 2770 * flush_delayed_work - wait for a dwork to finish executing the last queueing 2771 * @dwork: the delayed work to flush 2772 * 2773 * Delayed timer is cancelled and the pending work is queued for 2774 * immediate execution. Like flush_work(), this function only 2775 * considers the last queueing instance of @dwork. 2776 * 2777 * Return: 2778 * %true if flush_work() waited for the work to finish execution, 2779 * %false if it was already idle. 2780 */ 2781 bool flush_delayed_work(struct delayed_work *dwork) 2782 { 2783 local_irq_disable(); 2784 if (del_timer_sync(&dwork->timer)) 2785 __queue_work(dwork->cpu, dwork->wq, &dwork->work); 2786 local_irq_enable(); 2787 return flush_work(&dwork->work); 2788 } 2789 EXPORT_SYMBOL(flush_delayed_work); 2790 2791 /** 2792 * cancel_delayed_work - cancel a delayed work 2793 * @dwork: delayed_work to cancel 2794 * 2795 * Kill off a pending delayed_work. 2796 * 2797 * Return: %true if @dwork was pending and canceled; %false if it wasn't 2798 * pending. 2799 * 2800 * Note: 2801 * The work callback function may still be running on return, unless 2802 * it returns %true and the work doesn't re-arm itself. Explicitly flush or 2803 * use cancel_delayed_work_sync() to wait on it. 2804 * 2805 * This function is safe to call from any context including IRQ handler. 
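 *
 * For example (illustrative, @dev and its members are hypothetical), an
 * IRQ handler may push back a pending timeout with:
 *
 *	cancel_delayed_work(&dev->timeout_dwork);
 *	queue_delayed_work(system_wq, &dev->timeout_dwork, HZ);
 *
 * while final teardown should use cancel_delayed_work_sync() or an
 * explicit flush as noted above.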
2806 */ 2807 bool cancel_delayed_work(struct delayed_work *dwork) 2808 { 2809 unsigned long flags; 2810 int ret; 2811 2812 do { 2813 ret = try_to_grab_pending(&dwork->work, true, &flags); 2814 } while (unlikely(ret == -EAGAIN)); 2815 2816 if (unlikely(ret < 0)) 2817 return false; 2818 2819 set_work_pool_and_clear_pending(&dwork->work, 2820 get_work_pool_id(&dwork->work)); 2821 local_irq_restore(flags); 2822 return ret; 2823 } 2824 EXPORT_SYMBOL(cancel_delayed_work); 2825 2826 /** 2827 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish 2828 * @dwork: the delayed work cancel 2829 * 2830 * This is cancel_work_sync() for delayed works. 2831 * 2832 * Return: 2833 * %true if @dwork was pending, %false otherwise. 2834 */ 2835 bool cancel_delayed_work_sync(struct delayed_work *dwork) 2836 { 2837 return __cancel_work_timer(&dwork->work, true); 2838 } 2839 EXPORT_SYMBOL(cancel_delayed_work_sync); 2840 2841 /** 2842 * schedule_on_each_cpu - execute a function synchronously on each online CPU 2843 * @func: the function to call 2844 * 2845 * schedule_on_each_cpu() executes @func on each online CPU using the 2846 * system workqueue and blocks until all CPUs have completed. 2847 * schedule_on_each_cpu() is very slow. 2848 * 2849 * Return: 2850 * 0 on success, -errno on failure. 2851 */ 2852 int schedule_on_each_cpu(work_func_t func) 2853 { 2854 int cpu; 2855 struct work_struct __percpu *works; 2856 2857 works = alloc_percpu(struct work_struct); 2858 if (!works) 2859 return -ENOMEM; 2860 2861 get_online_cpus(); 2862 2863 for_each_online_cpu(cpu) { 2864 struct work_struct *work = per_cpu_ptr(works, cpu); 2865 2866 INIT_WORK(work, func); 2867 schedule_work_on(cpu, work); 2868 } 2869 2870 for_each_online_cpu(cpu) 2871 flush_work(per_cpu_ptr(works, cpu)); 2872 2873 put_online_cpus(); 2874 free_percpu(works); 2875 return 0; 2876 } 2877 2878 /** 2879 * flush_scheduled_work - ensure that any scheduled work has run to completion. 2880 * 2881 * Forces execution of the kernel-global workqueue and blocks until its 2882 * completion. 2883 * 2884 * Think twice before calling this function! It's very easy to get into 2885 * trouble if you don't take great care. Either of the following situations 2886 * will lead to deadlock: 2887 * 2888 * One of the work items currently on the workqueue needs to acquire 2889 * a lock held by your code or its caller. 2890 * 2891 * Your code is running in the context of a work routine. 2892 * 2893 * They will be detected by lockdep when they occur, but the first might not 2894 * occur very often. It depends on what work items are on the workqueue and 2895 * what locks they need, which you have no control over. 2896 * 2897 * In most situations flushing the entire workqueue is overkill; you merely 2898 * need to know that a particular work item isn't queued and isn't running. 2899 * In such cases you should use cancel_delayed_work_sync() or 2900 * cancel_work_sync() instead. 2901 */ 2902 void flush_scheduled_work(void) 2903 { 2904 flush_workqueue(system_wq); 2905 } 2906 EXPORT_SYMBOL(flush_scheduled_work); 2907 2908 /** 2909 * execute_in_process_context - reliably execute the routine with user context 2910 * @fn: the function to execute 2911 * @ew: guaranteed storage for the execute work structure (must 2912 * be available when the work executes) 2913 * 2914 * Executes the function immediately if process context is available, 2915 * otherwise schedules the function for delayed execution. 
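 *
 * A minimal illustrative sketch (struct my_obj and its fields are
 * hypothetical):
 *
 *	static void my_obj_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *
 *		kfree(obj);
 *	}
 *
 *	execute_in_process_context(my_obj_release, &obj->ew);
 *
 * runs my_obj_release() right away when called from process context and
 * defers it to the system workqueue when called from interrupt context.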
2916 * 2917 * Return: 0 - function was executed 2918 * 1 - function was scheduled for execution 2919 */ 2920 int execute_in_process_context(work_func_t fn, struct execute_work *ew) 2921 { 2922 if (!in_interrupt()) { 2923 fn(&ew->work); 2924 return 0; 2925 } 2926 2927 INIT_WORK(&ew->work, fn); 2928 schedule_work(&ew->work); 2929 2930 return 1; 2931 } 2932 EXPORT_SYMBOL_GPL(execute_in_process_context); 2933 2934 #ifdef CONFIG_SYSFS 2935 /* 2936 * Workqueues with WQ_SYSFS flag set is visible to userland via 2937 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the 2938 * following attributes. 2939 * 2940 * per_cpu RO bool : whether the workqueue is per-cpu or unbound 2941 * max_active RW int : maximum number of in-flight work items 2942 * 2943 * Unbound workqueues have the following extra attributes. 2944 * 2945 * id RO int : the associated pool ID 2946 * nice RW int : nice value of the workers 2947 * cpumask RW mask : bitmask of allowed CPUs for the workers 2948 */ 2949 struct wq_device { 2950 struct workqueue_struct *wq; 2951 struct device dev; 2952 }; 2953 2954 static struct workqueue_struct *dev_to_wq(struct device *dev) 2955 { 2956 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 2957 2958 return wq_dev->wq; 2959 } 2960 2961 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr, 2962 char *buf) 2963 { 2964 struct workqueue_struct *wq = dev_to_wq(dev); 2965 2966 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); 2967 } 2968 static DEVICE_ATTR_RO(per_cpu); 2969 2970 static ssize_t max_active_show(struct device *dev, 2971 struct device_attribute *attr, char *buf) 2972 { 2973 struct workqueue_struct *wq = dev_to_wq(dev); 2974 2975 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); 2976 } 2977 2978 static ssize_t max_active_store(struct device *dev, 2979 struct device_attribute *attr, const char *buf, 2980 size_t count) 2981 { 2982 struct workqueue_struct *wq = dev_to_wq(dev); 2983 int val; 2984 2985 if (sscanf(buf, "%d", &val) != 1 || val <= 0) 2986 return -EINVAL; 2987 2988 workqueue_set_max_active(wq, val); 2989 return count; 2990 } 2991 static DEVICE_ATTR_RW(max_active); 2992 2993 static struct attribute *wq_sysfs_attrs[] = { 2994 &dev_attr_per_cpu.attr, 2995 &dev_attr_max_active.attr, 2996 NULL, 2997 }; 2998 ATTRIBUTE_GROUPS(wq_sysfs); 2999 3000 static ssize_t wq_pool_ids_show(struct device *dev, 3001 struct device_attribute *attr, char *buf) 3002 { 3003 struct workqueue_struct *wq = dev_to_wq(dev); 3004 const char *delim = ""; 3005 int node, written = 0; 3006 3007 rcu_read_lock_sched(); 3008 for_each_node(node) { 3009 written += scnprintf(buf + written, PAGE_SIZE - written, 3010 "%s%d:%d", delim, node, 3011 unbound_pwq_by_node(wq, node)->pool->id); 3012 delim = " "; 3013 } 3014 written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); 3015 rcu_read_unlock_sched(); 3016 3017 return written; 3018 } 3019 3020 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr, 3021 char *buf) 3022 { 3023 struct workqueue_struct *wq = dev_to_wq(dev); 3024 int written; 3025 3026 mutex_lock(&wq->mutex); 3027 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); 3028 mutex_unlock(&wq->mutex); 3029 3030 return written; 3031 } 3032 3033 /* prepare workqueue_attrs for sysfs store operations */ 3034 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) 3035 { 3036 struct workqueue_attrs *attrs; 3037 3038 attrs = 
alloc_workqueue_attrs(GFP_KERNEL); 3039 if (!attrs) 3040 return NULL; 3041 3042 mutex_lock(&wq->mutex); 3043 copy_workqueue_attrs(attrs, wq->unbound_attrs); 3044 mutex_unlock(&wq->mutex); 3045 return attrs; 3046 } 3047 3048 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr, 3049 const char *buf, size_t count) 3050 { 3051 struct workqueue_struct *wq = dev_to_wq(dev); 3052 struct workqueue_attrs *attrs; 3053 int ret; 3054 3055 attrs = wq_sysfs_prep_attrs(wq); 3056 if (!attrs) 3057 return -ENOMEM; 3058 3059 if (sscanf(buf, "%d", &attrs->nice) == 1 && 3060 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE) 3061 ret = apply_workqueue_attrs(wq, attrs); 3062 else 3063 ret = -EINVAL; 3064 3065 free_workqueue_attrs(attrs); 3066 return ret ?: count; 3067 } 3068 3069 static ssize_t wq_cpumask_show(struct device *dev, 3070 struct device_attribute *attr, char *buf) 3071 { 3072 struct workqueue_struct *wq = dev_to_wq(dev); 3073 int written; 3074 3075 mutex_lock(&wq->mutex); 3076 written = cpumask_scnprintf(buf, PAGE_SIZE, wq->unbound_attrs->cpumask); 3077 mutex_unlock(&wq->mutex); 3078 3079 written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); 3080 return written; 3081 } 3082 3083 static ssize_t wq_cpumask_store(struct device *dev, 3084 struct device_attribute *attr, 3085 const char *buf, size_t count) 3086 { 3087 struct workqueue_struct *wq = dev_to_wq(dev); 3088 struct workqueue_attrs *attrs; 3089 int ret; 3090 3091 attrs = wq_sysfs_prep_attrs(wq); 3092 if (!attrs) 3093 return -ENOMEM; 3094 3095 ret = cpumask_parse(buf, attrs->cpumask); 3096 if (!ret) 3097 ret = apply_workqueue_attrs(wq, attrs); 3098 3099 free_workqueue_attrs(attrs); 3100 return ret ?: count; 3101 } 3102 3103 static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr, 3104 char *buf) 3105 { 3106 struct workqueue_struct *wq = dev_to_wq(dev); 3107 int written; 3108 3109 mutex_lock(&wq->mutex); 3110 written = scnprintf(buf, PAGE_SIZE, "%d\n", 3111 !wq->unbound_attrs->no_numa); 3112 mutex_unlock(&wq->mutex); 3113 3114 return written; 3115 } 3116 3117 static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr, 3118 const char *buf, size_t count) 3119 { 3120 struct workqueue_struct *wq = dev_to_wq(dev); 3121 struct workqueue_attrs *attrs; 3122 int v, ret; 3123 3124 attrs = wq_sysfs_prep_attrs(wq); 3125 if (!attrs) 3126 return -ENOMEM; 3127 3128 ret = -EINVAL; 3129 if (sscanf(buf, "%d", &v) == 1) { 3130 attrs->no_numa = !v; 3131 ret = apply_workqueue_attrs(wq, attrs); 3132 } 3133 3134 free_workqueue_attrs(attrs); 3135 return ret ?: count; 3136 } 3137 3138 static struct device_attribute wq_sysfs_unbound_attrs[] = { 3139 __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL), 3140 __ATTR(nice, 0644, wq_nice_show, wq_nice_store), 3141 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store), 3142 __ATTR(numa, 0644, wq_numa_show, wq_numa_store), 3143 __ATTR_NULL, 3144 }; 3145 3146 static struct bus_type wq_subsys = { 3147 .name = "workqueue", 3148 .dev_groups = wq_sysfs_groups, 3149 }; 3150 3151 static int __init wq_sysfs_init(void) 3152 { 3153 return subsys_virtual_register(&wq_subsys, NULL); 3154 } 3155 core_initcall(wq_sysfs_init); 3156 3157 static void wq_device_release(struct device *dev) 3158 { 3159 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 3160 3161 kfree(wq_dev); 3162 } 3163 3164 /** 3165 * workqueue_sysfs_register - make a workqueue visible in sysfs 3166 * @wq: the workqueue to register 3167 * 3168 * Expose @wq in sysfs under 
/sys/bus/workqueue/devices. 3169 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set, 3170 * which is the preferred method. 3171 * 3172 * A workqueue user should use this function directly iff it wants to apply 3173 * workqueue_attrs before making the workqueue visible in sysfs; otherwise, 3174 * apply_workqueue_attrs() may race against userland updating the 3175 * attributes. 3176 * 3177 * Return: 0 on success, -errno on failure. 3178 */ 3179 int workqueue_sysfs_register(struct workqueue_struct *wq) 3180 { 3181 struct wq_device *wq_dev; 3182 int ret; 3183 3184 /* 3185 * Adjusting max_active or creating new pwqs by applying 3186 * attributes breaks the ordering guarantee. Disallow exposing ordered 3187 * workqueues. 3188 */ 3189 if (WARN_ON(wq->flags & __WQ_ORDERED)) 3190 return -EINVAL; 3191 3192 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); 3193 if (!wq_dev) 3194 return -ENOMEM; 3195 3196 wq_dev->wq = wq; 3197 wq_dev->dev.bus = &wq_subsys; 3198 wq_dev->dev.init_name = wq->name; 3199 wq_dev->dev.release = wq_device_release; 3200 3201 /* 3202 * unbound_attrs are created separately. Suppress uevent until 3203 * everything is ready. 3204 */ 3205 dev_set_uevent_suppress(&wq_dev->dev, true); 3206 3207 ret = device_register(&wq_dev->dev); 3208 if (ret) { 3209 kfree(wq_dev); 3210 wq->wq_dev = NULL; 3211 return ret; 3212 } 3213 3214 if (wq->flags & WQ_UNBOUND) { 3215 struct device_attribute *attr; 3216 3217 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) { 3218 ret = device_create_file(&wq_dev->dev, attr); 3219 if (ret) { 3220 device_unregister(&wq_dev->dev); 3221 wq->wq_dev = NULL; 3222 return ret; 3223 } 3224 } 3225 } 3226 3227 dev_set_uevent_suppress(&wq_dev->dev, false); 3228 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); 3229 return 0; 3230 } 3231 3232 /** 3233 * workqueue_sysfs_unregister - undo workqueue_sysfs_register() 3234 * @wq: the workqueue to unregister 3235 * 3236 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister. 3237 */ 3238 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) 3239 { 3240 struct wq_device *wq_dev = wq->wq_dev; 3241 3242 if (!wq->wq_dev) 3243 return; 3244 3245 wq->wq_dev = NULL; 3246 device_unregister(&wq_dev->dev); 3247 } 3248 #else /* CONFIG_SYSFS */ 3249 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { } 3250 #endif /* CONFIG_SYSFS */ 3251 3252 /** 3253 * free_workqueue_attrs - free a workqueue_attrs 3254 * @attrs: workqueue_attrs to free 3255 * 3256 * Undo alloc_workqueue_attrs(). 3257 */ 3258 void free_workqueue_attrs(struct workqueue_attrs *attrs) 3259 { 3260 if (attrs) { 3261 free_cpumask_var(attrs->cpumask); 3262 kfree(attrs); 3263 } 3264 } 3265 3266 /** 3267 * alloc_workqueue_attrs - allocate a workqueue_attrs 3268 * @gfp_mask: allocation mask to use 3269 * 3270 * Allocate a new workqueue_attrs, initialize it with default settings and 3271 * return it. 3272 * 3273 * Return: The allocated new workqueue_attrs on success. %NULL on failure.
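 *
 * A typical illustrative use, pairing with apply_workqueue_attrs() and
 * free_workqueue_attrs() ("my_wq" is a hypothetical unbound workqueue):
 *
 *	struct workqueue_attrs *attrs;
 *	int ret;
 *
 *	attrs = alloc_workqueue_attrs(GFP_KERNEL);
 *	if (!attrs)
 *		return -ENOMEM;
 *	attrs->nice = -10;
 *	cpumask_copy(attrs->cpumask, cpumask_of(0));
 *	ret = apply_workqueue_attrs(my_wq, attrs);
 *	free_workqueue_attrs(attrs);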
3274 */ 3275 struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask) 3276 { 3277 struct workqueue_attrs *attrs; 3278 3279 attrs = kzalloc(sizeof(*attrs), gfp_mask); 3280 if (!attrs) 3281 goto fail; 3282 if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask)) 3283 goto fail; 3284 3285 cpumask_copy(attrs->cpumask, cpu_possible_mask); 3286 return attrs; 3287 fail: 3288 free_workqueue_attrs(attrs); 3289 return NULL; 3290 } 3291 3292 static void copy_workqueue_attrs(struct workqueue_attrs *to, 3293 const struct workqueue_attrs *from) 3294 { 3295 to->nice = from->nice; 3296 cpumask_copy(to->cpumask, from->cpumask); 3297 /* 3298 * Unlike hash and equality test, this function doesn't ignore 3299 * ->no_numa as it is used for both pool and wq attrs. Instead, 3300 * get_unbound_pool() explicitly clears ->no_numa after copying. 3301 */ 3302 to->no_numa = from->no_numa; 3303 } 3304 3305 /* hash value of the content of @attrs */ 3306 static u32 wqattrs_hash(const struct workqueue_attrs *attrs) 3307 { 3308 u32 hash = 0; 3309 3310 hash = jhash_1word(attrs->nice, hash); 3311 hash = jhash(cpumask_bits(attrs->cpumask), 3312 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 3313 return hash; 3314 } 3315 3316 /* content equality test */ 3317 static bool wqattrs_equal(const struct workqueue_attrs *a, 3318 const struct workqueue_attrs *b) 3319 { 3320 if (a->nice != b->nice) 3321 return false; 3322 if (!cpumask_equal(a->cpumask, b->cpumask)) 3323 return false; 3324 return true; 3325 } 3326 3327 /** 3328 * init_worker_pool - initialize a newly zalloc'd worker_pool 3329 * @pool: worker_pool to initialize 3330 * 3331 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs. 3332 * 3333 * Return: 0 on success, -errno on failure. Even on failure, all fields 3334 * inside @pool proper are initialized and put_unbound_pool() can be called 3335 * on @pool safely to release it. 3336 */ 3337 static int init_worker_pool(struct worker_pool *pool) 3338 { 3339 spin_lock_init(&pool->lock); 3340 pool->id = -1; 3341 pool->cpu = -1; 3342 pool->node = NUMA_NO_NODE; 3343 pool->flags |= POOL_DISASSOCIATED; 3344 INIT_LIST_HEAD(&pool->worklist); 3345 INIT_LIST_HEAD(&pool->idle_list); 3346 hash_init(pool->busy_hash); 3347 3348 init_timer_deferrable(&pool->idle_timer); 3349 pool->idle_timer.function = idle_worker_timeout; 3350 pool->idle_timer.data = (unsigned long)pool; 3351 3352 setup_timer(&pool->mayday_timer, pool_mayday_timeout, 3353 (unsigned long)pool); 3354 3355 mutex_init(&pool->manager_arb); 3356 mutex_init(&pool->attach_mutex); 3357 INIT_LIST_HEAD(&pool->workers); 3358 3359 ida_init(&pool->worker_ida); 3360 INIT_HLIST_NODE(&pool->hash_node); 3361 pool->refcnt = 1; 3362 3363 /* shouldn't fail above this point */ 3364 pool->attrs = alloc_workqueue_attrs(GFP_KERNEL); 3365 if (!pool->attrs) 3366 return -ENOMEM; 3367 return 0; 3368 } 3369 3370 static void rcu_free_pool(struct rcu_head *rcu) 3371 { 3372 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); 3373 3374 ida_destroy(&pool->worker_ida); 3375 free_workqueue_attrs(pool->attrs); 3376 kfree(pool); 3377 } 3378 3379 /** 3380 * put_unbound_pool - put a worker_pool 3381 * @pool: worker_pool to put 3382 * 3383 * Put @pool. If its refcnt reaches zero, it gets destroyed in a sched-RCU 3384 * safe manner. get_unbound_pool() calls this function on its failure path 3385 * and this function should be able to release pools which have gone 3386 * through init_worker_pool(), successfully or not. 3387 * 3388 * Should be called with wq_pool_mutex held.
3389 */ 3390 static void put_unbound_pool(struct worker_pool *pool) 3391 { 3392 DECLARE_COMPLETION_ONSTACK(detach_completion); 3393 struct worker *worker; 3394 3395 lockdep_assert_held(&wq_pool_mutex); 3396 3397 if (--pool->refcnt) 3398 return; 3399 3400 /* sanity checks */ 3401 if (WARN_ON(!(pool->cpu < 0)) || 3402 WARN_ON(!list_empty(&pool->worklist))) 3403 return; 3404 3405 /* release id and unhash */ 3406 if (pool->id >= 0) 3407 idr_remove(&worker_pool_idr, pool->id); 3408 hash_del(&pool->hash_node); 3409 3410 /* 3411 * Become the manager and destroy all workers. Grabbing 3412 * manager_arb prevents @pool's workers from blocking on 3413 * attach_mutex. 3414 */ 3415 mutex_lock(&pool->manager_arb); 3416 3417 spin_lock_irq(&pool->lock); 3418 while ((worker = first_idle_worker(pool))) 3419 destroy_worker(worker); 3420 WARN_ON(pool->nr_workers || pool->nr_idle); 3421 spin_unlock_irq(&pool->lock); 3422 3423 mutex_lock(&pool->attach_mutex); 3424 if (!list_empty(&pool->workers)) 3425 pool->detach_completion = &detach_completion; 3426 mutex_unlock(&pool->attach_mutex); 3427 3428 if (pool->detach_completion) 3429 wait_for_completion(pool->detach_completion); 3430 3431 mutex_unlock(&pool->manager_arb); 3432 3433 /* shut down the timers */ 3434 del_timer_sync(&pool->idle_timer); 3435 del_timer_sync(&pool->mayday_timer); 3436 3437 /* sched-RCU protected to allow dereferences from get_work_pool() */ 3438 call_rcu_sched(&pool->rcu, rcu_free_pool); 3439 } 3440 3441 /** 3442 * get_unbound_pool - get a worker_pool with the specified attributes 3443 * @attrs: the attributes of the worker_pool to get 3444 * 3445 * Obtain a worker_pool which has the same attributes as @attrs, bump the 3446 * reference count and return it. If there already is a matching 3447 * worker_pool, it will be used; otherwise, this function attempts to 3448 * create a new one. 3449 * 3450 * Should be called with wq_pool_mutex held. 3451 * 3452 * Return: On success, a worker_pool with the same attributes as @attrs. 3453 * On failure, %NULL. 3454 */ 3455 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) 3456 { 3457 u32 hash = wqattrs_hash(attrs); 3458 struct worker_pool *pool; 3459 int node; 3460 3461 lockdep_assert_held(&wq_pool_mutex); 3462 3463 /* do we already have a matching pool? */ 3464 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { 3465 if (wqattrs_equal(pool->attrs, attrs)) { 3466 pool->refcnt++; 3467 return pool; 3468 } 3469 } 3470 3471 /* nope, create a new one */ 3472 pool = kzalloc(sizeof(*pool), GFP_KERNEL); 3473 if (!pool || init_worker_pool(pool) < 0) 3474 goto fail; 3475 3476 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */ 3477 copy_workqueue_attrs(pool->attrs, attrs); 3478 3479 /* 3480 * no_numa isn't a worker_pool attribute, always clear it. See 3481 * 'struct workqueue_attrs' comments for detail. 
3482 */ 3483 pool->attrs->no_numa = false; 3484 3485 /* if cpumask is contained inside a NUMA node, we belong to that node */ 3486 if (wq_numa_enabled) { 3487 for_each_node(node) { 3488 if (cpumask_subset(pool->attrs->cpumask, 3489 wq_numa_possible_cpumask[node])) { 3490 pool->node = node; 3491 break; 3492 } 3493 } 3494 } 3495 3496 if (worker_pool_assign_id(pool) < 0) 3497 goto fail; 3498 3499 /* create and start the initial worker */ 3500 if (!create_worker(pool)) 3501 goto fail; 3502 3503 /* install */ 3504 hash_add(unbound_pool_hash, &pool->hash_node, hash); 3505 3506 return pool; 3507 fail: 3508 if (pool) 3509 put_unbound_pool(pool); 3510 return NULL; 3511 } 3512 3513 static void rcu_free_pwq(struct rcu_head *rcu) 3514 { 3515 kmem_cache_free(pwq_cache, 3516 container_of(rcu, struct pool_workqueue, rcu)); 3517 } 3518 3519 /* 3520 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt 3521 * and needs to be destroyed. 3522 */ 3523 static void pwq_unbound_release_workfn(struct work_struct *work) 3524 { 3525 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, 3526 unbound_release_work); 3527 struct workqueue_struct *wq = pwq->wq; 3528 struct worker_pool *pool = pwq->pool; 3529 bool is_last; 3530 3531 if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND))) 3532 return; 3533 3534 mutex_lock(&wq->mutex); 3535 list_del_rcu(&pwq->pwqs_node); 3536 is_last = list_empty(&wq->pwqs); 3537 mutex_unlock(&wq->mutex); 3538 3539 mutex_lock(&wq_pool_mutex); 3540 put_unbound_pool(pool); 3541 mutex_unlock(&wq_pool_mutex); 3542 3543 call_rcu_sched(&pwq->rcu, rcu_free_pwq); 3544 3545 /* 3546 * If we're the last pwq going away, @wq is already dead and no one 3547 * is gonna access it anymore. Free it. 3548 */ 3549 if (is_last) { 3550 free_workqueue_attrs(wq->unbound_attrs); 3551 kfree(wq); 3552 } 3553 } 3554 3555 /** 3556 * pwq_adjust_max_active - update a pwq's max_active to the current setting 3557 * @pwq: target pool_workqueue 3558 * 3559 * If @pwq isn't freezing, set @pwq->max_active to the associated 3560 * workqueue's saved_max_active and activate delayed work items 3561 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero. 3562 */ 3563 static void pwq_adjust_max_active(struct pool_workqueue *pwq) 3564 { 3565 struct workqueue_struct *wq = pwq->wq; 3566 bool freezable = wq->flags & WQ_FREEZABLE; 3567 3568 /* for @wq->saved_max_active */ 3569 lockdep_assert_held(&wq->mutex); 3570 3571 /* fast exit for non-freezable wqs */ 3572 if (!freezable && pwq->max_active == wq->saved_max_active) 3573 return; 3574 3575 spin_lock_irq(&pwq->pool->lock); 3576 3577 /* 3578 * During [un]freezing, the caller is responsible for ensuring that 3579 * this function is called at least once after @workqueue_freezing 3580 * is updated and visible. 3581 */ 3582 if (!freezable || !workqueue_freezing) { 3583 pwq->max_active = wq->saved_max_active; 3584 3585 while (!list_empty(&pwq->delayed_works) && 3586 pwq->nr_active < pwq->max_active) 3587 pwq_activate_first_delayed(pwq); 3588 3589 /* 3590 * Need to kick a worker after thawed or an unbound wq's 3591 * max_active is bumped. It's a slow path. Do it always. 
3592 */ 3593 wake_up_worker(pwq->pool); 3594 } else { 3595 pwq->max_active = 0; 3596 } 3597 3598 spin_unlock_irq(&pwq->pool->lock); 3599 } 3600 3601 /* initialize newly alloced @pwq which is associated with @wq and @pool */ 3602 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, 3603 struct worker_pool *pool) 3604 { 3605 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); 3606 3607 memset(pwq, 0, sizeof(*pwq)); 3608 3609 pwq->pool = pool; 3610 pwq->wq = wq; 3611 pwq->flush_color = -1; 3612 pwq->refcnt = 1; 3613 INIT_LIST_HEAD(&pwq->delayed_works); 3614 INIT_LIST_HEAD(&pwq->pwqs_node); 3615 INIT_LIST_HEAD(&pwq->mayday_node); 3616 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn); 3617 } 3618 3619 /* sync @pwq with the current state of its associated wq and link it */ 3620 static void link_pwq(struct pool_workqueue *pwq) 3621 { 3622 struct workqueue_struct *wq = pwq->wq; 3623 3624 lockdep_assert_held(&wq->mutex); 3625 3626 /* may be called multiple times, ignore if already linked */ 3627 if (!list_empty(&pwq->pwqs_node)) 3628 return; 3629 3630 /* set the matching work_color */ 3631 pwq->work_color = wq->work_color; 3632 3633 /* sync max_active to the current setting */ 3634 pwq_adjust_max_active(pwq); 3635 3636 /* link in @pwq */ 3637 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); 3638 } 3639 3640 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */ 3641 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, 3642 const struct workqueue_attrs *attrs) 3643 { 3644 struct worker_pool *pool; 3645 struct pool_workqueue *pwq; 3646 3647 lockdep_assert_held(&wq_pool_mutex); 3648 3649 pool = get_unbound_pool(attrs); 3650 if (!pool) 3651 return NULL; 3652 3653 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); 3654 if (!pwq) { 3655 put_unbound_pool(pool); 3656 return NULL; 3657 } 3658 3659 init_pwq(pwq, wq, pool); 3660 return pwq; 3661 } 3662 3663 /* undo alloc_unbound_pwq(), used only in the error path */ 3664 static void free_unbound_pwq(struct pool_workqueue *pwq) 3665 { 3666 lockdep_assert_held(&wq_pool_mutex); 3667 3668 if (pwq) { 3669 put_unbound_pool(pwq->pool); 3670 kmem_cache_free(pwq_cache, pwq); 3671 } 3672 } 3673 3674 /** 3675 * wq_calc_node_mask - calculate a wq_attrs' cpumask for the specified node 3676 * @attrs: the wq_attrs of interest 3677 * @node: the target NUMA node 3678 * @cpu_going_down: if >= 0, the CPU to consider as offline 3679 * @cpumask: outarg, the resulting cpumask 3680 * 3681 * Calculate the cpumask a workqueue with @attrs should use on @node. If 3682 * @cpu_going_down is >= 0, that cpu is considered offline during 3683 * calculation. The result is stored in @cpumask. 3684 * 3685 * If NUMA affinity is not enabled, @attrs->cpumask is always used. If 3686 * enabled and @node has online CPUs requested by @attrs, the returned 3687 * cpumask is the intersection of the possible CPUs of @node and 3688 * @attrs->cpumask. 3689 * 3690 * The caller is responsible for ensuring that the cpumask of @node stays 3691 * stable. 3692 * 3693 * Return: %true if the resulting @cpumask is different from @attrs->cpumask, 3694 * %false if equal. 3695 */ 3696 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node, 3697 int cpu_going_down, cpumask_t *cpumask) 3698 { 3699 if (!wq_numa_enabled || attrs->no_numa) 3700 goto use_dfl; 3701 3702 /* does @node have any online CPUs @attrs wants? 
*/ 3703 cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask); 3704 if (cpu_going_down >= 0) 3705 cpumask_clear_cpu(cpu_going_down, cpumask); 3706 3707 if (cpumask_empty(cpumask)) 3708 goto use_dfl; 3709 3710 /* yeap, return possible CPUs in @node that @attrs wants */ 3711 cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]); 3712 return !cpumask_equal(cpumask, attrs->cpumask); 3713 3714 use_dfl: 3715 cpumask_copy(cpumask, attrs->cpumask); 3716 return false; 3717 } 3718 3719 /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */ 3720 static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq, 3721 int node, 3722 struct pool_workqueue *pwq) 3723 { 3724 struct pool_workqueue *old_pwq; 3725 3726 lockdep_assert_held(&wq->mutex); 3727 3728 /* link_pwq() can handle duplicate calls */ 3729 link_pwq(pwq); 3730 3731 old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); 3732 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq); 3733 return old_pwq; 3734 } 3735 3736 /** 3737 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue 3738 * @wq: the target workqueue 3739 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs() 3740 * 3741 * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA 3742 * machines, this function maps a separate pwq to each NUMA node with 3743 * possible CPUs in @attrs->cpumask so that work items are affine to the 3744 * NUMA node they were issued on. Older pwqs are released as in-flight work 3745 * items finish. Note that a work item which repeatedly requeues itself 3746 * back-to-back will stay on its current pwq. 3747 * 3748 * Performs GFP_KERNEL allocations. 3749 * 3750 * Return: 0 on success and -errno on failure. 3751 */ 3752 int apply_workqueue_attrs(struct workqueue_struct *wq, 3753 const struct workqueue_attrs *attrs) 3754 { 3755 struct workqueue_attrs *new_attrs, *tmp_attrs; 3756 struct pool_workqueue **pwq_tbl, *dfl_pwq; 3757 int node, ret; 3758 3759 /* only unbound workqueues can change attributes */ 3760 if (WARN_ON(!(wq->flags & WQ_UNBOUND))) 3761 return -EINVAL; 3762 3763 /* creating multiple pwqs breaks ordering guarantee */ 3764 if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))) 3765 return -EINVAL; 3766 3767 pwq_tbl = kzalloc(nr_node_ids * sizeof(pwq_tbl[0]), GFP_KERNEL); 3768 new_attrs = alloc_workqueue_attrs(GFP_KERNEL); 3769 tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL); 3770 if (!pwq_tbl || !new_attrs || !tmp_attrs) 3771 goto enomem; 3772 3773 /* make a copy of @attrs and sanitize it */ 3774 copy_workqueue_attrs(new_attrs, attrs); 3775 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask); 3776 3777 /* 3778 * We may create multiple pwqs with differing cpumasks. Make a 3779 * copy of @new_attrs which will be modified and used to obtain 3780 * pools. 3781 */ 3782 copy_workqueue_attrs(tmp_attrs, new_attrs); 3783 3784 /* 3785 * CPUs should stay stable across pwq creations and installations. 3786 * Pin CPUs, determine the target cpumask for each node and create 3787 * pwqs accordingly. 3788 */ 3789 get_online_cpus(); 3790 3791 mutex_lock(&wq_pool_mutex); 3792 3793 /* 3794 * If something goes wrong during CPU up/down, we'll fall back to 3795 * the default pwq covering the whole @attrs->cpumask. Always create 3796 * it even if we don't use it immediately.
3797 */ 3798 dfl_pwq = alloc_unbound_pwq(wq, new_attrs); 3799 if (!dfl_pwq) 3800 goto enomem_pwq; 3801 3802 for_each_node(node) { 3803 if (wq_calc_node_cpumask(attrs, node, -1, tmp_attrs->cpumask)) { 3804 pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs); 3805 if (!pwq_tbl[node]) 3806 goto enomem_pwq; 3807 } else { 3808 dfl_pwq->refcnt++; 3809 pwq_tbl[node] = dfl_pwq; 3810 } 3811 } 3812 3813 mutex_unlock(&wq_pool_mutex); 3814 3815 /* all pwqs have been created successfully, let's install'em */ 3816 mutex_lock(&wq->mutex); 3817 3818 copy_workqueue_attrs(wq->unbound_attrs, new_attrs); 3819 3820 /* save the previous pwq and install the new one */ 3821 for_each_node(node) 3822 pwq_tbl[node] = numa_pwq_tbl_install(wq, node, pwq_tbl[node]); 3823 3824 /* @dfl_pwq might not have been used, ensure it's linked */ 3825 link_pwq(dfl_pwq); 3826 swap(wq->dfl_pwq, dfl_pwq); 3827 3828 mutex_unlock(&wq->mutex); 3829 3830 /* put the old pwqs */ 3831 for_each_node(node) 3832 put_pwq_unlocked(pwq_tbl[node]); 3833 put_pwq_unlocked(dfl_pwq); 3834 3835 put_online_cpus(); 3836 ret = 0; 3837 /* fall through */ 3838 out_free: 3839 free_workqueue_attrs(tmp_attrs); 3840 free_workqueue_attrs(new_attrs); 3841 kfree(pwq_tbl); 3842 return ret; 3843 3844 enomem_pwq: 3845 free_unbound_pwq(dfl_pwq); 3846 for_each_node(node) 3847 if (pwq_tbl && pwq_tbl[node] != dfl_pwq) 3848 free_unbound_pwq(pwq_tbl[node]); 3849 mutex_unlock(&wq_pool_mutex); 3850 put_online_cpus(); 3851 enomem: 3852 ret = -ENOMEM; 3853 goto out_free; 3854 } 3855 3856 /** 3857 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug 3858 * @wq: the target workqueue 3859 * @cpu: the CPU coming up or going down 3860 * @online: whether @cpu is coming up or going down 3861 * 3862 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and 3863 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update NUMA affinity of 3864 * @wq accordingly. 3865 * 3866 * If NUMA affinity can't be adjusted due to memory allocation failure, it 3867 * falls back to @wq->dfl_pwq which may not be optimal but is always 3868 * correct. 3869 * 3870 * Note that when the last allowed CPU of a NUMA node goes offline for a 3871 * workqueue with a cpumask spanning multiple nodes, the workers which were 3872 * already executing the work items for the workqueue will lose their CPU 3873 * affinity and may execute on any CPU. This is similar to how per-cpu 3874 * workqueues behave on CPU_DOWN. If a workqueue user wants strict 3875 * affinity, it's the user's responsibility to flush the work item from 3876 * CPU_DOWN_PREPARE. 3877 */ 3878 static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu, 3879 bool online) 3880 { 3881 int node = cpu_to_node(cpu); 3882 int cpu_off = online ? -1 : cpu; 3883 struct pool_workqueue *old_pwq = NULL, *pwq; 3884 struct workqueue_attrs *target_attrs; 3885 cpumask_t *cpumask; 3886 3887 lockdep_assert_held(&wq_pool_mutex); 3888 3889 if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND)) 3890 return; 3891 3892 /* 3893 * We don't wanna alloc/free wq_attrs for each wq for each CPU. 3894 * Let's use a preallocated one. The following buf is protected by 3895 * CPU hotplug exclusion. 
3896 */ 3897 target_attrs = wq_update_unbound_numa_attrs_buf; 3898 cpumask = target_attrs->cpumask; 3899 3900 mutex_lock(&wq->mutex); 3901 if (wq->unbound_attrs->no_numa) 3902 goto out_unlock; 3903 3904 copy_workqueue_attrs(target_attrs, wq->unbound_attrs); 3905 pwq = unbound_pwq_by_node(wq, node); 3906 3907 /* 3908 * Let's determine what needs to be done. If the target cpumask is 3909 * different from wq's, we need to compare it to @pwq's and create 3910 * a new one if they don't match. If the target cpumask equals 3911 * wq's, the default pwq should be used. 3912 */ 3913 if (wq_calc_node_cpumask(wq->unbound_attrs, node, cpu_off, cpumask)) { 3914 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) 3915 goto out_unlock; 3916 } else { 3917 goto use_dfl_pwq; 3918 } 3919 3920 mutex_unlock(&wq->mutex); 3921 3922 /* create a new pwq */ 3923 pwq = alloc_unbound_pwq(wq, target_attrs); 3924 if (!pwq) { 3925 pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n", 3926 wq->name); 3927 mutex_lock(&wq->mutex); 3928 goto use_dfl_pwq; 3929 } 3930 3931 /* 3932 * Install the new pwq. As this function is called only from CPU 3933 * hotplug callbacks and applying a new attrs is wrapped with 3934 * get/put_online_cpus(), @wq->unbound_attrs couldn't have changed 3935 * inbetween. 3936 */ 3937 mutex_lock(&wq->mutex); 3938 old_pwq = numa_pwq_tbl_install(wq, node, pwq); 3939 goto out_unlock; 3940 3941 use_dfl_pwq: 3942 spin_lock_irq(&wq->dfl_pwq->pool->lock); 3943 get_pwq(wq->dfl_pwq); 3944 spin_unlock_irq(&wq->dfl_pwq->pool->lock); 3945 old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq); 3946 out_unlock: 3947 mutex_unlock(&wq->mutex); 3948 put_pwq_unlocked(old_pwq); 3949 } 3950 3951 static int alloc_and_link_pwqs(struct workqueue_struct *wq) 3952 { 3953 bool highpri = wq->flags & WQ_HIGHPRI; 3954 int cpu, ret; 3955 3956 if (!(wq->flags & WQ_UNBOUND)) { 3957 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue); 3958 if (!wq->cpu_pwqs) 3959 return -ENOMEM; 3960 3961 for_each_possible_cpu(cpu) { 3962 struct pool_workqueue *pwq = 3963 per_cpu_ptr(wq->cpu_pwqs, cpu); 3964 struct worker_pool *cpu_pools = 3965 per_cpu(cpu_worker_pools, cpu); 3966 3967 init_pwq(pwq, wq, &cpu_pools[highpri]); 3968 3969 mutex_lock(&wq->mutex); 3970 link_pwq(pwq); 3971 mutex_unlock(&wq->mutex); 3972 } 3973 return 0; 3974 } else if (wq->flags & __WQ_ORDERED) { 3975 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); 3976 /* there should only be single pwq for ordering guarantee */ 3977 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node || 3978 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node), 3979 "ordering guarantee broken for workqueue %s\n", wq->name); 3980 return ret; 3981 } else { 3982 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); 3983 } 3984 } 3985 3986 static int wq_clamp_max_active(int max_active, unsigned int flags, 3987 const char *name) 3988 { 3989 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; 3990 3991 if (max_active < 1 || max_active > lim) 3992 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n", 3993 max_active, name, 1, lim); 3994 3995 return clamp_val(max_active, 1, lim); 3996 } 3997 3998 struct workqueue_struct *__alloc_workqueue_key(const char *fmt, 3999 unsigned int flags, 4000 int max_active, 4001 struct lock_class_key *key, 4002 const char *lock_name, ...) 
4003 { 4004 size_t tbl_size = 0; 4005 va_list args; 4006 struct workqueue_struct *wq; 4007 struct pool_workqueue *pwq; 4008 4009 /* see the comment above the definition of WQ_POWER_EFFICIENT */ 4010 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) 4011 flags |= WQ_UNBOUND; 4012 4013 /* allocate wq and format name */ 4014 if (flags & WQ_UNBOUND) 4015 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]); 4016 4017 wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL); 4018 if (!wq) 4019 return NULL; 4020 4021 if (flags & WQ_UNBOUND) { 4022 wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL); 4023 if (!wq->unbound_attrs) 4024 goto err_free_wq; 4025 } 4026 4027 va_start(args, lock_name); 4028 vsnprintf(wq->name, sizeof(wq->name), fmt, args); 4029 va_end(args); 4030 4031 max_active = max_active ?: WQ_DFL_ACTIVE; 4032 max_active = wq_clamp_max_active(max_active, flags, wq->name); 4033 4034 /* init wq */ 4035 wq->flags = flags; 4036 wq->saved_max_active = max_active; 4037 mutex_init(&wq->mutex); 4038 atomic_set(&wq->nr_pwqs_to_flush, 0); 4039 INIT_LIST_HEAD(&wq->pwqs); 4040 INIT_LIST_HEAD(&wq->flusher_queue); 4041 INIT_LIST_HEAD(&wq->flusher_overflow); 4042 INIT_LIST_HEAD(&wq->maydays); 4043 4044 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); 4045 INIT_LIST_HEAD(&wq->list); 4046 4047 if (alloc_and_link_pwqs(wq) < 0) 4048 goto err_free_wq; 4049 4050 /* 4051 * Workqueues which may be used during memory reclaim should 4052 * have a rescuer to guarantee forward progress. 4053 */ 4054 if (flags & WQ_MEM_RECLAIM) { 4055 struct worker *rescuer; 4056 4057 rescuer = alloc_worker(NUMA_NO_NODE); 4058 if (!rescuer) 4059 goto err_destroy; 4060 4061 rescuer->rescue_wq = wq; 4062 rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", 4063 wq->name); 4064 if (IS_ERR(rescuer->task)) { 4065 kfree(rescuer); 4066 goto err_destroy; 4067 } 4068 4069 wq->rescuer = rescuer; 4070 rescuer->task->flags |= PF_NO_SETAFFINITY; 4071 wake_up_process(rescuer->task); 4072 } 4073 4074 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) 4075 goto err_destroy; 4076 4077 /* 4078 * wq_pool_mutex protects global freeze state and workqueues list. 4079 * Grab it, adjust max_active and add the new @wq to workqueues 4080 * list. 4081 */ 4082 mutex_lock(&wq_pool_mutex); 4083 4084 mutex_lock(&wq->mutex); 4085 for_each_pwq(pwq, wq) 4086 pwq_adjust_max_active(pwq); 4087 mutex_unlock(&wq->mutex); 4088 4089 list_add(&wq->list, &workqueues); 4090 4091 mutex_unlock(&wq_pool_mutex); 4092 4093 return wq; 4094 4095 err_free_wq: 4096 free_workqueue_attrs(wq->unbound_attrs); 4097 kfree(wq); 4098 return NULL; 4099 err_destroy: 4100 destroy_workqueue(wq); 4101 return NULL; 4102 } 4103 EXPORT_SYMBOL_GPL(__alloc_workqueue_key); 4104 4105 /** 4106 * destroy_workqueue - safely terminate a workqueue 4107 * @wq: target workqueue 4108 * 4109 * Safely destroy a workqueue. All work currently pending will be done first. 
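 *
 * An illustrative lifecycle sketch (the "frob" names below are hypothetical
 * and not part of this file): allocate a workqueue, queue work on it, then
 * destroy it once the work is no longer needed.
 *
 *	static void frob_work_fn(struct work_struct *work);
 *	static DECLARE_WORK(frob_work, frob_work_fn);
 *	static struct workqueue_struct *frob_wq;
 *
 *	frob_wq = alloc_workqueue("frob", WQ_MEM_RECLAIM, 0);
 *	if (frob_wq) {
 *		queue_work(frob_wq, &frob_work);
 *		...
 *		destroy_workqueue(frob_wq);
 *	}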
4110 */ 4111 void destroy_workqueue(struct workqueue_struct *wq) 4112 { 4113 struct pool_workqueue *pwq; 4114 int node; 4115 4116 /* drain it before proceeding with destruction */ 4117 drain_workqueue(wq); 4118 4119 /* sanity checks */ 4120 mutex_lock(&wq->mutex); 4121 for_each_pwq(pwq, wq) { 4122 int i; 4123 4124 for (i = 0; i < WORK_NR_COLORS; i++) { 4125 if (WARN_ON(pwq->nr_in_flight[i])) { 4126 mutex_unlock(&wq->mutex); 4127 return; 4128 } 4129 } 4130 4131 if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) || 4132 WARN_ON(pwq->nr_active) || 4133 WARN_ON(!list_empty(&pwq->delayed_works))) { 4134 mutex_unlock(&wq->mutex); 4135 return; 4136 } 4137 } 4138 mutex_unlock(&wq->mutex); 4139 4140 /* 4141 * wq list is used to freeze wq, remove from list after 4142 * flushing is complete in case freeze races us. 4143 */ 4144 mutex_lock(&wq_pool_mutex); 4145 list_del_init(&wq->list); 4146 mutex_unlock(&wq_pool_mutex); 4147 4148 workqueue_sysfs_unregister(wq); 4149 4150 if (wq->rescuer) { 4151 kthread_stop(wq->rescuer->task); 4152 kfree(wq->rescuer); 4153 wq->rescuer = NULL; 4154 } 4155 4156 if (!(wq->flags & WQ_UNBOUND)) { 4157 /* 4158 * The base ref is never dropped on per-cpu pwqs. Directly 4159 * free the pwqs and wq. 4160 */ 4161 free_percpu(wq->cpu_pwqs); 4162 kfree(wq); 4163 } else { 4164 /* 4165 * We're the sole accessor of @wq at this point. Directly 4166 * access numa_pwq_tbl[] and dfl_pwq to put the base refs. 4167 * @wq will be freed when the last pwq is released. 4168 */ 4169 for_each_node(node) { 4170 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); 4171 RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL); 4172 put_pwq_unlocked(pwq); 4173 } 4174 4175 /* 4176 * Put dfl_pwq. @wq may be freed any time after dfl_pwq is 4177 * put. Don't access it afterwards. 4178 */ 4179 pwq = wq->dfl_pwq; 4180 wq->dfl_pwq = NULL; 4181 put_pwq_unlocked(pwq); 4182 } 4183 } 4184 EXPORT_SYMBOL_GPL(destroy_workqueue); 4185 4186 /** 4187 * workqueue_set_max_active - adjust max_active of a workqueue 4188 * @wq: target workqueue 4189 * @max_active: new max_active value. 4190 * 4191 * Set max_active of @wq to @max_active. 4192 * 4193 * CONTEXT: 4194 * Don't call from IRQ context. 4195 */ 4196 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) 4197 { 4198 struct pool_workqueue *pwq; 4199 4200 /* disallow meddling with max_active for ordered workqueues */ 4201 if (WARN_ON(wq->flags & __WQ_ORDERED)) 4202 return; 4203 4204 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 4205 4206 mutex_lock(&wq->mutex); 4207 4208 wq->saved_max_active = max_active; 4209 4210 for_each_pwq(pwq, wq) 4211 pwq_adjust_max_active(pwq); 4212 4213 mutex_unlock(&wq->mutex); 4214 } 4215 EXPORT_SYMBOL_GPL(workqueue_set_max_active); 4216 4217 /** 4218 * current_is_workqueue_rescuer - is %current workqueue rescuer? 4219 * 4220 * Determine whether %current is a workqueue rescuer. Can be used from 4221 * work functions to determine whether it's being run off the rescuer task. 4222 * 4223 * Return: %true if %current is a workqueue rescuer. %false otherwise. 4224 */ 4225 bool current_is_workqueue_rescuer(void) 4226 { 4227 struct worker *worker = current_wq_worker(); 4228 4229 return worker && worker->rescue_wq; 4230 } 4231 4232 /** 4233 * workqueue_congested - test whether a workqueue is congested 4234 * @cpu: CPU in question 4235 * @wq: target workqueue 4236 * 4237 * Test whether @wq's cpu workqueue for @cpu is congested. 
There is 4238 * no synchronization around this function and the test result is 4239 * unreliable and only useful as advisory hints or for debugging. 4240 * 4241 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU. 4242 * Note that both per-cpu and unbound workqueues may be associated with 4243 * multiple pool_workqueues which have separate congested states. A 4244 * workqueue being congested on one CPU doesn't mean the workqueue is also 4245 * congested on other CPUs / NUMA nodes. 4246 * 4247 * Return: 4248 * %true if congested, %false otherwise. 4249 */ 4250 bool workqueue_congested(int cpu, struct workqueue_struct *wq) 4251 { 4252 struct pool_workqueue *pwq; 4253 bool ret; 4254 4255 rcu_read_lock_sched(); 4256 4257 if (cpu == WORK_CPU_UNBOUND) 4258 cpu = smp_processor_id(); 4259 4260 if (!(wq->flags & WQ_UNBOUND)) 4261 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); 4262 else 4263 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); 4264 4265 ret = !list_empty(&pwq->delayed_works); 4266 rcu_read_unlock_sched(); 4267 4268 return ret; 4269 } 4270 EXPORT_SYMBOL_GPL(workqueue_congested); 4271 4272 /** 4273 * work_busy - test whether a work is currently pending or running 4274 * @work: the work to be tested 4275 * 4276 * Test whether @work is currently pending or running. There is no 4277 * synchronization around this function and the test result is 4278 * unreliable and only useful as advisory hints or for debugging. 4279 * 4280 * Return: 4281 * OR'd bitmask of WORK_BUSY_* bits. 4282 */ 4283 unsigned int work_busy(struct work_struct *work) 4284 { 4285 struct worker_pool *pool; 4286 unsigned long flags; 4287 unsigned int ret = 0; 4288 4289 if (work_pending(work)) 4290 ret |= WORK_BUSY_PENDING; 4291 4292 local_irq_save(flags); 4293 pool = get_work_pool(work); 4294 if (pool) { 4295 spin_lock(&pool->lock); 4296 if (find_worker_executing_work(pool, work)) 4297 ret |= WORK_BUSY_RUNNING; 4298 spin_unlock(&pool->lock); 4299 } 4300 local_irq_restore(flags); 4301 4302 return ret; 4303 } 4304 EXPORT_SYMBOL_GPL(work_busy); 4305 4306 /** 4307 * set_worker_desc - set description for the current work item 4308 * @fmt: printf-style format string 4309 * @...: arguments for the format string 4310 * 4311 * This function can be called by a running work function to describe what 4312 * the work item is about. If the worker task gets dumped, this 4313 * information will be printed out together to help debugging. The 4314 * description can be at most WORKER_DESC_LEN including the trailing '\0'. 4315 */ 4316 void set_worker_desc(const char *fmt, ...) 4317 { 4318 struct worker *worker = current_wq_worker(); 4319 va_list args; 4320 4321 if (worker) { 4322 va_start(args, fmt); 4323 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); 4324 va_end(args); 4325 worker->desc_valid = true; 4326 } 4327 } 4328 4329 /** 4330 * print_worker_info - print out worker information and description 4331 * @log_lvl: the log level to use when printing 4332 * @task: target task 4333 * 4334 * If @task is a worker and currently executing a work item, print out the 4335 * name of the workqueue being serviced and worker description set with 4336 * set_worker_desc() by the currently executing work item. 4337 * 4338 * This function can be safely called on any task as long as the 4339 * task_struct itself is accessible. While safe, this function isn't 4340 * synchronized and may print out mix-ups or garbage of limited length.
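 *
 * For illustration (hypothetical names, not from this file), a work
 * function can make such a dump more informative by describing itself
 * with set_worker_desc():
 *
 *	static void frob_flush_fn(struct work_struct *work)
 *	{
 *		struct frob_dev *dev = container_of(work, struct frob_dev,
 *						    flush_work);
 *
 *		set_worker_desc("frob-flush %s", dev->name);
 *		...
 *	}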
4341 */ 4342 void print_worker_info(const char *log_lvl, struct task_struct *task) 4343 { 4344 work_func_t *fn = NULL; 4345 char name[WQ_NAME_LEN] = { }; 4346 char desc[WORKER_DESC_LEN] = { }; 4347 struct pool_workqueue *pwq = NULL; 4348 struct workqueue_struct *wq = NULL; 4349 bool desc_valid = false; 4350 struct worker *worker; 4351 4352 if (!(task->flags & PF_WQ_WORKER)) 4353 return; 4354 4355 /* 4356 * This function is called without any synchronization and @task 4357 * could be in any state. Be careful with dereferences. 4358 */ 4359 worker = probe_kthread_data(task); 4360 4361 /* 4362 * Carefully copy the associated workqueue's workfn and name. Keep 4363 * the original last '\0' in case the original contains garbage. 4364 */ 4365 probe_kernel_read(&fn, &worker->current_func, sizeof(fn)); 4366 probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq)); 4367 probe_kernel_read(&wq, &pwq->wq, sizeof(wq)); 4368 probe_kernel_read(name, wq->name, sizeof(name) - 1); 4369 4370 /* copy worker description */ 4371 probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid)); 4372 if (desc_valid) 4373 probe_kernel_read(desc, worker->desc, sizeof(desc) - 1); 4374 4375 if (fn || name[0] || desc[0]) { 4376 printk("%sWorkqueue: %s %pf", log_lvl, name, fn); 4377 if (desc[0]) 4378 pr_cont(" (%s)", desc); 4379 pr_cont("\n"); 4380 } 4381 } 4382 4383 /* 4384 * CPU hotplug. 4385 * 4386 * There are two challenges in supporting CPU hotplug. Firstly, there 4387 * are a lot of assumptions on strong associations among work, pwq and 4388 * pool which make migrating pending and scheduled works very 4389 * difficult to implement without impacting hot paths. Secondly, 4390 * worker pools serve mix of short, long and very long running works making 4391 * blocked draining impractical. 4392 * 4393 * This is solved by allowing the pools to be disassociated from the CPU 4394 * running as an unbound one and allowing it to be reattached later if the 4395 * cpu comes back online. 4396 */ 4397 4398 static void wq_unbind_fn(struct work_struct *work) 4399 { 4400 int cpu = smp_processor_id(); 4401 struct worker_pool *pool; 4402 struct worker *worker; 4403 4404 for_each_cpu_worker_pool(pool, cpu) { 4405 mutex_lock(&pool->attach_mutex); 4406 spin_lock_irq(&pool->lock); 4407 4408 /* 4409 * We've blocked all attach/detach operations. Make all workers 4410 * unbound and set DISASSOCIATED. Before this, all workers 4411 * except for the ones which are still executing works from 4412 * before the last CPU down must be on the cpu. After 4413 * this, they may become diasporas. 4414 */ 4415 for_each_pool_worker(worker, pool) 4416 worker->flags |= WORKER_UNBOUND; 4417 4418 pool->flags |= POOL_DISASSOCIATED; 4419 4420 spin_unlock_irq(&pool->lock); 4421 mutex_unlock(&pool->attach_mutex); 4422 4423 /* 4424 * Call schedule() so that we cross rq->lock and thus can 4425 * guarantee sched callbacks see the %WORKER_UNBOUND flag. 4426 * This is necessary as scheduler callbacks may be invoked 4427 * from other cpus. 4428 */ 4429 schedule(); 4430 4431 /* 4432 * Sched callbacks are disabled now. Zap nr_running. 4433 * After this, nr_running stays zero and need_more_worker() 4434 * and keep_working() are always true as long as the 4435 * worklist is not empty. This pool now behaves as an 4436 * unbound (in terms of concurrency management) pool which 4437 * are served by workers tied to the pool. 
4438 */ 4439 atomic_set(&pool->nr_running, 0); 4440 4441 /* 4442 * With concurrency management just turned off, a busy 4443 * worker blocking could lead to lengthy stalls. Kick off 4444 * unbound chain execution of currently pending work items. 4445 */ 4446 spin_lock_irq(&pool->lock); 4447 wake_up_worker(pool); 4448 spin_unlock_irq(&pool->lock); 4449 } 4450 } 4451 4452 /** 4453 * rebind_workers - rebind all workers of a pool to the associated CPU 4454 * @pool: pool of interest 4455 * 4456 * @pool->cpu is coming online. Rebind all workers to the CPU. 4457 */ 4458 static void rebind_workers(struct worker_pool *pool) 4459 { 4460 struct worker *worker; 4461 4462 lockdep_assert_held(&pool->attach_mutex); 4463 4464 /* 4465 * Restore CPU affinity of all workers. As all idle workers should 4466 * be on the run-queue of the associated CPU before any local 4467 * wake-ups for concurrency management happen, restore CPU affinity 4468 * of all workers first and then clear UNBOUND. As we're called 4469 * from CPU_ONLINE, the following shouldn't fail. 4470 */ 4471 for_each_pool_worker(worker, pool) 4472 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, 4473 pool->attrs->cpumask) < 0); 4474 4475 spin_lock_irq(&pool->lock); 4476 pool->flags &= ~POOL_DISASSOCIATED; 4477 4478 for_each_pool_worker(worker, pool) { 4479 unsigned int worker_flags = worker->flags; 4480 4481 /* 4482 * A bound idle worker should actually be on the runqueue 4483 * of the associated CPU for local wake-ups targeting it to 4484 * work. Kick all idle workers so that they migrate to the 4485 * associated CPU. Doing this in the same loop as 4486 * replacing UNBOUND with REBOUND is safe as no worker will 4487 * be bound before @pool->lock is released. 4488 */ 4489 if (worker_flags & WORKER_IDLE) 4490 wake_up_process(worker->task); 4491 4492 /* 4493 * We want to clear UNBOUND but can't directly call 4494 * worker_clr_flags() or adjust nr_running. Atomically 4495 * replace UNBOUND with another NOT_RUNNING flag REBOUND. 4496 * @worker will clear REBOUND using worker_clr_flags() when 4497 * it initiates the next execution cycle thus restoring 4498 * concurrency management. Note that when or whether 4499 * @worker clears REBOUND doesn't affect correctness. 4500 * 4501 * ACCESS_ONCE() is necessary because @worker->flags may be 4502 * tested without holding any lock in 4503 * wq_worker_waking_up(). Without it, NOT_RUNNING test may 4504 * fail incorrectly leading to premature concurrency 4505 * management operations. 4506 */ 4507 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); 4508 worker_flags |= WORKER_REBOUND; 4509 worker_flags &= ~WORKER_UNBOUND; 4510 ACCESS_ONCE(worker->flags) = worker_flags; 4511 } 4512 4513 spin_unlock_irq(&pool->lock); 4514 } 4515 4516 /** 4517 * restore_unbound_workers_cpumask - restore cpumask of unbound workers 4518 * @pool: unbound pool of interest 4519 * @cpu: the CPU which is coming up 4520 * 4521 * An unbound pool may end up with a cpumask which doesn't have any online 4522 * CPUs. When a worker of such a pool gets scheduled, the scheduler resets 4523 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any 4524 * online CPU before, cpus_allowed of all its workers should be restored. 4525 */ 4526 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) 4527 { 4528 static cpumask_t cpumask; 4529 struct worker *worker; 4530 4531 lockdep_assert_held(&pool->attach_mutex); 4532 4533 /* is @cpu allowed for @pool?
*/ 4534 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) 4535 return; 4536 4537 /* is @cpu the only online CPU? */ 4538 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); 4539 if (cpumask_weight(&cpumask) != 1) 4540 return; 4541 4542 /* as we're called from CPU_ONLINE, the following shouldn't fail */ 4543 for_each_pool_worker(worker, pool) 4544 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, 4545 pool->attrs->cpumask) < 0); 4546 } 4547 4548 /* 4549 * Workqueues should be brought up before normal priority CPU notifiers. 4550 * This will be registered as a high priority CPU notifier. 4551 */ 4552 static int workqueue_cpu_up_callback(struct notifier_block *nfb, 4553 unsigned long action, 4554 void *hcpu) 4555 { 4556 int cpu = (unsigned long)hcpu; 4557 struct worker_pool *pool; 4558 struct workqueue_struct *wq; 4559 int pi; 4560 4561 switch (action & ~CPU_TASKS_FROZEN) { 4562 case CPU_UP_PREPARE: 4563 for_each_cpu_worker_pool(pool, cpu) { 4564 if (pool->nr_workers) 4565 continue; 4566 if (!create_worker(pool)) 4567 return NOTIFY_BAD; 4568 } 4569 break; 4570 4571 case CPU_DOWN_FAILED: 4572 case CPU_ONLINE: 4573 mutex_lock(&wq_pool_mutex); 4574 4575 for_each_pool(pool, pi) { 4576 mutex_lock(&pool->attach_mutex); 4577 4578 if (pool->cpu == cpu) 4579 rebind_workers(pool); 4580 else if (pool->cpu < 0) 4581 restore_unbound_workers_cpumask(pool, cpu); 4582 4583 mutex_unlock(&pool->attach_mutex); 4584 } 4585 4586 /* update NUMA affinity of unbound workqueues */ 4587 list_for_each_entry(wq, &workqueues, list) 4588 wq_update_unbound_numa(wq, cpu, true); 4589 4590 mutex_unlock(&wq_pool_mutex); 4591 break; 4592 } 4593 return NOTIFY_OK; 4594 } 4595 4596 /* 4597 * Workqueues should be brought down after normal priority CPU notifiers. 4598 * This will be registered as a low priority CPU notifier. 4599 */ 4600 static int workqueue_cpu_down_callback(struct notifier_block *nfb, 4601 unsigned long action, 4602 void *hcpu) 4603 { 4604 int cpu = (unsigned long)hcpu; 4605 struct work_struct unbind_work; 4606 struct workqueue_struct *wq; 4607 4608 switch (action & ~CPU_TASKS_FROZEN) { 4609 case CPU_DOWN_PREPARE: 4610 /* unbinding per-cpu workers should happen on the local CPU */ 4611 INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); 4612 queue_work_on(cpu, system_highpri_wq, &unbind_work); 4613 4614 /* update NUMA affinity of unbound workqueues */ 4615 mutex_lock(&wq_pool_mutex); 4616 list_for_each_entry(wq, &workqueues, list) 4617 wq_update_unbound_numa(wq, cpu, false); 4618 mutex_unlock(&wq_pool_mutex); 4619 4620 /* wait for per-cpu unbinding to finish */ 4621 flush_work(&unbind_work); 4622 destroy_work_on_stack(&unbind_work); 4623 break; 4624 } 4625 return NOTIFY_OK; 4626 } 4627 4628 #ifdef CONFIG_SMP 4629 4630 struct work_for_cpu { 4631 struct work_struct work; 4632 long (*fn)(void *); 4633 void *arg; 4634 long ret; 4635 }; 4636 4637 static void work_for_cpu_fn(struct work_struct *work) 4638 { 4639 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); 4640 4641 wfc->ret = wfc->fn(wfc->arg); 4642 } 4643 4644 /** 4645 * work_on_cpu - run a function in user context on a particular cpu 4646 * @cpu: the cpu to run on 4647 * @fn: the function to run 4648 * @arg: the function arg 4649 * 4650 * It is up to the caller to ensure that the cpu doesn't go offline. 4651 * The caller must not hold any locks which would prevent @fn from completing. 4652 * 4653 * Return: The value @fn returns.
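 *
 * A minimal usage sketch (the helper below is hypothetical, for
 * illustration only); @fn runs in process context on @cpu:
 *
 *	static long query_node_fn(void *arg)
 *	{
 *		return numa_node_id();
 *	}
 *
 *	long node = work_on_cpu(cpu, query_node_fn, NULL);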
4654 */ 4655 long work_on_cpu(int cpu, long (*fn)(void *), void *arg) 4656 { 4657 struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 4658 4659 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); 4660 schedule_work_on(cpu, &wfc.work); 4661 flush_work(&wfc.work); 4662 destroy_work_on_stack(&wfc.work); 4663 return wfc.ret; 4664 } 4665 EXPORT_SYMBOL_GPL(work_on_cpu); 4666 #endif /* CONFIG_SMP */ 4667 4668 #ifdef CONFIG_FREEZER 4669 4670 /** 4671 * freeze_workqueues_begin - begin freezing workqueues 4672 * 4673 * Start freezing workqueues. After this function returns, all freezable 4674 * workqueues will queue new works to their delayed_works list instead of 4675 * pool->worklist. 4676 * 4677 * CONTEXT: 4678 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 4679 */ 4680 void freeze_workqueues_begin(void) 4681 { 4682 struct workqueue_struct *wq; 4683 struct pool_workqueue *pwq; 4684 4685 mutex_lock(&wq_pool_mutex); 4686 4687 WARN_ON_ONCE(workqueue_freezing); 4688 workqueue_freezing = true; 4689 4690 list_for_each_entry(wq, &workqueues, list) { 4691 mutex_lock(&wq->mutex); 4692 for_each_pwq(pwq, wq) 4693 pwq_adjust_max_active(pwq); 4694 mutex_unlock(&wq->mutex); 4695 } 4696 4697 mutex_unlock(&wq_pool_mutex); 4698 } 4699 4700 /** 4701 * freeze_workqueues_busy - are freezable workqueues still busy? 4702 * 4703 * Check whether freezing is complete. This function must be called 4704 * between freeze_workqueues_begin() and thaw_workqueues(). 4705 * 4706 * CONTEXT: 4707 * Grabs and releases wq_pool_mutex. 4708 * 4709 * Return: 4710 * %true if some freezable workqueues are still busy. %false if freezing 4711 * is complete. 4712 */ 4713 bool freeze_workqueues_busy(void) 4714 { 4715 bool busy = false; 4716 struct workqueue_struct *wq; 4717 struct pool_workqueue *pwq; 4718 4719 mutex_lock(&wq_pool_mutex); 4720 4721 WARN_ON_ONCE(!workqueue_freezing); 4722 4723 list_for_each_entry(wq, &workqueues, list) { 4724 if (!(wq->flags & WQ_FREEZABLE)) 4725 continue; 4726 /* 4727 * nr_active is monotonically decreasing. It's safe 4728 * to peek without lock. 4729 */ 4730 rcu_read_lock_sched(); 4731 for_each_pwq(pwq, wq) { 4732 WARN_ON_ONCE(pwq->nr_active < 0); 4733 if (pwq->nr_active) { 4734 busy = true; 4735 rcu_read_unlock_sched(); 4736 goto out_unlock; 4737 } 4738 } 4739 rcu_read_unlock_sched(); 4740 } 4741 out_unlock: 4742 mutex_unlock(&wq_pool_mutex); 4743 return busy; 4744 } 4745 4746 /** 4747 * thaw_workqueues - thaw workqueues 4748 * 4749 * Thaw workqueues. Normal queueing is restored and all collected 4750 * frozen works are transferred to their respective pool worklists. 4751 * 4752 * CONTEXT: 4753 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 
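 *
 * For illustration, a simplified freezer-side sequence (timeout and error
 * handling omitted; see the actual suspend/hibernation code for the real
 * thing) pairs this with freeze_workqueues_begin():
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);
 *	...
 *	thaw_workqueues();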
4754 */ 4755 void thaw_workqueues(void) 4756 { 4757 struct workqueue_struct *wq; 4758 struct pool_workqueue *pwq; 4759 4760 mutex_lock(&wq_pool_mutex); 4761 4762 if (!workqueue_freezing) 4763 goto out_unlock; 4764 4765 workqueue_freezing = false; 4766 4767 /* restore max_active and repopulate worklist */ 4768 list_for_each_entry(wq, &workqueues, list) { 4769 mutex_lock(&wq->mutex); 4770 for_each_pwq(pwq, wq) 4771 pwq_adjust_max_active(pwq); 4772 mutex_unlock(&wq->mutex); 4773 } 4774 4775 out_unlock: 4776 mutex_unlock(&wq_pool_mutex); 4777 } 4778 #endif /* CONFIG_FREEZER */ 4779 4780 static void __init wq_numa_init(void) 4781 { 4782 cpumask_var_t *tbl; 4783 int node, cpu; 4784 4785 if (num_possible_nodes() <= 1) 4786 return; 4787 4788 if (wq_disable_numa) { 4789 pr_info("workqueue: NUMA affinity support disabled\n"); 4790 return; 4791 } 4792 4793 wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL); 4794 BUG_ON(!wq_update_unbound_numa_attrs_buf); 4795 4796 /* 4797 * We want masks of possible CPUs of each node which isn't readily 4798 * available. Build one from cpu_to_node() which should have been 4799 * fully initialized by now. 4800 */ 4801 tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL); 4802 BUG_ON(!tbl); 4803 4804 for_each_node(node) 4805 BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL, 4806 node_online(node) ? node : NUMA_NO_NODE)); 4807 4808 for_each_possible_cpu(cpu) { 4809 node = cpu_to_node(cpu); 4810 if (WARN_ON(node == NUMA_NO_NODE)) { 4811 pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu); 4812 /* happens iff arch is bonkers, let's just proceed */ 4813 return; 4814 } 4815 cpumask_set_cpu(cpu, tbl[node]); 4816 } 4817 4818 wq_numa_possible_cpumask = tbl; 4819 wq_numa_enabled = true; 4820 } 4821 4822 static int __init init_workqueues(void) 4823 { 4824 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; 4825 int i, cpu; 4826 4827 WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 4828 4829 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 4830 4831 cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP); 4832 hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN); 4833 4834 wq_numa_init(); 4835 4836 /* initialize CPU pools */ 4837 for_each_possible_cpu(cpu) { 4838 struct worker_pool *pool; 4839 4840 i = 0; 4841 for_each_cpu_worker_pool(pool, cpu) { 4842 BUG_ON(init_worker_pool(pool)); 4843 pool->cpu = cpu; 4844 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); 4845 pool->attrs->nice = std_nice[i++]; 4846 pool->node = cpu_to_node(cpu); 4847 4848 /* alloc pool ID */ 4849 mutex_lock(&wq_pool_mutex); 4850 BUG_ON(worker_pool_assign_id(pool)); 4851 mutex_unlock(&wq_pool_mutex); 4852 } 4853 } 4854 4855 /* create the initial worker */ 4856 for_each_online_cpu(cpu) { 4857 struct worker_pool *pool; 4858 4859 for_each_cpu_worker_pool(pool, cpu) { 4860 pool->flags &= ~POOL_DISASSOCIATED; 4861 BUG_ON(!create_worker(pool)); 4862 } 4863 } 4864 4865 /* create default unbound and ordered wq attrs */ 4866 for (i = 0; i < NR_STD_WORKER_POOLS; i++) { 4867 struct workqueue_attrs *attrs; 4868 4869 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); 4870 attrs->nice = std_nice[i]; 4871 unbound_std_wq_attrs[i] = attrs; 4872 4873 /* 4874 * An ordered wq should have only one pwq as ordering is 4875 * guaranteed by max_active which is enforced by pwqs. 4876 * Turn off NUMA so that dfl_pwq is used for all nodes. 
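 *
 * For example (illustrative only), a queue created with
 * alloc_ordered_workqueue("frob_ordered", 0) ends up with a single
 * pwq whose max_active is one, so its work items execute strictly
 * one at a time in queueing order.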
4877 */ 4878 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); 4879 attrs->nice = std_nice[i]; 4880 attrs->no_numa = true; 4881 ordered_wq_attrs[i] = attrs; 4882 } 4883 4884 system_wq = alloc_workqueue("events", 0, 0); 4885 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0); 4886 system_long_wq = alloc_workqueue("events_long", 0, 0); 4887 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 4888 WQ_UNBOUND_MAX_ACTIVE); 4889 system_freezable_wq = alloc_workqueue("events_freezable", 4890 WQ_FREEZABLE, 0); 4891 system_power_efficient_wq = alloc_workqueue("events_power_efficient", 4892 WQ_POWER_EFFICIENT, 0); 4893 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient", 4894 WQ_FREEZABLE | WQ_POWER_EFFICIENT, 4895 0); 4896 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || 4897 !system_unbound_wq || !system_freezable_wq || 4898 !system_power_efficient_wq || 4899 !system_freezable_power_efficient_wq); 4900 return 0; 4901 } 4902 early_initcall(init_workqueues); 4903
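
/*
 * Example (an illustrative sketch only, not part of the workqueue
 * implementation): tuning an unbound workqueue with
 * apply_workqueue_attrs().  The queue name, nice value and cpumask
 * below are hypothetical.
 *
 *	struct workqueue_struct *crypt_wq;
 *	struct workqueue_attrs *attrs;
 *
 *	crypt_wq = alloc_workqueue("crypt", WQ_UNBOUND, 0);
 *	attrs = alloc_workqueue_attrs(GFP_KERNEL);
 *	if (crypt_wq && attrs) {
 *		attrs->nice = -5;
 *		cpumask_copy(attrs->cpumask, cpumask_of_node(0));
 *		if (apply_workqueue_attrs(crypt_wq, attrs) < 0)
 *			pr_warn("crypt: failed to apply wq attrs\n");
 *	}
 *	free_workqueue_attrs(attrs);
 */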