// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 * Derived from the taskqueue/keventd code by:
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism. Work items are
 * executed in process context. The worker pool is shared and
 * automatically managed. There are two worker pools for each CPU (one for
 * normal work items and the other for high priority ones) and some extra
 * pools for workqueues which are not bound to any specific CPU - the
 * number of these backing pools is dynamic.
 *
 * Please read Documentation/core-api/workqueue.rst for details.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/kvm_para.h>
#include <linux/delay.h>

#include "workqueue_internal.h"

enum {
	/*
	 * worker_pool flags
	 *
	 * A bound pool is either associated or disassociated with its CPU.
	 * While associated (!DISASSOCIATED), all workers are bound to the
	 * CPU and none has %WORKER_UNBOUND set and concurrency management
	 * is in effect.
	 *
	 * While DISASSOCIATED, the cpu may be offline and all workers have
	 * %WORKER_UNBOUND set and concurrency management disabled, and may
	 * be executing on any CPU. The pool behaves as an unbound one.
	 *
	 * Note that DISASSOCIATED should be flipped only while holding
	 * wq_pool_attach_mutex to avoid changing binding state while
	 * worker_attach_to_pool() is in progress.
	 */
	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */

	/* worker flags */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
				  WORKER_UNBOUND | WORKER_REBOUND,

	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */

	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */

	/*
	 * Rescue workers are used only in emergencies and shared by
	 * all cpus. Give MIN_NICE.
	 */
	RESCUER_NICE_LEVEL	= MIN_NICE,
	HIGHPRI_NICE_LEVEL	= MIN_NICE,

	WQ_NAME_LEN		= 24,
};

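/*
 * Worked example (added for illustration) for MAYDAY_INITIAL_TIMEOUT
 * above: with HZ=1000, HZ/100 is 10 ticks, i.e. 10ms. With HZ=100,
 * HZ/100 is a single tick, below the two-tick minimum, so the value
 * rounds up to 2 ticks (20ms). The "10ms" figure is therefore only
 * approximate at low HZ.
 */
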
/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected. Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected. Access with pool->lock held.
 *
 * K: Only modified by worker while holding pool->lock. Can be safely read by
 *    self, while holding pool->lock or from IRQ context if %current is the
 *    kworker.
 *
 * S: Only modified by worker self.
 *
 * A: wq_pool_attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes. RCU protected for reads.
 *
 * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
 *
 * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
 *      RCU for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes. RCU protected for reads.
 *
 * MD: wq_mayday_lock protected.
 *
 * WD: Used internally by the watchdog.
 */

/* struct worker is defined in workqueue_internal.h */

struct worker_pool {
	raw_spinlock_t		lock;		/* the pool lock */
	int			cpu;		/* I: the associated cpu */
	int			node;		/* I: the associated node ID */
	int			id;		/* I: pool ID */
	unsigned int		flags;		/* L: flags */

	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
	bool			cpu_stall;	/* WD: stalled cpu bound pool */

	/*
	 * The counter is incremented in a process context on the associated CPU
	 * w/ preemption disabled, and decremented or reset in the same context
	 * but w/ pool->lock held. The readers grab pool->lock and are
	 * guaranteed to see if the counter reached zero.
	 */
	int			nr_running;

	struct list_head	worklist;	/* L: list of pending works */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle workers */

	struct list_head	idle_list;	/* L: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct work_struct	idle_cull_work;	/* L: worker idle cleanup */

	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	/* a worker is either on busy_hash or idle_list, or the manager */
	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
						/* L: hash of busy workers */

	struct worker		*manager;	/* L: purely informational */
	struct list_head	workers;	/* A: attached workers */
	struct list_head	dying_workers;	/* A: workers about to die */
	struct completion	*detach_completion; /* all workers detached */

	struct ida		worker_ida;	/* worker IDs for task name */

	struct workqueue_attrs	*attrs;		/* I: worker attributes */
	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
	int			refcnt;		/* PL: refcnt for unbound pools */

	/*
	 * Destruction of pool is RCU protected to allow dereferences
	 * from get_work_pool().
	 */
	struct rcu_head		rcu;
};

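/*
 * Illustrative sketch (not part of the original file): how the "L:"
 * annotation above translates into code. Reading an "L:" field such as
 * nr_workers requires pool->lock. The function name is hypothetical.
 */
#if 0
static int example_read_nr_workers(struct worker_pool *pool)
{
	int nr;

	raw_spin_lock_irq(&pool->lock);
	nr = pool->nr_workers;		/* "L:" field, pool->lock held */
	raw_spin_unlock_irq(&pool->lock);

	return nr;
}
#endif
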
/*
 * Per-pool_workqueue statistics. These can be monitored using
 * tools/workqueue/wq_monitor.py.
 */
enum pool_workqueue_stats {
	PWQ_STAT_STARTED,	/* work items started execution */
	PWQ_STAT_COMPLETED,	/* work items completed execution */
	PWQ_STAT_CPU_TIME,	/* total CPU time consumed */
	PWQ_STAT_CPU_INTENSIVE,	/* wq_cpu_intensive_thresh_us violations */
	PWQ_STAT_CM_WAKEUP,	/* concurrency-management worker wakeups */
	PWQ_STAT_REPATRIATED,	/* unbound workers brought back into scope */
	PWQ_STAT_MAYDAY,	/* maydays to rescuer */
	PWQ_STAT_RESCUED,	/* linked work items executed by rescuer */

	PWQ_NR_STATS,
};

/*
 * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned at (1 << WORK_STRUCT_FLAG_BITS).
 */
struct pool_workqueue {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			refcnt;		/* L: reference count */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */

	/*
	 * nr_active management and WORK_STRUCT_INACTIVE:
	 *
	 * When pwq->nr_active >= max_active, a new work item is queued to
	 * pwq->inactive_works instead of pool->worklist and marked with
	 * WORK_STRUCT_INACTIVE.
	 *
	 * All work items marked with WORK_STRUCT_INACTIVE do not participate
	 * in pwq->nr_active and all work items in pwq->inactive_works are
	 * marked with WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE
	 * work items are in pwq->inactive_works. Some of them are ready to
	 * run in pool->worklist or worker->scheduled. Those work items are
	 * only struct wq_barrier which is used for flush_work() and should
	 * not participate in pwq->nr_active. For a non-barrier work item, it
	 * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
	 */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	inactive_works;	/* L: inactive works */
	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
	struct list_head	mayday_node;	/* MD: node on wq->maydays */

	u64			stats[PWQ_NR_STATS];

	/*
	 * Release of unbound pwq is punted to a kthread_worker. See put_pwq()
	 * and pwq_release_workfn() for details. pool_workqueue itself is also
	 * RCU protected so that the first pwq can be determined without
	 * grabbing wq->mutex.
	 */
	struct kthread_work	release_work;
	struct rcu_head		rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* WQ: list of flushers */
	int			flush_color;	/* WQ: flush color waiting for */
	struct completion	done;		/* flush completion */
};

struct wq_device;

/*
 * The externally visible workqueue. It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
 */
struct workqueue_struct {
	struct list_head	pwqs;		/* WR: all pwqs of this wq */
	struct list_head	list;		/* PR: list of all workqueues */

	struct mutex		mutex;		/* protects this wq */
	int			work_color;	/* WQ: current work color */
	int			flush_color;	/* WQ: current flush color */
	atomic_t		nr_pwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
	struct list_head	flusher_queue;	/* WQ: flush waiters */
	struct list_head	flusher_overflow; /* WQ: flush overflow list */

	struct list_head	maydays;	/* MD: pwqs requesting rescue */
	struct worker		*rescuer;	/* MD: rescue worker */

	int			nr_drainers;	/* WQ: drain in progress */
	int			saved_max_active; /* WQ: saved pwq max_active */

	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
	struct pool_workqueue	*dfl_pwq;	/* PW: only for unbound wqs */

#ifdef CONFIG_SYSFS
	struct wq_device	*wq_dev;	/* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
	char			*lock_name;
	struct lock_class_key	key;
	struct lockdep_map	lockdep_map;
#endif
	char			name[WQ_NAME_LEN]; /* I: workqueue name */

	/*
	 * Destruction of workqueue_struct is RCU protected to allow walking
	 * the workqueues list without grabbing wq_pool_mutex.
	 * This is used to dump all workqueues from sysrq.
	 */
	struct rcu_head		rcu;

	/* hot fields used during command issue, aligned to cacheline */
	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
	struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
};

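/*
 * Illustrative sketch (not part of the original file): minimal life
 * cycle of the externally visible workqueue defined above, as a caller
 * might use it. The wq name and work function are hypothetical.
 */
#if 0
static void example_work_fn(struct work_struct *work)
{
	pr_info("example work ran\n");
}

static DECLARE_WORK(example_work, example_work_fn);

static int example_wq_lifecycle(void)
{
	struct workqueue_struct *wq;

	wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	queue_work(wq, &example_work);	/* relayed to a worker_pool via a pwq */
	flush_workqueue(wq);		/* exercises the flush_color machinery */
	destroy_workqueue(wq);

	return 0;
}
#endif
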
static struct kmem_cache *pwq_cache;

/*
 * Each pod type describes how CPUs should be grouped for unbound workqueues.
 * See the comment above workqueue_attrs->affn_scope.
 */
struct wq_pod_type {
	int			nr_pods;	/* number of pods */
	cpumask_var_t		*pod_cpus;	/* pod -> cpus */
	int			*pod_node;	/* pod -> node */
	int			*cpu_pod;	/* cpu -> pod */
};

static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;

static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
	[WQ_AFFN_DFL]		= "default",
	[WQ_AFFN_CPU]		= "cpu",
	[WQ_AFFN_SMT]		= "smt",
	[WQ_AFFN_CACHE]		= "cache",
	[WQ_AFFN_NUMA]		= "numa",
	[WQ_AFFN_SYSTEM]	= "system",
};

/*
 * Per-cpu work items which run for longer than the following threshold are
 * automatically considered CPU intensive and excluded from concurrency
 * management to prevent them from noticeably delaying other per-cpu work items.
 * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter.
 * The actual value is initialized in wq_cpu_intensive_thresh_init().
 */
static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);

/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);

static bool wq_online;			/* can kworkers be created yet? */

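/*
 * Usage note (added for illustration): as module_param_named() members of
 * the built-in "workqueue" module, the knobs above can be set on the
 * kernel command line, e.g. "workqueue.cpu_intensive_thresh_us=10000";
 * cpu_intensive_thresh_us (mode 0644) can also be changed at runtime via
 * /sys/module/workqueue/parameters/, while power_efficient (0444) is
 * read-only after boot.
 */
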
*/ 368 369 /* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */ 370 static struct workqueue_attrs *wq_update_pod_attrs_buf; 371 372 static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */ 373 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */ 374 static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ 375 /* wait for manager to go away */ 376 static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait); 377 378 static LIST_HEAD(workqueues); /* PR: list of all workqueues */ 379 static bool workqueue_freezing; /* PL: have wqs started freezing? */ 380 381 /* PL&A: allowable cpus for unbound wqs and work items */ 382 static cpumask_var_t wq_unbound_cpumask; 383 384 /* for further constrain wq_unbound_cpumask by cmdline parameter*/ 385 static struct cpumask wq_cmdline_cpumask __initdata; 386 387 /* CPU where unbound work was last round robin scheduled from this CPU */ 388 static DEFINE_PER_CPU(int, wq_rr_cpu_last); 389 390 /* 391 * Local execution of unbound work items is no longer guaranteed. The 392 * following always forces round-robin CPU selection on unbound work items 393 * to uncover usages which depend on it. 394 */ 395 #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU 396 static bool wq_debug_force_rr_cpu = true; 397 #else 398 static bool wq_debug_force_rr_cpu = false; 399 #endif 400 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644); 401 402 /* the per-cpu worker pools */ 403 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools); 404 405 static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */ 406 407 /* PL: hash of all unbound pools keyed by pool->attrs */ 408 static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER); 409 410 /* I: attributes used when instantiating standard unbound pools on demand */ 411 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS]; 412 413 /* I: attributes used when instantiating ordered pools on demand */ 414 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS]; 415 416 /* 417 * I: kthread_worker to release pwq's. pwq release needs to be bounced to a 418 * process context while holding a pool lock. Bounce to a dedicated kthread 419 * worker to avoid A-A deadlocks. 
struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_wq);
struct workqueue_struct *system_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);

static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
static void show_pwq(struct pool_workqueue *pwq);
static void show_one_worker_pool(struct worker_pool *pool);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define assert_rcu_or_pool_mutex()					\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU or wq_pool_mutex should be held")

#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)			\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&wq->mutex) &&		\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU, wq->mutex or wq_pool_mutex should be held")

#define for_each_cpu_worker_pool(pool, cpu)				\
	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
	     (pool)++)

/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with wq_pool_mutex held or RCU read
 * locked. If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool(pool, pi)						\
	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
		if (({ assert_rcu_or_pool_mutex(); false; })) { }	\
		else

/**
 * for_each_pool_worker - iterate through all workers of a worker_pool
 * @worker: iteration cursor
 * @pool: worker_pool to iterate workers of
 *
 * This must be called with wq_pool_attach_mutex held.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool_worker(worker, pool)				\
	list_for_each_entry((worker), &(pool)->workers, node)		\
		if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
		else

/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with wq->mutex held or RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pwq(pwq, wq)						\
	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,		\
				lockdep_is_held(&(wq->mutex)))

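/*
 * Illustrative sketch (not part of the original file): walking the pwqs
 * of a workqueue under RCU, as the locking rules above require. The
 * function name is hypothetical.
 */
#if 0
static void example_count_pwqs(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;
	int nr = 0;

	rcu_read_lock();
	for_each_pwq(pwq, wq)
		nr++;
	rcu_read_unlock();

	pr_info("workqueue %s has %d pwqs\n", wq->name, nr);
}
#endif
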
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static const struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
	return ((struct work_struct *) addr)->func;
}

static bool work_is_static_object(void *addr)
{
	struct work_struct *work = addr;

	return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.is_static_object = work_is_static_object,
	.fixup_init	= work_fixup_init,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

void destroy_delayed_work_on_stack(struct delayed_work *work)
{
	destroy_timer_on_stack(&work->timer);
	debug_object_free(&work->work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/**
 * worker_pool_assign_id - allocate ID and assign it to @pool
 * @pool: the pool pointer of interest
 *
 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 * successfully, -errno on failure.
 */
static int worker_pool_assign_id(struct worker_pool *pool)
{
	int ret;

	lockdep_assert_held(&wq_pool_mutex);

	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
			GFP_KERNEL);
	if (ret >= 0) {
		pool->id = ret;
		return 0;
	}
	return ret;
}

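/*
 * Illustrative sketch (not part of the original file): the intended
 * pairing for the on-stack debug-object helpers above. The work function
 * and caller are hypothetical.
 */
#if 0
static void example_stack_fn(struct work_struct *work)
{
	pr_info("on-stack work ran\n");
}

static void example_on_stack_work(void)
{
	struct work_struct work;

	INIT_WORK_ONSTACK(&work, example_stack_fn);	/* __init_work(onstack=1) */
	schedule_work(&work);
	flush_work(&work);		/* must finish before leaving scope */
	destroy_work_on_stack(&work);
}
#endif
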
static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(unsigned long work_data)
{
	return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

/*
 * While queued, %WORK_STRUCT_PWQ is set and the non-flag bits of a work's
 * data contain the pointer to the queued pwq. Once execution starts, the
 * flag is cleared and the high bits contain OFFQ flags and pool ID.
 *
 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 * and clear_work_data() can be used to set the pwq, pool or clear
 * work->data. These functions should only be called while the work is
 * owned - ie. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work. Pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled. pwq is
 * available only while the work item is queued.
 *
 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 * canceled. While being canceled, a work item may have its PENDING set
 * but stay off timer and worklist for arbitrarily long and nobody should
 * try to steal the PENDING bit.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	WARN_ON_ONCE(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)pwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
}

static void set_work_pool_and_keep_pending(struct work_struct *work,
					   int pool_id)
{
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
		      WORK_STRUCT_PENDING);
}

static void set_work_pool_and_clear_pending(struct work_struct *work,
					    int pool_id)
{
	/*
	 * The following wmb is paired with the implied mb in
	 * test_and_set_bit(PENDING) and ensures all updates to @work made
	 * here are visible to and precede any updates by the next PENDING
	 * owner.
	 */
	smp_wmb();
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
	/*
	 * The following mb guarantees that previous clear of a PENDING bit
	 * will not be reordered with any speculative LOADS or STORES from
	 * work->current_func, which is executed afterwards. This possible
	 * reordering can lead to a missed execution on attempt to queue
	 * the same @work. E.g. consider this case:
	 *
	 *   CPU#0                         CPU#1
	 *   ----------------------------  --------------------------------
	 *
	 * 1  STORE event_indicated
	 * 2  queue_work_on() {
	 * 3    test_and_set_bit(PENDING)
	 * 4 }                             set_..._and_clear_pending() {
	 * 5                                 set_work_data() # clear bit
	 * 6                                 smp_mb()
	 * 7                               work->current_func() {
	 * 8                                 LOAD event_indicated
	 *                                 }
	 *
	 * Without an explicit full barrier speculative LOAD on line 8 can
	 * be executed before CPU#0 does STORE on line 1. If that happens,
	 * CPU#0 observes the PENDING bit is still set and new execution of
	 * a @work is not queued in the hope that CPU#1 will eventually
	 * finish the queued @work. Meanwhile CPU#1 does not see
	 * event_indicated is set, because speculative LOAD was executed
	 * before actual STORE.
	 */
	smp_mb();
}

static void clear_work_data(struct work_struct *work)
{
	smp_wmb();	/* see set_work_pool_and_clear_pending() */
	set_work_data(work, WORK_STRUCT_NO_POOL, 0);
}

static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
{
	return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
}

static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data);
	else
		return NULL;
}

/**
 * get_work_pool - return the worker_pool a given work was associated with
 * @work: the work item of interest
 *
 * Pools are created and destroyed under wq_pool_mutex, and allow read
 * access under RCU read lock. As such, this function should be
 * called under wq_pool_mutex or inside of a rcu_read_lock() region.
 *
 * All fields of the returned pool are accessible as long as the above
 * mentioned locking is in effect. If the returned pool needs to be used
 * beyond the critical section, the caller is responsible for ensuring the
 * returned pool is and stays online.
 *
 * Return: The worker_pool @work was last associated with. %NULL if none.
 */
static struct worker_pool *get_work_pool(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	int pool_id;

	assert_rcu_or_pool_mutex();

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data)->pool;

	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
	if (pool_id == WORK_OFFQ_POOL_NONE)
		return NULL;

	return idr_find(&worker_pool_idr, pool_id);
}

/**
 * get_work_pool_id - return the worker pool ID a given work is associated with
 * @work: the work item of interest
 *
 * Return: The worker_pool ID @work was last associated with.
 * %WORK_OFFQ_POOL_NONE if none.
 */
static int get_work_pool_id(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data)->pool->id;

	return data >> WORK_OFFQ_POOL_SHIFT;
}

static void mark_work_canceling(struct work_struct *work)
{
	unsigned long pool_id = get_work_pool_id(work);

	pool_id <<= WORK_OFFQ_POOL_SHIFT;
	set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
}

static bool work_is_canceling(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
}

/*
 * Policy functions. These define the policies on how the global worker
 * pools are managed. Unless noted otherwise, these functions assume that
 * they're being called with pool->lock held.
 */

/*
 * Need to wake up a worker? Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound pools as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && !pool->nr_running;
}

/* Can I start working? Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
	return pool->nr_idle;
}

/* Do I need to keep working? Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
}

/* Do we need a new worker? Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
	return need_more_worker(pool) && !may_start_working(pool);
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
	int nr_busy = pool->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}

/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
{
	struct worker_pool *pool = worker->pool;

	lockdep_assert_held(&pool->lock);

	/* If transitioning into NOT_RUNNING, adjust nr_running. */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		pool->nr_running--;
	}

	worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct worker_pool *pool = worker->pool;
	unsigned int oflags = worker->flags;

	lockdep_assert_held(&pool->lock);

	worker->flags &= ~flags;

	/*
	 * If transitioning out of NOT_RUNNING, increment nr_running. Note
	 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is a mask
	 * of multiple flags, not a single flag.
	 */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
			pool->nr_running++;
}

/* Return the first idle worker. Called with pool->lock held. */
static struct worker *first_idle_worker(struct worker_pool *pool)
{
	if (unlikely(list_empty(&pool->idle_list)))
		return NULL;

	return list_first_entry(&pool->idle_list, struct worker, entry);
}

/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state. Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * raw_spin_lock_irq(pool->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
	    WARN_ON_ONCE(!list_empty(&worker->entry) &&
			 (worker->hentry.next || worker->hentry.pprev)))
		return;

	/* can't use worker_set_flags(), also called from create_worker() */
	worker->flags |= WORKER_IDLE;
	pool->nr_idle++;
	worker->last_active = jiffies;

	/* idle_list is LIFO */
	list_add(&worker->entry, &pool->idle_list);

	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);

	/* Sanity check nr_running. */
	WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
}

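/*
 * Worked example (added for illustration) for too_many_workers() above:
 * with MAX_IDLE_WORKERS_RATIO == 4, nr_idle == 4 and nr_busy == 8 gives
 * (4 - 2) * 4 == 8 >= 8, so the idle timer armed in worker_enter_idle()
 * will start culling; with nr_busy == 9 the same two excess idle workers
 * are kept around.
 */
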
/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state. Update stats.
 *
 * LOCKING:
 * raw_spin_lock_irq(pool->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
		return;
	worker_clr_flags(worker, WORKER_IDLE);
	pool->nr_idle--;
	list_del_init(&worker->entry);
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @pool: pool of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @pool by searching
 * @pool->busy_hash which is keyed by the address of @work. For a worker
 * to match, its current execution should match the address of @work and
 * its work function. This is to avoid unwanted dependency between
 * unrelated work executions through a work item being recycled while still
 * being executed.
 *
 * This is a bit tricky. A work item may be freed once its execution
 * starts and nothing prevents the freed area from being recycled for
 * another work item. If the same work item address ends up being reused
 * before the original execution finishes, workqueue will identify the
 * recycled work item as currently executing and make it wait until the
 * current execution finishes, introducing an unwanted dependency.
 *
 * This function checks the work item address and work function to avoid
 * false positives. Note that this isn't complete as one may construct a
 * work function which can introduce dependency onto itself through a
 * recycled work item. Well, if somebody wants to shoot oneself in the
 * foot that badly, there's only so much we can do, and if such deadlock
 * actually occurs, it should be easy to locate the culprit work function.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 *
 * Return:
 * Pointer to worker which is executing @work if found, %NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct worker_pool *pool,
						 struct work_struct *work)
{
	struct worker *worker;

	hash_for_each_possible(pool->busy_hash, worker, hentry,
			       (unsigned long)work)
		if (worker->current_work == work &&
		    worker->current_func == work->func)
			return worker;

	return NULL;
}

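/*
 * Illustrative sketch (not part of the original file): the recycling
 * scenario find_worker_executing_work() guards against. Freeing the
 * containing object from its own work function is fine precisely because
 * busy workers are matched on both address and function. The struct and
 * function names are hypothetical.
 */
#if 0
struct example_obj {
	struct work_struct work;
};

static void example_obj_release(struct work_struct *work)
{
	struct example_obj *obj = container_of(work, struct example_obj, work);

	/*
	 * The allocation may be recycled for an unrelated work item; a new
	 * item at the same address with a different function won't be
	 * mistaken for this still-running one.
	 */
	kfree(obj);
}
#endif
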
/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head. Work series to be
 * scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor. See assign_work() for details on
 * @nextp.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

/**
 * assign_work - assign a work item and its linked work items to a worker
 * @work: work to assign
 * @worker: worker to assign to
 * @nextp: out parameter for nested worklist walking
 *
 * Assign @work and its linked work items to @worker. If @work is already being
 * executed by another worker in the same pool, it'll be punted there.
 *
 * If @nextp is not NULL, it's updated to point to the next work of the last
 * scheduled work. This allows assign_work() to be nested inside
 * list_for_each_entry_safe().
 *
 * Returns %true if @work was successfully assigned to @worker. %false if @work
 * was punted to another worker already executing it.
 */
static bool assign_work(struct work_struct *work, struct worker *worker,
			struct work_struct **nextp)
{
	struct worker_pool *pool = worker->pool;
	struct worker *collision;

	lockdep_assert_held(&pool->lock);

	/*
	 * A single work shouldn't be executed concurrently by multiple workers.
	 * __queue_work() ensures that @work doesn't jump to a different pool
	 * while still running in the previous pool. Here, we should ensure that
	 * @work is not executed concurrently by multiple workers from the same
	 * pool. Check whether anyone is already processing the work. If so,
	 * defer the work to the currently executing one.
	 */
	collision = find_worker_executing_work(pool, work);
	if (unlikely(collision)) {
		move_linked_works(work, &collision->scheduled, nextp);
		return false;
	}

	move_linked_works(work, &worker->scheduled, nextp);
	return true;
}

/**
 * kick_pool - wake up an idle worker if necessary
 * @pool: pool to kick
 *
 * @pool may have pending work items. Wake up worker if necessary. Returns
 * whether a worker was woken up.
 */
static bool kick_pool(struct worker_pool *pool)
{
	struct worker *worker = first_idle_worker(pool);
	struct task_struct *p;

	lockdep_assert_held(&pool->lock);

	if (!need_more_worker(pool) || !worker)
		return false;

	p = worker->task;

#ifdef CONFIG_SMP
	/*
	 * Idle @worker is about to execute @work and waking up provides an
	 * opportunity to migrate @worker at a lower cost by setting the task's
	 * wake_cpu field. Let's see if we want to move @worker to improve
	 * execution locality.
	 *
	 * We're waking the worker that went idle the latest and there's some
	 * chance that @worker is marked idle but hasn't gone off CPU yet. If
	 * so, setting the wake_cpu won't do anything. As this is a best-effort
	 * optimization and the race window is narrow, let's leave as-is for
	 * now. If this becomes pronounced, we can skip over workers which are
	 * still on cpu when picking an idle worker.
	 *
	 * If @pool has non-strict affinity, @worker might have ended up outside
	 * its affinity scope. Repatriate.
	 */
	if (!pool->attrs->affn_strict &&
	    !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
		struct work_struct *work = list_first_entry(&pool->worklist,
						struct work_struct, entry);
		int wake_cpu = cpumask_any_and_distribute(pool->attrs->__pod_cpumask,
							  cpu_online_mask);
		if (wake_cpu < nr_cpu_ids) {
			p->wake_cpu = wake_cpu;
			get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
		}
	}
#endif
	wake_up_process(p);
	return true;
}

#ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT

/*
 * Concurrency-managed per-cpu work items that hog CPU for longer than
 * wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism,
 * which prevents them from stalling other concurrency-managed work items. If a
 * work function keeps triggering this mechanism, it's likely that the work item
 * should be using an unbound workqueue instead.
 *
 * wq_cpu_intensive_report() tracks work functions which trigger such conditions
 * and reports them so that they can be examined and converted to use unbound
 * workqueues as appropriate. To avoid flooding the console, each violating work
 * function is tracked and reported with exponential backoff.
 */
#define WCI_MAX_ENTS 128

struct wci_ent {
	work_func_t		func;
	atomic64_t		cnt;
	struct hlist_node	hash_node;
};

static struct wci_ent wci_ents[WCI_MAX_ENTS];
static int wci_nr_ents;
static DEFINE_RAW_SPINLOCK(wci_lock);
static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS));

static struct wci_ent *wci_find_ent(work_func_t func)
{
	struct wci_ent *ent;

	hash_for_each_possible_rcu(wci_hash, ent, hash_node,
				   (unsigned long)func) {
		if (ent->func == func)
			return ent;
	}
	return NULL;
}

static void wq_cpu_intensive_report(work_func_t func)
{
	struct wci_ent *ent;

restart:
	ent = wci_find_ent(func);
	if (ent) {
		u64 cnt;

		/*
		 * Start reporting from the fourth time and back off
		 * exponentially.
		 */
		cnt = atomic64_inc_return_relaxed(&ent->cnt);
		if (cnt >= 4 && is_power_of_2(cnt))
			printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n",
					ent->func, wq_cpu_intensive_thresh_us,
					atomic64_read(&ent->cnt));
		return;
	}

	/*
	 * @func is a new violation. Allocate a new entry for it. If wci_ents[]
	 * is exhausted, something went really wrong and we probably made enough
	 * noise already.
	 */
	if (wci_nr_ents >= WCI_MAX_ENTS)
		return;

	raw_spin_lock(&wci_lock);

	if (wci_nr_ents >= WCI_MAX_ENTS) {
		raw_spin_unlock(&wci_lock);
		return;
	}

	if (wci_find_ent(func)) {
		raw_spin_unlock(&wci_lock);
		goto restart;
	}

	ent = &wci_ents[wci_nr_ents++];
	ent->func = func;
	atomic64_set(&ent->cnt, 1);
	hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func);

	raw_spin_unlock(&wci_lock);
}

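/*
 * Worked example (added for illustration): with the cnt >= 4 &&
 * is_power_of_2(cnt) condition above, a work function violating the
 * threshold on every run is reported on its 4th, 8th, 16th, 32nd, ...
 * violation, so console output grows only logarithmically with the
 * violation count.
 */
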
#else	/* CONFIG_WQ_CPU_INTENSIVE_REPORT */
static void wq_cpu_intensive_report(work_func_t func) {}
#endif	/* CONFIG_WQ_CPU_INTENSIVE_REPORT */

/**
 * wq_worker_running - a worker is running again
 * @task: task waking up
 *
 * This function is called when a worker returns from schedule().
 */
void wq_worker_running(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);

	if (!READ_ONCE(worker->sleeping))
		return;

	/*
	 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
	 * and the nr_running increment below, we may ruin the nr_running reset
	 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
	 * pool. Protect against such race.
	 */
	preempt_disable();
	if (!(worker->flags & WORKER_NOT_RUNNING))
		worker->pool->nr_running++;
	preempt_enable();

	/*
	 * CPU intensive auto-detection cares about how long a work item hogged
	 * CPU without sleeping. Reset the starting timestamp on wakeup.
	 */
	worker->current_at = worker->task->se.sum_exec_runtime;

	WRITE_ONCE(worker->sleeping, 0);
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 *
 * This function is called from schedule() when a busy worker is
 * going to sleep.
 */
void wq_worker_sleeping(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);
	struct worker_pool *pool;

	/*
	 * Rescuers, which may not have all the fields set up like normal
	 * workers, also reach here, let's not access anything before
	 * checking NOT_RUNNING.
	 */
	if (worker->flags & WORKER_NOT_RUNNING)
		return;

	pool = worker->pool;

	/* Return if preempted before wq_worker_running() was reached */
	if (READ_ONCE(worker->sleeping))
		return;

	WRITE_ONCE(worker->sleeping, 1);
	raw_spin_lock_irq(&pool->lock);

	/*
	 * Recheck in case unbind_workers() preempted us. We don't
	 * want to decrement nr_running after the worker is unbound
	 * and nr_running has been reset.
	 */
	if (worker->flags & WORKER_NOT_RUNNING) {
		raw_spin_unlock_irq(&pool->lock);
		return;
	}

	pool->nr_running--;
	if (kick_pool(pool))
		worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;

	raw_spin_unlock_irq(&pool->lock);
}

/**
 * wq_worker_tick - a scheduler tick occurred while a kworker is running
 * @task: task currently running
 *
 * Called from scheduler_tick(). We're in the IRQ context and the current
 * worker's fields which follow the 'K' locking rule can be accessed safely.
 */
void wq_worker_tick(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);
	struct pool_workqueue *pwq = worker->current_pwq;
	struct worker_pool *pool = worker->pool;

	if (!pwq)
		return;

	pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC;

	if (!wq_cpu_intensive_thresh_us)
		return;

	/*
	 * If the current worker is concurrency managed and hogged the CPU for
	 * longer than wq_cpu_intensive_thresh_us, it's automatically marked
	 * CPU_INTENSIVE to avoid stalling other concurrency-managed work items.
	 *
	 * @worker->sleeping being set means that @worker is in the process of
	 * switching out voluntarily and won't be contributing to
	 * @pool->nr_running until it wakes up. As wq_worker_sleeping() also
	 * decrements ->nr_running, setting CPU_INTENSIVE here can lead to
	 * double decrements. The task is releasing the CPU anyway. Let's skip.
	 * We probably want to make this prettier in the future.
	 */
	if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
	    worker->task->se.sum_exec_runtime - worker->current_at <
	    wq_cpu_intensive_thresh_us * NSEC_PER_USEC)
		return;

	raw_spin_lock(&pool->lock);

	worker_set_flags(worker, WORKER_CPU_INTENSIVE);
	wq_cpu_intensive_report(worker->current_func);
	pwq->stats[PWQ_STAT_CPU_INTENSIVE]++;

	if (kick_pool(pool))
		pwq->stats[PWQ_STAT_CM_WAKEUP]++;

	raw_spin_unlock(&pool->lock);
}

/**
 * wq_worker_last_func - retrieve worker's last work function
 * @task: Task to retrieve last work function of.
 *
 * Determine the last function a worker executed. This is called from
 * the scheduler to get a worker's last known identity.
 *
 * CONTEXT:
 * raw_spin_lock_irq(rq->lock)
 *
 * This function is called during schedule() when a kworker is going
 * to sleep. It's used by psi to identify aggregation workers during
 * dequeuing, to allow periodic aggregation to shut-off when that
 * worker is the last task in the system or cgroup to go to sleep.
 *
 * As this function doesn't involve any workqueue-related locking, it
 * only returns stable values when called from inside the scheduler's
 * queuing and dequeuing paths, when @task, which must be a kworker,
 * is guaranteed to not be processing any works.
 *
 * Return:
 * The last work function %current executed as a worker, NULL if it
 * hasn't executed any work yet.
 */
work_func_t wq_worker_last_func(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);

	return worker->last_func;
}

/**
 * get_pwq - get an extra reference on the specified pool_workqueue
 * @pwq: pool_workqueue to get
 *
 * Obtain an extra reference on @pwq. The caller should guarantee that
The caller should guarantee that 1407 * @pwq has positive refcnt and be holding the matching pool->lock. 1408 */ 1409 static void get_pwq(struct pool_workqueue *pwq) 1410 { 1411 lockdep_assert_held(&pwq->pool->lock); 1412 WARN_ON_ONCE(pwq->refcnt <= 0); 1413 pwq->refcnt++; 1414 } 1415 1416 /** 1417 * put_pwq - put a pool_workqueue reference 1418 * @pwq: pool_workqueue to put 1419 * 1420 * Drop a reference of @pwq. If its refcnt reaches zero, schedule its 1421 * destruction. The caller should be holding the matching pool->lock. 1422 */ 1423 static void put_pwq(struct pool_workqueue *pwq) 1424 { 1425 lockdep_assert_held(&pwq->pool->lock); 1426 if (likely(--pwq->refcnt)) 1427 return; 1428 /* 1429 * @pwq can't be released under pool->lock, bounce to a dedicated 1430 * kthread_worker to avoid A-A deadlocks. 1431 */ 1432 kthread_queue_work(pwq_release_worker, &pwq->release_work); 1433 } 1434 1435 /** 1436 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock 1437 * @pwq: pool_workqueue to put (can be %NULL) 1438 * 1439 * put_pwq() with locking. This function also allows %NULL @pwq. 1440 */ 1441 static void put_pwq_unlocked(struct pool_workqueue *pwq) 1442 { 1443 if (pwq) { 1444 /* 1445 * As both pwqs and pools are RCU protected, the 1446 * following lock operations are safe. 1447 */ 1448 raw_spin_lock_irq(&pwq->pool->lock); 1449 put_pwq(pwq); 1450 raw_spin_unlock_irq(&pwq->pool->lock); 1451 } 1452 } 1453 1454 static void pwq_activate_inactive_work(struct work_struct *work) 1455 { 1456 struct pool_workqueue *pwq = get_work_pwq(work); 1457 1458 trace_workqueue_activate_work(work); 1459 if (list_empty(&pwq->pool->worklist)) 1460 pwq->pool->watchdog_ts = jiffies; 1461 move_linked_works(work, &pwq->pool->worklist, NULL); 1462 __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work)); 1463 pwq->nr_active++; 1464 } 1465 1466 static void pwq_activate_first_inactive(struct pool_workqueue *pwq) 1467 { 1468 struct work_struct *work = list_first_entry(&pwq->inactive_works, 1469 struct work_struct, entry); 1470 1471 pwq_activate_inactive_work(work); 1472 } 1473 1474 /** 1475 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight 1476 * @pwq: pwq of interest 1477 * @work_data: work_data of work which left the queue 1478 * 1479 * A work either has completed or is removed from pending queue, 1480 * decrement nr_in_flight of its pwq and handle workqueue flushing. 1481 * 1482 * CONTEXT: 1483 * raw_spin_lock_irq(pool->lock). 1484 */ 1485 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data) 1486 { 1487 int color = get_work_color(work_data); 1488 1489 if (!(work_data & WORK_STRUCT_INACTIVE)) { 1490 pwq->nr_active--; 1491 if (!list_empty(&pwq->inactive_works)) { 1492 /* one down, submit an inactive one */ 1493 if (pwq->nr_active < pwq->max_active) 1494 pwq_activate_first_inactive(pwq); 1495 } 1496 } 1497 1498 pwq->nr_in_flight[color]--; 1499 1500 /* is flush in progress and are we at the flushing tip? */ 1501 if (likely(pwq->flush_color != color)) 1502 goto out_put; 1503 1504 /* are there still in-flight works? */ 1505 if (pwq->nr_in_flight[color]) 1506 goto out_put; 1507 1508 /* this pwq is done, clear flush_color */ 1509 pwq->flush_color = -1; 1510 1511 /* 1512 * If this was the last pwq, wake up the first flusher. It 1513 * will handle the rest. 
	 */
	if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
		complete(&pwq->wq->first_flusher->done);
out_put:
	put_pwq(pwq);
}

/**
 * try_to_grab_pending - steal work item from worklist and disable irq
 * @work: work item to steal
 * @is_dwork: @work is a delayed_work
 * @flags: place to store irq state
 *
 * Try to grab PENDING bit of @work. This function can handle @work in any
 * stable state - idle, on timer or on worklist.
 *
 * Return:
 *
 *  ========	================================================================
 *  1		if @work was pending and we successfully stole PENDING
 *  0		if @work was idle and we claimed PENDING
 *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
 *  -ENOENT	if someone else is canceling @work, this state may persist
 *		for arbitrarily long
 *  ========	================================================================
 *
 * Note:
 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
 * interrupted while holding PENDING and @work off queue, irq must be
 * disabled on entry. This, combined with delayed_work->timer being
 * irqsafe, ensures that we return -EAGAIN only for a finite short period
 * of time.
 *
 * On a successful (>= 0) return, irq is disabled and the caller is
 * responsible for releasing it using local_irq_restore(*@flags).
 *
 * This function is safe to call from any context including IRQ handler.
 */
static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
			       unsigned long *flags)
{
	struct worker_pool *pool;
	struct pool_workqueue *pwq;

	local_irq_save(*flags);

	/* try to steal the timer if it exists */
	if (is_dwork) {
		struct delayed_work *dwork = to_delayed_work(work);

		/*
		 * dwork->timer is irqsafe. If del_timer() fails, it's
		 * guaranteed that the timer is not queued anywhere and not
		 * running on the local CPU.
		 */
		if (likely(del_timer(&dwork->timer)))
			return 1;
	}

	/* try to claim PENDING the normal way */
	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	rcu_read_lock();
	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	pool = get_work_pool(work);
	if (!pool)
		goto fail;

	raw_spin_lock(&pool->lock);
	/*
	 * work->data is guaranteed to point to pwq only while the work
	 * item is queued on pwq->wq, and both updating work->data to point
	 * to pwq on queueing and to pool on dequeueing are done under
	 * pwq->pool->lock. This in turn guarantees that, if work->data
	 * points to pwq which is associated with a locked pool, the work
	 * item is currently queued on that pool.
	 */
	pwq = get_work_pwq(work);
	if (pwq && pwq->pool == pool) {
		debug_work_deactivate(work);

		/*
		 * A cancelable inactive work item must be in the
		 * pwq->inactive_works since a queued barrier can't be
		 * canceled (see the comments in insert_wq_barrier()).
		 *
		 * An inactive work item cannot be grabbed directly because
		 * it might have linked barrier work items which, if left
		 * on the inactive_works list, will confuse pwq->nr_active
		 * management later on and cause stall. Make sure the work
		 * item is activated before grabbing.
		 */
		if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
			pwq_activate_inactive_work(work);

		list_del_init(&work->entry);
		pwq_dec_nr_in_flight(pwq, *work_data_bits(work));

		/* work->data points to pwq iff queued, point to pool */
		set_work_pool_and_keep_pending(work, pool->id);

		raw_spin_unlock(&pool->lock);
		rcu_read_unlock();
		return 1;
	}
	raw_spin_unlock(&pool->lock);
fail:
	rcu_read_unlock();
	local_irq_restore(*flags);
	if (work_is_canceling(work))
		return -ENOENT;
	cpu_relax();
	return -EAGAIN;
}

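/*
 * Illustrative sketch (not part of the original file): the busy-retry
 * pattern the return values above are designed for, loosely modeled on
 * what a cancel path does. The function name is hypothetical.
 */
#if 0
static bool example_cancel_work(struct work_struct *work, bool is_dwork)
{
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(work, is_dwork, &flags);
		/* -EAGAIN: owner is between grabbing PENDING and queueing */
	} while (unlikely(ret == -EAGAIN));

	if (unlikely(ret < 0))		/* -ENOENT: someone else is canceling */
		return false;

	/* we own PENDING with irqs disabled; drop it and restore irq */
	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
	local_irq_restore(flags);
	return ret;
}
#endif
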
/**
 * insert_work - insert a work into a pool
 * @pwq: pwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
 * work_struct flags.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 */
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
			struct list_head *head, unsigned int extra_flags)
{
	debug_work_activate(work);

	/* record the work call stack in order to print it in KASAN reports */
	kasan_record_aux_stack_noalloc(work);

	/* we own @work, set data and link */
	set_work_pwq(work, pwq, extra_flags);
	list_add_tail(&work->entry, head);
	get_pwq(pwq);
}

/*
 * Test whether @work is being queued from another work executing on the
 * same workqueue.
 */
static bool is_chained_work(struct workqueue_struct *wq)
{
	struct worker *worker;

	worker = current_wq_worker();
	/*
	 * Return %true iff I'm a worker executing a work item on @wq. If
	 * I'm @worker, it's safe to dereference it without locking.
	 */
	return worker && worker->current_pwq->wq == wq;
}

/*
 * When queueing an unbound work item to a wq, prefer local CPU if allowed
 * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to
 * avoid perturbing sensitive tasks.
 */
static int wq_select_unbound_cpu(int cpu)
{
	int new_cpu;

	if (likely(!wq_debug_force_rr_cpu)) {
		if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
			return cpu;
	} else {
		pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n");
	}

	new_cpu = __this_cpu_read(wq_rr_cpu_last);
	new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
	if (unlikely(new_cpu >= nr_cpu_ids)) {
		new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
		if (unlikely(new_cpu >= nr_cpu_ids))
			return cpu;
	}
	__this_cpu_write(wq_rr_cpu_last, new_cpu);

	return new_cpu;
}

static void __queue_work(int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct pool_workqueue *pwq;
	struct worker_pool *last_pool, *pool;
	unsigned int work_flags;
	unsigned int req_cpu = cpu;

	/*
	 * While a work item is PENDING && off queue, a task trying to
	 * steal the PENDING will busy-loop waiting for it to either get
	 * queued or lose PENDING. Grabbing PENDING and queueing should
	 * happen with IRQ disabled.
	 */
	lockdep_assert_irqs_disabled();

	/*
	 * For a draining wq, only works from the same workqueue are
	 * allowed. The __WQ_DESTROYING flag helps to spot the case where a
The __WQ_DESTROYING flag helps to catch the case where a new
1723 * work item is queued to a wq after destroy_workqueue(wq).
1724 */
1725 if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) &&
1726 WARN_ON_ONCE(!is_chained_work(wq))))
1727 return;
1728 rcu_read_lock();
1729 retry:
1730 /* pwq which will be used unless @work is executing elsewhere */
1731 if (req_cpu == WORK_CPU_UNBOUND) {
1732 if (wq->flags & WQ_UNBOUND)
1733 cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1734 else
1735 cpu = raw_smp_processor_id();
1736 }
1737
1738 pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu));
1739 pool = pwq->pool;
1740
1741 /*
1742 * If @work was previously on a different pool, it might still be
1743 * running there, in which case the work needs to be queued on that
1744 * pool to guarantee non-reentrancy.
1745 */
1746 last_pool = get_work_pool(work);
1747 if (last_pool && last_pool != pool) {
1748 struct worker *worker;
1749
1750 raw_spin_lock(&last_pool->lock);
1751
1752 worker = find_worker_executing_work(last_pool, work);
1753
1754 if (worker && worker->current_pwq->wq == wq) {
1755 pwq = worker->current_pwq;
1756 pool = pwq->pool;
1757 WARN_ON_ONCE(pool != last_pool);
1758 } else {
1759 /* meh... not running there, queue here */
1760 raw_spin_unlock(&last_pool->lock);
1761 raw_spin_lock(&pool->lock);
1762 }
1763 } else {
1764 raw_spin_lock(&pool->lock);
1765 }
1766
1767 /*
1768 * pwq is determined and locked. For unbound pools, we could have raced
1769 * with pwq release and it could already be dead. If its refcnt is zero,
1770 * repeat pwq selection. Note that an unbound pwq never dies without
1771 * another pwq replacing it in cpu_pwq or while work items are executing
1772 * on it, so retrying is guaranteed to make forward progress.
1773 */
1774 if (unlikely(!pwq->refcnt)) {
1775 if (wq->flags & WQ_UNBOUND) {
1776 raw_spin_unlock(&pool->lock);
1777 cpu_relax();
1778 goto retry;
1779 }
1780 /* oops */
1781 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1782 wq->name, cpu);
1783 }
1784
1785 /* pwq determined, queue */
1786 trace_workqueue_queue_work(req_cpu, pwq, work);
1787
1788 if (WARN_ON(!list_empty(&work->entry)))
1789 goto out;
1790
1791 pwq->nr_in_flight[pwq->work_color]++;
1792 work_flags = work_color_to_flags(pwq->work_color);
1793
1794 if (likely(pwq->nr_active < pwq->max_active)) {
1795 if (list_empty(&pool->worklist))
1796 pool->watchdog_ts = jiffies;
1797
1798 trace_workqueue_activate_work(work);
1799 pwq->nr_active++;
1800 insert_work(pwq, work, &pool->worklist, work_flags);
1801 kick_pool(pool);
1802 } else {
1803 work_flags |= WORK_STRUCT_INACTIVE;
1804 insert_work(pwq, work, &pwq->inactive_works, work_flags);
1805 }
1806
1807 out:
1808 raw_spin_unlock(&pool->lock);
1809 rcu_read_unlock();
1810 }
1811
1812 /**
1813 * queue_work_on - queue work on specific cpu
1814 * @cpu: CPU number to execute work on
1815 * @wq: workqueue to use
1816 * @work: work to queue
1817 *
1818 * We queue the work to a specific CPU; the caller must ensure that the
1819 * CPU can't go away. Callers that fail to ensure this will have the
1820 * work execute on a randomly chosen CPU instead.
1821 * Note well that callers specifying a CPU that has never been
1822 * online will get a splat.
1823 *
1824 * Return: %false if @work was already on a queue, %true otherwise.
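 *
 * Illustrative usage sketch (my_work_fn, my_work and cpu are hypothetical
 * and not part of this file; the caller is assumed to keep cpu online):
 *
 *	static void my_work_fn(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	if (!queue_work_on(cpu, system_wq, &my_work))
 *		pr_debug("my_work was already pending\n");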
1825 */
1826 bool queue_work_on(int cpu, struct workqueue_struct *wq,
1827 struct work_struct *work)
1828 {
1829 bool ret = false;
1830 unsigned long flags;
1831
1832 local_irq_save(flags);
1833
1834 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1835 __queue_work(cpu, wq, work);
1836 ret = true;
1837 }
1838
1839 local_irq_restore(flags);
1840 return ret;
1841 }
1842 EXPORT_SYMBOL(queue_work_on);
1843
1844 /**
1845 * select_numa_node_cpu - Select a CPU based on NUMA node
1846 * @node: NUMA node ID that we want to select a CPU from
1847 *
1848 * This function will attempt to find a "random" cpu available on a given
1849 * node. If there are no CPUs available on the given node, it will return
1850 * WORK_CPU_UNBOUND indicating that we should just schedule to any
1851 * available CPU if we need to schedule this work.
1852 */
1853 static int select_numa_node_cpu(int node)
1854 {
1855 int cpu;
1856
1857 /* Delay binding to CPU if node is not valid or online */
1858 if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
1859 return WORK_CPU_UNBOUND;
1860
1861 /* Use local node/cpu if we are already there */
1862 cpu = raw_smp_processor_id();
1863 if (node == cpu_to_node(cpu))
1864 return cpu;
1865
1866 /* Use "random", otherwise known as "first", online CPU of node */
1867 cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
1868
1869 /* If CPU is valid return that, otherwise just defer */
1870 return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
1871 }
1872
1873 /**
1874 * queue_work_node - queue work on a "random" cpu for a given NUMA node
1875 * @node: NUMA node that we are targeting the work for
1876 * @wq: workqueue to use
1877 * @work: work to queue
1878 *
1879 * We queue the work to a "random" CPU within a given NUMA node. The basic
1880 * idea here is to provide a way to somehow associate work with a given
1881 * NUMA node.
1882 *
1883 * This function will only make a best effort attempt at getting this onto
1884 * the right NUMA node. If no node is requested or the requested node is
1885 * offline then we just fall back to standard queue_work behavior.
1886 *
1887 * Currently the "random" CPU ends up being the first available CPU in the
1888 * intersection of cpu_online_mask and the cpumask of the node, unless we
1889 * are running on the node. In that case we just use the current CPU.
1890 *
1891 * Return: %false if @work was already on a queue, %true otherwise.
1892 */
1893 bool queue_work_node(int node, struct workqueue_struct *wq,
1894 struct work_struct *work)
1895 {
1896 unsigned long flags;
1897 bool ret = false;
1898
1899 /*
1900 * This current implementation is specific to unbound workqueues.
1901 * Specifically we only return the first available CPU for a given
1902 * node instead of cycling through individual CPUs within the node.
1903 *
1904 * If this is used with a per-cpu workqueue then the logic in
1905 * select_numa_node_cpu() would need to be updated to allow for
1906 * some round robin type logic.
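 *
 * An illustrative call from a NUMA-aware user might look like the
 * following (dev, my_unbound_wq and my_work are hypothetical):
 *
 *	queue_work_node(dev_to_node(dev), my_unbound_wq, &my_work);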
1907 */
1908 WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
1909
1910 local_irq_save(flags);
1911
1912 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1913 int cpu = select_numa_node_cpu(node);
1914
1915 __queue_work(cpu, wq, work);
1916 ret = true;
1917 }
1918
1919 local_irq_restore(flags);
1920 return ret;
1921 }
1922 EXPORT_SYMBOL_GPL(queue_work_node);
1923
1924 void delayed_work_timer_fn(struct timer_list *t)
1925 {
1926 struct delayed_work *dwork = from_timer(dwork, t, timer);
1927
1928 /* should have been called from irqsafe timer with irq already off */
1929 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1930 }
1931 EXPORT_SYMBOL(delayed_work_timer_fn);
1932
1933 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1934 struct delayed_work *dwork, unsigned long delay)
1935 {
1936 struct timer_list *timer = &dwork->timer;
1937 struct work_struct *work = &dwork->work;
1938
1939 WARN_ON_ONCE(!wq);
1940 WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
1941 WARN_ON_ONCE(timer_pending(timer));
1942 WARN_ON_ONCE(!list_empty(&work->entry));
1943
1944 /*
1945 * If @delay is 0, queue @dwork->work immediately. This is for
1946 * both optimization and correctness. The earliest @timer can
1947 * expire is on the closest next tick and delayed_work users depend
1948 * on there being no such delay when @delay is 0.
1949 */
1950 if (!delay) {
1951 __queue_work(cpu, wq, &dwork->work);
1952 return;
1953 }
1954
1955 dwork->wq = wq;
1956 dwork->cpu = cpu;
1957 timer->expires = jiffies + delay;
1958
1959 if (unlikely(cpu != WORK_CPU_UNBOUND))
1960 add_timer_on(timer, cpu);
1961 else
1962 add_timer(timer);
1963 }
1964
1965 /**
1966 * queue_delayed_work_on - queue work on specific CPU after delay
1967 * @cpu: CPU number to execute work on
1968 * @wq: workqueue to use
1969 * @dwork: work to queue
1970 * @delay: number of jiffies to wait before queueing
1971 *
1972 * Return: %false if @dwork was already on a queue, %true otherwise. If
1973 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1974 * execution.
1975 */
1976 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1977 struct delayed_work *dwork, unsigned long delay)
1978 {
1979 struct work_struct *work = &dwork->work;
1980 bool ret = false;
1981 unsigned long flags;
1982
1983 /* read the comment in __queue_work() */
1984 local_irq_save(flags);
1985
1986 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1987 __queue_delayed_work(cpu, wq, dwork, delay);
1988 ret = true;
1989 }
1990
1991 local_irq_restore(flags);
1992 return ret;
1993 }
1994 EXPORT_SYMBOL(queue_delayed_work_on);
1995
1996 /**
1997 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1998 * @cpu: CPU number to execute work on
1999 * @wq: workqueue to use
2000 * @dwork: work to queue
2001 * @delay: number of jiffies to wait before queueing
2002 *
2003 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
2004 * modify @dwork's timer so that it expires after @delay. If @delay is
2005 * zero, @dwork is guaranteed to be scheduled immediately regardless of its
2006 * current state.
2007 *
2008 * Return: %false if @dwork was idle and queued, %true if @dwork was
2009 * pending and its timer was modified.
2010 *
2011 * This function is safe to call from any context including IRQ handler.
2012 * See try_to_grab_pending() for details.
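 *
 * Illustrative debouncing sketch (my_dwork and my_dwork_fn are
 * hypothetical names): each incoming event pushes execution out to 100ms
 * from now, so my_dwork_fn() runs once events go quiet:
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &my_dwork,
 *			    msecs_to_jiffies(100));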
2013 */ 2014 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 2015 struct delayed_work *dwork, unsigned long delay) 2016 { 2017 unsigned long flags; 2018 int ret; 2019 2020 do { 2021 ret = try_to_grab_pending(&dwork->work, true, &flags); 2022 } while (unlikely(ret == -EAGAIN)); 2023 2024 if (likely(ret >= 0)) { 2025 __queue_delayed_work(cpu, wq, dwork, delay); 2026 local_irq_restore(flags); 2027 } 2028 2029 /* -ENOENT from try_to_grab_pending() becomes %true */ 2030 return ret; 2031 } 2032 EXPORT_SYMBOL_GPL(mod_delayed_work_on); 2033 2034 static void rcu_work_rcufn(struct rcu_head *rcu) 2035 { 2036 struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu); 2037 2038 /* read the comment in __queue_work() */ 2039 local_irq_disable(); 2040 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); 2041 local_irq_enable(); 2042 } 2043 2044 /** 2045 * queue_rcu_work - queue work after a RCU grace period 2046 * @wq: workqueue to use 2047 * @rwork: work to queue 2048 * 2049 * Return: %false if @rwork was already pending, %true otherwise. Note 2050 * that a full RCU grace period is guaranteed only after a %true return. 2051 * While @rwork is guaranteed to be executed after a %false return, the 2052 * execution may happen before a full RCU grace period has passed. 2053 */ 2054 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork) 2055 { 2056 struct work_struct *work = &rwork->work; 2057 2058 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 2059 rwork->wq = wq; 2060 call_rcu_hurry(&rwork->rcu, rcu_work_rcufn); 2061 return true; 2062 } 2063 2064 return false; 2065 } 2066 EXPORT_SYMBOL(queue_rcu_work); 2067 2068 static struct worker *alloc_worker(int node) 2069 { 2070 struct worker *worker; 2071 2072 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node); 2073 if (worker) { 2074 INIT_LIST_HEAD(&worker->entry); 2075 INIT_LIST_HEAD(&worker->scheduled); 2076 INIT_LIST_HEAD(&worker->node); 2077 /* on creation a worker is in !idle && prep state */ 2078 worker->flags = WORKER_PREP; 2079 } 2080 return worker; 2081 } 2082 2083 static cpumask_t *pool_allowed_cpus(struct worker_pool *pool) 2084 { 2085 if (pool->cpu < 0 && pool->attrs->affn_strict) 2086 return pool->attrs->__pod_cpumask; 2087 else 2088 return pool->attrs->cpumask; 2089 } 2090 2091 /** 2092 * worker_attach_to_pool() - attach a worker to a pool 2093 * @worker: worker to be attached 2094 * @pool: the target pool 2095 * 2096 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and 2097 * cpu-binding of @worker are kept coordinated with the pool across 2098 * cpu-[un]hotplugs. 2099 */ 2100 static void worker_attach_to_pool(struct worker *worker, 2101 struct worker_pool *pool) 2102 { 2103 mutex_lock(&wq_pool_attach_mutex); 2104 2105 /* 2106 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains 2107 * stable across this function. See the comments above the flag 2108 * definition for details. 
2109 */
2110 if (pool->flags & POOL_DISASSOCIATED)
2111 worker->flags |= WORKER_UNBOUND;
2112 else
2113 kthread_set_per_cpu(worker->task, pool->cpu);
2114
2115 if (worker->rescue_wq)
2116 set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool));
2117
2118 list_add_tail(&worker->node, &pool->workers);
2119 worker->pool = pool;
2120
2121 mutex_unlock(&wq_pool_attach_mutex);
2122 }
2123
2124 /**
2125 * worker_detach_from_pool() - detach a worker from its pool
2126 * @worker: worker which is attached to its pool
2127 *
2128 * Undo the attaching which had been done in worker_attach_to_pool(). The
2129 * caller worker shouldn't access the pool after detaching unless it has
2130 * another reference to the pool.
2131 */
2132 static void worker_detach_from_pool(struct worker *worker)
2133 {
2134 struct worker_pool *pool = worker->pool;
2135 struct completion *detach_completion = NULL;
2136
2137 mutex_lock(&wq_pool_attach_mutex);
2138
2139 kthread_set_per_cpu(worker->task, -1);
2140 list_del(&worker->node);
2141 worker->pool = NULL;
2142
2143 if (list_empty(&pool->workers) && list_empty(&pool->dying_workers))
2144 detach_completion = pool->detach_completion;
2145 mutex_unlock(&wq_pool_attach_mutex);
2146
2147 /* clear leftover flags without pool->lock after it is detached */
2148 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
2149
2150 if (detach_completion)
2151 complete(detach_completion);
2152 }
2153
2154 /**
2155 * create_worker - create a new workqueue worker
2156 * @pool: pool the new worker will belong to
2157 *
2158 * Create and start a new worker which is attached to @pool.
2159 *
2160 * CONTEXT:
2161 * Might sleep. Does GFP_KERNEL allocations.
2162 *
2163 * Return:
2164 * Pointer to the newly created worker on success, %NULL on failure.
2165 */
2166 static struct worker *create_worker(struct worker_pool *pool)
2167 {
2168 struct worker *worker;
2169 int id;
2170 char id_buf[23];
2171
2172 /* ID is needed to determine kthread name */
2173 id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
2174 if (id < 0) {
2175 pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n",
2176 ERR_PTR(id));
2177 return NULL;
2178 }
2179
2180 worker = alloc_worker(pool->node);
2181 if (!worker) {
2182 pr_err_once("workqueue: Failed to allocate a worker\n");
2183 goto fail;
2184 }
2185
2186 worker->id = id;
2187
2188 if (pool->cpu >= 0)
2189 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
2190 pool->attrs->nice < 0 ? "H" : "");
2191 else
2192 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
2193
2194 worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
2195 "kworker/%s", id_buf);
2196 if (IS_ERR(worker->task)) {
2197 if (PTR_ERR(worker->task) == -EINTR) {
2198 pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n",
2199 id_buf);
2200 } else {
2201 pr_err_once("workqueue: Failed to create a worker thread: %pe",
2202 worker->task);
2203 }
2204 goto fail;
2205 }
2206
2207 set_user_nice(worker->task, pool->attrs->nice);
2208 kthread_bind_mask(worker->task, pool_allowed_cpus(pool));
2209
2210 /* successful, attach the worker to the pool */
2211 worker_attach_to_pool(worker, pool);
2212
2213 /* start the newly created worker */
2214 raw_spin_lock_irq(&pool->lock);
2215
2216 worker->pool->nr_workers++;
2217 worker_enter_idle(worker);
2218 kick_pool(pool);
2219
2220 /*
2221 * @worker is waiting on a completion in kthread() and will trigger a hung
2222 * task check if not woken up soon. As kick_pool() might not have woken it
2224 */ 2225 wake_up_process(worker->task); 2226 2227 raw_spin_unlock_irq(&pool->lock); 2228 2229 return worker; 2230 2231 fail: 2232 ida_free(&pool->worker_ida, id); 2233 kfree(worker); 2234 return NULL; 2235 } 2236 2237 static void unbind_worker(struct worker *worker) 2238 { 2239 lockdep_assert_held(&wq_pool_attach_mutex); 2240 2241 kthread_set_per_cpu(worker->task, -1); 2242 if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask)) 2243 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0); 2244 else 2245 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0); 2246 } 2247 2248 static void wake_dying_workers(struct list_head *cull_list) 2249 { 2250 struct worker *worker, *tmp; 2251 2252 list_for_each_entry_safe(worker, tmp, cull_list, entry) { 2253 list_del_init(&worker->entry); 2254 unbind_worker(worker); 2255 /* 2256 * If the worker was somehow already running, then it had to be 2257 * in pool->idle_list when set_worker_dying() happened or we 2258 * wouldn't have gotten here. 2259 * 2260 * Thus, the worker must either have observed the WORKER_DIE 2261 * flag, or have set its state to TASK_IDLE. Either way, the 2262 * below will be observed by the worker and is safe to do 2263 * outside of pool->lock. 2264 */ 2265 wake_up_process(worker->task); 2266 } 2267 } 2268 2269 /** 2270 * set_worker_dying - Tag a worker for destruction 2271 * @worker: worker to be destroyed 2272 * @list: transfer worker away from its pool->idle_list and into list 2273 * 2274 * Tag @worker for destruction and adjust @pool stats accordingly. The worker 2275 * should be idle. 2276 * 2277 * CONTEXT: 2278 * raw_spin_lock_irq(pool->lock). 2279 */ 2280 static void set_worker_dying(struct worker *worker, struct list_head *list) 2281 { 2282 struct worker_pool *pool = worker->pool; 2283 2284 lockdep_assert_held(&pool->lock); 2285 lockdep_assert_held(&wq_pool_attach_mutex); 2286 2287 /* sanity check frenzy */ 2288 if (WARN_ON(worker->current_work) || 2289 WARN_ON(!list_empty(&worker->scheduled)) || 2290 WARN_ON(!(worker->flags & WORKER_IDLE))) 2291 return; 2292 2293 pool->nr_workers--; 2294 pool->nr_idle--; 2295 2296 worker->flags |= WORKER_DIE; 2297 2298 list_move(&worker->entry, list); 2299 list_move(&worker->node, &pool->dying_workers); 2300 } 2301 2302 /** 2303 * idle_worker_timeout - check if some idle workers can now be deleted. 2304 * @t: The pool's idle_timer that just expired 2305 * 2306 * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in 2307 * worker_leave_idle(), as a worker flicking between idle and active while its 2308 * pool is at the too_many_workers() tipping point would cause too much timer 2309 * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let 2310 * it expire and re-evaluate things from there. 
2311 */
2312 static void idle_worker_timeout(struct timer_list *t)
2313 {
2314 struct worker_pool *pool = from_timer(pool, t, idle_timer);
2315 bool do_cull = false;
2316
2317 if (work_pending(&pool->idle_cull_work))
2318 return;
2319
2320 raw_spin_lock_irq(&pool->lock);
2321
2322 if (too_many_workers(pool)) {
2323 struct worker *worker;
2324 unsigned long expires;
2325
2326 /* idle_list is kept in LIFO order, check the last one */
2327 worker = list_entry(pool->idle_list.prev, struct worker, entry);
2328 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2329 do_cull = !time_before(jiffies, expires);
2330
2331 if (!do_cull)
2332 mod_timer(&pool->idle_timer, expires);
2333 }
2334 raw_spin_unlock_irq(&pool->lock);
2335
2336 if (do_cull)
2337 queue_work(system_unbound_wq, &pool->idle_cull_work);
2338 }
2339
2340 /**
2341 * idle_cull_fn - cull workers that have been idle for too long.
2342 * @work: the pool's work for handling these idle workers
2343 *
2344 * This goes through a pool's idle workers and gets rid of those that have been
2345 * idle for at least IDLE_WORKER_TIMEOUT (5 minutes).
2346 *
2347 * We don't want to disturb isolated CPUs because of a pcpu kworker being
2348 * culled, so this also resets worker affinity. This requires a sleepable
2349 * context, hence the split between timer callback and work item.
2350 */
2351 static void idle_cull_fn(struct work_struct *work)
2352 {
2353 struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
2354 LIST_HEAD(cull_list);
2355
2356 /*
2357 * Grabbing wq_pool_attach_mutex here ensures an already-running worker
2358 * cannot proceed beyond worker_detach_from_pool() in its self-destruct
2359 * path. This is required as a previously-preempted worker could run after
2360 * set_worker_dying() has happened but before wake_dying_workers() did.
2361 */
2362 mutex_lock(&wq_pool_attach_mutex);
2363 raw_spin_lock_irq(&pool->lock);
2364
2365 while (too_many_workers(pool)) {
2366 struct worker *worker;
2367 unsigned long expires;
2368
2369 worker = list_entry(pool->idle_list.prev, struct worker, entry);
2370 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2371
2372 if (time_before(jiffies, expires)) {
2373 mod_timer(&pool->idle_timer, expires);
2374 break;
2375 }
2376
2377 set_worker_dying(worker, &cull_list);
2378 }
2379
2380 raw_spin_unlock_irq(&pool->lock);
2381 wake_dying_workers(&cull_list);
2382 mutex_unlock(&wq_pool_attach_mutex);
2383 }
2384
2385 static void send_mayday(struct work_struct *work)
2386 {
2387 struct pool_workqueue *pwq = get_work_pwq(work);
2388 struct workqueue_struct *wq = pwq->wq;
2389
2390 lockdep_assert_held(&wq_mayday_lock);
2391
2392 if (!wq->rescuer)
2393 return;
2394
2395 /* mayday mayday mayday */
2396 if (list_empty(&pwq->mayday_node)) {
2397 /*
2398 * If @pwq is for an unbound wq, its base ref may be put at
2399 * any time due to an attribute change. Pin @pwq until the
2400 * rescuer is done with it.
2401 */
2402 get_pwq(pwq);
2403 list_add_tail(&pwq->mayday_node, &wq->maydays);
2404 wake_up_process(wq->rescuer->task);
2405 pwq->stats[PWQ_STAT_MAYDAY]++;
2406 }
2407 }
2408
2409 static void pool_mayday_timeout(struct timer_list *t)
2410 {
2411 struct worker_pool *pool = from_timer(pool, t, mayday_timer);
2412 struct work_struct *work;
2413
2414 raw_spin_lock_irq(&pool->lock);
2415 raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */
2416
2417 if (need_to_create_worker(pool)) {
2418 /*
2419 * We've been trying to create a new worker but
2420 * haven't been successful.
We might be hitting an 2421 * allocation deadlock. Send distress signals to 2422 * rescuers. 2423 */ 2424 list_for_each_entry(work, &pool->worklist, entry) 2425 send_mayday(work); 2426 } 2427 2428 raw_spin_unlock(&wq_mayday_lock); 2429 raw_spin_unlock_irq(&pool->lock); 2430 2431 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 2432 } 2433 2434 /** 2435 * maybe_create_worker - create a new worker if necessary 2436 * @pool: pool to create a new worker for 2437 * 2438 * Create a new worker for @pool if necessary. @pool is guaranteed to 2439 * have at least one idle worker on return from this function. If 2440 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is 2441 * sent to all rescuers with works scheduled on @pool to resolve 2442 * possible allocation deadlock. 2443 * 2444 * On return, need_to_create_worker() is guaranteed to be %false and 2445 * may_start_working() %true. 2446 * 2447 * LOCKING: 2448 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2449 * multiple times. Does GFP_KERNEL allocations. Called only from 2450 * manager. 2451 */ 2452 static void maybe_create_worker(struct worker_pool *pool) 2453 __releases(&pool->lock) 2454 __acquires(&pool->lock) 2455 { 2456 restart: 2457 raw_spin_unlock_irq(&pool->lock); 2458 2459 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ 2460 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); 2461 2462 while (true) { 2463 if (create_worker(pool) || !need_to_create_worker(pool)) 2464 break; 2465 2466 schedule_timeout_interruptible(CREATE_COOLDOWN); 2467 2468 if (!need_to_create_worker(pool)) 2469 break; 2470 } 2471 2472 del_timer_sync(&pool->mayday_timer); 2473 raw_spin_lock_irq(&pool->lock); 2474 /* 2475 * This is necessary even after a new worker was just successfully 2476 * created as @pool->lock was dropped and the new worker might have 2477 * already become busy. 2478 */ 2479 if (need_to_create_worker(pool)) 2480 goto restart; 2481 } 2482 2483 /** 2484 * manage_workers - manage worker pool 2485 * @worker: self 2486 * 2487 * Assume the manager role and manage the worker pool @worker belongs 2488 * to. At any given time, there can be only zero or one manager per 2489 * pool. The exclusion is handled automatically by this function. 2490 * 2491 * The caller can safely start processing works on false return. On 2492 * true return, it's guaranteed that need_to_create_worker() is false 2493 * and may_start_working() is true. 2494 * 2495 * CONTEXT: 2496 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2497 * multiple times. Does GFP_KERNEL allocations. 2498 * 2499 * Return: 2500 * %false if the pool doesn't need management and the caller can safely 2501 * start processing works, %true if management function was performed and 2502 * the conditions that the caller verified before calling the function may 2503 * no longer be true. 2504 */ 2505 static bool manage_workers(struct worker *worker) 2506 { 2507 struct worker_pool *pool = worker->pool; 2508 2509 if (pool->flags & POOL_MANAGER_ACTIVE) 2510 return false; 2511 2512 pool->flags |= POOL_MANAGER_ACTIVE; 2513 pool->manager = worker; 2514 2515 maybe_create_worker(pool); 2516 2517 pool->manager = NULL; 2518 pool->flags &= ~POOL_MANAGER_ACTIVE; 2519 rcuwait_wake_up(&manager_wait); 2520 return true; 2521 } 2522 2523 /** 2524 * process_one_work - process single work 2525 * @worker: self 2526 * @work: work to process 2527 * 2528 * Process @work. 
This function contains all the logic necessary to
2529 * process a single work including synchronization against and
2530 * interaction with other workers on the same cpu, queueing and
2531 * flushing. As long as the context requirement is met, any worker can
2532 * call this function to process a work.
2533 *
2534 * CONTEXT:
2535 * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
2536 */
2537 static void process_one_work(struct worker *worker, struct work_struct *work)
2538 __releases(&pool->lock)
2539 __acquires(&pool->lock)
2540 {
2541 struct pool_workqueue *pwq = get_work_pwq(work);
2542 struct worker_pool *pool = worker->pool;
2543 unsigned long work_data;
2544 #ifdef CONFIG_LOCKDEP
2545 /*
2546 * It is permissible to free the struct work_struct from
2547 * inside the function that is called from it, and we need to
2548 * take that into account for lockdep too. To avoid bogus "held
2549 * lock freed" warnings as well as problems when looking into
2550 * work->lockdep_map, make a copy and use that here.
2551 */
2552 struct lockdep_map lockdep_map;
2553
2554 lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2555 #endif
2556 /* ensure we're on the correct CPU */
2557 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2558 raw_smp_processor_id() != pool->cpu);
2559
2560 /* claim and dequeue */
2561 debug_work_deactivate(work);
2562 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2563 worker->current_work = work;
2564 worker->current_func = work->func;
2565 worker->current_pwq = pwq;
2566 worker->current_at = worker->task->se.sum_exec_runtime;
2567 work_data = *work_data_bits(work);
2568 worker->current_color = get_work_color(work_data);
2569
2570 /*
2571 * Record wq name for cmdline and debug reporting, may get
2572 * overridden through set_worker_desc().
2573 */
2574 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
2575
2576 list_del_init(&work->entry);
2577
2578 /*
2579 * CPU intensive works don't participate in concurrency management.
2580 * They're the scheduler's responsibility. This takes @worker out
2581 * of concurrency management and the next code block will chain
2582 * execution of the pending work items.
2583 */
2584 if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE))
2585 worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2586
2587 /*
2588 * Kick @pool if necessary. It's always a noop for per-cpu worker pools
2589 * since nr_running would always be >= 1 at this point. This is used to
2590 * chain execution of the pending work items for WORKER_NOT_RUNNING
2591 * workers such as the UNBOUND and CPU_INTENSIVE ones.
2592 */
2593 kick_pool(pool);
2594
2595 /*
2596 * Record the last pool and clear PENDING which should be the last
2597 * update to @work. Also, do this inside @pool->lock so that
2598 * PENDING and queued state changes happen together while IRQ is
2599 * disabled.
2600 */
2601 set_work_pool_and_clear_pending(work, pool->id);
2602
2603 pwq->stats[PWQ_STAT_STARTED]++;
2604 raw_spin_unlock_irq(&pool->lock);
2605
2606 lock_map_acquire(&pwq->wq->lockdep_map);
2607 lock_map_acquire(&lockdep_map);
2608 /*
2609 * Strictly speaking we should mark the invariant state without holding
2610 * any locks, that is, before these two lock_map_acquire()'s.
2611 *
2612 * However, that would result in:
2613 *
2614 * A(W1)
2615 * WFC(C)
2616 * A(W1)
2617 * C(C)
2618 *
2619 * Which would create W1->C->W1 dependencies, even though there is no
2620 * actual deadlock possible.
There are two solutions: using a
2621 * read-recursive acquire on the work(queue) 'locks', which would then
2622 * hit the lockdep limitation on recursive locks, or simply discarding
2623 * these locks.
2624 *
2625 * AFAICT there is no possible deadlock scenario between the
2626 * flush_work() and complete() primitives (except for single-threaded
2627 * workqueues), so hiding them isn't a problem.
2628 */
2629 lockdep_invariant_state(true);
2630 trace_workqueue_execute_start(work);
2631 worker->current_func(work);
2632 /*
2633 * While we must be careful to not use "work" after this, the trace
2634 * point will only record its address.
2635 */
2636 trace_workqueue_execute_end(work, worker->current_func);
2637 pwq->stats[PWQ_STAT_COMPLETED]++;
2638 lock_map_release(&lockdep_map);
2639 lock_map_release(&pwq->wq->lockdep_map);
2640
2641 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2642 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2643 " last function: %ps\n",
2644 current->comm, preempt_count(), task_pid_nr(current),
2645 worker->current_func);
2646 debug_show_held_locks(current);
2647 dump_stack();
2648 }
2649
2650 /*
2651 * The following prevents a kworker from hogging CPU on !PREEMPTION
2652 * kernels, where a requeueing work item waiting for something to
2653 * happen could deadlock with stop_machine as such work item could
2654 * indefinitely requeue itself while all other CPUs are trapped in
2655 * stop_machine. At the same time, report a quiescent RCU state so
2656 * the same condition doesn't freeze RCU.
2657 */
2658 cond_resched();
2659
2660 raw_spin_lock_irq(&pool->lock);
2661
2662 /*
2663 * In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked
2664 * CPU intensive by wq_worker_tick() if @work hogged CPU longer than
2665 * wq_cpu_intensive_thresh_us. Clear it.
2666 */
2667 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2668
2669 /* tag the worker for identification in schedule() */
2670 worker->last_func = worker->current_func;
2671
2672 /* we're done with it, release */
2673 hash_del(&worker->hentry);
2674 worker->current_work = NULL;
2675 worker->current_func = NULL;
2676 worker->current_pwq = NULL;
2677 worker->current_color = INT_MAX;
2678 pwq_dec_nr_in_flight(pwq, work_data);
2679 }
2680
2681 /**
2682 * process_scheduled_works - process scheduled works
2683 * @worker: self
2684 *
2685 * Process all scheduled works. Please note that the scheduled list
2686 * may change while processing a work, so this function repeatedly
2687 * fetches a work from the top and executes it.
2688 *
2689 * CONTEXT:
2690 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2691 * multiple times.
2692 */
2693 static void process_scheduled_works(struct worker *worker)
2694 {
2695 struct work_struct *work;
2696 bool first = true;
2697
2698 while ((work = list_first_entry_or_null(&worker->scheduled,
2699 struct work_struct, entry))) {
2700 if (first) {
2701 worker->pool->watchdog_ts = jiffies;
2702 first = false;
2703 }
2704 process_one_work(worker, work);
2705 }
2706 }
2707
2708 static void set_pf_worker(bool val)
2709 {
2710 mutex_lock(&wq_pool_attach_mutex);
2711 if (val)
2712 current->flags |= PF_WQ_WORKER;
2713 else
2714 current->flags &= ~PF_WQ_WORKER;
2715 mutex_unlock(&wq_pool_attach_mutex);
2716 }
2717
2718 /**
2719 * worker_thread - the worker thread function
2720 * @__worker: self
2721 *
2722 * The worker thread function. All workers belong to a worker_pool -
2723 * either a per-cpu one or a dynamic unbound one.
These workers process all
2724 * work items regardless of their specific target workqueue. The only
2725 * exception is work items which belong to workqueues with a rescuer which
2726 * will be explained in rescuer_thread().
2727 *
2728 * Return: 0
2729 */
2730 static int worker_thread(void *__worker)
2731 {
2732 struct worker *worker = __worker;
2733 struct worker_pool *pool = worker->pool;
2734
2735 /* tell the scheduler that this is a workqueue worker */
2736 set_pf_worker(true);
2737 woke_up:
2738 raw_spin_lock_irq(&pool->lock);
2739
2740 /* am I supposed to die? */
2741 if (unlikely(worker->flags & WORKER_DIE)) {
2742 raw_spin_unlock_irq(&pool->lock);
2743 set_pf_worker(false);
2744
2745 set_task_comm(worker->task, "kworker/dying");
2746 ida_free(&pool->worker_ida, worker->id);
2747 worker_detach_from_pool(worker);
2748 WARN_ON_ONCE(!list_empty(&worker->entry));
2749 kfree(worker);
2750 return 0;
2751 }
2752
2753 worker_leave_idle(worker);
2754 recheck:
2755 /* no more worker necessary? */
2756 if (!need_more_worker(pool))
2757 goto sleep;
2758
2759 /* do we need to manage? */
2760 if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2761 goto recheck;
2762
2763 /*
2764 * ->scheduled list can only be filled while a worker is
2765 * preparing to process a work or actually processing it.
2766 * Make sure nobody diddled with it while I was sleeping.
2767 */
2768 WARN_ON_ONCE(!list_empty(&worker->scheduled));
2769
2770 /*
2771 * Finish PREP stage. We're guaranteed to have at least one idle
2772 * worker or that someone else has already assumed the manager
2773 * role. This is where @worker starts participating in concurrency
2774 * management if applicable and concurrency management is restored
2775 * after being rebound. See rebind_workers() for details.
2776 */
2777 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2778
2779 do {
2780 struct work_struct *work =
2781 list_first_entry(&pool->worklist,
2782 struct work_struct, entry);
2783
2784 if (assign_work(work, worker, NULL))
2785 process_scheduled_works(worker);
2786 } while (keep_working(pool));
2787
2788 worker_set_flags(worker, WORKER_PREP);
2789 sleep:
2790 /*
2791 * pool->lock is held and there's no work to process and no need to
2792 * manage, sleep. Workers are woken up only while holding
2793 * pool->lock or from local cpu, so setting the current state
2794 * before releasing pool->lock is enough to prevent losing any
2795 * event.
2796 */
2797 worker_enter_idle(worker);
2798 __set_current_state(TASK_IDLE);
2799 raw_spin_unlock_irq(&pool->lock);
2800 schedule();
2801 goto woke_up;
2802 }
2803
2804 /**
2805 * rescuer_thread - the rescuer thread function
2806 * @__rescuer: self
2807 *
2808 * Workqueue rescuer thread function. There's one rescuer for each
2809 * workqueue which has WQ_MEM_RECLAIM set.
2810 *
2811 * Regular work processing on a pool may block trying to create a new
2812 * worker, which uses a GFP_KERNEL allocation that has a slight chance of
2813 * developing into a deadlock if some works currently on the same queue
2814 * need to be processed to satisfy the GFP_KERNEL allocation. This is
2815 * the problem the rescuer solves.
2816 *
2817 * When such a condition is possible, the pool summons the rescuers of all
2818 * workqueues which have works queued on the pool and lets them process
2819 * those works so that forward progress can be guaranteed.
2820 *
2821 * This should happen rarely.
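 *
 * A workqueue gets a rescuer by being allocated with %WQ_MEM_RECLAIM,
 * e.g. (illustrative; the workqueue name is hypothetical):
 *
 *	wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 0);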
2822 *
2823 * Return: 0
2824 */
2825 static int rescuer_thread(void *__rescuer)
2826 {
2827 struct worker *rescuer = __rescuer;
2828 struct workqueue_struct *wq = rescuer->rescue_wq;
2829 bool should_stop;
2830
2831 set_user_nice(current, RESCUER_NICE_LEVEL);
2832
2833 /*
2834 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it
2835 * doesn't participate in concurrency management.
2836 */
2837 set_pf_worker(true);
2838 repeat:
2839 set_current_state(TASK_IDLE);
2840
2841 /*
2842 * By the time the rescuer is requested to stop, the workqueue
2843 * shouldn't have any work pending, but @wq->maydays may still have
2844 * pwq(s) queued. This can happen if non-rescuer workers consume
2845 * all the work items before the rescuer gets to them. Go through
2846 * @wq->maydays processing before acting on should_stop so that the
2847 * list is always empty on exit.
2848 */
2849 should_stop = kthread_should_stop();
2850
2851 /* see whether any pwq is asking for help */
2852 raw_spin_lock_irq(&wq_mayday_lock);
2853
2854 while (!list_empty(&wq->maydays)) {
2855 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2856 struct pool_workqueue, mayday_node);
2857 struct worker_pool *pool = pwq->pool;
2858 struct work_struct *work, *n;
2859
2860 __set_current_state(TASK_RUNNING);
2861 list_del_init(&pwq->mayday_node);
2862
2863 raw_spin_unlock_irq(&wq_mayday_lock);
2864
2865 worker_attach_to_pool(rescuer, pool);
2866
2867 raw_spin_lock_irq(&pool->lock);
2868
2869 /*
2870 * Slurp in all works issued via this workqueue and
2871 * process'em.
2872 */
2873 WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
2874 list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2875 if (get_work_pwq(work) == pwq &&
2876 assign_work(work, rescuer, &n))
2877 pwq->stats[PWQ_STAT_RESCUED]++;
2878 }
2879
2880 if (!list_empty(&rescuer->scheduled)) {
2881 process_scheduled_works(rescuer);
2882
2883 /*
2884 * The above execution of rescued work items could
2885 * have created more to rescue through
2886 * pwq_activate_first_inactive() or chained
2887 * queueing. Let's put @pwq back on the mayday list so
2888 * that such back-to-back work items, which may be
2889 * being used to relieve memory pressure, don't
2890 * incur MAYDAY_INTERVAL delay in between.
2891 */
2892 if (pwq->nr_active && need_to_create_worker(pool)) {
2893 raw_spin_lock(&wq_mayday_lock);
2894 /*
2895 * Queue iff we aren't racing destruction
2896 * and somebody else hasn't queued it already.
2897 */
2898 if (wq->rescuer && list_empty(&pwq->mayday_node)) {
2899 get_pwq(pwq);
2900 list_add_tail(&pwq->mayday_node, &wq->maydays);
2901 }
2902 raw_spin_unlock(&wq_mayday_lock);
2903 }
2904 }
2905
2906 /*
2907 * Put the reference grabbed by send_mayday(). @pool won't
2908 * go away while we're still attached to it.
2909 */
2910 put_pwq(pwq);
2911
2912 /*
2913 * Leave this pool. Notify regular workers; otherwise, we end up
2914 * with 0 concurrency and stall the execution.
2915 */ 2916 kick_pool(pool); 2917 2918 raw_spin_unlock_irq(&pool->lock); 2919 2920 worker_detach_from_pool(rescuer); 2921 2922 raw_spin_lock_irq(&wq_mayday_lock); 2923 } 2924 2925 raw_spin_unlock_irq(&wq_mayday_lock); 2926 2927 if (should_stop) { 2928 __set_current_state(TASK_RUNNING); 2929 set_pf_worker(false); 2930 return 0; 2931 } 2932 2933 /* rescuers should never participate in concurrency management */ 2934 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); 2935 schedule(); 2936 goto repeat; 2937 } 2938 2939 /** 2940 * check_flush_dependency - check for flush dependency sanity 2941 * @target_wq: workqueue being flushed 2942 * @target_work: work item being flushed (NULL for workqueue flushes) 2943 * 2944 * %current is trying to flush the whole @target_wq or @target_work on it. 2945 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not 2946 * reclaiming memory or running on a workqueue which doesn't have 2947 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to 2948 * a deadlock. 2949 */ 2950 static void check_flush_dependency(struct workqueue_struct *target_wq, 2951 struct work_struct *target_work) 2952 { 2953 work_func_t target_func = target_work ? target_work->func : NULL; 2954 struct worker *worker; 2955 2956 if (target_wq->flags & WQ_MEM_RECLAIM) 2957 return; 2958 2959 worker = current_wq_worker(); 2960 2961 WARN_ONCE(current->flags & PF_MEMALLOC, 2962 "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps", 2963 current->pid, current->comm, target_wq->name, target_func); 2964 WARN_ONCE(worker && ((worker->current_pwq->wq->flags & 2965 (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM), 2966 "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps", 2967 worker->current_pwq->wq->name, worker->current_func, 2968 target_wq->name, target_func); 2969 } 2970 2971 struct wq_barrier { 2972 struct work_struct work; 2973 struct completion done; 2974 struct task_struct *task; /* purely informational */ 2975 }; 2976 2977 static void wq_barrier_func(struct work_struct *work) 2978 { 2979 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); 2980 complete(&barr->done); 2981 } 2982 2983 /** 2984 * insert_wq_barrier - insert a barrier work 2985 * @pwq: pwq to insert barrier into 2986 * @barr: wq_barrier to insert 2987 * @target: target work to attach @barr to 2988 * @worker: worker currently executing @target, NULL if @target is not executing 2989 * 2990 * @barr is linked to @target such that @barr is completed only after 2991 * @target finishes execution. Please note that the ordering 2992 * guarantee is observed only with respect to @target and on the local 2993 * cpu. 2994 * 2995 * Currently, a queued barrier can't be canceled. This is because 2996 * try_to_grab_pending() can't determine whether the work to be 2997 * grabbed is at the head of the queue and thus can't clear LINKED 2998 * flag of the previous work while there must be a valid next work 2999 * after a work with LINKED flag set. 3000 * 3001 * Note that when @worker is non-NULL, @target may be modified 3002 * underneath us, so we can't reliably determine pwq from @target. 3003 * 3004 * CONTEXT: 3005 * raw_spin_lock_irq(pool->lock). 
3006 */ 3007 static void insert_wq_barrier(struct pool_workqueue *pwq, 3008 struct wq_barrier *barr, 3009 struct work_struct *target, struct worker *worker) 3010 { 3011 unsigned int work_flags = 0; 3012 unsigned int work_color; 3013 struct list_head *head; 3014 3015 /* 3016 * debugobject calls are safe here even with pool->lock locked 3017 * as we know for sure that this will not trigger any of the 3018 * checks and call back into the fixup functions where we 3019 * might deadlock. 3020 */ 3021 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); 3022 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); 3023 3024 init_completion_map(&barr->done, &target->lockdep_map); 3025 3026 barr->task = current; 3027 3028 /* The barrier work item does not participate in pwq->nr_active. */ 3029 work_flags |= WORK_STRUCT_INACTIVE; 3030 3031 /* 3032 * If @target is currently being executed, schedule the 3033 * barrier to the worker; otherwise, put it after @target. 3034 */ 3035 if (worker) { 3036 head = worker->scheduled.next; 3037 work_color = worker->current_color; 3038 } else { 3039 unsigned long *bits = work_data_bits(target); 3040 3041 head = target->entry.next; 3042 /* there can already be other linked works, inherit and set */ 3043 work_flags |= *bits & WORK_STRUCT_LINKED; 3044 work_color = get_work_color(*bits); 3045 __set_bit(WORK_STRUCT_LINKED_BIT, bits); 3046 } 3047 3048 pwq->nr_in_flight[work_color]++; 3049 work_flags |= work_color_to_flags(work_color); 3050 3051 insert_work(pwq, &barr->work, head, work_flags); 3052 } 3053 3054 /** 3055 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing 3056 * @wq: workqueue being flushed 3057 * @flush_color: new flush color, < 0 for no-op 3058 * @work_color: new work color, < 0 for no-op 3059 * 3060 * Prepare pwqs for workqueue flushing. 3061 * 3062 * If @flush_color is non-negative, flush_color on all pwqs should be 3063 * -1. If no pwq has in-flight commands at the specified color, all 3064 * pwq->flush_color's stay at -1 and %false is returned. If any pwq 3065 * has in flight commands, its pwq->flush_color is set to 3066 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq 3067 * wakeup logic is armed and %true is returned. 3068 * 3069 * The caller should have initialized @wq->first_flusher prior to 3070 * calling this function with non-negative @flush_color. If 3071 * @flush_color is negative, no flush color update is done and %false 3072 * is returned. 3073 * 3074 * If @work_color is non-negative, all pwqs should have the same 3075 * work_color which is previous to @work_color and all will be 3076 * advanced to @work_color. 3077 * 3078 * CONTEXT: 3079 * mutex_lock(wq->mutex). 3080 * 3081 * Return: 3082 * %true if @flush_color >= 0 and there's something to flush. %false 3083 * otherwise. 
3084 */ 3085 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, 3086 int flush_color, int work_color) 3087 { 3088 bool wait = false; 3089 struct pool_workqueue *pwq; 3090 3091 if (flush_color >= 0) { 3092 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); 3093 atomic_set(&wq->nr_pwqs_to_flush, 1); 3094 } 3095 3096 for_each_pwq(pwq, wq) { 3097 struct worker_pool *pool = pwq->pool; 3098 3099 raw_spin_lock_irq(&pool->lock); 3100 3101 if (flush_color >= 0) { 3102 WARN_ON_ONCE(pwq->flush_color != -1); 3103 3104 if (pwq->nr_in_flight[flush_color]) { 3105 pwq->flush_color = flush_color; 3106 atomic_inc(&wq->nr_pwqs_to_flush); 3107 wait = true; 3108 } 3109 } 3110 3111 if (work_color >= 0) { 3112 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); 3113 pwq->work_color = work_color; 3114 } 3115 3116 raw_spin_unlock_irq(&pool->lock); 3117 } 3118 3119 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) 3120 complete(&wq->first_flusher->done); 3121 3122 return wait; 3123 } 3124 3125 /** 3126 * __flush_workqueue - ensure that any scheduled work has run to completion. 3127 * @wq: workqueue to flush 3128 * 3129 * This function sleeps until all work items which were queued on entry 3130 * have finished execution, but it is not livelocked by new incoming ones. 3131 */ 3132 void __flush_workqueue(struct workqueue_struct *wq) 3133 { 3134 struct wq_flusher this_flusher = { 3135 .list = LIST_HEAD_INIT(this_flusher.list), 3136 .flush_color = -1, 3137 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map), 3138 }; 3139 int next_color; 3140 3141 if (WARN_ON(!wq_online)) 3142 return; 3143 3144 lock_map_acquire(&wq->lockdep_map); 3145 lock_map_release(&wq->lockdep_map); 3146 3147 mutex_lock(&wq->mutex); 3148 3149 /* 3150 * Start-to-wait phase 3151 */ 3152 next_color = work_next_color(wq->work_color); 3153 3154 if (next_color != wq->flush_color) { 3155 /* 3156 * Color space is not full. The current work_color 3157 * becomes our flush_color and work_color is advanced 3158 * by one. 3159 */ 3160 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); 3161 this_flusher.flush_color = wq->work_color; 3162 wq->work_color = next_color; 3163 3164 if (!wq->first_flusher) { 3165 /* no flush in progress, become the first flusher */ 3166 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 3167 3168 wq->first_flusher = &this_flusher; 3169 3170 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, 3171 wq->work_color)) { 3172 /* nothing to flush, done */ 3173 wq->flush_color = next_color; 3174 wq->first_flusher = NULL; 3175 goto out_unlock; 3176 } 3177 } else { 3178 /* wait in queue */ 3179 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); 3180 list_add_tail(&this_flusher.list, &wq->flusher_queue); 3181 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 3182 } 3183 } else { 3184 /* 3185 * Oops, color space is full, wait on overflow queue. 3186 * The next flush completion will assign us 3187 * flush_color and transfer to flusher_queue. 3188 */ 3189 list_add_tail(&this_flusher.list, &wq->flusher_overflow); 3190 } 3191 3192 check_flush_dependency(wq, NULL); 3193 3194 mutex_unlock(&wq->mutex); 3195 3196 wait_for_completion(&this_flusher.done); 3197 3198 /* 3199 * Wake-up-and-cascade phase 3200 * 3201 * First flushers are responsible for cascading flushes and 3202 * handling overflow. Non-first flushers can simply return. 
3203 */
3204 if (READ_ONCE(wq->first_flusher) != &this_flusher)
3205 return;
3206
3207 mutex_lock(&wq->mutex);
3208
3209 /* we might have raced, check again with mutex held */
3210 if (wq->first_flusher != &this_flusher)
3211 goto out_unlock;
3212
3213 WRITE_ONCE(wq->first_flusher, NULL);
3214
3215 WARN_ON_ONCE(!list_empty(&this_flusher.list));
3216 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
3217
3218 while (true) {
3219 struct wq_flusher *next, *tmp;
3220
3221 /* complete all the flushers sharing the current flush color */
3222 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
3223 if (next->flush_color != wq->flush_color)
3224 break;
3225 list_del_init(&next->list);
3226 complete(&next->done);
3227 }
3228
3229 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
3230 wq->flush_color != work_next_color(wq->work_color));
3231
3232 /* this flush_color is finished, advance by one */
3233 wq->flush_color = work_next_color(wq->flush_color);
3234
3235 /* one color has been freed, handle overflow queue */
3236 if (!list_empty(&wq->flusher_overflow)) {
3237 /*
3238 * Assign the same color to all overflowed
3239 * flushers, advance work_color and append to
3240 * flusher_queue. This is the start-to-wait
3241 * phase for these overflowed flushers.
3242 */
3243 list_for_each_entry(tmp, &wq->flusher_overflow, list)
3244 tmp->flush_color = wq->work_color;
3245
3246 wq->work_color = work_next_color(wq->work_color);
3247
3248 list_splice_tail_init(&wq->flusher_overflow,
3249 &wq->flusher_queue);
3250 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
3251 }
3252
3253 if (list_empty(&wq->flusher_queue)) {
3254 WARN_ON_ONCE(wq->flush_color != wq->work_color);
3255 break;
3256 }
3257
3258 /*
3259 * Need to flush more colors. Make the next flusher
3260 * the new first flusher and arm pwqs.
3261 */
3262 WARN_ON_ONCE(wq->flush_color == wq->work_color);
3263 WARN_ON_ONCE(wq->flush_color != next->flush_color);
3264
3265 list_del_init(&next->list);
3266 wq->first_flusher = next;
3267
3268 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
3269 break;
3270
3271 /*
3272 * Meh... this color is already done, clear first
3273 * flusher and repeat cascading.
3274 */
3275 wq->first_flusher = NULL;
3276 }
3277
3278 out_unlock:
3279 mutex_unlock(&wq->mutex);
3280 }
3281 EXPORT_SYMBOL(__flush_workqueue);
3282
3283 /**
3284 * drain_workqueue - drain a workqueue
3285 * @wq: workqueue to drain
3286 *
3287 * Wait until the workqueue becomes empty. While draining is in progress,
3288 * only chain queueing is allowed. IOW, only currently pending or running
3289 * work items on @wq can queue further work items on it. @wq is flushed
3290 * repeatedly until it becomes empty. The number of flushes is determined
3291 * by the depth of chaining and should be relatively small. Whine if it
3292 * takes too long.
3293 */
3294 void drain_workqueue(struct workqueue_struct *wq)
3295 {
3296 unsigned int flush_cnt = 0;
3297 struct pool_workqueue *pwq;
3298
3299 /*
3300 * __queue_work() needs to test whether there are drainers. It is much
3301 * hotter than drain_workqueue() and already looks at @wq->flags.
3302 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
3303 */ 3304 mutex_lock(&wq->mutex); 3305 if (!wq->nr_drainers++) 3306 wq->flags |= __WQ_DRAINING; 3307 mutex_unlock(&wq->mutex); 3308 reflush: 3309 __flush_workqueue(wq); 3310 3311 mutex_lock(&wq->mutex); 3312 3313 for_each_pwq(pwq, wq) { 3314 bool drained; 3315 3316 raw_spin_lock_irq(&pwq->pool->lock); 3317 drained = !pwq->nr_active && list_empty(&pwq->inactive_works); 3318 raw_spin_unlock_irq(&pwq->pool->lock); 3319 3320 if (drained) 3321 continue; 3322 3323 if (++flush_cnt == 10 || 3324 (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 3325 pr_warn("workqueue %s: %s() isn't complete after %u tries\n", 3326 wq->name, __func__, flush_cnt); 3327 3328 mutex_unlock(&wq->mutex); 3329 goto reflush; 3330 } 3331 3332 if (!--wq->nr_drainers) 3333 wq->flags &= ~__WQ_DRAINING; 3334 mutex_unlock(&wq->mutex); 3335 } 3336 EXPORT_SYMBOL_GPL(drain_workqueue); 3337 3338 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, 3339 bool from_cancel) 3340 { 3341 struct worker *worker = NULL; 3342 struct worker_pool *pool; 3343 struct pool_workqueue *pwq; 3344 3345 might_sleep(); 3346 3347 rcu_read_lock(); 3348 pool = get_work_pool(work); 3349 if (!pool) { 3350 rcu_read_unlock(); 3351 return false; 3352 } 3353 3354 raw_spin_lock_irq(&pool->lock); 3355 /* see the comment in try_to_grab_pending() with the same code */ 3356 pwq = get_work_pwq(work); 3357 if (pwq) { 3358 if (unlikely(pwq->pool != pool)) 3359 goto already_gone; 3360 } else { 3361 worker = find_worker_executing_work(pool, work); 3362 if (!worker) 3363 goto already_gone; 3364 pwq = worker->current_pwq; 3365 } 3366 3367 check_flush_dependency(pwq->wq, work); 3368 3369 insert_wq_barrier(pwq, barr, work, worker); 3370 raw_spin_unlock_irq(&pool->lock); 3371 3372 /* 3373 * Force a lock recursion deadlock when using flush_work() inside a 3374 * single-threaded or rescuer equipped workqueue. 3375 * 3376 * For single threaded workqueues the deadlock happens when the work 3377 * is after the work issuing the flush_work(). For rescuer equipped 3378 * workqueues the deadlock happens when the rescuer stalls, blocking 3379 * forward progress. 3380 */ 3381 if (!from_cancel && 3382 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) { 3383 lock_map_acquire(&pwq->wq->lockdep_map); 3384 lock_map_release(&pwq->wq->lockdep_map); 3385 } 3386 rcu_read_unlock(); 3387 return true; 3388 already_gone: 3389 raw_spin_unlock_irq(&pool->lock); 3390 rcu_read_unlock(); 3391 return false; 3392 } 3393 3394 static bool __flush_work(struct work_struct *work, bool from_cancel) 3395 { 3396 struct wq_barrier barr; 3397 3398 if (WARN_ON(!wq_online)) 3399 return false; 3400 3401 if (WARN_ON(!work->func)) 3402 return false; 3403 3404 lock_map_acquire(&work->lockdep_map); 3405 lock_map_release(&work->lockdep_map); 3406 3407 if (start_flush_work(work, &barr, from_cancel)) { 3408 wait_for_completion(&barr.done); 3409 destroy_work_on_stack(&barr.work); 3410 return true; 3411 } else { 3412 return false; 3413 } 3414 } 3415 3416 /** 3417 * flush_work - wait for a work to finish executing the last queueing instance 3418 * @work: the work to flush 3419 * 3420 * Wait until @work has finished execution. @work is guaranteed to be idle 3421 * on return if it hasn't been requeued since flush started. 3422 * 3423 * Return: 3424 * %true if flush_work() waited for the work to finish execution, 3425 * %false if it was already idle. 
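 *
 * Illustrative sketch (my_work is a hypothetical work item): ensure a
 * previously queued work item has fully finished before tearing down the
 * data it touches:
 *
 *	queue_work(system_wq, &my_work);
 *	flush_work(&my_work);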
3426 */ 3427 bool flush_work(struct work_struct *work) 3428 { 3429 return __flush_work(work, false); 3430 } 3431 EXPORT_SYMBOL_GPL(flush_work); 3432 3433 struct cwt_wait { 3434 wait_queue_entry_t wait; 3435 struct work_struct *work; 3436 }; 3437 3438 static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 3439 { 3440 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); 3441 3442 if (cwait->work != key) 3443 return 0; 3444 return autoremove_wake_function(wait, mode, sync, key); 3445 } 3446 3447 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) 3448 { 3449 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); 3450 unsigned long flags; 3451 int ret; 3452 3453 do { 3454 ret = try_to_grab_pending(work, is_dwork, &flags); 3455 /* 3456 * If someone else is already canceling, wait for it to 3457 * finish. flush_work() doesn't work for PREEMPT_NONE 3458 * because we may get scheduled between @work's completion 3459 * and the other canceling task resuming and clearing 3460 * CANCELING - flush_work() will return false immediately 3461 * as @work is no longer busy, try_to_grab_pending() will 3462 * return -ENOENT as @work is still being canceled and the 3463 * other canceling task won't be able to clear CANCELING as 3464 * we're hogging the CPU. 3465 * 3466 * Let's wait for completion using a waitqueue. As this 3467 * may lead to the thundering herd problem, use a custom 3468 * wake function which matches @work along with exclusive 3469 * wait and wakeup. 3470 */ 3471 if (unlikely(ret == -ENOENT)) { 3472 struct cwt_wait cwait; 3473 3474 init_wait(&cwait.wait); 3475 cwait.wait.func = cwt_wakefn; 3476 cwait.work = work; 3477 3478 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, 3479 TASK_UNINTERRUPTIBLE); 3480 if (work_is_canceling(work)) 3481 schedule(); 3482 finish_wait(&cancel_waitq, &cwait.wait); 3483 } 3484 } while (unlikely(ret < 0)); 3485 3486 /* tell other tasks trying to grab @work to back off */ 3487 mark_work_canceling(work); 3488 local_irq_restore(flags); 3489 3490 /* 3491 * This allows canceling during early boot. We know that @work 3492 * isn't executing. 3493 */ 3494 if (wq_online) 3495 __flush_work(work, true); 3496 3497 clear_work_data(work); 3498 3499 /* 3500 * Paired with prepare_to_wait() above so that either 3501 * waitqueue_active() is visible here or !work_is_canceling() is 3502 * visible there. 3503 */ 3504 smp_mb(); 3505 if (waitqueue_active(&cancel_waitq)) 3506 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); 3507 3508 return ret; 3509 } 3510 3511 /** 3512 * cancel_work_sync - cancel a work and wait for it to finish 3513 * @work: the work to cancel 3514 * 3515 * Cancel @work and wait for its execution to finish. This function 3516 * can be used even if the work re-queues itself or migrates to 3517 * another workqueue. On return from this function, @work is 3518 * guaranteed to be not pending or executing on any CPU. 3519 * 3520 * cancel_work_sync(&delayed_work->work) must not be used for 3521 * delayed_work's. Use cancel_delayed_work_sync() instead. 3522 * 3523 * The caller must ensure that the workqueue on which @work was last 3524 * queued can't be destroyed before this function returns. 3525 * 3526 * Return: 3527 * %true if @work was pending, %false otherwise. 
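 *
 * A common teardown pattern (illustrative; struct my_dev and its fields
 * are hypothetical):
 *
 *	static void my_dev_remove(struct my_dev *md)
 *	{
 *		cancel_work_sync(&md->update_work);
 *		kfree(md);
 *	}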
 */
bool cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, false);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * flush_delayed_work - wait for a dwork to finish executing the last queueing
 * @dwork: the delayed work to flush
 *
 * Delayed timer is cancelled and the pending work is queued for
 * immediate execution.  Like flush_work(), this function only
 * considers the last queueing instance of @dwork.
 *
 * Return:
 * %true if flush_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_delayed_work(struct delayed_work *dwork)
{
	local_irq_disable();
	if (del_timer_sync(&dwork->timer))
		__queue_work(dwork->cpu, dwork->wq, &dwork->work);
	local_irq_enable();
	return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);

/**
 * flush_rcu_work - wait for a rwork to finish executing the last queueing
 * @rwork: the rcu work to flush
 *
 * Return:
 * %true if flush_rcu_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_rcu_work(struct rcu_work *rwork)
{
	if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
		rcu_barrier();
		flush_work(&rwork->work);
		return true;
	} else {
		return flush_work(&rwork->work);
	}
}
EXPORT_SYMBOL(flush_rcu_work);

static bool __cancel_work(struct work_struct *work, bool is_dwork)
{
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(work, is_dwork, &flags);
	} while (unlikely(ret == -EAGAIN));

	if (unlikely(ret < 0))
		return false;

	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
	local_irq_restore(flags);
	return ret;
}

/*
 * See cancel_delayed_work()
 */
bool cancel_work(struct work_struct *work)
{
	return __cancel_work(work, false);
}
EXPORT_SYMBOL(cancel_work);

/**
 * cancel_delayed_work - cancel a delayed work
 * @dwork: delayed_work to cancel
 *
 * Kill off a pending delayed_work.
 *
 * Return: %true if @dwork was pending and canceled; %false if it wasn't
 * pending.
 *
 * Note:
 * The work callback function may still be running on return, unless
 * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
 * use cancel_delayed_work_sync() to wait on it.
 *
 * This function is safe to call from any context including IRQ handler.
 */
bool cancel_delayed_work(struct delayed_work *dwork)
{
	return __cancel_work(&dwork->work, true);
}
EXPORT_SYMBOL(cancel_delayed_work);

/**
 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
 * @dwork: the delayed work to cancel
 *
 * This is cancel_work_sync() for delayed works.
 *
 * Return:
 * %true if @dwork was pending, %false otherwise.
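 *
 * Illustrative sketch (hypothetical @poll_dwork armed elsewhere with
 * queue_delayed_work()):
 *
 *	cancel_delayed_work_sync(&poll_dwork);
 *
 * Both the timer and a possibly executing callback are dealt with; on
 * return @poll_dwork is idle even if the callback used to re-arm itself.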
3633 */ 3634 bool cancel_delayed_work_sync(struct delayed_work *dwork) 3635 { 3636 return __cancel_work_timer(&dwork->work, true); 3637 } 3638 EXPORT_SYMBOL(cancel_delayed_work_sync); 3639 3640 /** 3641 * schedule_on_each_cpu - execute a function synchronously on each online CPU 3642 * @func: the function to call 3643 * 3644 * schedule_on_each_cpu() executes @func on each online CPU using the 3645 * system workqueue and blocks until all CPUs have completed. 3646 * schedule_on_each_cpu() is very slow. 3647 * 3648 * Return: 3649 * 0 on success, -errno on failure. 3650 */ 3651 int schedule_on_each_cpu(work_func_t func) 3652 { 3653 int cpu; 3654 struct work_struct __percpu *works; 3655 3656 works = alloc_percpu(struct work_struct); 3657 if (!works) 3658 return -ENOMEM; 3659 3660 cpus_read_lock(); 3661 3662 for_each_online_cpu(cpu) { 3663 struct work_struct *work = per_cpu_ptr(works, cpu); 3664 3665 INIT_WORK(work, func); 3666 schedule_work_on(cpu, work); 3667 } 3668 3669 for_each_online_cpu(cpu) 3670 flush_work(per_cpu_ptr(works, cpu)); 3671 3672 cpus_read_unlock(); 3673 free_percpu(works); 3674 return 0; 3675 } 3676 3677 /** 3678 * execute_in_process_context - reliably execute the routine with user context 3679 * @fn: the function to execute 3680 * @ew: guaranteed storage for the execute work structure (must 3681 * be available when the work executes) 3682 * 3683 * Executes the function immediately if process context is available, 3684 * otherwise schedules the function for delayed execution. 3685 * 3686 * Return: 0 - function was executed 3687 * 1 - function was scheduled for execution 3688 */ 3689 int execute_in_process_context(work_func_t fn, struct execute_work *ew) 3690 { 3691 if (!in_interrupt()) { 3692 fn(&ew->work); 3693 return 0; 3694 } 3695 3696 INIT_WORK(&ew->work, fn); 3697 schedule_work(&ew->work); 3698 3699 return 1; 3700 } 3701 EXPORT_SYMBOL_GPL(execute_in_process_context); 3702 3703 /** 3704 * free_workqueue_attrs - free a workqueue_attrs 3705 * @attrs: workqueue_attrs to free 3706 * 3707 * Undo alloc_workqueue_attrs(). 3708 */ 3709 void free_workqueue_attrs(struct workqueue_attrs *attrs) 3710 { 3711 if (attrs) { 3712 free_cpumask_var(attrs->cpumask); 3713 free_cpumask_var(attrs->__pod_cpumask); 3714 kfree(attrs); 3715 } 3716 } 3717 3718 /** 3719 * alloc_workqueue_attrs - allocate a workqueue_attrs 3720 * 3721 * Allocate a new workqueue_attrs, initialize with default settings and 3722 * return it. 3723 * 3724 * Return: The allocated new workqueue_attr on success. %NULL on failure. 3725 */ 3726 struct workqueue_attrs *alloc_workqueue_attrs(void) 3727 { 3728 struct workqueue_attrs *attrs; 3729 3730 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); 3731 if (!attrs) 3732 goto fail; 3733 if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL)) 3734 goto fail; 3735 if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL)) 3736 goto fail; 3737 3738 cpumask_copy(attrs->cpumask, cpu_possible_mask); 3739 attrs->affn_scope = WQ_AFFN_DFL; 3740 return attrs; 3741 fail: 3742 free_workqueue_attrs(attrs); 3743 return NULL; 3744 } 3745 3746 static void copy_workqueue_attrs(struct workqueue_attrs *to, 3747 const struct workqueue_attrs *from) 3748 { 3749 to->nice = from->nice; 3750 cpumask_copy(to->cpumask, from->cpumask); 3751 cpumask_copy(to->__pod_cpumask, from->__pod_cpumask); 3752 to->affn_strict = from->affn_strict; 3753 3754 /* 3755 * Unlike hash and equality test, copying shouldn't ignore wq-only 3756 * fields as copying is used for both pool and wq attrs. 
Instead, 3757 * get_unbound_pool() explicitly clears the fields. 3758 */ 3759 to->affn_scope = from->affn_scope; 3760 to->ordered = from->ordered; 3761 } 3762 3763 /* 3764 * Some attrs fields are workqueue-only. Clear them for worker_pool's. See the 3765 * comments in 'struct workqueue_attrs' definition. 3766 */ 3767 static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs) 3768 { 3769 attrs->affn_scope = WQ_AFFN_NR_TYPES; 3770 attrs->ordered = false; 3771 } 3772 3773 /* hash value of the content of @attr */ 3774 static u32 wqattrs_hash(const struct workqueue_attrs *attrs) 3775 { 3776 u32 hash = 0; 3777 3778 hash = jhash_1word(attrs->nice, hash); 3779 hash = jhash(cpumask_bits(attrs->cpumask), 3780 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 3781 hash = jhash(cpumask_bits(attrs->__pod_cpumask), 3782 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 3783 hash = jhash_1word(attrs->affn_strict, hash); 3784 return hash; 3785 } 3786 3787 /* content equality test */ 3788 static bool wqattrs_equal(const struct workqueue_attrs *a, 3789 const struct workqueue_attrs *b) 3790 { 3791 if (a->nice != b->nice) 3792 return false; 3793 if (!cpumask_equal(a->cpumask, b->cpumask)) 3794 return false; 3795 if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask)) 3796 return false; 3797 if (a->affn_strict != b->affn_strict) 3798 return false; 3799 return true; 3800 } 3801 3802 /* Update @attrs with actually available CPUs */ 3803 static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs, 3804 const cpumask_t *unbound_cpumask) 3805 { 3806 /* 3807 * Calculate the effective CPU mask of @attrs given @unbound_cpumask. If 3808 * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fallback to 3809 * @unbound_cpumask. 3810 */ 3811 cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask); 3812 if (unlikely(cpumask_empty(attrs->cpumask))) 3813 cpumask_copy(attrs->cpumask, unbound_cpumask); 3814 } 3815 3816 /* find wq_pod_type to use for @attrs */ 3817 static const struct wq_pod_type * 3818 wqattrs_pod_type(const struct workqueue_attrs *attrs) 3819 { 3820 enum wq_affn_scope scope; 3821 struct wq_pod_type *pt; 3822 3823 /* to synchronize access to wq_affn_dfl */ 3824 lockdep_assert_held(&wq_pool_mutex); 3825 3826 if (attrs->affn_scope == WQ_AFFN_DFL) 3827 scope = wq_affn_dfl; 3828 else 3829 scope = attrs->affn_scope; 3830 3831 pt = &wq_pod_types[scope]; 3832 3833 if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) && 3834 likely(pt->nr_pods)) 3835 return pt; 3836 3837 /* 3838 * Before workqueue_init_topology(), only SYSTEM is available which is 3839 * initialized in workqueue_init_early(). 3840 */ 3841 pt = &wq_pod_types[WQ_AFFN_SYSTEM]; 3842 BUG_ON(!pt->nr_pods); 3843 return pt; 3844 } 3845 3846 /** 3847 * init_worker_pool - initialize a newly zalloc'd worker_pool 3848 * @pool: worker_pool to initialize 3849 * 3850 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs. 3851 * 3852 * Return: 0 on success, -errno on failure. Even on failure, all fields 3853 * inside @pool proper are initialized and put_unbound_pool() can be called 3854 * on @pool safely to release it. 
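 *
 * This error contract allows a simple caller pattern; a sketch mirroring
 * get_unbound_pool() below:
 *
 *	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node);
 *	if (!pool || init_worker_pool(pool) < 0)
 *		goto fail;
 *
 * where the fail path may hand a non-NULL @pool to put_unbound_pool().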
3855 */ 3856 static int init_worker_pool(struct worker_pool *pool) 3857 { 3858 raw_spin_lock_init(&pool->lock); 3859 pool->id = -1; 3860 pool->cpu = -1; 3861 pool->node = NUMA_NO_NODE; 3862 pool->flags |= POOL_DISASSOCIATED; 3863 pool->watchdog_ts = jiffies; 3864 INIT_LIST_HEAD(&pool->worklist); 3865 INIT_LIST_HEAD(&pool->idle_list); 3866 hash_init(pool->busy_hash); 3867 3868 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE); 3869 INIT_WORK(&pool->idle_cull_work, idle_cull_fn); 3870 3871 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0); 3872 3873 INIT_LIST_HEAD(&pool->workers); 3874 INIT_LIST_HEAD(&pool->dying_workers); 3875 3876 ida_init(&pool->worker_ida); 3877 INIT_HLIST_NODE(&pool->hash_node); 3878 pool->refcnt = 1; 3879 3880 /* shouldn't fail above this point */ 3881 pool->attrs = alloc_workqueue_attrs(); 3882 if (!pool->attrs) 3883 return -ENOMEM; 3884 3885 wqattrs_clear_for_pool(pool->attrs); 3886 3887 return 0; 3888 } 3889 3890 #ifdef CONFIG_LOCKDEP 3891 static void wq_init_lockdep(struct workqueue_struct *wq) 3892 { 3893 char *lock_name; 3894 3895 lockdep_register_key(&wq->key); 3896 lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name); 3897 if (!lock_name) 3898 lock_name = wq->name; 3899 3900 wq->lock_name = lock_name; 3901 lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0); 3902 } 3903 3904 static void wq_unregister_lockdep(struct workqueue_struct *wq) 3905 { 3906 lockdep_unregister_key(&wq->key); 3907 } 3908 3909 static void wq_free_lockdep(struct workqueue_struct *wq) 3910 { 3911 if (wq->lock_name != wq->name) 3912 kfree(wq->lock_name); 3913 } 3914 #else 3915 static void wq_init_lockdep(struct workqueue_struct *wq) 3916 { 3917 } 3918 3919 static void wq_unregister_lockdep(struct workqueue_struct *wq) 3920 { 3921 } 3922 3923 static void wq_free_lockdep(struct workqueue_struct *wq) 3924 { 3925 } 3926 #endif 3927 3928 static void rcu_free_wq(struct rcu_head *rcu) 3929 { 3930 struct workqueue_struct *wq = 3931 container_of(rcu, struct workqueue_struct, rcu); 3932 3933 wq_free_lockdep(wq); 3934 free_percpu(wq->cpu_pwq); 3935 free_workqueue_attrs(wq->unbound_attrs); 3936 kfree(wq); 3937 } 3938 3939 static void rcu_free_pool(struct rcu_head *rcu) 3940 { 3941 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); 3942 3943 ida_destroy(&pool->worker_ida); 3944 free_workqueue_attrs(pool->attrs); 3945 kfree(pool); 3946 } 3947 3948 /** 3949 * put_unbound_pool - put a worker_pool 3950 * @pool: worker_pool to put 3951 * 3952 * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU 3953 * safe manner. get_unbound_pool() calls this function on its failure path 3954 * and this function should be able to release pools which went through, 3955 * successfully or not, init_worker_pool(). 3956 * 3957 * Should be called with wq_pool_mutex held. 3958 */ 3959 static void put_unbound_pool(struct worker_pool *pool) 3960 { 3961 DECLARE_COMPLETION_ONSTACK(detach_completion); 3962 struct worker *worker; 3963 LIST_HEAD(cull_list); 3964 3965 lockdep_assert_held(&wq_pool_mutex); 3966 3967 if (--pool->refcnt) 3968 return; 3969 3970 /* sanity checks */ 3971 if (WARN_ON(!(pool->cpu < 0)) || 3972 WARN_ON(!list_empty(&pool->worklist))) 3973 return; 3974 3975 /* release id and unhash */ 3976 if (pool->id >= 0) 3977 idr_remove(&worker_pool_idr, pool->id); 3978 hash_del(&pool->hash_node); 3979 3980 /* 3981 * Become the manager and destroy all workers. This prevents 3982 * @pool's workers from blocking on attach_mutex. 
We're the last 3983 * manager and @pool gets freed with the flag set. 3984 * 3985 * Having a concurrent manager is quite unlikely to happen as we can 3986 * only get here with 3987 * pwq->refcnt == pool->refcnt == 0 3988 * which implies no work queued to the pool, which implies no worker can 3989 * become the manager. However a worker could have taken the role of 3990 * manager before the refcnts dropped to 0, since maybe_create_worker() 3991 * drops pool->lock 3992 */ 3993 while (true) { 3994 rcuwait_wait_event(&manager_wait, 3995 !(pool->flags & POOL_MANAGER_ACTIVE), 3996 TASK_UNINTERRUPTIBLE); 3997 3998 mutex_lock(&wq_pool_attach_mutex); 3999 raw_spin_lock_irq(&pool->lock); 4000 if (!(pool->flags & POOL_MANAGER_ACTIVE)) { 4001 pool->flags |= POOL_MANAGER_ACTIVE; 4002 break; 4003 } 4004 raw_spin_unlock_irq(&pool->lock); 4005 mutex_unlock(&wq_pool_attach_mutex); 4006 } 4007 4008 while ((worker = first_idle_worker(pool))) 4009 set_worker_dying(worker, &cull_list); 4010 WARN_ON(pool->nr_workers || pool->nr_idle); 4011 raw_spin_unlock_irq(&pool->lock); 4012 4013 wake_dying_workers(&cull_list); 4014 4015 if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers)) 4016 pool->detach_completion = &detach_completion; 4017 mutex_unlock(&wq_pool_attach_mutex); 4018 4019 if (pool->detach_completion) 4020 wait_for_completion(pool->detach_completion); 4021 4022 /* shut down the timers */ 4023 del_timer_sync(&pool->idle_timer); 4024 cancel_work_sync(&pool->idle_cull_work); 4025 del_timer_sync(&pool->mayday_timer); 4026 4027 /* RCU protected to allow dereferences from get_work_pool() */ 4028 call_rcu(&pool->rcu, rcu_free_pool); 4029 } 4030 4031 /** 4032 * get_unbound_pool - get a worker_pool with the specified attributes 4033 * @attrs: the attributes of the worker_pool to get 4034 * 4035 * Obtain a worker_pool which has the same attributes as @attrs, bump the 4036 * reference count and return it. If there already is a matching 4037 * worker_pool, it will be used; otherwise, this function attempts to 4038 * create a new one. 4039 * 4040 * Should be called with wq_pool_mutex held. 4041 * 4042 * Return: On success, a worker_pool with the same attributes as @attrs. 4043 * On failure, %NULL. 4044 */ 4045 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) 4046 { 4047 struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA]; 4048 u32 hash = wqattrs_hash(attrs); 4049 struct worker_pool *pool; 4050 int pod, node = NUMA_NO_NODE; 4051 4052 lockdep_assert_held(&wq_pool_mutex); 4053 4054 /* do we already have a matching pool? 
*/ 4055 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { 4056 if (wqattrs_equal(pool->attrs, attrs)) { 4057 pool->refcnt++; 4058 return pool; 4059 } 4060 } 4061 4062 /* If __pod_cpumask is contained inside a NUMA pod, that's our node */ 4063 for (pod = 0; pod < pt->nr_pods; pod++) { 4064 if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) { 4065 node = pt->pod_node[pod]; 4066 break; 4067 } 4068 } 4069 4070 /* nope, create a new one */ 4071 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node); 4072 if (!pool || init_worker_pool(pool) < 0) 4073 goto fail; 4074 4075 pool->node = node; 4076 copy_workqueue_attrs(pool->attrs, attrs); 4077 wqattrs_clear_for_pool(pool->attrs); 4078 4079 if (worker_pool_assign_id(pool) < 0) 4080 goto fail; 4081 4082 /* create and start the initial worker */ 4083 if (wq_online && !create_worker(pool)) 4084 goto fail; 4085 4086 /* install */ 4087 hash_add(unbound_pool_hash, &pool->hash_node, hash); 4088 4089 return pool; 4090 fail: 4091 if (pool) 4092 put_unbound_pool(pool); 4093 return NULL; 4094 } 4095 4096 static void rcu_free_pwq(struct rcu_head *rcu) 4097 { 4098 kmem_cache_free(pwq_cache, 4099 container_of(rcu, struct pool_workqueue, rcu)); 4100 } 4101 4102 /* 4103 * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero 4104 * refcnt and needs to be destroyed. 4105 */ 4106 static void pwq_release_workfn(struct kthread_work *work) 4107 { 4108 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, 4109 release_work); 4110 struct workqueue_struct *wq = pwq->wq; 4111 struct worker_pool *pool = pwq->pool; 4112 bool is_last = false; 4113 4114 /* 4115 * When @pwq is not linked, it doesn't hold any reference to the 4116 * @wq, and @wq is invalid to access. 4117 */ 4118 if (!list_empty(&pwq->pwqs_node)) { 4119 mutex_lock(&wq->mutex); 4120 list_del_rcu(&pwq->pwqs_node); 4121 is_last = list_empty(&wq->pwqs); 4122 mutex_unlock(&wq->mutex); 4123 } 4124 4125 if (wq->flags & WQ_UNBOUND) { 4126 mutex_lock(&wq_pool_mutex); 4127 put_unbound_pool(pool); 4128 mutex_unlock(&wq_pool_mutex); 4129 } 4130 4131 call_rcu(&pwq->rcu, rcu_free_pwq); 4132 4133 /* 4134 * If we're the last pwq going away, @wq is already dead and no one 4135 * is gonna access it anymore. Schedule RCU free. 4136 */ 4137 if (is_last) { 4138 wq_unregister_lockdep(wq); 4139 call_rcu(&wq->rcu, rcu_free_wq); 4140 } 4141 } 4142 4143 /** 4144 * pwq_adjust_max_active - update a pwq's max_active to the current setting 4145 * @pwq: target pool_workqueue 4146 * 4147 * If @pwq isn't freezing, set @pwq->max_active to the associated 4148 * workqueue's saved_max_active and activate inactive work items 4149 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero. 4150 */ 4151 static void pwq_adjust_max_active(struct pool_workqueue *pwq) 4152 { 4153 struct workqueue_struct *wq = pwq->wq; 4154 bool freezable = wq->flags & WQ_FREEZABLE; 4155 unsigned long flags; 4156 4157 /* for @wq->saved_max_active */ 4158 lockdep_assert_held(&wq->mutex); 4159 4160 /* fast exit for non-freezable wqs */ 4161 if (!freezable && pwq->max_active == wq->saved_max_active) 4162 return; 4163 4164 /* this function can be called during early boot w/ irq disabled */ 4165 raw_spin_lock_irqsave(&pwq->pool->lock, flags); 4166 4167 /* 4168 * During [un]freezing, the caller is responsible for ensuring that 4169 * this function is called at least once after @workqueue_freezing 4170 * is updated and visible. 
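 *
 * Illustrative sequence for a WQ_FREEZABLE workqueue (a sketch; cf.
 * freeze_workqueues_begin() and thaw_workqueues()):
 *
 *	freeze_workqueues_begin()	workqueue_freezing = true
 *	  pwq_adjust_max_active()	max_active = 0, nothing new starts
 *	thaw_workqueues()		workqueue_freezing = false
 *	  pwq_adjust_max_active()	saved_max_active restored, inactive
 *					work items are activated again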
4171 */ 4172 if (!freezable || !workqueue_freezing) { 4173 pwq->max_active = wq->saved_max_active; 4174 4175 while (!list_empty(&pwq->inactive_works) && 4176 pwq->nr_active < pwq->max_active) 4177 pwq_activate_first_inactive(pwq); 4178 4179 kick_pool(pwq->pool); 4180 } else { 4181 pwq->max_active = 0; 4182 } 4183 4184 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); 4185 } 4186 4187 /* initialize newly allocated @pwq which is associated with @wq and @pool */ 4188 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, 4189 struct worker_pool *pool) 4190 { 4191 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); 4192 4193 memset(pwq, 0, sizeof(*pwq)); 4194 4195 pwq->pool = pool; 4196 pwq->wq = wq; 4197 pwq->flush_color = -1; 4198 pwq->refcnt = 1; 4199 INIT_LIST_HEAD(&pwq->inactive_works); 4200 INIT_LIST_HEAD(&pwq->pwqs_node); 4201 INIT_LIST_HEAD(&pwq->mayday_node); 4202 kthread_init_work(&pwq->release_work, pwq_release_workfn); 4203 } 4204 4205 /* sync @pwq with the current state of its associated wq and link it */ 4206 static void link_pwq(struct pool_workqueue *pwq) 4207 { 4208 struct workqueue_struct *wq = pwq->wq; 4209 4210 lockdep_assert_held(&wq->mutex); 4211 4212 /* may be called multiple times, ignore if already linked */ 4213 if (!list_empty(&pwq->pwqs_node)) 4214 return; 4215 4216 /* set the matching work_color */ 4217 pwq->work_color = wq->work_color; 4218 4219 /* sync max_active to the current setting */ 4220 pwq_adjust_max_active(pwq); 4221 4222 /* link in @pwq */ 4223 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); 4224 } 4225 4226 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */ 4227 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, 4228 const struct workqueue_attrs *attrs) 4229 { 4230 struct worker_pool *pool; 4231 struct pool_workqueue *pwq; 4232 4233 lockdep_assert_held(&wq_pool_mutex); 4234 4235 pool = get_unbound_pool(attrs); 4236 if (!pool) 4237 return NULL; 4238 4239 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); 4240 if (!pwq) { 4241 put_unbound_pool(pool); 4242 return NULL; 4243 } 4244 4245 init_pwq(pwq, wq, pool); 4246 return pwq; 4247 } 4248 4249 /** 4250 * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod 4251 * @attrs: the wq_attrs of the default pwq of the target workqueue 4252 * @cpu: the target CPU 4253 * @cpu_going_down: if >= 0, the CPU to consider as offline 4254 * 4255 * Calculate the cpumask a workqueue with @attrs should use on @pod. If 4256 * @cpu_going_down is >= 0, that cpu is considered offline during calculation. 4257 * The result is stored in @attrs->__pod_cpumask. 4258 * 4259 * If pod affinity is not enabled, @attrs->cpumask is always used. If enabled 4260 * and @pod has online CPUs requested by @attrs, the returned cpumask is the 4261 * intersection of the possible CPUs of @pod and @attrs->cpumask. 4262 * 4263 * The caller is responsible for ensuring that the cpumask of @pod stays stable. 4264 */ 4265 static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu, 4266 int cpu_going_down) 4267 { 4268 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 4269 int pod = pt->cpu_pod[cpu]; 4270 4271 /* does @pod have any online CPUs @attrs wants? 
*/ 4272 cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask); 4273 cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask); 4274 if (cpu_going_down >= 0) 4275 cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask); 4276 4277 if (cpumask_empty(attrs->__pod_cpumask)) { 4278 cpumask_copy(attrs->__pod_cpumask, attrs->cpumask); 4279 return; 4280 } 4281 4282 /* yeap, return possible CPUs in @pod that @attrs wants */ 4283 cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]); 4284 4285 if (cpumask_empty(attrs->__pod_cpumask)) 4286 pr_warn_once("WARNING: workqueue cpumask: online intersect > " 4287 "possible intersect\n"); 4288 } 4289 4290 /* install @pwq into @wq's cpu_pwq and return the old pwq */ 4291 static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq, 4292 int cpu, struct pool_workqueue *pwq) 4293 { 4294 struct pool_workqueue *old_pwq; 4295 4296 lockdep_assert_held(&wq_pool_mutex); 4297 lockdep_assert_held(&wq->mutex); 4298 4299 /* link_pwq() can handle duplicate calls */ 4300 link_pwq(pwq); 4301 4302 old_pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu)); 4303 rcu_assign_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu), pwq); 4304 return old_pwq; 4305 } 4306 4307 /* context to store the prepared attrs & pwqs before applying */ 4308 struct apply_wqattrs_ctx { 4309 struct workqueue_struct *wq; /* target workqueue */ 4310 struct workqueue_attrs *attrs; /* attrs to apply */ 4311 struct list_head list; /* queued for batching commit */ 4312 struct pool_workqueue *dfl_pwq; 4313 struct pool_workqueue *pwq_tbl[]; 4314 }; 4315 4316 /* free the resources after success or abort */ 4317 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx) 4318 { 4319 if (ctx) { 4320 int cpu; 4321 4322 for_each_possible_cpu(cpu) 4323 put_pwq_unlocked(ctx->pwq_tbl[cpu]); 4324 put_pwq_unlocked(ctx->dfl_pwq); 4325 4326 free_workqueue_attrs(ctx->attrs); 4327 4328 kfree(ctx); 4329 } 4330 } 4331 4332 /* allocate the attrs and pwqs for later installation */ 4333 static struct apply_wqattrs_ctx * 4334 apply_wqattrs_prepare(struct workqueue_struct *wq, 4335 const struct workqueue_attrs *attrs, 4336 const cpumask_var_t unbound_cpumask) 4337 { 4338 struct apply_wqattrs_ctx *ctx; 4339 struct workqueue_attrs *new_attrs; 4340 int cpu; 4341 4342 lockdep_assert_held(&wq_pool_mutex); 4343 4344 if (WARN_ON(attrs->affn_scope < 0 || 4345 attrs->affn_scope >= WQ_AFFN_NR_TYPES)) 4346 return ERR_PTR(-EINVAL); 4347 4348 ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL); 4349 4350 new_attrs = alloc_workqueue_attrs(); 4351 if (!ctx || !new_attrs) 4352 goto out_free; 4353 4354 /* 4355 * If something goes wrong during CPU up/down, we'll fall back to 4356 * the default pwq covering whole @attrs->cpumask. Always create 4357 * it even if we don't use it immediately. 4358 */ 4359 copy_workqueue_attrs(new_attrs, attrs); 4360 wqattrs_actualize_cpumask(new_attrs, unbound_cpumask); 4361 cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask); 4362 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); 4363 if (!ctx->dfl_pwq) 4364 goto out_free; 4365 4366 for_each_possible_cpu(cpu) { 4367 if (new_attrs->ordered) { 4368 ctx->dfl_pwq->refcnt++; 4369 ctx->pwq_tbl[cpu] = ctx->dfl_pwq; 4370 } else { 4371 wq_calc_pod_cpumask(new_attrs, cpu, -1); 4372 ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs); 4373 if (!ctx->pwq_tbl[cpu]) 4374 goto out_free; 4375 } 4376 } 4377 4378 /* save the user configured attrs and sanitize it. 
 */
	copy_workqueue_attrs(new_attrs, attrs);
	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
	cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
	ctx->attrs = new_attrs;

	ctx->wq = wq;
	return ctx;

out_free:
	free_workqueue_attrs(new_attrs);
	apply_wqattrs_cleanup(ctx);
	return ERR_PTR(-ENOMEM);
}

/* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
{
	int cpu;

	/* all pwqs have been created successfully, let's install'em */
	mutex_lock(&ctx->wq->mutex);

	copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);

	/* save the previous pwq and install the new one */
	for_each_possible_cpu(cpu)
		ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu,
							ctx->pwq_tbl[cpu]);

	/* @dfl_pwq might not have been used, ensure it's linked */
	link_pwq(ctx->dfl_pwq);
	swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);

	mutex_unlock(&ctx->wq->mutex);
}

static void apply_wqattrs_lock(void)
{
	/* CPUs should stay stable across pwq creations and installations */
	cpus_read_lock();
	mutex_lock(&wq_pool_mutex);
}

static void apply_wqattrs_unlock(void)
{
	mutex_unlock(&wq_pool_mutex);
	cpus_read_unlock();
}

static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
					const struct workqueue_attrs *attrs)
{
	struct apply_wqattrs_ctx *ctx;

	/* only unbound workqueues can change attributes */
	if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
		return -EINVAL;

	/* creating multiple pwqs breaks ordering guarantee */
	if (!list_empty(&wq->pwqs)) {
		if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
			return -EINVAL;

		wq->flags &= ~__WQ_ORDERED;
	}

	ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* the ctx has been prepared successfully, let's commit it */
	apply_wqattrs_commit(ctx);
	apply_wqattrs_cleanup(ctx);

	return 0;
}

/**
 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
 * @wq: the target workqueue
 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
 *
 * Apply @attrs to an unbound workqueue @wq.  Unless disabled, this function
 * maps a separate pwq to each CPU pod with possible CPUs in @attrs->cpumask so
 * that work items are affine to the pod they were issued on.  Older pwqs are
 * released as in-flight work items finish.  Note that a work item which
 * repeatedly requeues itself back-to-back will stay on its current pwq.
 *
 * Performs GFP_KERNEL allocations.
 *
 * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock().
 *
 * Return: 0 on success and -errno on failure.
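 *
 * Illustrative usage (error handling elided; @my_wq and @my_cpumask are
 * hypothetical, and @my_wq must be WQ_UNBOUND):
 *
 *	attrs = alloc_workqueue_attrs();
 *	attrs->nice = -10;
 *	cpumask_copy(attrs->cpumask, my_cpumask);
 *	cpus_read_lock();
 *	ret = apply_workqueue_attrs(my_wq, attrs);
 *	cpus_read_unlock();
 *	free_workqueue_attrs(attrs);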
4472 */ 4473 int apply_workqueue_attrs(struct workqueue_struct *wq, 4474 const struct workqueue_attrs *attrs) 4475 { 4476 int ret; 4477 4478 lockdep_assert_cpus_held(); 4479 4480 mutex_lock(&wq_pool_mutex); 4481 ret = apply_workqueue_attrs_locked(wq, attrs); 4482 mutex_unlock(&wq_pool_mutex); 4483 4484 return ret; 4485 } 4486 4487 /** 4488 * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug 4489 * @wq: the target workqueue 4490 * @cpu: the CPU to update pool association for 4491 * @hotplug_cpu: the CPU coming up or going down 4492 * @online: whether @cpu is coming up or going down 4493 * 4494 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and 4495 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update pod affinity of 4496 * @wq accordingly. 4497 * 4498 * 4499 * If pod affinity can't be adjusted due to memory allocation failure, it falls 4500 * back to @wq->dfl_pwq which may not be optimal but is always correct. 4501 * 4502 * Note that when the last allowed CPU of a pod goes offline for a workqueue 4503 * with a cpumask spanning multiple pods, the workers which were already 4504 * executing the work items for the workqueue will lose their CPU affinity and 4505 * may execute on any CPU. This is similar to how per-cpu workqueues behave on 4506 * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's 4507 * responsibility to flush the work item from CPU_DOWN_PREPARE. 4508 */ 4509 static void wq_update_pod(struct workqueue_struct *wq, int cpu, 4510 int hotplug_cpu, bool online) 4511 { 4512 int off_cpu = online ? -1 : hotplug_cpu; 4513 struct pool_workqueue *old_pwq = NULL, *pwq; 4514 struct workqueue_attrs *target_attrs; 4515 4516 lockdep_assert_held(&wq_pool_mutex); 4517 4518 if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered) 4519 return; 4520 4521 /* 4522 * We don't wanna alloc/free wq_attrs for each wq for each CPU. 4523 * Let's use a preallocated one. The following buf is protected by 4524 * CPU hotplug exclusion. 4525 */ 4526 target_attrs = wq_update_pod_attrs_buf; 4527 4528 copy_workqueue_attrs(target_attrs, wq->unbound_attrs); 4529 wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask); 4530 4531 /* nothing to do if the target cpumask matches the current pwq */ 4532 wq_calc_pod_cpumask(target_attrs, cpu, off_cpu); 4533 pwq = rcu_dereference_protected(*per_cpu_ptr(wq->cpu_pwq, cpu), 4534 lockdep_is_held(&wq_pool_mutex)); 4535 if (wqattrs_equal(target_attrs, pwq->pool->attrs)) 4536 return; 4537 4538 /* create a new pwq */ 4539 pwq = alloc_unbound_pwq(wq, target_attrs); 4540 if (!pwq) { 4541 pr_warn("workqueue: allocation failed while updating CPU pod affinity of \"%s\"\n", 4542 wq->name); 4543 goto use_dfl_pwq; 4544 } 4545 4546 /* Install the new pwq. 
 */
	mutex_lock(&wq->mutex);
	old_pwq = install_unbound_pwq(wq, cpu, pwq);
	goto out_unlock;

use_dfl_pwq:
	mutex_lock(&wq->mutex);
	raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
	get_pwq(wq->dfl_pwq);
	raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
	old_pwq = install_unbound_pwq(wq, cpu, wq->dfl_pwq);
out_unlock:
	mutex_unlock(&wq->mutex);
	put_pwq_unlocked(old_pwq);
}

static int alloc_and_link_pwqs(struct workqueue_struct *wq)
{
	bool highpri = wq->flags & WQ_HIGHPRI;
	int cpu, ret;

	wq->cpu_pwq = alloc_percpu(struct pool_workqueue *);
	if (!wq->cpu_pwq)
		goto enomem;

	if (!(wq->flags & WQ_UNBOUND)) {
		for_each_possible_cpu(cpu) {
			struct pool_workqueue **pwq_p =
				per_cpu_ptr(wq->cpu_pwq, cpu);
			struct worker_pool *pool =
				&(per_cpu_ptr(cpu_worker_pools, cpu)[highpri]);

			*pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL,
						       pool->node);
			if (!*pwq_p)
				goto enomem;

			init_pwq(*pwq_p, wq, pool);

			mutex_lock(&wq->mutex);
			link_pwq(*pwq_p);
			mutex_unlock(&wq->mutex);
		}
		return 0;
	}

	cpus_read_lock();
	if (wq->flags & __WQ_ORDERED) {
		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
		/* there should only be a single pwq for ordering guarantee */
		WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
			      wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
		     "ordering guarantee broken for workqueue %s\n", wq->name);
	} else {
		ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
	}
	cpus_read_unlock();

	/*
	 * For unbound pwqs, flushing pwq_release_worker ensures that
	 * pwq_release_workfn() completes before kfree(wq) is called.
	 */
	if (ret)
		kthread_flush_worker(pwq_release_worker);

	return ret;

enomem:
	if (wq->cpu_pwq) {
		for_each_possible_cpu(cpu) {
			struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);

			if (pwq)
				kmem_cache_free(pwq_cache, pwq);
		}
		free_percpu(wq->cpu_pwq);
		wq->cpu_pwq = NULL;
	}
	return -ENOMEM;
}

static int wq_clamp_max_active(int max_active, unsigned int flags,
			       const char *name)
{
	if (max_active < 1 || max_active > WQ_MAX_ACTIVE)
		pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
			max_active, name, 1, WQ_MAX_ACTIVE);

	return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
}

/*
 * Workqueues which may be used during memory reclaim should have a rescuer
 * to guarantee forward progress.
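 *
 * E.g. a hypothetical workqueue used on a block driver's writeback path
 * would be allocated with (illustrative):
 *
 *	wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 0);
 *
 * so that init_rescuer() below attaches a rescuer thread to it.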
4639 */ 4640 static int init_rescuer(struct workqueue_struct *wq) 4641 { 4642 struct worker *rescuer; 4643 int ret; 4644 4645 if (!(wq->flags & WQ_MEM_RECLAIM)) 4646 return 0; 4647 4648 rescuer = alloc_worker(NUMA_NO_NODE); 4649 if (!rescuer) { 4650 pr_err("workqueue: Failed to allocate a rescuer for wq \"%s\"\n", 4651 wq->name); 4652 return -ENOMEM; 4653 } 4654 4655 rescuer->rescue_wq = wq; 4656 rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name); 4657 if (IS_ERR(rescuer->task)) { 4658 ret = PTR_ERR(rescuer->task); 4659 pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe", 4660 wq->name, ERR_PTR(ret)); 4661 kfree(rescuer); 4662 return ret; 4663 } 4664 4665 wq->rescuer = rescuer; 4666 kthread_bind_mask(rescuer->task, cpu_possible_mask); 4667 wake_up_process(rescuer->task); 4668 4669 return 0; 4670 } 4671 4672 __printf(1, 4) 4673 struct workqueue_struct *alloc_workqueue(const char *fmt, 4674 unsigned int flags, 4675 int max_active, ...) 4676 { 4677 va_list args; 4678 struct workqueue_struct *wq; 4679 struct pool_workqueue *pwq; 4680 4681 /* 4682 * Unbound && max_active == 1 used to imply ordered, which is no longer 4683 * the case on many machines due to per-pod pools. While 4684 * alloc_ordered_workqueue() is the right way to create an ordered 4685 * workqueue, keep the previous behavior to avoid subtle breakages. 4686 */ 4687 if ((flags & WQ_UNBOUND) && max_active == 1) 4688 flags |= __WQ_ORDERED; 4689 4690 /* see the comment above the definition of WQ_POWER_EFFICIENT */ 4691 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) 4692 flags |= WQ_UNBOUND; 4693 4694 /* allocate wq and format name */ 4695 wq = kzalloc(sizeof(*wq), GFP_KERNEL); 4696 if (!wq) 4697 return NULL; 4698 4699 if (flags & WQ_UNBOUND) { 4700 wq->unbound_attrs = alloc_workqueue_attrs(); 4701 if (!wq->unbound_attrs) 4702 goto err_free_wq; 4703 } 4704 4705 va_start(args, max_active); 4706 vsnprintf(wq->name, sizeof(wq->name), fmt, args); 4707 va_end(args); 4708 4709 max_active = max_active ?: WQ_DFL_ACTIVE; 4710 max_active = wq_clamp_max_active(max_active, flags, wq->name); 4711 4712 /* init wq */ 4713 wq->flags = flags; 4714 wq->saved_max_active = max_active; 4715 mutex_init(&wq->mutex); 4716 atomic_set(&wq->nr_pwqs_to_flush, 0); 4717 INIT_LIST_HEAD(&wq->pwqs); 4718 INIT_LIST_HEAD(&wq->flusher_queue); 4719 INIT_LIST_HEAD(&wq->flusher_overflow); 4720 INIT_LIST_HEAD(&wq->maydays); 4721 4722 wq_init_lockdep(wq); 4723 INIT_LIST_HEAD(&wq->list); 4724 4725 if (alloc_and_link_pwqs(wq) < 0) 4726 goto err_unreg_lockdep; 4727 4728 if (wq_online && init_rescuer(wq) < 0) 4729 goto err_destroy; 4730 4731 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) 4732 goto err_destroy; 4733 4734 /* 4735 * wq_pool_mutex protects global freeze state and workqueues list. 4736 * Grab it, adjust max_active and add the new @wq to workqueues 4737 * list. 
4738 */ 4739 mutex_lock(&wq_pool_mutex); 4740 4741 mutex_lock(&wq->mutex); 4742 for_each_pwq(pwq, wq) 4743 pwq_adjust_max_active(pwq); 4744 mutex_unlock(&wq->mutex); 4745 4746 list_add_tail_rcu(&wq->list, &workqueues); 4747 4748 mutex_unlock(&wq_pool_mutex); 4749 4750 return wq; 4751 4752 err_unreg_lockdep: 4753 wq_unregister_lockdep(wq); 4754 wq_free_lockdep(wq); 4755 err_free_wq: 4756 free_workqueue_attrs(wq->unbound_attrs); 4757 kfree(wq); 4758 return NULL; 4759 err_destroy: 4760 destroy_workqueue(wq); 4761 return NULL; 4762 } 4763 EXPORT_SYMBOL_GPL(alloc_workqueue); 4764 4765 static bool pwq_busy(struct pool_workqueue *pwq) 4766 { 4767 int i; 4768 4769 for (i = 0; i < WORK_NR_COLORS; i++) 4770 if (pwq->nr_in_flight[i]) 4771 return true; 4772 4773 if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1)) 4774 return true; 4775 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) 4776 return true; 4777 4778 return false; 4779 } 4780 4781 /** 4782 * destroy_workqueue - safely terminate a workqueue 4783 * @wq: target workqueue 4784 * 4785 * Safely destroy a workqueue. All work currently pending will be done first. 4786 */ 4787 void destroy_workqueue(struct workqueue_struct *wq) 4788 { 4789 struct pool_workqueue *pwq; 4790 int cpu; 4791 4792 /* 4793 * Remove it from sysfs first so that sanity check failure doesn't 4794 * lead to sysfs name conflicts. 4795 */ 4796 workqueue_sysfs_unregister(wq); 4797 4798 /* mark the workqueue destruction is in progress */ 4799 mutex_lock(&wq->mutex); 4800 wq->flags |= __WQ_DESTROYING; 4801 mutex_unlock(&wq->mutex); 4802 4803 /* drain it before proceeding with destruction */ 4804 drain_workqueue(wq); 4805 4806 /* kill rescuer, if sanity checks fail, leave it w/o rescuer */ 4807 if (wq->rescuer) { 4808 struct worker *rescuer = wq->rescuer; 4809 4810 /* this prevents new queueing */ 4811 raw_spin_lock_irq(&wq_mayday_lock); 4812 wq->rescuer = NULL; 4813 raw_spin_unlock_irq(&wq_mayday_lock); 4814 4815 /* rescuer will empty maydays list before exiting */ 4816 kthread_stop(rescuer->task); 4817 kfree(rescuer); 4818 } 4819 4820 /* 4821 * Sanity checks - grab all the locks so that we wait for all 4822 * in-flight operations which may do put_pwq(). 4823 */ 4824 mutex_lock(&wq_pool_mutex); 4825 mutex_lock(&wq->mutex); 4826 for_each_pwq(pwq, wq) { 4827 raw_spin_lock_irq(&pwq->pool->lock); 4828 if (WARN_ON(pwq_busy(pwq))) { 4829 pr_warn("%s: %s has the following busy pwq\n", 4830 __func__, wq->name); 4831 show_pwq(pwq); 4832 raw_spin_unlock_irq(&pwq->pool->lock); 4833 mutex_unlock(&wq->mutex); 4834 mutex_unlock(&wq_pool_mutex); 4835 show_one_workqueue(wq); 4836 return; 4837 } 4838 raw_spin_unlock_irq(&pwq->pool->lock); 4839 } 4840 mutex_unlock(&wq->mutex); 4841 4842 /* 4843 * wq list is used to freeze wq, remove from list after 4844 * flushing is complete in case freeze races us. 4845 */ 4846 list_del_rcu(&wq->list); 4847 mutex_unlock(&wq_pool_mutex); 4848 4849 /* 4850 * We're the sole accessor of @wq. Directly access cpu_pwq and dfl_pwq 4851 * to put the base refs. @wq will be auto-destroyed from the last 4852 * pwq_put. RCU read lock prevents @wq from going away from under us. 
 */
	rcu_read_lock();

	for_each_possible_cpu(cpu) {
		pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
		RCU_INIT_POINTER(*per_cpu_ptr(wq->cpu_pwq, cpu), NULL);
		put_pwq_unlocked(pwq);
	}

	put_pwq_unlocked(wq->dfl_pwq);
	wq->dfl_pwq = NULL;

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

/**
 * workqueue_set_max_active - adjust max_active of a workqueue
 * @wq: target workqueue
 * @max_active: new max_active value.
 *
 * Set max_active of @wq to @max_active.
 *
 * CONTEXT:
 * Don't call from IRQ context.
 */
void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
{
	struct pool_workqueue *pwq;

	/* disallow meddling with max_active for ordered workqueues */
	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
		return;

	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);

	mutex_lock(&wq->mutex);

	wq->flags &= ~__WQ_ORDERED;
	wq->saved_max_active = max_active;

	for_each_pwq(pwq, wq)
		pwq_adjust_max_active(pwq);

	mutex_unlock(&wq->mutex);
}
EXPORT_SYMBOL_GPL(workqueue_set_max_active);

/**
 * current_work - retrieve %current task's work struct
 *
 * Determine if %current task is a workqueue worker and what it's working on.
 * Useful to find out the context that the %current task is running in.
 *
 * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
 */
struct work_struct *current_work(void)
{
	struct worker *worker = current_wq_worker();

	return worker ? worker->current_work : NULL;
}
EXPORT_SYMBOL(current_work);

/**
 * current_is_workqueue_rescuer - is %current workqueue rescuer?
 *
 * Determine whether %current is a workqueue rescuer.  Can be used from
 * work functions to determine whether it's being run off the rescuer task.
 *
 * Return: %true if %current is a workqueue rescuer.  %false otherwise.
 */
bool current_is_workqueue_rescuer(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->rescue_wq;
}

/**
 * workqueue_congested - test whether a workqueue is congested
 * @cpu: CPU in question
 * @wq: target workqueue
 *
 * Test whether @wq's cpu workqueue for @cpu is congested.  There is
 * no synchronization around this function and the test result is
 * unreliable and only useful as advisory hints or for debugging.
 *
 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
 *
 * With the exception of ordered workqueues, all workqueues have per-cpu
 * pool_workqueues, each with its own congested state.  A workqueue being
 * congested on one CPU doesn't mean that the workqueue is congested on any
 * other CPUs.
 *
 * Return:
 * %true if congested, %false otherwise.
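 *
 * Illustrative advisory use (hypothetical @my_wq, @my_work and
 * my_backoff(); the result may already be stale when acted upon):
 *
 *	if (!workqueue_congested(WORK_CPU_UNBOUND, my_wq))
 *		queue_work(my_wq, &my_work);
 *	else
 *		my_backoff();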
4950 */ 4951 bool workqueue_congested(int cpu, struct workqueue_struct *wq) 4952 { 4953 struct pool_workqueue *pwq; 4954 bool ret; 4955 4956 rcu_read_lock(); 4957 preempt_disable(); 4958 4959 if (cpu == WORK_CPU_UNBOUND) 4960 cpu = smp_processor_id(); 4961 4962 pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); 4963 ret = !list_empty(&pwq->inactive_works); 4964 4965 preempt_enable(); 4966 rcu_read_unlock(); 4967 4968 return ret; 4969 } 4970 EXPORT_SYMBOL_GPL(workqueue_congested); 4971 4972 /** 4973 * work_busy - test whether a work is currently pending or running 4974 * @work: the work to be tested 4975 * 4976 * Test whether @work is currently pending or running. There is no 4977 * synchronization around this function and the test result is 4978 * unreliable and only useful as advisory hints or for debugging. 4979 * 4980 * Return: 4981 * OR'd bitmask of WORK_BUSY_* bits. 4982 */ 4983 unsigned int work_busy(struct work_struct *work) 4984 { 4985 struct worker_pool *pool; 4986 unsigned long flags; 4987 unsigned int ret = 0; 4988 4989 if (work_pending(work)) 4990 ret |= WORK_BUSY_PENDING; 4991 4992 rcu_read_lock(); 4993 pool = get_work_pool(work); 4994 if (pool) { 4995 raw_spin_lock_irqsave(&pool->lock, flags); 4996 if (find_worker_executing_work(pool, work)) 4997 ret |= WORK_BUSY_RUNNING; 4998 raw_spin_unlock_irqrestore(&pool->lock, flags); 4999 } 5000 rcu_read_unlock(); 5001 5002 return ret; 5003 } 5004 EXPORT_SYMBOL_GPL(work_busy); 5005 5006 /** 5007 * set_worker_desc - set description for the current work item 5008 * @fmt: printf-style format string 5009 * @...: arguments for the format string 5010 * 5011 * This function can be called by a running work function to describe what 5012 * the work item is about. If the worker task gets dumped, this 5013 * information will be printed out together to help debugging. The 5014 * description can be at most WORKER_DESC_LEN including the trailing '\0'. 5015 */ 5016 void set_worker_desc(const char *fmt, ...) 5017 { 5018 struct worker *worker = current_wq_worker(); 5019 va_list args; 5020 5021 if (worker) { 5022 va_start(args, fmt); 5023 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); 5024 va_end(args); 5025 } 5026 } 5027 EXPORT_SYMBOL_GPL(set_worker_desc); 5028 5029 /** 5030 * print_worker_info - print out worker information and description 5031 * @log_lvl: the log level to use when printing 5032 * @task: target task 5033 * 5034 * If @task is a worker and currently executing a work item, print out the 5035 * name of the workqueue being serviced and worker description set with 5036 * set_worker_desc() by the currently executing work item. 5037 * 5038 * This function can be safely called on any task as long as the 5039 * task_struct itself is accessible. While safe, this function isn't 5040 * synchronized and may print out mixups or garbages of limited length. 5041 */ 5042 void print_worker_info(const char *log_lvl, struct task_struct *task) 5043 { 5044 work_func_t *fn = NULL; 5045 char name[WQ_NAME_LEN] = { }; 5046 char desc[WORKER_DESC_LEN] = { }; 5047 struct pool_workqueue *pwq = NULL; 5048 struct workqueue_struct *wq = NULL; 5049 struct worker *worker; 5050 5051 if (!(task->flags & PF_WQ_WORKER)) 5052 return; 5053 5054 /* 5055 * This function is called without any synchronization and @task 5056 * could be in any state. Be careful with dereferences. 5057 */ 5058 worker = kthread_probe_data(task); 5059 5060 /* 5061 * Carefully copy the associated workqueue's workfn, name and desc. 5062 * Keep the original last '\0' in case the original is garbage. 
5063 */ 5064 copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn)); 5065 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); 5066 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq)); 5067 copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1); 5068 copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1); 5069 5070 if (fn || name[0] || desc[0]) { 5071 printk("%sWorkqueue: %s %ps", log_lvl, name, fn); 5072 if (strcmp(name, desc)) 5073 pr_cont(" (%s)", desc); 5074 pr_cont("\n"); 5075 } 5076 } 5077 5078 static void pr_cont_pool_info(struct worker_pool *pool) 5079 { 5080 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); 5081 if (pool->node != NUMA_NO_NODE) 5082 pr_cont(" node=%d", pool->node); 5083 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); 5084 } 5085 5086 struct pr_cont_work_struct { 5087 bool comma; 5088 work_func_t func; 5089 long ctr; 5090 }; 5091 5092 static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp) 5093 { 5094 if (!pcwsp->ctr) 5095 goto out_record; 5096 if (func == pcwsp->func) { 5097 pcwsp->ctr++; 5098 return; 5099 } 5100 if (pcwsp->ctr == 1) 5101 pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func); 5102 else 5103 pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func); 5104 pcwsp->ctr = 0; 5105 out_record: 5106 if ((long)func == -1L) 5107 return; 5108 pcwsp->comma = comma; 5109 pcwsp->func = func; 5110 pcwsp->ctr = 1; 5111 } 5112 5113 static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp) 5114 { 5115 if (work->func == wq_barrier_func) { 5116 struct wq_barrier *barr; 5117 5118 barr = container_of(work, struct wq_barrier, work); 5119 5120 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp); 5121 pr_cont("%s BAR(%d)", comma ? "," : "", 5122 task_pid_nr(barr->task)); 5123 } else { 5124 if (!comma) 5125 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp); 5126 pr_cont_work_flush(comma, work->func, pcwsp); 5127 } 5128 } 5129 5130 static void show_pwq(struct pool_workqueue *pwq) 5131 { 5132 struct pr_cont_work_struct pcws = { .ctr = 0, }; 5133 struct worker_pool *pool = pwq->pool; 5134 struct work_struct *work; 5135 struct worker *worker; 5136 bool has_in_flight = false, has_pending = false; 5137 int bkt; 5138 5139 pr_info(" pwq %d:", pool->id); 5140 pr_cont_pool_info(pool); 5141 5142 pr_cont(" active=%d/%d refcnt=%d%s\n", 5143 pwq->nr_active, pwq->max_active, pwq->refcnt, 5144 !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); 5145 5146 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 5147 if (worker->current_pwq == pwq) { 5148 has_in_flight = true; 5149 break; 5150 } 5151 } 5152 if (has_in_flight) { 5153 bool comma = false; 5154 5155 pr_info(" in-flight:"); 5156 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 5157 if (worker->current_pwq != pwq) 5158 continue; 5159 5160 pr_cont("%s %d%s:%ps", comma ? "," : "", 5161 task_pid_nr(worker->task), 5162 worker->rescue_wq ? 
"(RESCUER)" : "", 5163 worker->current_func); 5164 list_for_each_entry(work, &worker->scheduled, entry) 5165 pr_cont_work(false, work, &pcws); 5166 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 5167 comma = true; 5168 } 5169 pr_cont("\n"); 5170 } 5171 5172 list_for_each_entry(work, &pool->worklist, entry) { 5173 if (get_work_pwq(work) == pwq) { 5174 has_pending = true; 5175 break; 5176 } 5177 } 5178 if (has_pending) { 5179 bool comma = false; 5180 5181 pr_info(" pending:"); 5182 list_for_each_entry(work, &pool->worklist, entry) { 5183 if (get_work_pwq(work) != pwq) 5184 continue; 5185 5186 pr_cont_work(comma, work, &pcws); 5187 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 5188 } 5189 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 5190 pr_cont("\n"); 5191 } 5192 5193 if (!list_empty(&pwq->inactive_works)) { 5194 bool comma = false; 5195 5196 pr_info(" inactive:"); 5197 list_for_each_entry(work, &pwq->inactive_works, entry) { 5198 pr_cont_work(comma, work, &pcws); 5199 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 5200 } 5201 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 5202 pr_cont("\n"); 5203 } 5204 } 5205 5206 /** 5207 * show_one_workqueue - dump state of specified workqueue 5208 * @wq: workqueue whose state will be printed 5209 */ 5210 void show_one_workqueue(struct workqueue_struct *wq) 5211 { 5212 struct pool_workqueue *pwq; 5213 bool idle = true; 5214 unsigned long flags; 5215 5216 for_each_pwq(pwq, wq) { 5217 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { 5218 idle = false; 5219 break; 5220 } 5221 } 5222 if (idle) /* Nothing to print for idle workqueue */ 5223 return; 5224 5225 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); 5226 5227 for_each_pwq(pwq, wq) { 5228 raw_spin_lock_irqsave(&pwq->pool->lock, flags); 5229 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { 5230 /* 5231 * Defer printing to avoid deadlocks in console 5232 * drivers that queue work while holding locks 5233 * also taken in their write paths. 5234 */ 5235 printk_deferred_enter(); 5236 show_pwq(pwq); 5237 printk_deferred_exit(); 5238 } 5239 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); 5240 /* 5241 * We could be printing a lot from atomic context, e.g. 5242 * sysrq-t -> show_all_workqueues(). Avoid triggering 5243 * hard lockup. 5244 */ 5245 touch_nmi_watchdog(); 5246 } 5247 5248 } 5249 5250 /** 5251 * show_one_worker_pool - dump state of specified worker pool 5252 * @pool: worker pool whose state will be printed 5253 */ 5254 static void show_one_worker_pool(struct worker_pool *pool) 5255 { 5256 struct worker *worker; 5257 bool first = true; 5258 unsigned long flags; 5259 unsigned long hung = 0; 5260 5261 raw_spin_lock_irqsave(&pool->lock, flags); 5262 if (pool->nr_workers == pool->nr_idle) 5263 goto next_pool; 5264 5265 /* How long the first pending work is waiting for a worker. */ 5266 if (!list_empty(&pool->worklist)) 5267 hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000; 5268 5269 /* 5270 * Defer printing to avoid deadlocks in console drivers that 5271 * queue work while holding locks also taken in their write 5272 * paths. 5273 */ 5274 printk_deferred_enter(); 5275 pr_info("pool %d:", pool->id); 5276 pr_cont_pool_info(pool); 5277 pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers); 5278 if (pool->manager) 5279 pr_cont(" manager: %d", 5280 task_pid_nr(pool->manager->task)); 5281 list_for_each_entry(worker, &pool->idle_list, entry) { 5282 pr_cont(" %s%d", first ? 
"idle: " : "", 5283 task_pid_nr(worker->task)); 5284 first = false; 5285 } 5286 pr_cont("\n"); 5287 printk_deferred_exit(); 5288 next_pool: 5289 raw_spin_unlock_irqrestore(&pool->lock, flags); 5290 /* 5291 * We could be printing a lot from atomic context, e.g. 5292 * sysrq-t -> show_all_workqueues(). Avoid triggering 5293 * hard lockup. 5294 */ 5295 touch_nmi_watchdog(); 5296 5297 } 5298 5299 /** 5300 * show_all_workqueues - dump workqueue state 5301 * 5302 * Called from a sysrq handler and prints out all busy workqueues and pools. 5303 */ 5304 void show_all_workqueues(void) 5305 { 5306 struct workqueue_struct *wq; 5307 struct worker_pool *pool; 5308 int pi; 5309 5310 rcu_read_lock(); 5311 5312 pr_info("Showing busy workqueues and worker pools:\n"); 5313 5314 list_for_each_entry_rcu(wq, &workqueues, list) 5315 show_one_workqueue(wq); 5316 5317 for_each_pool(pool, pi) 5318 show_one_worker_pool(pool); 5319 5320 rcu_read_unlock(); 5321 } 5322 5323 /** 5324 * show_freezable_workqueues - dump freezable workqueue state 5325 * 5326 * Called from try_to_freeze_tasks() and prints out all freezable workqueues 5327 * still busy. 5328 */ 5329 void show_freezable_workqueues(void) 5330 { 5331 struct workqueue_struct *wq; 5332 5333 rcu_read_lock(); 5334 5335 pr_info("Showing freezable workqueues that are still busy:\n"); 5336 5337 list_for_each_entry_rcu(wq, &workqueues, list) { 5338 if (!(wq->flags & WQ_FREEZABLE)) 5339 continue; 5340 show_one_workqueue(wq); 5341 } 5342 5343 rcu_read_unlock(); 5344 } 5345 5346 /* used to show worker information through /proc/PID/{comm,stat,status} */ 5347 void wq_worker_comm(char *buf, size_t size, struct task_struct *task) 5348 { 5349 int off; 5350 5351 /* always show the actual comm */ 5352 off = strscpy(buf, task->comm, size); 5353 if (off < 0) 5354 return; 5355 5356 /* stabilize PF_WQ_WORKER and worker pool association */ 5357 mutex_lock(&wq_pool_attach_mutex); 5358 5359 if (task->flags & PF_WQ_WORKER) { 5360 struct worker *worker = kthread_data(task); 5361 struct worker_pool *pool = worker->pool; 5362 5363 if (pool) { 5364 raw_spin_lock_irq(&pool->lock); 5365 /* 5366 * ->desc tracks information (wq name or 5367 * set_worker_desc()) for the latest execution. If 5368 * current, prepend '+', otherwise '-'. 5369 */ 5370 if (worker->desc[0] != '\0') { 5371 if (worker->current_work) 5372 scnprintf(buf + off, size - off, "+%s", 5373 worker->desc); 5374 else 5375 scnprintf(buf + off, size - off, "-%s", 5376 worker->desc); 5377 } 5378 raw_spin_unlock_irq(&pool->lock); 5379 } 5380 } 5381 5382 mutex_unlock(&wq_pool_attach_mutex); 5383 } 5384 5385 #ifdef CONFIG_SMP 5386 5387 /* 5388 * CPU hotplug. 5389 * 5390 * There are two challenges in supporting CPU hotplug. Firstly, there 5391 * are a lot of assumptions on strong associations among work, pwq and 5392 * pool which make migrating pending and scheduled works very 5393 * difficult to implement without impacting hot paths. Secondly, 5394 * worker pools serve mix of short, long and very long running works making 5395 * blocked draining impractical. 5396 * 5397 * This is solved by allowing the pools to be disassociated from the CPU 5398 * running as an unbound one and allowing it to be reattached later if the 5399 * cpu comes back online. 
5400 */ 5401 5402 static void unbind_workers(int cpu) 5403 { 5404 struct worker_pool *pool; 5405 struct worker *worker; 5406 5407 for_each_cpu_worker_pool(pool, cpu) { 5408 mutex_lock(&wq_pool_attach_mutex); 5409 raw_spin_lock_irq(&pool->lock); 5410 5411 /* 5412 * We've blocked all attach/detach operations. Make all workers 5413 * unbound and set DISASSOCIATED. Before this, all workers 5414 * must be on the cpu. After this, they may become diasporas 5415 * running anywhere. The preemption-disabled sections in their 5416 * sched callbacks are guaranteed to see WORKER_UNBOUND since 5417 * the code here runs on the same cpu. 5418 */ 5419 for_each_pool_worker(worker, pool) 5420 worker->flags |= WORKER_UNBOUND; 5421 5422 pool->flags |= POOL_DISASSOCIATED; 5423 5424 /* 5425 * The handling of nr_running in sched callbacks is disabled 5426 * now. Zap nr_running. After this, nr_running stays zero and 5427 * need_more_worker() and keep_working() are always true as 5428 * long as the worklist is not empty. This pool now behaves as 5429 * an unbound (in terms of concurrency management) pool which 5430 * is served by workers tied to the pool. 5431 */ 5432 pool->nr_running = 0; 5433 5434 /* 5435 * With concurrency management just turned off, a busy 5436 * worker blocking could lead to lengthy stalls. Kick off 5437 * unbound chain execution of currently pending work items. 5438 */ 5439 kick_pool(pool); 5440 5441 raw_spin_unlock_irq(&pool->lock); 5442 5443 for_each_pool_worker(worker, pool) 5444 unbind_worker(worker); 5445 5446 mutex_unlock(&wq_pool_attach_mutex); 5447 } 5448 } 5449 5450 /** 5451 * rebind_workers - rebind all workers of a pool to the associated CPU 5452 * @pool: pool of interest 5453 * 5454 * @pool->cpu is coming online. Rebind all workers to the CPU. 5455 */ 5456 static void rebind_workers(struct worker_pool *pool) 5457 { 5458 struct worker *worker; 5459 5460 lockdep_assert_held(&wq_pool_attach_mutex); 5461 5462 /* 5463 * Restore CPU affinity of all workers. As all idle workers should 5464 * be on the run-queue of the associated CPU before any local 5465 * wake-ups for concurrency management happen, restore CPU affinity 5466 * of all workers first and then clear UNBOUND. As we're called 5467 * from CPU_ONLINE, the following shouldn't fail. 5468 */ 5469 for_each_pool_worker(worker, pool) { 5470 kthread_set_per_cpu(worker->task, pool->cpu); 5471 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, 5472 pool_allowed_cpus(pool)) < 0); 5473 } 5474 5475 raw_spin_lock_irq(&pool->lock); 5476 5477 pool->flags &= ~POOL_DISASSOCIATED; 5478 5479 for_each_pool_worker(worker, pool) { 5480 unsigned int worker_flags = worker->flags; 5481 5482 /* 5483 * We want to clear UNBOUND but can't directly call 5484 * worker_clr_flags() or adjust nr_running. Atomically 5485 * replace UNBOUND with another NOT_RUNNING flag REBOUND. 5486 * @worker will clear REBOUND using worker_clr_flags() when 5487 * it initiates the next execution cycle, thus restoring 5488 * concurrency management. Note that when or whether 5489 * @worker clears REBOUND doesn't affect correctness. 5490 * 5491 * WRITE_ONCE() is necessary because @worker->flags may be 5492 * tested without holding any lock in 5493 * wq_worker_running(). Without it, the NOT_RUNNING test may 5494 * fail incorrectly, leading to premature concurrency 5495 * management operations.
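 *
 * Illustratively, the swap below is UNBOUND | PREP -> REBOUND | PREP;
 * both sides contain NOT_RUNNING flags, so the nr_running accounting
 * is unaffected at every intermediate step.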
5496 */ 5497 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); 5498 worker_flags |= WORKER_REBOUND; 5499 worker_flags &= ~WORKER_UNBOUND; 5500 WRITE_ONCE(worker->flags, worker_flags); 5501 } 5502 5503 raw_spin_unlock_irq(&pool->lock); 5504 } 5505 5506 /** 5507 * restore_unbound_workers_cpumask - restore cpumask of unbound workers 5508 * @pool: unbound pool of interest 5509 * @cpu: the CPU which is coming up 5510 * 5511 * An unbound pool may end up with a cpumask which doesn't have any online 5512 * CPUs. When a worker of such a pool gets scheduled, the scheduler resets 5513 * its cpus_allowed. If @cpu is in @pool's cpumask, which previously had no 5514 * online CPUs, the cpus_allowed of all its workers should be restored. 5515 */ 5516 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) 5517 { 5518 static cpumask_t cpumask; 5519 struct worker *worker; 5520 5521 lockdep_assert_held(&wq_pool_attach_mutex); 5522 5523 /* is @cpu allowed for @pool? */ 5524 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) 5525 return; 5526 5527 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); 5528 5529 /* as we're called from CPU_ONLINE, the following shouldn't fail */ 5530 for_each_pool_worker(worker, pool) 5531 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); 5532 } 5533 5534 int workqueue_prepare_cpu(unsigned int cpu) 5535 { 5536 struct worker_pool *pool; 5537 5538 for_each_cpu_worker_pool(pool, cpu) { 5539 if (pool->nr_workers) 5540 continue; 5541 if (!create_worker(pool)) 5542 return -ENOMEM; 5543 } 5544 return 0; 5545 } 5546 5547 int workqueue_online_cpu(unsigned int cpu) 5548 { 5549 struct worker_pool *pool; 5550 struct workqueue_struct *wq; 5551 int pi; 5552 5553 mutex_lock(&wq_pool_mutex); 5554 5555 for_each_pool(pool, pi) { 5556 mutex_lock(&wq_pool_attach_mutex); 5557 5558 if (pool->cpu == cpu) 5559 rebind_workers(pool); 5560 else if (pool->cpu < 0) 5561 restore_unbound_workers_cpumask(pool, cpu); 5562 5563 mutex_unlock(&wq_pool_attach_mutex); 5564 } 5565 5566 /* update pod affinity of unbound workqueues */ 5567 list_for_each_entry(wq, &workqueues, list) { 5568 struct workqueue_attrs *attrs = wq->unbound_attrs; 5569 5570 if (attrs) { 5571 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 5572 int tcpu; 5573 5574 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) 5575 wq_update_pod(wq, tcpu, cpu, true); 5576 } 5577 } 5578 5579 mutex_unlock(&wq_pool_mutex); 5580 return 0; 5581 } 5582 5583 int workqueue_offline_cpu(unsigned int cpu) 5584 { 5585 struct workqueue_struct *wq; 5586 5587 /* unbinding per-cpu workers should happen on the local CPU */ 5588 if (WARN_ON(cpu != smp_processor_id())) 5589 return -1; 5590 5591 unbind_workers(cpu); 5592 5593 /* update pod affinity of unbound workqueues */ 5594 mutex_lock(&wq_pool_mutex); 5595 list_for_each_entry(wq, &workqueues, list) { 5596 struct workqueue_attrs *attrs = wq->unbound_attrs; 5597 5598 if (attrs) { 5599 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 5600 int tcpu; 5601 5602 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) 5603 wq_update_pod(wq, tcpu, cpu, false); 5604 } 5605 } 5606 mutex_unlock(&wq_pool_mutex); 5607 5608 return 0; 5609 } 5610 5611 struct work_for_cpu { 5612 struct work_struct work; 5613 long (*fn)(void *); 5614 void *arg; 5615 long ret; 5616 }; 5617 5618 static void work_for_cpu_fn(struct work_struct *work) 5619 { 5620 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); 5621 5622 wfc->ret = wfc->fn(wfc->arg); 5623 } 5624 5625 /** 5626 *
work_on_cpu_key - run a function in thread context on a particular cpu 5627 * @cpu: the cpu to run on 5628 * @fn: the function to run 5629 * @arg: the function arg 5630 * @key: The lock class key for lock debugging purposes 5631 * 5632 * It is up to the caller to ensure that the cpu doesn't go offline. 5633 * The caller must not hold any locks which would prevent @fn from completing. 5634 * 5635 * Return: The value @fn returns. 5636 */ 5637 long work_on_cpu_key(int cpu, long (*fn)(void *), 5638 void *arg, struct lock_class_key *key) 5639 { 5640 struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 5641 5642 INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key); 5643 schedule_work_on(cpu, &wfc.work); 5644 flush_work(&wfc.work); 5645 destroy_work_on_stack(&wfc.work); 5646 return wfc.ret; 5647 } 5648 EXPORT_SYMBOL_GPL(work_on_cpu_key); 5649 5650 /** 5651 * work_on_cpu_safe_key - run a function in thread context on a particular cpu 5652 * @cpu: the cpu to run on 5653 * @fn: the function to run 5654 * @arg: the function argument 5655 * @key: The lock class key for lock debugging purposes 5656 * 5657 * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold 5658 * any locks which would prevent @fn from completing. 5659 * 5660 * Return: The value @fn returns. 5661 */ 5662 long work_on_cpu_safe_key(int cpu, long (*fn)(void *), 5663 void *arg, struct lock_class_key *key) 5664 { 5665 long ret = -ENODEV; 5666 5667 cpus_read_lock(); 5668 if (cpu_online(cpu)) 5669 ret = work_on_cpu_key(cpu, fn, arg, key); 5670 cpus_read_unlock(); 5671 return ret; 5672 } 5673 EXPORT_SYMBOL_GPL(work_on_cpu_safe_key); 5674 #endif /* CONFIG_SMP */ 5675 5676 #ifdef CONFIG_FREEZER 5677 5678 /** 5679 * freeze_workqueues_begin - begin freezing workqueues 5680 * 5681 * Start freezing workqueues. After this function returns, all freezable 5682 * workqueues will queue new works to their inactive_works list instead of 5683 * pool->worklist. 5684 * 5685 * CONTEXT: 5686 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 5687 */ 5688 void freeze_workqueues_begin(void) 5689 { 5690 struct workqueue_struct *wq; 5691 struct pool_workqueue *pwq; 5692 5693 mutex_lock(&wq_pool_mutex); 5694 5695 WARN_ON_ONCE(workqueue_freezing); 5696 workqueue_freezing = true; 5697 5698 list_for_each_entry(wq, &workqueues, list) { 5699 mutex_lock(&wq->mutex); 5700 for_each_pwq(pwq, wq) 5701 pwq_adjust_max_active(pwq); 5702 mutex_unlock(&wq->mutex); 5703 } 5704 5705 mutex_unlock(&wq_pool_mutex); 5706 } 5707 5708 /** 5709 * freeze_workqueues_busy - are freezable workqueues still busy? 5710 * 5711 * Check whether freezing is complete. This function must be called 5712 * between freeze_workqueues_begin() and thaw_workqueues(). 5713 * 5714 * CONTEXT: 5715 * Grabs and releases wq_pool_mutex. 5716 * 5717 * Return: 5718 * %true if some freezable workqueues are still busy. %false if freezing 5719 * is complete. 5720 */ 5721 bool freeze_workqueues_busy(void) 5722 { 5723 bool busy = false; 5724 struct workqueue_struct *wq; 5725 struct pool_workqueue *pwq; 5726 5727 mutex_lock(&wq_pool_mutex); 5728 5729 WARN_ON_ONCE(!workqueue_freezing); 5730 5731 list_for_each_entry(wq, &workqueues, list) { 5732 if (!(wq->flags & WQ_FREEZABLE)) 5733 continue; 5734 /* 5735 * nr_active is monotonically decreasing. It's safe 5736 * to peek without lock. 
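 * (nr_active can only fall at this point: freeze_workqueues_begin() has
 * already made freezable workqueues queue new works to inactive_works,
 * so only in-flight items still drain.)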
5737 */ 5738 rcu_read_lock(); 5739 for_each_pwq(pwq, wq) { 5740 WARN_ON_ONCE(pwq->nr_active < 0); 5741 if (pwq->nr_active) { 5742 busy = true; 5743 rcu_read_unlock(); 5744 goto out_unlock; 5745 } 5746 } 5747 rcu_read_unlock(); 5748 } 5749 out_unlock: 5750 mutex_unlock(&wq_pool_mutex); 5751 return busy; 5752 } 5753 5754 /** 5755 * thaw_workqueues - thaw workqueues 5756 * 5757 * Thaw workqueues. Normal queueing is restored and all collected 5758 * frozen works are transferred to their respective pool worklists. 5759 * 5760 * CONTEXT: 5761 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 5762 */ 5763 void thaw_workqueues(void) 5764 { 5765 struct workqueue_struct *wq; 5766 struct pool_workqueue *pwq; 5767 5768 mutex_lock(&wq_pool_mutex); 5769 5770 if (!workqueue_freezing) 5771 goto out_unlock; 5772 5773 workqueue_freezing = false; 5774 5775 /* restore max_active and repopulate worklist */ 5776 list_for_each_entry(wq, &workqueues, list) { 5777 mutex_lock(&wq->mutex); 5778 for_each_pwq(pwq, wq) 5779 pwq_adjust_max_active(pwq); 5780 mutex_unlock(&wq->mutex); 5781 } 5782 5783 out_unlock: 5784 mutex_unlock(&wq_pool_mutex); 5785 } 5786 #endif /* CONFIG_FREEZER */ 5787 5788 static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask) 5789 { 5790 LIST_HEAD(ctxs); 5791 int ret = 0; 5792 struct workqueue_struct *wq; 5793 struct apply_wqattrs_ctx *ctx, *n; 5794 5795 lockdep_assert_held(&wq_pool_mutex); 5796 5797 list_for_each_entry(wq, &workqueues, list) { 5798 if (!(wq->flags & WQ_UNBOUND)) 5799 continue; 5800 /* creating multiple pwqs breaks ordering guarantee */ 5801 if (wq->flags & __WQ_ORDERED) 5802 continue; 5803 5804 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask); 5805 if (IS_ERR(ctx)) { 5806 ret = PTR_ERR(ctx); 5807 break; 5808 } 5809 5810 list_add_tail(&ctx->list, &ctxs); 5811 } 5812 5813 list_for_each_entry_safe(ctx, n, &ctxs, list) { 5814 if (!ret) 5815 apply_wqattrs_commit(ctx); 5816 apply_wqattrs_cleanup(ctx); 5817 } 5818 5819 if (!ret) { 5820 mutex_lock(&wq_pool_attach_mutex); 5821 cpumask_copy(wq_unbound_cpumask, unbound_cpumask); 5822 mutex_unlock(&wq_pool_attach_mutex); 5823 } 5824 return ret; 5825 } 5826 5827 /** 5828 * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask 5829 * @cpumask: the cpumask to set 5830 * 5831 * The low-level workqueues cpumask is a global cpumask that limits 5832 * the affinity of all unbound workqueues. This function checks @cpumask, 5833 * applies it to all unbound workqueues and updates all their pwqs. 5834 * 5835 * Return: 0 - Success 5836 * -EINVAL - Invalid @cpumask 5837 * -ENOMEM - Failed to allocate memory for attrs or pwqs. 5838 */ 5839 int workqueue_set_unbound_cpumask(cpumask_var_t cpumask) 5840 { 5841 int ret = -EINVAL; 5842 5843 /* 5844 * Not excluding isolated cpus on purpose. 5845 * If the user wishes to include them, we allow that.
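 *
 * (Illustrative path: userspace normally reaches this function through
 * the sysfs file registered in wq_sysfs_init() below, e.g.
 *
 *	echo 0-3 >/sys/devices/virtual/workqueue/cpumask
 *
 * which lands here via wq_unbound_cpumask_store().)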
5846 */ 5847 cpumask_and(cpumask, cpumask, cpu_possible_mask); 5848 if (!cpumask_empty(cpumask)) { 5849 apply_wqattrs_lock(); 5850 if (cpumask_equal(cpumask, wq_unbound_cpumask)) { 5851 ret = 0; 5852 goto out_unlock; 5853 } 5854 5855 ret = workqueue_apply_unbound_cpumask(cpumask); 5856 5857 out_unlock: 5858 apply_wqattrs_unlock(); 5859 } 5860 5861 return ret; 5862 } 5863 5864 static int parse_affn_scope(const char *val) 5865 { 5866 int i; 5867 5868 for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) { 5869 if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i]))) 5870 return i; 5871 } 5872 return -EINVAL; 5873 } 5874 5875 static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp) 5876 { 5877 struct workqueue_struct *wq; 5878 int affn, cpu; 5879 5880 affn = parse_affn_scope(val); 5881 if (affn < 0) 5882 return affn; 5883 if (affn == WQ_AFFN_DFL) 5884 return -EINVAL; 5885 5886 cpus_read_lock(); 5887 mutex_lock(&wq_pool_mutex); 5888 5889 wq_affn_dfl = affn; 5890 5891 list_for_each_entry(wq, &workqueues, list) { 5892 for_each_online_cpu(cpu) { 5893 wq_update_pod(wq, cpu, cpu, true); 5894 } 5895 } 5896 5897 mutex_unlock(&wq_pool_mutex); 5898 cpus_read_unlock(); 5899 5900 return 0; 5901 } 5902 5903 static int wq_affn_dfl_get(char *buffer, const struct kernel_param *kp) 5904 { 5905 return scnprintf(buffer, PAGE_SIZE, "%s\n", wq_affn_names[wq_affn_dfl]); 5906 } 5907 5908 static const struct kernel_param_ops wq_affn_dfl_ops = { 5909 .set = wq_affn_dfl_set, 5910 .get = wq_affn_dfl_get, 5911 }; 5912 5913 module_param_cb(default_affinity_scope, &wq_affn_dfl_ops, NULL, 0644); 5914 5915 #ifdef CONFIG_SYSFS 5916 /* 5917 * Workqueues with the WQ_SYSFS flag set are visible to userland via 5918 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the 5919 * following attributes. 5920 * 5921 * per_cpu RO bool : whether the workqueue is per-cpu or unbound 5922 * max_active RW int : maximum number of in-flight work items 5923 * 5924 * Unbound workqueues have the following extra attributes.
5925 * 5926 * nice RW int : nice value of the workers 5927 * cpumask RW mask : bitmask of allowed CPUs for the workers 5928 * affinity_scope RW str : worker CPU affinity scope (cache, numa, none) 5929 * affinity_strict RW bool : worker CPU affinity is strict 5930 */ 5931 struct wq_device { 5932 struct workqueue_struct *wq; 5933 struct device dev; 5934 }; 5935 5936 static struct workqueue_struct *dev_to_wq(struct device *dev) 5937 { 5938 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 5939 5940 return wq_dev->wq; 5941 } 5942 5943 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr, 5944 char *buf) 5945 { 5946 struct workqueue_struct *wq = dev_to_wq(dev); 5947 5948 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); 5949 } 5950 static DEVICE_ATTR_RO(per_cpu); 5951 5952 static ssize_t max_active_show(struct device *dev, 5953 struct device_attribute *attr, char *buf) 5954 { 5955 struct workqueue_struct *wq = dev_to_wq(dev); 5956 5957 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); 5958 } 5959 5960 static ssize_t max_active_store(struct device *dev, 5961 struct device_attribute *attr, const char *buf, 5962 size_t count) 5963 { 5964 struct workqueue_struct *wq = dev_to_wq(dev); 5965 int val; 5966 5967 if (sscanf(buf, "%d", &val) != 1 || val <= 0) 5968 return -EINVAL; 5969 5970 workqueue_set_max_active(wq, val); 5971 return count; 5972 } 5973 static DEVICE_ATTR_RW(max_active); 5974 5975 static struct attribute *wq_sysfs_attrs[] = { 5976 &dev_attr_per_cpu.attr, 5977 &dev_attr_max_active.attr, 5978 NULL, 5979 }; 5980 ATTRIBUTE_GROUPS(wq_sysfs); 5981 5982 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr, 5983 char *buf) 5984 { 5985 struct workqueue_struct *wq = dev_to_wq(dev); 5986 int written; 5987 5988 mutex_lock(&wq->mutex); 5989 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); 5990 mutex_unlock(&wq->mutex); 5991 5992 return written; 5993 } 5994 5995 /* prepare workqueue_attrs for sysfs store operations */ 5996 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) 5997 { 5998 struct workqueue_attrs *attrs; 5999 6000 lockdep_assert_held(&wq_pool_mutex); 6001 6002 attrs = alloc_workqueue_attrs(); 6003 if (!attrs) 6004 return NULL; 6005 6006 copy_workqueue_attrs(attrs, wq->unbound_attrs); 6007 return attrs; 6008 } 6009 6010 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr, 6011 const char *buf, size_t count) 6012 { 6013 struct workqueue_struct *wq = dev_to_wq(dev); 6014 struct workqueue_attrs *attrs; 6015 int ret = -ENOMEM; 6016 6017 apply_wqattrs_lock(); 6018 6019 attrs = wq_sysfs_prep_attrs(wq); 6020 if (!attrs) 6021 goto out_unlock; 6022 6023 if (sscanf(buf, "%d", &attrs->nice) == 1 && 6024 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE) 6025 ret = apply_workqueue_attrs_locked(wq, attrs); 6026 else 6027 ret = -EINVAL; 6028 6029 out_unlock: 6030 apply_wqattrs_unlock(); 6031 free_workqueue_attrs(attrs); 6032 return ret ?: count; 6033 } 6034 6035 static ssize_t wq_cpumask_show(struct device *dev, 6036 struct device_attribute *attr, char *buf) 6037 { 6038 struct workqueue_struct *wq = dev_to_wq(dev); 6039 int written; 6040 6041 mutex_lock(&wq->mutex); 6042 written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 6043 cpumask_pr_args(wq->unbound_attrs->cpumask)); 6044 mutex_unlock(&wq->mutex); 6045 return written; 6046 } 6047 6048 static ssize_t wq_cpumask_store(struct device *dev, 6049 struct device_attribute *attr, 
6050 const char *buf, size_t count) 6051 { 6052 struct workqueue_struct *wq = dev_to_wq(dev); 6053 struct workqueue_attrs *attrs; 6054 int ret = -ENOMEM; 6055 6056 apply_wqattrs_lock(); 6057 6058 attrs = wq_sysfs_prep_attrs(wq); 6059 if (!attrs) 6060 goto out_unlock; 6061 6062 ret = cpumask_parse(buf, attrs->cpumask); 6063 if (!ret) 6064 ret = apply_workqueue_attrs_locked(wq, attrs); 6065 6066 out_unlock: 6067 apply_wqattrs_unlock(); 6068 free_workqueue_attrs(attrs); 6069 return ret ?: count; 6070 } 6071 6072 static ssize_t wq_affn_scope_show(struct device *dev, 6073 struct device_attribute *attr, char *buf) 6074 { 6075 struct workqueue_struct *wq = dev_to_wq(dev); 6076 int written; 6077 6078 mutex_lock(&wq->mutex); 6079 if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL) 6080 written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n", 6081 wq_affn_names[WQ_AFFN_DFL], 6082 wq_affn_names[wq_affn_dfl]); 6083 else 6084 written = scnprintf(buf, PAGE_SIZE, "%s\n", 6085 wq_affn_names[wq->unbound_attrs->affn_scope]); 6086 mutex_unlock(&wq->mutex); 6087 6088 return written; 6089 } 6090 6091 static ssize_t wq_affn_scope_store(struct device *dev, 6092 struct device_attribute *attr, 6093 const char *buf, size_t count) 6094 { 6095 struct workqueue_struct *wq = dev_to_wq(dev); 6096 struct workqueue_attrs *attrs; 6097 int affn, ret = -ENOMEM; 6098 6099 affn = parse_affn_scope(buf); 6100 if (affn < 0) 6101 return affn; 6102 6103 apply_wqattrs_lock(); 6104 attrs = wq_sysfs_prep_attrs(wq); 6105 if (attrs) { 6106 attrs->affn_scope = affn; 6107 ret = apply_workqueue_attrs_locked(wq, attrs); 6108 } 6109 apply_wqattrs_unlock(); 6110 free_workqueue_attrs(attrs); 6111 return ret ?: count; 6112 } 6113 6114 static ssize_t wq_affinity_strict_show(struct device *dev, 6115 struct device_attribute *attr, char *buf) 6116 { 6117 struct workqueue_struct *wq = dev_to_wq(dev); 6118 6119 return scnprintf(buf, PAGE_SIZE, "%d\n", 6120 wq->unbound_attrs->affn_strict); 6121 } 6122 6123 static ssize_t wq_affinity_strict_store(struct device *dev, 6124 struct device_attribute *attr, 6125 const char *buf, size_t count) 6126 { 6127 struct workqueue_struct *wq = dev_to_wq(dev); 6128 struct workqueue_attrs *attrs; 6129 int v, ret = -ENOMEM; 6130 6131 if (sscanf(buf, "%d", &v) != 1) 6132 return -EINVAL; 6133 6134 apply_wqattrs_lock(); 6135 attrs = wq_sysfs_prep_attrs(wq); 6136 if (attrs) { 6137 attrs->affn_strict = (bool)v; 6138 ret = apply_workqueue_attrs_locked(wq, attrs); 6139 } 6140 apply_wqattrs_unlock(); 6141 free_workqueue_attrs(attrs); 6142 return ret ?: count; 6143 } 6144 6145 static struct device_attribute wq_sysfs_unbound_attrs[] = { 6146 __ATTR(nice, 0644, wq_nice_show, wq_nice_store), 6147 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store), 6148 __ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store), 6149 __ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store), 6150 __ATTR_NULL, 6151 }; 6152 6153 static struct bus_type wq_subsys = { 6154 .name = "workqueue", 6155 .dev_groups = wq_sysfs_groups, 6156 }; 6157 6158 static ssize_t wq_unbound_cpumask_show(struct device *dev, 6159 struct device_attribute *attr, char *buf) 6160 { 6161 int written; 6162 6163 mutex_lock(&wq_pool_mutex); 6164 written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 6165 cpumask_pr_args(wq_unbound_cpumask)); 6166 mutex_unlock(&wq_pool_mutex); 6167 6168 return written; 6169 } 6170 6171 static ssize_t wq_unbound_cpumask_store(struct device *dev, 6172 struct device_attribute *attr, const char *buf, size_t count) 6173 { 6174 
cpumask_var_t cpumask; 6175 int ret; 6176 6177 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL)) 6178 return -ENOMEM; 6179 6180 ret = cpumask_parse(buf, cpumask); 6181 if (!ret) 6182 ret = workqueue_set_unbound_cpumask(cpumask); 6183 6184 free_cpumask_var(cpumask); 6185 return ret ? ret : count; 6186 } 6187 6188 static struct device_attribute wq_sysfs_cpumask_attr = 6189 __ATTR(cpumask, 0644, wq_unbound_cpumask_show, 6190 wq_unbound_cpumask_store); 6191 6192 static int __init wq_sysfs_init(void) 6193 { 6194 struct device *dev_root; 6195 int err; 6196 6197 err = subsys_virtual_register(&wq_subsys, NULL); 6198 if (err) 6199 return err; 6200 6201 dev_root = bus_get_dev_root(&wq_subsys); 6202 if (dev_root) { 6203 err = device_create_file(dev_root, &wq_sysfs_cpumask_attr); 6204 put_device(dev_root); 6205 } 6206 return err; 6207 } 6208 core_initcall(wq_sysfs_init); 6209 6210 static void wq_device_release(struct device *dev) 6211 { 6212 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 6213 6214 kfree(wq_dev); 6215 } 6216 6217 /** 6218 * workqueue_sysfs_register - make a workqueue visible in sysfs 6219 * @wq: the workqueue to register 6220 * 6221 * Expose @wq in sysfs under /sys/bus/workqueue/devices. 6222 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set 6223 * which is the preferred method. 6224 * 6225 * Workqueue user should use this function directly iff it wants to apply 6226 * workqueue_attrs before making the workqueue visible in sysfs; otherwise, 6227 * apply_workqueue_attrs() may race against userland updating the 6228 * attributes. 6229 * 6230 * Return: 0 on success, -errno on failure. 6231 */ 6232 int workqueue_sysfs_register(struct workqueue_struct *wq) 6233 { 6234 struct wq_device *wq_dev; 6235 int ret; 6236 6237 /* 6238 * Adjusting max_active or creating new pwqs by applying 6239 * attributes breaks ordering guarantee. Disallow exposing ordered 6240 * workqueues. 6241 */ 6242 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 6243 return -EINVAL; 6244 6245 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); 6246 if (!wq_dev) 6247 return -ENOMEM; 6248 6249 wq_dev->wq = wq; 6250 wq_dev->dev.bus = &wq_subsys; 6251 wq_dev->dev.release = wq_device_release; 6252 dev_set_name(&wq_dev->dev, "%s", wq->name); 6253 6254 /* 6255 * unbound_attrs are created separately. Suppress uevent until 6256 * everything is ready. 6257 */ 6258 dev_set_uevent_suppress(&wq_dev->dev, true); 6259 6260 ret = device_register(&wq_dev->dev); 6261 if (ret) { 6262 put_device(&wq_dev->dev); 6263 wq->wq_dev = NULL; 6264 return ret; 6265 } 6266 6267 if (wq->flags & WQ_UNBOUND) { 6268 struct device_attribute *attr; 6269 6270 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) { 6271 ret = device_create_file(&wq_dev->dev, attr); 6272 if (ret) { 6273 device_unregister(&wq_dev->dev); 6274 wq->wq_dev = NULL; 6275 return ret; 6276 } 6277 } 6278 } 6279 6280 dev_set_uevent_suppress(&wq_dev->dev, false); 6281 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); 6282 return 0; 6283 } 6284 6285 /** 6286 * workqueue_sysfs_unregister - undo workqueue_sysfs_register() 6287 * @wq: the workqueue to unregister 6288 * 6289 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister. 
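 *
 * (For reference, registration is normally implicit: a workqueue created
 * with, e.g., alloc_workqueue("foo", WQ_UNBOUND | WQ_SYSFS, 0) is exposed
 * automatically; "foo" is only an illustrative name.)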
6290 */ 6291 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) 6292 { 6293 struct wq_device *wq_dev = wq->wq_dev; 6294 6295 if (!wq->wq_dev) 6296 return; 6297 6298 wq->wq_dev = NULL; 6299 device_unregister(&wq_dev->dev); 6300 } 6301 #else /* CONFIG_SYSFS */ 6302 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { } 6303 #endif /* CONFIG_SYSFS */ 6304 6305 /* 6306 * Workqueue watchdog. 6307 * 6308 * Stalls may be caused by various bugs: a missing WQ_MEM_RECLAIM, an illegal 6309 * flush dependency, or a concurrency-managed work item which stays RUNNING 6310 * indefinitely. Workqueue stalls can be very difficult to debug as the 6311 * usual warning mechanisms don't trigger and internal workqueue state is 6312 * largely opaque. 6313 * 6314 * Workqueue watchdog monitors all worker pools periodically and dumps 6315 * their state if some pools fail to make forward progress for a while, 6316 * where forward progress is defined as the first item on ->worklist changing. 6317 * 6318 * This mechanism is controlled through the kernel parameter 6319 * "workqueue.watchdog_thresh" which can be updated at runtime through the 6320 * corresponding sysfs parameter file. 6321 */ 6322 #ifdef CONFIG_WQ_WATCHDOG 6323 6324 static unsigned long wq_watchdog_thresh = 30; 6325 static struct timer_list wq_watchdog_timer; 6326 6327 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES; 6328 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES; 6329 6330 /* 6331 * Show workers that might prevent the processing of pending work items. 6332 * The only candidates are CPU-bound workers in the running state. 6333 * Pending work items should be handled by another idle worker 6334 * in all other situations. 6335 */ 6336 static void show_cpu_pool_hog(struct worker_pool *pool) 6337 { 6338 struct worker *worker; 6339 unsigned long flags; 6340 int bkt; 6341 6342 raw_spin_lock_irqsave(&pool->lock, flags); 6343 6344 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 6345 if (task_is_running(worker->task)) { 6346 /* 6347 * Defer printing to avoid deadlocks in console 6348 * drivers that queue work while holding locks 6349 * also taken in their write paths.
6350 */ 6351 printk_deferred_enter(); 6352 6353 pr_info("pool %d:\n", pool->id); 6354 sched_show_task(worker->task); 6355 6356 printk_deferred_exit(); 6357 } 6358 } 6359 6360 raw_spin_unlock_irqrestore(&pool->lock, flags); 6361 } 6362 6363 static void show_cpu_pools_hogs(void) 6364 { 6365 struct worker_pool *pool; 6366 int pi; 6367 6368 pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n"); 6369 6370 rcu_read_lock(); 6371 6372 for_each_pool(pool, pi) { 6373 if (pool->cpu_stall) 6374 show_cpu_pool_hog(pool); 6375 6376 } 6377 6378 rcu_read_unlock(); 6379 } 6380 6381 static void wq_watchdog_reset_touched(void) 6382 { 6383 int cpu; 6384 6385 wq_watchdog_touched = jiffies; 6386 for_each_possible_cpu(cpu) 6387 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 6388 } 6389 6390 static void wq_watchdog_timer_fn(struct timer_list *unused) 6391 { 6392 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ; 6393 bool lockup_detected = false; 6394 bool cpu_pool_stall = false; 6395 unsigned long now = jiffies; 6396 struct worker_pool *pool; 6397 int pi; 6398 6399 if (!thresh) 6400 return; 6401 6402 rcu_read_lock(); 6403 6404 for_each_pool(pool, pi) { 6405 unsigned long pool_ts, touched, ts; 6406 6407 pool->cpu_stall = false; 6408 if (list_empty(&pool->worklist)) 6409 continue; 6410 6411 /* 6412 * If a virtual machine is stopped by the host it can look to 6413 * the watchdog like a stall. 6414 */ 6415 kvm_check_and_clear_guest_paused(); 6416 6417 /* get the latest of pool and touched timestamps */ 6418 if (pool->cpu >= 0) 6419 touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu)); 6420 else 6421 touched = READ_ONCE(wq_watchdog_touched); 6422 pool_ts = READ_ONCE(pool->watchdog_ts); 6423 6424 if (time_after(pool_ts, touched)) 6425 ts = pool_ts; 6426 else 6427 ts = touched; 6428 6429 /* did we stall? 
*/ 6430 if (time_after(now, ts + thresh)) { 6431 lockup_detected = true; 6432 if (pool->cpu >= 0) { 6433 pool->cpu_stall = true; 6434 cpu_pool_stall = true; 6435 } 6436 pr_emerg("BUG: workqueue lockup - pool"); 6437 pr_cont_pool_info(pool); 6438 pr_cont(" stuck for %us!\n", 6439 jiffies_to_msecs(now - pool_ts) / 1000); 6440 } 6441 6442 6443 } 6444 6445 rcu_read_unlock(); 6446 6447 if (lockup_detected) 6448 show_all_workqueues(); 6449 6450 if (cpu_pool_stall) 6451 show_cpu_pools_hogs(); 6452 6453 wq_watchdog_reset_touched(); 6454 mod_timer(&wq_watchdog_timer, jiffies + thresh); 6455 } 6456 6457 notrace void wq_watchdog_touch(int cpu) 6458 { 6459 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ; 6460 unsigned long touch_ts = READ_ONCE(wq_watchdog_touched); 6461 unsigned long now = jiffies; 6462 6463 if (cpu >= 0) 6464 per_cpu(wq_watchdog_touched_cpu, cpu) = now; 6465 else 6466 WARN_ONCE(1, "%s should be called with valid CPU", __func__); 6467 6468 /* Don't unnecessarily store to global cacheline */ 6469 if (time_after(now, touch_ts + thresh / 4)) 6470 WRITE_ONCE(wq_watchdog_touched, jiffies); 6471 } 6472 6473 static void wq_watchdog_set_thresh(unsigned long thresh) 6474 { 6475 wq_watchdog_thresh = 0; 6476 del_timer_sync(&wq_watchdog_timer); 6477 6478 if (thresh) { 6479 wq_watchdog_thresh = thresh; 6480 wq_watchdog_reset_touched(); 6481 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ); 6482 } 6483 } 6484 6485 static int wq_watchdog_param_set_thresh(const char *val, 6486 const struct kernel_param *kp) 6487 { 6488 unsigned long thresh; 6489 int ret; 6490 6491 ret = kstrtoul(val, 0, &thresh); 6492 if (ret) 6493 return ret; 6494 6495 if (system_wq) 6496 wq_watchdog_set_thresh(thresh); 6497 else 6498 wq_watchdog_thresh = thresh; 6499 6500 return 0; 6501 } 6502 6503 static const struct kernel_param_ops wq_watchdog_thresh_ops = { 6504 .set = wq_watchdog_param_set_thresh, 6505 .get = param_get_ulong, 6506 }; 6507 6508 module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh, 6509 0644); 6510 6511 static void wq_watchdog_init(void) 6512 { 6513 timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE); 6514 wq_watchdog_set_thresh(wq_watchdog_thresh); 6515 } 6516 6517 #else /* CONFIG_WQ_WATCHDOG */ 6518 6519 static inline void wq_watchdog_init(void) { } 6520 6521 #endif /* CONFIG_WQ_WATCHDOG */ 6522 6523 static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask) 6524 { 6525 if (!cpumask_intersects(wq_unbound_cpumask, mask)) { 6526 pr_warn("workqueue: Restricting unbound_cpumask (%*pb) with %s (%*pb) leaves no CPU, ignoring\n", 6527 cpumask_pr_args(wq_unbound_cpumask), name, cpumask_pr_args(mask)); 6528 return; 6529 } 6530 6531 cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask); 6532 } 6533 6534 /** 6535 * workqueue_init_early - early init for workqueue subsystem 6536 * 6537 * This is the first step of three-staged workqueue subsystem initialization and 6538 * invoked as soon as the bare basics - memory allocation, cpumasks and idr are 6539 * up. It sets up all the data structures and system workqueues and allows early 6540 * boot code to create workqueues and queue/cancel work items. Actual work item 6541 * execution starts only after kthreads can be created and scheduled right 6542 * before early initcalls. 
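 *
 * As a sketch, the three stages and their effects are:
 *
 *	workqueue_init_early()    - pools and system workqueues exist;
 *				    work items can be queued
 *	workqueue_init()          - initial kworkers created;
 *				    execution actually starts
 *	workqueue_init_topology() - unbound CPU pods initialized from
 *				    topology information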
6543 */ 6544 void __init workqueue_init_early(void) 6545 { 6546 struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM]; 6547 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; 6548 int i, cpu; 6549 6550 BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 6551 6552 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL)); 6553 cpumask_copy(wq_unbound_cpumask, cpu_possible_mask); 6554 restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ)); 6555 restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN)); 6556 if (!cpumask_empty(&wq_cmdline_cpumask)) 6557 restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask); 6558 6559 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 6560 6561 wq_update_pod_attrs_buf = alloc_workqueue_attrs(); 6562 BUG_ON(!wq_update_pod_attrs_buf); 6563 6564 /* initialize WQ_AFFN_SYSTEM pods */ 6565 pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL); 6566 pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL); 6567 pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL); 6568 BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod); 6569 6570 BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE)); 6571 6572 pt->nr_pods = 1; 6573 cpumask_copy(pt->pod_cpus[0], cpu_possible_mask); 6574 pt->pod_node[0] = NUMA_NO_NODE; 6575 pt->cpu_pod[0] = 0; 6576 6577 /* initialize CPU pools */ 6578 for_each_possible_cpu(cpu) { 6579 struct worker_pool *pool; 6580 6581 i = 0; 6582 for_each_cpu_worker_pool(pool, cpu) { 6583 BUG_ON(init_worker_pool(pool)); 6584 pool->cpu = cpu; 6585 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); 6586 cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu)); 6587 pool->attrs->nice = std_nice[i++]; 6588 pool->attrs->affn_strict = true; 6589 pool->node = cpu_to_node(cpu); 6590 6591 /* alloc pool ID */ 6592 mutex_lock(&wq_pool_mutex); 6593 BUG_ON(worker_pool_assign_id(pool)); 6594 mutex_unlock(&wq_pool_mutex); 6595 } 6596 } 6597 6598 /* create default unbound and ordered wq attrs */ 6599 for (i = 0; i < NR_STD_WORKER_POOLS; i++) { 6600 struct workqueue_attrs *attrs; 6601 6602 BUG_ON(!(attrs = alloc_workqueue_attrs())); 6603 attrs->nice = std_nice[i]; 6604 unbound_std_wq_attrs[i] = attrs; 6605 6606 /* 6607 * An ordered wq should have only one pwq as ordering is 6608 * guaranteed by max_active which is enforced by pwqs. 
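 * (These ordered attrs are what, e.g., an alloc_ordered_workqueue()
 * user ends up with.)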
6609 */ 6610 BUG_ON(!(attrs = alloc_workqueue_attrs())); 6611 attrs->nice = std_nice[i]; 6612 attrs->ordered = true; 6613 ordered_wq_attrs[i] = attrs; 6614 } 6615 6616 system_wq = alloc_workqueue("events", 0, 0); 6617 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0); 6618 system_long_wq = alloc_workqueue("events_long", 0, 0); 6619 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 6620 WQ_MAX_ACTIVE); 6621 system_freezable_wq = alloc_workqueue("events_freezable", 6622 WQ_FREEZABLE, 0); 6623 system_power_efficient_wq = alloc_workqueue("events_power_efficient", 6624 WQ_POWER_EFFICIENT, 0); 6625 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient", 6626 WQ_FREEZABLE | WQ_POWER_EFFICIENT, 6627 0); 6628 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || 6629 !system_unbound_wq || !system_freezable_wq || 6630 !system_power_efficient_wq || 6631 !system_freezable_power_efficient_wq); 6632 } 6633 6634 static void __init wq_cpu_intensive_thresh_init(void) 6635 { 6636 unsigned long thresh; 6637 unsigned long bogo; 6638 6639 pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release"); 6640 BUG_ON(IS_ERR(pwq_release_worker)); 6641 6642 /* if the user set it to a specific value, keep it */ 6643 if (wq_cpu_intensive_thresh_us != ULONG_MAX) 6644 return; 6645 6646 /* 6647 * The default of 10ms is derived from the fact that most modern (as of 6648 * 2023) processors can do a lot in 10ms and that it's just below what 6649 * most consider human-perceivable. However, the kernel also runs on 6650 * much slower CPUs, including microcontrollers, where the threshold is 6651 * far too low. 6652 * 6653 * Let's scale the threshold up to 1 second if BogoMIPS is below 4000; e.g. a machine with BogoMIPS 1000 gets min(10ms * 4000 / 1000, 1s) = 40ms. 6654 * This is by no means accurate but it doesn't have to be. The mechanism 6655 * is still useful even when the threshold is fully scaled up. Also, as 6656 * the reports would usually be applicable to everyone, a few machines 6657 * operating on longer thresholds won't significantly diminish their 6658 * usefulness. 6659 */ 6660 thresh = 10 * USEC_PER_MSEC; 6661 6662 /* see init/calibrate.c for lpj -> BogoMIPS calculation */ 6663 bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1); 6664 if (bogo < 4000) 6665 thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC); 6666 6667 pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n", 6668 loops_per_jiffy, bogo, thresh); 6669 6670 wq_cpu_intensive_thresh_us = thresh; 6671 } 6672 6673 /** 6674 * workqueue_init - bring workqueue subsystem fully online 6675 * 6676 * This is the second step of three-staged workqueue subsystem initialization 6677 * and is invoked as soon as kthreads can be created and scheduled. Workqueues 6678 * have been created and work items queued on them, but there are no kworkers 6679 * executing the work items yet. Populate the worker pools with the initial 6680 * workers and enable future kworker creations. 6681 */ 6682 void __init workqueue_init(void) 6683 { 6684 struct workqueue_struct *wq; 6685 struct worker_pool *pool; 6686 int cpu, bkt; 6687 6688 wq_cpu_intensive_thresh_init(); 6689 6690 mutex_lock(&wq_pool_mutex); 6691 6692 /* 6693 * Per-cpu pools created earlier could be missing the node hint. Fix them 6694 * up. Also, create a rescuer for workqueues that requested it.
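 * (A workqueue requests a rescuer by passing WQ_MEM_RECLAIM to
 * alloc_workqueue(); for other workqueues init_rescuer() does nothing.)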
6695 */ 6696 for_each_possible_cpu(cpu) { 6697 for_each_cpu_worker_pool(pool, cpu) { 6698 pool->node = cpu_to_node(cpu); 6699 } 6700 } 6701 6702 list_for_each_entry(wq, &workqueues, list) { 6703 WARN(init_rescuer(wq), 6704 "workqueue: failed to create early rescuer for %s", 6705 wq->name); 6706 } 6707 6708 mutex_unlock(&wq_pool_mutex); 6709 6710 /* create the initial workers */ 6711 for_each_online_cpu(cpu) { 6712 for_each_cpu_worker_pool(pool, cpu) { 6713 pool->flags &= ~POOL_DISASSOCIATED; 6714 BUG_ON(!create_worker(pool)); 6715 } 6716 } 6717 6718 hash_for_each(unbound_pool_hash, bkt, pool, hash_node) 6719 BUG_ON(!create_worker(pool)); 6720 6721 wq_online = true; 6722 wq_watchdog_init(); 6723 } 6724 6725 /* 6726 * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to 6727 * @cpus_share_pod(). Each subset of CPUs that share a pod is assigned a unique 6728 * and consecutive pod ID. The rest of @pt is initialized accordingly. 6729 */ 6730 static void __init init_pod_type(struct wq_pod_type *pt, 6731 bool (*cpus_share_pod)(int, int)) 6732 { 6733 int cur, pre, cpu, pod; 6734 6735 pt->nr_pods = 0; 6736 6737 /* init @pt->cpu_pod[] according to @cpus_share_pod() */ 6738 pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL); 6739 BUG_ON(!pt->cpu_pod); 6740 6741 for_each_possible_cpu(cur) { 6742 for_each_possible_cpu(pre) { 6743 if (pre >= cur) { 6744 pt->cpu_pod[cur] = pt->nr_pods++; 6745 break; 6746 } 6747 if (cpus_share_pod(cur, pre)) { 6748 pt->cpu_pod[cur] = pt->cpu_pod[pre]; 6749 break; 6750 } 6751 } 6752 } 6753 6754 /* init the rest to match @pt->cpu_pod[] */ 6755 pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL); 6756 pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL); 6757 BUG_ON(!pt->pod_cpus || !pt->pod_node); 6758 6759 for (pod = 0; pod < pt->nr_pods; pod++) 6760 BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL)); 6761 6762 for_each_possible_cpu(cpu) { 6763 cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]); 6764 pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu); 6765 } 6766 } 6767 6768 static bool __init cpus_dont_share(int cpu0, int cpu1) 6769 { 6770 return false; 6771 } 6772 6773 static bool __init cpus_share_smt(int cpu0, int cpu1) 6774 { 6775 #ifdef CONFIG_SCHED_SMT 6776 return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1)); 6777 #else 6778 return false; 6779 #endif 6780 } 6781 6782 static bool __init cpus_share_numa(int cpu0, int cpu1) 6783 { 6784 return cpu_to_node(cpu0) == cpu_to_node(cpu1); 6785 } 6786 6787 /** 6788 * workqueue_init_topology - initialize CPU pods for unbound workqueues 6789 * 6790 * This is the third step of three-staged workqueue subsystem initialization and 6791 * is invoked after SMP and topology information are fully initialized. It 6792 * initializes the unbound CPU pods accordingly. 6793 */ 6794 void __init workqueue_init_topology(void) 6795 { 6796 struct workqueue_struct *wq; 6797 int cpu; 6798 6799 init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share); 6800 init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt); 6801 init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache); 6802 init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa); 6803 6804 mutex_lock(&wq_pool_mutex); 6805 6806 /* 6807 * Workqueues allocated earlier would have all CPUs sharing the default 6808 * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU 6809 * combinations to apply per-pod sharing.
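 * (This mirrors what workqueue_online_cpu() and workqueue_offline_cpu()
 * above do on CPU hotplug to keep pod affinity in sync.)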
6810 */ 6811 list_for_each_entry(wq, &workqueues, list) { 6812 for_each_online_cpu(cpu) { 6813 wq_update_pod(wq, cpu, cpu, true); 6814 } 6815 } 6816 6817 mutex_unlock(&wq_pool_mutex); 6818 } 6819 6820 void __warn_flushing_systemwide_wq(void) 6821 { 6822 pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n"); 6823 dump_stack(); 6824 } 6825 EXPORT_SYMBOL(__warn_flushing_systemwide_wq); 6826 6827 static int __init workqueue_unbound_cpus_setup(char *str) 6828 { 6829 if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) { 6830 cpumask_clear(&wq_cmdline_cpumask); 6831 pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n"); 6832 } 6833 6834 return 1; 6835 } 6836 __setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup); 6837
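
/*
 * Example: booting with "workqueue.unbound_cpus=0-3" restricts unbound
 * workqueue workers to CPUs 0-3, further narrowed by the housekeeping
 * masks applied in workqueue_init_early().
 */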