1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * kernel/workqueue.c - generic async execution with shared worker pool 4 * 5 * Copyright (C) 2002 Ingo Molnar 6 * 7 * Derived from the taskqueue/keventd code by: 8 * David Woodhouse <dwmw2@infradead.org> 9 * Andrew Morton 10 * Kai Petzke <wpp@marie.physik.tu-berlin.de> 11 * Theodore Ts'o <tytso@mit.edu> 12 * 13 * Made to use alloc_percpu by Christoph Lameter. 14 * 15 * Copyright (C) 2010 SUSE Linux Products GmbH 16 * Copyright (C) 2010 Tejun Heo <tj@kernel.org> 17 * 18 * This is the generic async execution mechanism. Work items as are 19 * executed in process context. The worker pool is shared and 20 * automatically managed. There are two worker pools for each CPU (one for 21 * normal work items and the other for high priority ones) and some extra 22 * pools for workqueues which are not bound to any specific CPU - the 23 * number of these backing pools is dynamic. 24 * 25 * Please read Documentation/core-api/workqueue.rst for details. 26 */ 27 28 #include <linux/export.h> 29 #include <linux/kernel.h> 30 #include <linux/sched.h> 31 #include <linux/init.h> 32 #include <linux/signal.h> 33 #include <linux/completion.h> 34 #include <linux/workqueue.h> 35 #include <linux/slab.h> 36 #include <linux/cpu.h> 37 #include <linux/notifier.h> 38 #include <linux/kthread.h> 39 #include <linux/hardirq.h> 40 #include <linux/mempolicy.h> 41 #include <linux/freezer.h> 42 #include <linux/debug_locks.h> 43 #include <linux/lockdep.h> 44 #include <linux/idr.h> 45 #include <linux/jhash.h> 46 #include <linux/hashtable.h> 47 #include <linux/rculist.h> 48 #include <linux/nodemask.h> 49 #include <linux/moduleparam.h> 50 #include <linux/uaccess.h> 51 #include <linux/sched/isolation.h> 52 #include <linux/sched/debug.h> 53 #include <linux/nmi.h> 54 #include <linux/kvm_para.h> 55 #include <linux/delay.h> 56 57 #include "workqueue_internal.h" 58 59 enum { 60 /* 61 * worker_pool flags 62 * 63 * A bound pool is either associated or disassociated with its CPU. 64 * While associated (!DISASSOCIATED), all workers are bound to the 65 * CPU and none has %WORKER_UNBOUND set and concurrency management 66 * is in effect. 67 * 68 * While DISASSOCIATED, the cpu may be offline and all workers have 69 * %WORKER_UNBOUND set and concurrency management disabled, and may 70 * be executing on any CPU. The pool behaves as an unbound one. 71 * 72 * Note that DISASSOCIATED should be flipped only while holding 73 * wq_pool_attach_mutex to avoid changing binding state while 74 * worker_attach_to_pool() is in progress. 75 */ 76 POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */ 77 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ 78 79 /* worker flags */ 80 WORKER_DIE = 1 << 1, /* die die die */ 81 WORKER_IDLE = 1 << 2, /* is idle */ 82 WORKER_PREP = 1 << 3, /* preparing to run works */ 83 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */ 84 WORKER_UNBOUND = 1 << 7, /* worker is unbound */ 85 WORKER_REBOUND = 1 << 8, /* worker was rebound */ 86 87 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE | 88 WORKER_UNBOUND | WORKER_REBOUND, 89 90 NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */ 91 92 UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */ 93 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */ 94 95 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ 96 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ 97 98 MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? 
HZ / 100 : 2, 99 /* call for help after 10ms 100 (min two ticks) */ 101 MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ 102 CREATE_COOLDOWN = HZ, /* time to breath after fail */ 103 104 /* 105 * Rescue workers are used only on emergencies and shared by 106 * all cpus. Give MIN_NICE. 107 */ 108 RESCUER_NICE_LEVEL = MIN_NICE, 109 HIGHPRI_NICE_LEVEL = MIN_NICE, 110 111 WQ_NAME_LEN = 32, 112 }; 113 114 /* 115 * Structure fields follow one of the following exclusion rules. 116 * 117 * I: Modifiable by initialization/destruction paths and read-only for 118 * everyone else. 119 * 120 * P: Preemption protected. Disabling preemption is enough and should 121 * only be modified and accessed from the local cpu. 122 * 123 * L: pool->lock protected. Access with pool->lock held. 124 * 125 * K: Only modified by worker while holding pool->lock. Can be safely read by 126 * self, while holding pool->lock or from IRQ context if %current is the 127 * kworker. 128 * 129 * S: Only modified by worker self. 130 * 131 * A: wq_pool_attach_mutex protected. 132 * 133 * PL: wq_pool_mutex protected. 134 * 135 * PR: wq_pool_mutex protected for writes. RCU protected for reads. 136 * 137 * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads. 138 * 139 * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or 140 * RCU for reads. 141 * 142 * WQ: wq->mutex protected. 143 * 144 * WR: wq->mutex protected for writes. RCU protected for reads. 145 * 146 * WO: wq->mutex protected for writes. Updated with WRITE_ONCE() and can be read 147 * with READ_ONCE() without locking. 148 * 149 * MD: wq_mayday_lock protected. 150 * 151 * WD: Used internally by the watchdog. 152 */ 153 154 /* struct worker is defined in workqueue_internal.h */ 155 156 struct worker_pool { 157 raw_spinlock_t lock; /* the pool lock */ 158 int cpu; /* I: the associated cpu */ 159 int node; /* I: the associated node ID */ 160 int id; /* I: pool ID */ 161 unsigned int flags; /* L: flags */ 162 163 unsigned long watchdog_ts; /* L: watchdog timestamp */ 164 bool cpu_stall; /* WD: stalled cpu bound pool */ 165 166 /* 167 * The counter is incremented in a process context on the associated CPU 168 * w/ preemption disabled, and decremented or reset in the same context 169 * but w/ pool->lock held. The readers grab pool->lock and are 170 * guaranteed to see if the counter reached zero. 
171 */ 172 int nr_running; 173 174 struct list_head worklist; /* L: list of pending works */ 175 176 int nr_workers; /* L: total number of workers */ 177 int nr_idle; /* L: currently idle workers */ 178 179 struct list_head idle_list; /* L: list of idle workers */ 180 struct timer_list idle_timer; /* L: worker idle timeout */ 181 struct work_struct idle_cull_work; /* L: worker idle cleanup */ 182 183 struct timer_list mayday_timer; /* L: SOS timer for workers */ 184 185 /* a workers is either on busy_hash or idle_list, or the manager */ 186 DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER); 187 /* L: hash of busy workers */ 188 189 struct worker *manager; /* L: purely informational */ 190 struct list_head workers; /* A: attached workers */ 191 struct list_head dying_workers; /* A: workers about to die */ 192 struct completion *detach_completion; /* all workers detached */ 193 194 struct ida worker_ida; /* worker IDs for task name */ 195 196 struct workqueue_attrs *attrs; /* I: worker attributes */ 197 struct hlist_node hash_node; /* PL: unbound_pool_hash node */ 198 int refcnt; /* PL: refcnt for unbound pools */ 199 200 /* 201 * Destruction of pool is RCU protected to allow dereferences 202 * from get_work_pool(). 203 */ 204 struct rcu_head rcu; 205 }; 206 207 /* 208 * Per-pool_workqueue statistics. These can be monitored using 209 * tools/workqueue/wq_monitor.py. 210 */ 211 enum pool_workqueue_stats { 212 PWQ_STAT_STARTED, /* work items started execution */ 213 PWQ_STAT_COMPLETED, /* work items completed execution */ 214 PWQ_STAT_CPU_TIME, /* total CPU time consumed */ 215 PWQ_STAT_CPU_INTENSIVE, /* wq_cpu_intensive_thresh_us violations */ 216 PWQ_STAT_CM_WAKEUP, /* concurrency-management worker wakeups */ 217 PWQ_STAT_REPATRIATED, /* unbound workers brought back into scope */ 218 PWQ_STAT_MAYDAY, /* maydays to rescuer */ 219 PWQ_STAT_RESCUED, /* linked work items executed by rescuer */ 220 221 PWQ_NR_STATS, 222 }; 223 224 /* 225 * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS 226 * of work_struct->data are used for flags and the remaining high bits 227 * point to the pwq; thus, pwqs need to be aligned at two's power of the 228 * number of flag bits. 229 */ 230 struct pool_workqueue { 231 struct worker_pool *pool; /* I: the associated pool */ 232 struct workqueue_struct *wq; /* I: the owning workqueue */ 233 int work_color; /* L: current color */ 234 int flush_color; /* L: flushing color */ 235 int refcnt; /* L: reference count */ 236 int nr_in_flight[WORK_NR_COLORS]; 237 /* L: nr of in_flight works */ 238 239 /* 240 * nr_active management and WORK_STRUCT_INACTIVE: 241 * 242 * When pwq->nr_active >= max_active, new work item is queued to 243 * pwq->inactive_works instead of pool->worklist and marked with 244 * WORK_STRUCT_INACTIVE. 245 * 246 * All work items marked with WORK_STRUCT_INACTIVE do not participate 247 * in pwq->nr_active and all work items in pwq->inactive_works are 248 * marked with WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE 249 * work items are in pwq->inactive_works. Some of them are ready to 250 * run in pool->worklist or worker->scheduled. Those work itmes are 251 * only struct wq_barrier which is used for flush_work() and should 252 * not participate in pwq->nr_active. For non-barrier work item, it 253 * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works. 
254 */ 255 int nr_active; /* L: nr of active works */ 256 struct list_head inactive_works; /* L: inactive works */ 257 struct list_head pwqs_node; /* WR: node on wq->pwqs */ 258 struct list_head mayday_node; /* MD: node on wq->maydays */ 259 260 u64 stats[PWQ_NR_STATS]; 261 262 /* 263 * Release of unbound pwq is punted to a kthread_worker. See put_pwq() 264 * and pwq_release_workfn() for details. pool_workqueue itself is also 265 * RCU protected so that the first pwq can be determined without 266 * grabbing wq->mutex. 267 */ 268 struct kthread_work release_work; 269 struct rcu_head rcu; 270 } __aligned(1 << WORK_STRUCT_FLAG_BITS); 271 272 /* 273 * Structure used to wait for workqueue flush. 274 */ 275 struct wq_flusher { 276 struct list_head list; /* WQ: list of flushers */ 277 int flush_color; /* WQ: flush color waiting for */ 278 struct completion done; /* flush completion */ 279 }; 280 281 struct wq_device; 282 283 /* 284 * The externally visible workqueue. It relays the issued work items to 285 * the appropriate worker_pool through its pool_workqueues. 286 */ 287 struct workqueue_struct { 288 struct list_head pwqs; /* WR: all pwqs of this wq */ 289 struct list_head list; /* PR: list of all workqueues */ 290 291 struct mutex mutex; /* protects this wq */ 292 int work_color; /* WQ: current work color */ 293 int flush_color; /* WQ: current flush color */ 294 atomic_t nr_pwqs_to_flush; /* flush in progress */ 295 struct wq_flusher *first_flusher; /* WQ: first flusher */ 296 struct list_head flusher_queue; /* WQ: flush waiters */ 297 struct list_head flusher_overflow; /* WQ: flush overflow list */ 298 299 struct list_head maydays; /* MD: pwqs requesting rescue */ 300 struct worker *rescuer; /* MD: rescue worker */ 301 302 int nr_drainers; /* WQ: drain in progress */ 303 int max_active; /* WO: max active works */ 304 int saved_max_active; /* WQ: saved max_active */ 305 306 struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */ 307 struct pool_workqueue __rcu *dfl_pwq; /* PW: only for unbound wqs */ 308 309 #ifdef CONFIG_SYSFS 310 struct wq_device *wq_dev; /* I: for sysfs interface */ 311 #endif 312 #ifdef CONFIG_LOCKDEP 313 char *lock_name; 314 struct lock_class_key key; 315 struct lockdep_map lockdep_map; 316 #endif 317 char name[WQ_NAME_LEN]; /* I: workqueue name */ 318 319 /* 320 * Destruction of workqueue_struct is RCU protected to allow walking 321 * the workqueues list without grabbing wq_pool_mutex. 322 * This is used to dump all workqueues from sysrq. 323 */ 324 struct rcu_head rcu; 325 326 /* hot fields used during command issue, aligned to cacheline */ 327 unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */ 328 struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */ 329 }; 330 331 static struct kmem_cache *pwq_cache; 332 333 /* 334 * Each pod type describes how CPUs should be grouped for unbound workqueues. 335 * See the comment above workqueue_attrs->affn_scope. 
336 */ 337 struct wq_pod_type { 338 int nr_pods; /* number of pods */ 339 cpumask_var_t *pod_cpus; /* pod -> cpus */ 340 int *pod_node; /* pod -> node */ 341 int *cpu_pod; /* cpu -> pod */ 342 }; 343 344 static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES]; 345 static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE; 346 347 static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = { 348 [WQ_AFFN_DFL] = "default", 349 [WQ_AFFN_CPU] = "cpu", 350 [WQ_AFFN_SMT] = "smt", 351 [WQ_AFFN_CACHE] = "cache", 352 [WQ_AFFN_NUMA] = "numa", 353 [WQ_AFFN_SYSTEM] = "system", 354 }; 355 356 /* 357 * Per-cpu work items which run for longer than the following threshold are 358 * automatically considered CPU intensive and excluded from concurrency 359 * management to prevent them from noticeably delaying other per-cpu work items. 360 * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter. 361 * The actual value is initialized in wq_cpu_intensive_thresh_init(). 362 */ 363 static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX; 364 module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644); 365 366 /* see the comment above the definition of WQ_POWER_EFFICIENT */ 367 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT); 368 module_param_named(power_efficient, wq_power_efficient, bool, 0444); 369 370 static bool wq_online; /* can kworkers be created yet? */ 371 372 /* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */ 373 static struct workqueue_attrs *wq_update_pod_attrs_buf; 374 375 static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */ 376 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */ 377 static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ 378 /* wait for manager to go away */ 379 static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait); 380 381 static LIST_HEAD(workqueues); /* PR: list of all workqueues */ 382 static bool workqueue_freezing; /* PL: have wqs started freezing? */ 383 384 /* PL&A: allowable cpus for unbound wqs and work items */ 385 static cpumask_var_t wq_unbound_cpumask; 386 387 /* for further constrain wq_unbound_cpumask by cmdline parameter*/ 388 static struct cpumask wq_cmdline_cpumask __initdata; 389 390 /* CPU where unbound work was last round robin scheduled from this CPU */ 391 static DEFINE_PER_CPU(int, wq_rr_cpu_last); 392 393 /* 394 * Local execution of unbound work items is no longer guaranteed. The 395 * following always forces round-robin CPU selection on unbound work items 396 * to uncover usages which depend on it. 
397 */ 398 #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU 399 static bool wq_debug_force_rr_cpu = true; 400 #else 401 static bool wq_debug_force_rr_cpu = false; 402 #endif 403 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644); 404 405 /* the per-cpu worker pools */ 406 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools); 407 408 static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */ 409 410 /* PL: hash of all unbound pools keyed by pool->attrs */ 411 static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER); 412 413 /* I: attributes used when instantiating standard unbound pools on demand */ 414 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS]; 415 416 /* I: attributes used when instantiating ordered pools on demand */ 417 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS]; 418 419 /* 420 * I: kthread_worker to release pwq's. pwq release needs to be bounced to a 421 * process context while holding a pool lock. Bounce to a dedicated kthread 422 * worker to avoid A-A deadlocks. 423 */ 424 static struct kthread_worker *pwq_release_worker; 425 426 struct workqueue_struct *system_wq __read_mostly; 427 EXPORT_SYMBOL(system_wq); 428 struct workqueue_struct *system_highpri_wq __read_mostly; 429 EXPORT_SYMBOL_GPL(system_highpri_wq); 430 struct workqueue_struct *system_long_wq __read_mostly; 431 EXPORT_SYMBOL_GPL(system_long_wq); 432 struct workqueue_struct *system_unbound_wq __read_mostly; 433 EXPORT_SYMBOL_GPL(system_unbound_wq); 434 struct workqueue_struct *system_freezable_wq __read_mostly; 435 EXPORT_SYMBOL_GPL(system_freezable_wq); 436 struct workqueue_struct *system_power_efficient_wq __read_mostly; 437 EXPORT_SYMBOL_GPL(system_power_efficient_wq); 438 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly; 439 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); 440 441 static int worker_thread(void *__worker); 442 static void workqueue_sysfs_unregister(struct workqueue_struct *wq); 443 static void show_pwq(struct pool_workqueue *pwq); 444 static void show_one_worker_pool(struct worker_pool *pool); 445 446 #define CREATE_TRACE_POINTS 447 #include <trace/events/workqueue.h> 448 449 #define assert_rcu_or_pool_mutex() \ 450 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ 451 !lockdep_is_held(&wq_pool_mutex), \ 452 "RCU or wq_pool_mutex should be held") 453 454 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ 455 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ 456 !lockdep_is_held(&wq->mutex) && \ 457 !lockdep_is_held(&wq_pool_mutex), \ 458 "RCU, wq->mutex or wq_pool_mutex should be held") 459 460 #define for_each_cpu_worker_pool(pool, cpu) \ 461 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \ 462 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \ 463 (pool)++) 464 465 /** 466 * for_each_pool - iterate through all worker_pools in the system 467 * @pool: iteration cursor 468 * @pi: integer used for iteration 469 * 470 * This must be called either with wq_pool_mutex held or RCU read 471 * locked. If the pool needs to be used beyond the locking in effect, the 472 * caller is responsible for guaranteeing that the pool stays online. 473 * 474 * The if/else clause exists only for the lockdep assertion and can be 475 * ignored. 
476 */ 477 #define for_each_pool(pool, pi) \ 478 idr_for_each_entry(&worker_pool_idr, pool, pi) \ 479 if (({ assert_rcu_or_pool_mutex(); false; })) { } \ 480 else 481 482 /** 483 * for_each_pool_worker - iterate through all workers of a worker_pool 484 * @worker: iteration cursor 485 * @pool: worker_pool to iterate workers of 486 * 487 * This must be called with wq_pool_attach_mutex. 488 * 489 * The if/else clause exists only for the lockdep assertion and can be 490 * ignored. 491 */ 492 #define for_each_pool_worker(worker, pool) \ 493 list_for_each_entry((worker), &(pool)->workers, node) \ 494 if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \ 495 else 496 497 /** 498 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue 499 * @pwq: iteration cursor 500 * @wq: the target workqueue 501 * 502 * This must be called either with wq->mutex held or RCU read locked. 503 * If the pwq needs to be used beyond the locking in effect, the caller is 504 * responsible for guaranteeing that the pwq stays online. 505 * 506 * The if/else clause exists only for the lockdep assertion and can be 507 * ignored. 508 */ 509 #define for_each_pwq(pwq, wq) \ 510 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \ 511 lockdep_is_held(&(wq->mutex))) 512 513 #ifdef CONFIG_DEBUG_OBJECTS_WORK 514 515 static const struct debug_obj_descr work_debug_descr; 516 517 static void *work_debug_hint(void *addr) 518 { 519 return ((struct work_struct *) addr)->func; 520 } 521 522 static bool work_is_static_object(void *addr) 523 { 524 struct work_struct *work = addr; 525 526 return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work)); 527 } 528 529 /* 530 * fixup_init is called when: 531 * - an active object is initialized 532 */ 533 static bool work_fixup_init(void *addr, enum debug_obj_state state) 534 { 535 struct work_struct *work = addr; 536 537 switch (state) { 538 case ODEBUG_STATE_ACTIVE: 539 cancel_work_sync(work); 540 debug_object_init(work, &work_debug_descr); 541 return true; 542 default: 543 return false; 544 } 545 } 546 547 /* 548 * fixup_free is called when: 549 * - an active object is freed 550 */ 551 static bool work_fixup_free(void *addr, enum debug_obj_state state) 552 { 553 struct work_struct *work = addr; 554 555 switch (state) { 556 case ODEBUG_STATE_ACTIVE: 557 cancel_work_sync(work); 558 debug_object_free(work, &work_debug_descr); 559 return true; 560 default: 561 return false; 562 } 563 } 564 565 static const struct debug_obj_descr work_debug_descr = { 566 .name = "work_struct", 567 .debug_hint = work_debug_hint, 568 .is_static_object = work_is_static_object, 569 .fixup_init = work_fixup_init, 570 .fixup_free = work_fixup_free, 571 }; 572 573 static inline void debug_work_activate(struct work_struct *work) 574 { 575 debug_object_activate(work, &work_debug_descr); 576 } 577 578 static inline void debug_work_deactivate(struct work_struct *work) 579 { 580 debug_object_deactivate(work, &work_debug_descr); 581 } 582 583 void __init_work(struct work_struct *work, int onstack) 584 { 585 if (onstack) 586 debug_object_init_on_stack(work, &work_debug_descr); 587 else 588 debug_object_init(work, &work_debug_descr); 589 } 590 EXPORT_SYMBOL_GPL(__init_work); 591 592 void destroy_work_on_stack(struct work_struct *work) 593 { 594 debug_object_free(work, &work_debug_descr); 595 } 596 EXPORT_SYMBOL_GPL(destroy_work_on_stack); 597 598 void destroy_delayed_work_on_stack(struct delayed_work *work) 599 { 600 destroy_timer_on_stack(&work->timer); 601 debug_object_free(&work->work, 
&work_debug_descr); 602 } 603 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack); 604 605 #else 606 static inline void debug_work_activate(struct work_struct *work) { } 607 static inline void debug_work_deactivate(struct work_struct *work) { } 608 #endif 609 610 /** 611 * worker_pool_assign_id - allocate ID and assign it to @pool 612 * @pool: the pool pointer of interest 613 * 614 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned 615 * successfully, -errno on failure. 616 */ 617 static int worker_pool_assign_id(struct worker_pool *pool) 618 { 619 int ret; 620 621 lockdep_assert_held(&wq_pool_mutex); 622 623 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE, 624 GFP_KERNEL); 625 if (ret >= 0) { 626 pool->id = ret; 627 return 0; 628 } 629 return ret; 630 } 631 632 static struct pool_workqueue __rcu ** 633 unbound_pwq_slot(struct workqueue_struct *wq, int cpu) 634 { 635 if (cpu >= 0) 636 return per_cpu_ptr(wq->cpu_pwq, cpu); 637 else 638 return &wq->dfl_pwq; 639 } 640 641 /* @cpu < 0 for dfl_pwq */ 642 static struct pool_workqueue *unbound_pwq(struct workqueue_struct *wq, int cpu) 643 { 644 return rcu_dereference_check(*unbound_pwq_slot(wq, cpu), 645 lockdep_is_held(&wq_pool_mutex) || 646 lockdep_is_held(&wq->mutex)); 647 } 648 649 static unsigned int work_color_to_flags(int color) 650 { 651 return color << WORK_STRUCT_COLOR_SHIFT; 652 } 653 654 static int get_work_color(unsigned long work_data) 655 { 656 return (work_data >> WORK_STRUCT_COLOR_SHIFT) & 657 ((1 << WORK_STRUCT_COLOR_BITS) - 1); 658 } 659 660 static int work_next_color(int color) 661 { 662 return (color + 1) % WORK_NR_COLORS; 663 } 664 665 /* 666 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data 667 * contain the pointer to the queued pwq. Once execution starts, the flag 668 * is cleared and the high bits contain OFFQ flags and pool ID. 669 * 670 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling() 671 * and clear_work_data() can be used to set the pwq, pool or clear 672 * work->data. These functions should only be called while the work is 673 * owned - ie. while the PENDING bit is set. 674 * 675 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq 676 * corresponding to a work. Pool is available once the work has been 677 * queued anywhere after initialization until it is sync canceled. pwq is 678 * available only while the work item is queued. 679 * 680 * %WORK_OFFQ_CANCELING is used to mark a work item which is being 681 * canceled. While being canceled, a work item may have its PENDING set 682 * but stay off timer and worklist for arbitrarily long and nobody should 683 * try to steal the PENDING bit. 
684 */ 685 static inline void set_work_data(struct work_struct *work, unsigned long data, 686 unsigned long flags) 687 { 688 WARN_ON_ONCE(!work_pending(work)); 689 atomic_long_set(&work->data, data | flags | work_static(work)); 690 } 691 692 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, 693 unsigned long extra_flags) 694 { 695 set_work_data(work, (unsigned long)pwq, 696 WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags); 697 } 698 699 static void set_work_pool_and_keep_pending(struct work_struct *work, 700 int pool_id) 701 { 702 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 703 WORK_STRUCT_PENDING); 704 } 705 706 static void set_work_pool_and_clear_pending(struct work_struct *work, 707 int pool_id) 708 { 709 /* 710 * The following wmb is paired with the implied mb in 711 * test_and_set_bit(PENDING) and ensures all updates to @work made 712 * here are visible to and precede any updates by the next PENDING 713 * owner. 714 */ 715 smp_wmb(); 716 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); 717 /* 718 * The following mb guarantees that previous clear of a PENDING bit 719 * will not be reordered with any speculative LOADS or STORES from 720 * work->current_func, which is executed afterwards. This possible 721 * reordering can lead to a missed execution on attempt to queue 722 * the same @work. E.g. consider this case: 723 * 724 * CPU#0 CPU#1 725 * ---------------------------- -------------------------------- 726 * 727 * 1 STORE event_indicated 728 * 2 queue_work_on() { 729 * 3 test_and_set_bit(PENDING) 730 * 4 } set_..._and_clear_pending() { 731 * 5 set_work_data() # clear bit 732 * 6 smp_mb() 733 * 7 work->current_func() { 734 * 8 LOAD event_indicated 735 * } 736 * 737 * Without an explicit full barrier speculative LOAD on line 8 can 738 * be executed before CPU#0 does STORE on line 1. If that happens, 739 * CPU#0 observes the PENDING bit is still set and new execution of 740 * a @work is not queued in a hope, that CPU#1 will eventually 741 * finish the queued @work. Meanwhile CPU#1 does not see 742 * event_indicated is set, because speculative LOAD was executed 743 * before actual STORE. 744 */ 745 smp_mb(); 746 } 747 748 static void clear_work_data(struct work_struct *work) 749 { 750 smp_wmb(); /* see set_work_pool_and_clear_pending() */ 751 set_work_data(work, WORK_STRUCT_NO_POOL, 0); 752 } 753 754 static inline struct pool_workqueue *work_struct_pwq(unsigned long data) 755 { 756 return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK); 757 } 758 759 static struct pool_workqueue *get_work_pwq(struct work_struct *work) 760 { 761 unsigned long data = atomic_long_read(&work->data); 762 763 if (data & WORK_STRUCT_PWQ) 764 return work_struct_pwq(data); 765 else 766 return NULL; 767 } 768 769 /** 770 * get_work_pool - return the worker_pool a given work was associated with 771 * @work: the work item of interest 772 * 773 * Pools are created and destroyed under wq_pool_mutex, and allows read 774 * access under RCU read lock. As such, this function should be 775 * called under wq_pool_mutex or inside of a rcu_read_lock() region. 776 * 777 * All fields of the returned pool are accessible as long as the above 778 * mentioned locking is in effect. If the returned pool needs to be used 779 * beyond the critical section, the caller is responsible for ensuring the 780 * returned pool is and stays online. 781 * 782 * Return: The worker_pool @work was last associated with. %NULL if none. 
783 */ 784 static struct worker_pool *get_work_pool(struct work_struct *work) 785 { 786 unsigned long data = atomic_long_read(&work->data); 787 int pool_id; 788 789 assert_rcu_or_pool_mutex(); 790 791 if (data & WORK_STRUCT_PWQ) 792 return work_struct_pwq(data)->pool; 793 794 pool_id = data >> WORK_OFFQ_POOL_SHIFT; 795 if (pool_id == WORK_OFFQ_POOL_NONE) 796 return NULL; 797 798 return idr_find(&worker_pool_idr, pool_id); 799 } 800 801 /** 802 * get_work_pool_id - return the worker pool ID a given work is associated with 803 * @work: the work item of interest 804 * 805 * Return: The worker_pool ID @work was last associated with. 806 * %WORK_OFFQ_POOL_NONE if none. 807 */ 808 static int get_work_pool_id(struct work_struct *work) 809 { 810 unsigned long data = atomic_long_read(&work->data); 811 812 if (data & WORK_STRUCT_PWQ) 813 return work_struct_pwq(data)->pool->id; 814 815 return data >> WORK_OFFQ_POOL_SHIFT; 816 } 817 818 static void mark_work_canceling(struct work_struct *work) 819 { 820 unsigned long pool_id = get_work_pool_id(work); 821 822 pool_id <<= WORK_OFFQ_POOL_SHIFT; 823 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING); 824 } 825 826 static bool work_is_canceling(struct work_struct *work) 827 { 828 unsigned long data = atomic_long_read(&work->data); 829 830 return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING); 831 } 832 833 /* 834 * Policy functions. These define the policies on how the global worker 835 * pools are managed. Unless noted otherwise, these functions assume that 836 * they're being called with pool->lock held. 837 */ 838 839 /* 840 * Need to wake up a worker? Called from anything but currently 841 * running workers. 842 * 843 * Note that, because unbound workers never contribute to nr_running, this 844 * function will always return %true for unbound pools as long as the 845 * worklist isn't empty. 846 */ 847 static bool need_more_worker(struct worker_pool *pool) 848 { 849 return !list_empty(&pool->worklist) && !pool->nr_running; 850 } 851 852 /* Can I start working? Called from busy but !running workers. */ 853 static bool may_start_working(struct worker_pool *pool) 854 { 855 return pool->nr_idle; 856 } 857 858 /* Do I need to keep working? Called from currently running workers. */ 859 static bool keep_working(struct worker_pool *pool) 860 { 861 return !list_empty(&pool->worklist) && (pool->nr_running <= 1); 862 } 863 864 /* Do we need a new worker? Called from manager. */ 865 static bool need_to_create_worker(struct worker_pool *pool) 866 { 867 return need_more_worker(pool) && !may_start_working(pool); 868 } 869 870 /* Do we have too many workers and should some go away? */ 871 static bool too_many_workers(struct worker_pool *pool) 872 { 873 bool managing = pool->flags & POOL_MANAGER_ACTIVE; 874 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ 875 int nr_busy = pool->nr_workers - nr_idle; 876 877 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; 878 } 879 880 /** 881 * worker_set_flags - set worker flags and adjust nr_running accordingly 882 * @worker: self 883 * @flags: flags to set 884 * 885 * Set @flags in @worker->flags and adjust nr_running accordingly. 886 */ 887 static inline void worker_set_flags(struct worker *worker, unsigned int flags) 888 { 889 struct worker_pool *pool = worker->pool; 890 891 lockdep_assert_held(&pool->lock); 892 893 /* If transitioning into NOT_RUNNING, adjust nr_running. 
*/ 894 if ((flags & WORKER_NOT_RUNNING) && 895 !(worker->flags & WORKER_NOT_RUNNING)) { 896 pool->nr_running--; 897 } 898 899 worker->flags |= flags; 900 } 901 902 /** 903 * worker_clr_flags - clear worker flags and adjust nr_running accordingly 904 * @worker: self 905 * @flags: flags to clear 906 * 907 * Clear @flags in @worker->flags and adjust nr_running accordingly. 908 */ 909 static inline void worker_clr_flags(struct worker *worker, unsigned int flags) 910 { 911 struct worker_pool *pool = worker->pool; 912 unsigned int oflags = worker->flags; 913 914 lockdep_assert_held(&pool->lock); 915 916 worker->flags &= ~flags; 917 918 /* 919 * If transitioning out of NOT_RUNNING, increment nr_running. Note 920 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask 921 * of multiple flags, not a single flag. 922 */ 923 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) 924 if (!(worker->flags & WORKER_NOT_RUNNING)) 925 pool->nr_running++; 926 } 927 928 /* Return the first idle worker. Called with pool->lock held. */ 929 static struct worker *first_idle_worker(struct worker_pool *pool) 930 { 931 if (unlikely(list_empty(&pool->idle_list))) 932 return NULL; 933 934 return list_first_entry(&pool->idle_list, struct worker, entry); 935 } 936 937 /** 938 * worker_enter_idle - enter idle state 939 * @worker: worker which is entering idle state 940 * 941 * @worker is entering idle state. Update stats and idle timer if 942 * necessary. 943 * 944 * LOCKING: 945 * raw_spin_lock_irq(pool->lock). 946 */ 947 static void worker_enter_idle(struct worker *worker) 948 { 949 struct worker_pool *pool = worker->pool; 950 951 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) || 952 WARN_ON_ONCE(!list_empty(&worker->entry) && 953 (worker->hentry.next || worker->hentry.pprev))) 954 return; 955 956 /* can't use worker_set_flags(), also called from create_worker() */ 957 worker->flags |= WORKER_IDLE; 958 pool->nr_idle++; 959 worker->last_active = jiffies; 960 961 /* idle_list is LIFO */ 962 list_add(&worker->entry, &pool->idle_list); 963 964 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) 965 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); 966 967 /* Sanity check nr_running. */ 968 WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running); 969 } 970 971 /** 972 * worker_leave_idle - leave idle state 973 * @worker: worker which is leaving idle state 974 * 975 * @worker is leaving idle state. Update stats. 976 * 977 * LOCKING: 978 * raw_spin_lock_irq(pool->lock). 979 */ 980 static void worker_leave_idle(struct worker *worker) 981 { 982 struct worker_pool *pool = worker->pool; 983 984 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE))) 985 return; 986 worker_clr_flags(worker, WORKER_IDLE); 987 pool->nr_idle--; 988 list_del_init(&worker->entry); 989 } 990 991 /** 992 * find_worker_executing_work - find worker which is executing a work 993 * @pool: pool of interest 994 * @work: work to find worker for 995 * 996 * Find a worker which is executing @work on @pool by searching 997 * @pool->busy_hash which is keyed by the address of @work. For a worker 998 * to match, its current execution should match the address of @work and 999 * its work function. This is to avoid unwanted dependency between 1000 * unrelated work executions through a work item being recycled while still 1001 * being executed. 1002 * 1003 * This is a bit tricky. A work item may be freed once its execution 1004 * starts and nothing prevents the freed area from being recycled for 1005 * another work item. 
If the same work item address ends up being reused 1006 * before the original execution finishes, workqueue will identify the 1007 * recycled work item as currently executing and make it wait until the 1008 * current execution finishes, introducing an unwanted dependency. 1009 * 1010 * This function checks the work item address and work function to avoid 1011 * false positives. Note that this isn't complete as one may construct a 1012 * work function which can introduce dependency onto itself through a 1013 * recycled work item. Well, if somebody wants to shoot oneself in the 1014 * foot that badly, there's only so much we can do, and if such deadlock 1015 * actually occurs, it should be easy to locate the culprit work function. 1016 * 1017 * CONTEXT: 1018 * raw_spin_lock_irq(pool->lock). 1019 * 1020 * Return: 1021 * Pointer to worker which is executing @work if found, %NULL 1022 * otherwise. 1023 */ 1024 static struct worker *find_worker_executing_work(struct worker_pool *pool, 1025 struct work_struct *work) 1026 { 1027 struct worker *worker; 1028 1029 hash_for_each_possible(pool->busy_hash, worker, hentry, 1030 (unsigned long)work) 1031 if (worker->current_work == work && 1032 worker->current_func == work->func) 1033 return worker; 1034 1035 return NULL; 1036 } 1037 1038 /** 1039 * move_linked_works - move linked works to a list 1040 * @work: start of series of works to be scheduled 1041 * @head: target list to append @work to 1042 * @nextp: out parameter for nested worklist walking 1043 * 1044 * Schedule linked works starting from @work to @head. Work series to be 1045 * scheduled starts at @work and includes any consecutive work with 1046 * WORK_STRUCT_LINKED set in its predecessor. See assign_work() for details on 1047 * @nextp. 1048 * 1049 * CONTEXT: 1050 * raw_spin_lock_irq(pool->lock). 1051 */ 1052 static void move_linked_works(struct work_struct *work, struct list_head *head, 1053 struct work_struct **nextp) 1054 { 1055 struct work_struct *n; 1056 1057 /* 1058 * Linked worklist will always end before the end of the list, 1059 * use NULL for list head. 1060 */ 1061 list_for_each_entry_safe_from(work, n, NULL, entry) { 1062 list_move_tail(&work->entry, head); 1063 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 1064 break; 1065 } 1066 1067 /* 1068 * If we're already inside safe list traversal and have moved 1069 * multiple works to the scheduled queue, the next position 1070 * needs to be updated. 1071 */ 1072 if (nextp) 1073 *nextp = n; 1074 } 1075 1076 /** 1077 * assign_work - assign a work item and its linked work items to a worker 1078 * @work: work to assign 1079 * @worker: worker to assign to 1080 * @nextp: out parameter for nested worklist walking 1081 * 1082 * Assign @work and its linked work items to @worker. If @work is already being 1083 * executed by another worker in the same pool, it'll be punted there. 1084 * 1085 * If @nextp is not NULL, it's updated to point to the next work of the last 1086 * scheduled work. This allows assign_work() to be nested inside 1087 * list_for_each_entry_safe(). 1088 * 1089 * Returns %true if @work was successfully assigned to @worker. %false if @work 1090 * was punted to another worker already executing it. 
1091 */ 1092 static bool assign_work(struct work_struct *work, struct worker *worker, 1093 struct work_struct **nextp) 1094 { 1095 struct worker_pool *pool = worker->pool; 1096 struct worker *collision; 1097 1098 lockdep_assert_held(&pool->lock); 1099 1100 /* 1101 * A single work shouldn't be executed concurrently by multiple workers. 1102 * __queue_work() ensures that @work doesn't jump to a different pool 1103 * while still running in the previous pool. Here, we should ensure that 1104 * @work is not executed concurrently by multiple workers from the same 1105 * pool. Check whether anyone is already processing the work. If so, 1106 * defer the work to the currently executing one. 1107 */ 1108 collision = find_worker_executing_work(pool, work); 1109 if (unlikely(collision)) { 1110 move_linked_works(work, &collision->scheduled, nextp); 1111 return false; 1112 } 1113 1114 move_linked_works(work, &worker->scheduled, nextp); 1115 return true; 1116 } 1117 1118 /** 1119 * kick_pool - wake up an idle worker if necessary 1120 * @pool: pool to kick 1121 * 1122 * @pool may have pending work items. Wake up worker if necessary. Returns 1123 * whether a worker was woken up. 1124 */ 1125 static bool kick_pool(struct worker_pool *pool) 1126 { 1127 struct worker *worker = first_idle_worker(pool); 1128 struct task_struct *p; 1129 1130 lockdep_assert_held(&pool->lock); 1131 1132 if (!need_more_worker(pool) || !worker) 1133 return false; 1134 1135 p = worker->task; 1136 1137 #ifdef CONFIG_SMP 1138 /* 1139 * Idle @worker is about to execute @work and waking up provides an 1140 * opportunity to migrate @worker at a lower cost by setting the task's 1141 * wake_cpu field. Let's see if we want to move @worker to improve 1142 * execution locality. 1143 * 1144 * We're waking the worker that went idle the latest and there's some 1145 * chance that @worker is marked idle but hasn't gone off CPU yet. If 1146 * so, setting the wake_cpu won't do anything. As this is a best-effort 1147 * optimization and the race window is narrow, let's leave as-is for 1148 * now. If this becomes pronounced, we can skip over workers which are 1149 * still on cpu when picking an idle worker. 1150 * 1151 * If @pool has non-strict affinity, @worker might have ended up outside 1152 * its affinity scope. Repatriate. 1153 */ 1154 if (!pool->attrs->affn_strict && 1155 !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) { 1156 struct work_struct *work = list_first_entry(&pool->worklist, 1157 struct work_struct, entry); 1158 p->wake_cpu = cpumask_any_distribute(pool->attrs->__pod_cpumask); 1159 get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++; 1160 } 1161 #endif 1162 wake_up_process(p); 1163 return true; 1164 } 1165 1166 #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT 1167 1168 /* 1169 * Concurrency-managed per-cpu work items that hog CPU for longer than 1170 * wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism, 1171 * which prevents them from stalling other concurrency-managed work items. If a 1172 * work function keeps triggering this mechanism, it's likely that the work item 1173 * should be using an unbound workqueue instead. 1174 * 1175 * wq_cpu_intensive_report() tracks work functions which trigger such conditions 1176 * and report them so that they can be examined and converted to use unbound 1177 * workqueues as appropriate. To avoid flooding the console, each violating work 1178 * function is tracked and reported with exponential backoff. 
1179 */ 1180 #define WCI_MAX_ENTS 128 1181 1182 struct wci_ent { 1183 work_func_t func; 1184 atomic64_t cnt; 1185 struct hlist_node hash_node; 1186 }; 1187 1188 static struct wci_ent wci_ents[WCI_MAX_ENTS]; 1189 static int wci_nr_ents; 1190 static DEFINE_RAW_SPINLOCK(wci_lock); 1191 static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS)); 1192 1193 static struct wci_ent *wci_find_ent(work_func_t func) 1194 { 1195 struct wci_ent *ent; 1196 1197 hash_for_each_possible_rcu(wci_hash, ent, hash_node, 1198 (unsigned long)func) { 1199 if (ent->func == func) 1200 return ent; 1201 } 1202 return NULL; 1203 } 1204 1205 static void wq_cpu_intensive_report(work_func_t func) 1206 { 1207 struct wci_ent *ent; 1208 1209 restart: 1210 ent = wci_find_ent(func); 1211 if (ent) { 1212 u64 cnt; 1213 1214 /* 1215 * Start reporting from the fourth time and back off 1216 * exponentially. 1217 */ 1218 cnt = atomic64_inc_return_relaxed(&ent->cnt); 1219 if (cnt >= 4 && is_power_of_2(cnt)) 1220 printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n", 1221 ent->func, wq_cpu_intensive_thresh_us, 1222 atomic64_read(&ent->cnt)); 1223 return; 1224 } 1225 1226 /* 1227 * @func is a new violation. Allocate a new entry for it. If wcn_ents[] 1228 * is exhausted, something went really wrong and we probably made enough 1229 * noise already. 1230 */ 1231 if (wci_nr_ents >= WCI_MAX_ENTS) 1232 return; 1233 1234 raw_spin_lock(&wci_lock); 1235 1236 if (wci_nr_ents >= WCI_MAX_ENTS) { 1237 raw_spin_unlock(&wci_lock); 1238 return; 1239 } 1240 1241 if (wci_find_ent(func)) { 1242 raw_spin_unlock(&wci_lock); 1243 goto restart; 1244 } 1245 1246 ent = &wci_ents[wci_nr_ents++]; 1247 ent->func = func; 1248 atomic64_set(&ent->cnt, 1); 1249 hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func); 1250 1251 raw_spin_unlock(&wci_lock); 1252 } 1253 1254 #else /* CONFIG_WQ_CPU_INTENSIVE_REPORT */ 1255 static void wq_cpu_intensive_report(work_func_t func) {} 1256 #endif /* CONFIG_WQ_CPU_INTENSIVE_REPORT */ 1257 1258 /** 1259 * wq_worker_running - a worker is running again 1260 * @task: task waking up 1261 * 1262 * This function is called when a worker returns from schedule() 1263 */ 1264 void wq_worker_running(struct task_struct *task) 1265 { 1266 struct worker *worker = kthread_data(task); 1267 1268 if (!READ_ONCE(worker->sleeping)) 1269 return; 1270 1271 /* 1272 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check 1273 * and the nr_running increment below, we may ruin the nr_running reset 1274 * and leave with an unexpected pool->nr_running == 1 on the newly unbound 1275 * pool. Protect against such race. 1276 */ 1277 preempt_disable(); 1278 if (!(worker->flags & WORKER_NOT_RUNNING)) 1279 worker->pool->nr_running++; 1280 preempt_enable(); 1281 1282 /* 1283 * CPU intensive auto-detection cares about how long a work item hogged 1284 * CPU without sleeping. Reset the starting timestamp on wakeup. 1285 */ 1286 worker->current_at = worker->task->se.sum_exec_runtime; 1287 1288 WRITE_ONCE(worker->sleeping, 0); 1289 } 1290 1291 /** 1292 * wq_worker_sleeping - a worker is going to sleep 1293 * @task: task going to sleep 1294 * 1295 * This function is called from schedule() when a busy worker is 1296 * going to sleep. 
1297 */ 1298 void wq_worker_sleeping(struct task_struct *task) 1299 { 1300 struct worker *worker = kthread_data(task); 1301 struct worker_pool *pool; 1302 1303 /* 1304 * Rescuers, which may not have all the fields set up like normal 1305 * workers, also reach here, let's not access anything before 1306 * checking NOT_RUNNING. 1307 */ 1308 if (worker->flags & WORKER_NOT_RUNNING) 1309 return; 1310 1311 pool = worker->pool; 1312 1313 /* Return if preempted before wq_worker_running() was reached */ 1314 if (READ_ONCE(worker->sleeping)) 1315 return; 1316 1317 WRITE_ONCE(worker->sleeping, 1); 1318 raw_spin_lock_irq(&pool->lock); 1319 1320 /* 1321 * Recheck in case unbind_workers() preempted us. We don't 1322 * want to decrement nr_running after the worker is unbound 1323 * and nr_running has been reset. 1324 */ 1325 if (worker->flags & WORKER_NOT_RUNNING) { 1326 raw_spin_unlock_irq(&pool->lock); 1327 return; 1328 } 1329 1330 pool->nr_running--; 1331 if (kick_pool(pool)) 1332 worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++; 1333 1334 raw_spin_unlock_irq(&pool->lock); 1335 } 1336 1337 /** 1338 * wq_worker_tick - a scheduler tick occurred while a kworker is running 1339 * @task: task currently running 1340 * 1341 * Called from scheduler_tick(). We're in the IRQ context and the current 1342 * worker's fields which follow the 'K' locking rule can be accessed safely. 1343 */ 1344 void wq_worker_tick(struct task_struct *task) 1345 { 1346 struct worker *worker = kthread_data(task); 1347 struct pool_workqueue *pwq = worker->current_pwq; 1348 struct worker_pool *pool = worker->pool; 1349 1350 if (!pwq) 1351 return; 1352 1353 pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC; 1354 1355 if (!wq_cpu_intensive_thresh_us) 1356 return; 1357 1358 /* 1359 * If the current worker is concurrency managed and hogged the CPU for 1360 * longer than wq_cpu_intensive_thresh_us, it's automatically marked 1361 * CPU_INTENSIVE to avoid stalling other concurrency-managed work items. 1362 * 1363 * Set @worker->sleeping means that @worker is in the process of 1364 * switching out voluntarily and won't be contributing to 1365 * @pool->nr_running until it wakes up. As wq_worker_sleeping() also 1366 * decrements ->nr_running, setting CPU_INTENSIVE here can lead to 1367 * double decrements. The task is releasing the CPU anyway. Let's skip. 1368 * We probably want to make this prettier in the future. 1369 */ 1370 if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) || 1371 worker->task->se.sum_exec_runtime - worker->current_at < 1372 wq_cpu_intensive_thresh_us * NSEC_PER_USEC) 1373 return; 1374 1375 raw_spin_lock(&pool->lock); 1376 1377 worker_set_flags(worker, WORKER_CPU_INTENSIVE); 1378 wq_cpu_intensive_report(worker->current_func); 1379 pwq->stats[PWQ_STAT_CPU_INTENSIVE]++; 1380 1381 if (kick_pool(pool)) 1382 pwq->stats[PWQ_STAT_CM_WAKEUP]++; 1383 1384 raw_spin_unlock(&pool->lock); 1385 } 1386 1387 /** 1388 * wq_worker_last_func - retrieve worker's last work function 1389 * @task: Task to retrieve last work function of. 1390 * 1391 * Determine the last function a worker executed. This is called from 1392 * the scheduler to get a worker's last known identity. 1393 * 1394 * CONTEXT: 1395 * raw_spin_lock_irq(rq->lock) 1396 * 1397 * This function is called during schedule() when a kworker is going 1398 * to sleep. It's used by psi to identify aggregation workers during 1399 * dequeuing, to allow periodic aggregation to shut-off when that 1400 * worker is the last task in the system or cgroup to go to sleep. 
1401 * 1402 * As this function doesn't involve any workqueue-related locking, it 1403 * only returns stable values when called from inside the scheduler's 1404 * queuing and dequeuing paths, when @task, which must be a kworker, 1405 * is guaranteed to not be processing any works. 1406 * 1407 * Return: 1408 * The last work function %current executed as a worker, NULL if it 1409 * hasn't executed any work yet. 1410 */ 1411 work_func_t wq_worker_last_func(struct task_struct *task) 1412 { 1413 struct worker *worker = kthread_data(task); 1414 1415 return worker->last_func; 1416 } 1417 1418 /** 1419 * get_pwq - get an extra reference on the specified pool_workqueue 1420 * @pwq: pool_workqueue to get 1421 * 1422 * Obtain an extra reference on @pwq. The caller should guarantee that 1423 * @pwq has positive refcnt and be holding the matching pool->lock. 1424 */ 1425 static void get_pwq(struct pool_workqueue *pwq) 1426 { 1427 lockdep_assert_held(&pwq->pool->lock); 1428 WARN_ON_ONCE(pwq->refcnt <= 0); 1429 pwq->refcnt++; 1430 } 1431 1432 /** 1433 * put_pwq - put a pool_workqueue reference 1434 * @pwq: pool_workqueue to put 1435 * 1436 * Drop a reference of @pwq. If its refcnt reaches zero, schedule its 1437 * destruction. The caller should be holding the matching pool->lock. 1438 */ 1439 static void put_pwq(struct pool_workqueue *pwq) 1440 { 1441 lockdep_assert_held(&pwq->pool->lock); 1442 if (likely(--pwq->refcnt)) 1443 return; 1444 /* 1445 * @pwq can't be released under pool->lock, bounce to a dedicated 1446 * kthread_worker to avoid A-A deadlocks. 1447 */ 1448 kthread_queue_work(pwq_release_worker, &pwq->release_work); 1449 } 1450 1451 /** 1452 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock 1453 * @pwq: pool_workqueue to put (can be %NULL) 1454 * 1455 * put_pwq() with locking. This function also allows %NULL @pwq. 1456 */ 1457 static void put_pwq_unlocked(struct pool_workqueue *pwq) 1458 { 1459 if (pwq) { 1460 /* 1461 * As both pwqs and pools are RCU protected, the 1462 * following lock operations are safe. 1463 */ 1464 raw_spin_lock_irq(&pwq->pool->lock); 1465 put_pwq(pwq); 1466 raw_spin_unlock_irq(&pwq->pool->lock); 1467 } 1468 } 1469 1470 static bool pwq_is_empty(struct pool_workqueue *pwq) 1471 { 1472 return !pwq->nr_active && list_empty(&pwq->inactive_works); 1473 } 1474 1475 static void __pwq_activate_work(struct pool_workqueue *pwq, 1476 struct work_struct *work) 1477 { 1478 unsigned long *wdb = work_data_bits(work); 1479 1480 WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INACTIVE)); 1481 trace_workqueue_activate_work(work); 1482 if (list_empty(&pwq->pool->worklist)) 1483 pwq->pool->watchdog_ts = jiffies; 1484 move_linked_works(work, &pwq->pool->worklist, NULL); 1485 __clear_bit(WORK_STRUCT_INACTIVE_BIT, wdb); 1486 } 1487 1488 /** 1489 * pwq_activate_work - Activate a work item if inactive 1490 * @pwq: pool_workqueue @work belongs to 1491 * @work: work item to activate 1492 * 1493 * Returns %true if activated. %false if already active. 1494 */ 1495 static bool pwq_activate_work(struct pool_workqueue *pwq, 1496 struct work_struct *work) 1497 { 1498 struct worker_pool *pool = pwq->pool; 1499 1500 lockdep_assert_held(&pool->lock); 1501 1502 if (!(*work_data_bits(work) & WORK_STRUCT_INACTIVE)) 1503 return false; 1504 1505 pwq->nr_active++; 1506 __pwq_activate_work(pwq, work); 1507 return true; 1508 } 1509 1510 /** 1511 * pwq_tryinc_nr_active - Try to increment nr_active for a pwq 1512 * @pwq: pool_workqueue of interest 1513 * 1514 * Try to increment nr_active for @pwq. 
Returns %true if an nr_active count is 1515 * successfully obtained. %false otherwise. 1516 */ 1517 static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq) 1518 { 1519 struct workqueue_struct *wq = pwq->wq; 1520 struct worker_pool *pool = pwq->pool; 1521 bool obtained; 1522 1523 lockdep_assert_held(&pool->lock); 1524 1525 obtained = pwq->nr_active < READ_ONCE(wq->max_active); 1526 1527 if (obtained) 1528 pwq->nr_active++; 1529 return obtained; 1530 } 1531 1532 /** 1533 * pwq_activate_first_inactive - Activate the first inactive work item on a pwq 1534 * @pwq: pool_workqueue of interest 1535 * 1536 * Activate the first inactive work item of @pwq if available and allowed by 1537 * max_active limit. 1538 * 1539 * Returns %true if an inactive work item has been activated. %false if no 1540 * inactive work item is found or max_active limit is reached. 1541 */ 1542 static bool pwq_activate_first_inactive(struct pool_workqueue *pwq) 1543 { 1544 struct work_struct *work = 1545 list_first_entry_or_null(&pwq->inactive_works, 1546 struct work_struct, entry); 1547 1548 if (work && pwq_tryinc_nr_active(pwq)) { 1549 __pwq_activate_work(pwq, work); 1550 return true; 1551 } else { 1552 return false; 1553 } 1554 } 1555 1556 /** 1557 * pwq_dec_nr_active - Retire an active count 1558 * @pwq: pool_workqueue of interest 1559 * 1560 * Decrement @pwq's nr_active and try to activate the first inactive work item. 1561 */ 1562 static void pwq_dec_nr_active(struct pool_workqueue *pwq) 1563 { 1564 struct worker_pool *pool = pwq->pool; 1565 1566 lockdep_assert_held(&pool->lock); 1567 1568 pwq->nr_active--; 1569 pwq_activate_first_inactive(pwq); 1570 } 1571 1572 /** 1573 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight 1574 * @pwq: pwq of interest 1575 * @work_data: work_data of work which left the queue 1576 * 1577 * A work either has completed or is removed from pending queue, 1578 * decrement nr_in_flight of its pwq and handle workqueue flushing. 1579 * 1580 * CONTEXT: 1581 * raw_spin_lock_irq(pool->lock). 1582 */ 1583 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data) 1584 { 1585 int color = get_work_color(work_data); 1586 1587 if (!(work_data & WORK_STRUCT_INACTIVE)) 1588 pwq_dec_nr_active(pwq); 1589 1590 pwq->nr_in_flight[color]--; 1591 1592 /* is flush in progress and are we at the flushing tip? */ 1593 if (likely(pwq->flush_color != color)) 1594 goto out_put; 1595 1596 /* are there still in-flight works? */ 1597 if (pwq->nr_in_flight[color]) 1598 goto out_put; 1599 1600 /* this pwq is done, clear flush_color */ 1601 pwq->flush_color = -1; 1602 1603 /* 1604 * If this was the last pwq, wake up the first flusher. It 1605 * will handle the rest. 1606 */ 1607 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) 1608 complete(&pwq->wq->first_flusher->done); 1609 out_put: 1610 put_pwq(pwq); 1611 } 1612 1613 /** 1614 * try_to_grab_pending - steal work item from worklist and disable irq 1615 * @work: work item to steal 1616 * @is_dwork: @work is a delayed_work 1617 * @flags: place to store irq state 1618 * 1619 * Try to grab PENDING bit of @work. This function can handle @work in any 1620 * stable state - idle, on timer or on worklist. 
1621 * 1622 * Return: 1623 * 1624 * ======== ================================================================ 1625 * 1 if @work was pending and we successfully stole PENDING 1626 * 0 if @work was idle and we claimed PENDING 1627 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry 1628 * -ENOENT if someone else is canceling @work, this state may persist 1629 * for arbitrarily long 1630 * ======== ================================================================ 1631 * 1632 * Note: 1633 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting 1634 * interrupted while holding PENDING and @work off queue, irq must be 1635 * disabled on entry. This, combined with delayed_work->timer being 1636 * irqsafe, ensures that we return -EAGAIN for finite short period of time. 1637 * 1638 * On successful return, >= 0, irq is disabled and the caller is 1639 * responsible for releasing it using local_irq_restore(*@flags). 1640 * 1641 * This function is safe to call from any context including IRQ handler. 1642 */ 1643 static int try_to_grab_pending(struct work_struct *work, bool is_dwork, 1644 unsigned long *flags) 1645 { 1646 struct worker_pool *pool; 1647 struct pool_workqueue *pwq; 1648 1649 local_irq_save(*flags); 1650 1651 /* try to steal the timer if it exists */ 1652 if (is_dwork) { 1653 struct delayed_work *dwork = to_delayed_work(work); 1654 1655 /* 1656 * dwork->timer is irqsafe. If del_timer() fails, it's 1657 * guaranteed that the timer is not queued anywhere and not 1658 * running on the local CPU. 1659 */ 1660 if (likely(del_timer(&dwork->timer))) 1661 return 1; 1662 } 1663 1664 /* try to claim PENDING the normal way */ 1665 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) 1666 return 0; 1667 1668 rcu_read_lock(); 1669 /* 1670 * The queueing is in progress, or it is already queued. Try to 1671 * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 1672 */ 1673 pool = get_work_pool(work); 1674 if (!pool) 1675 goto fail; 1676 1677 raw_spin_lock(&pool->lock); 1678 /* 1679 * work->data is guaranteed to point to pwq only while the work 1680 * item is queued on pwq->wq, and both updating work->data to point 1681 * to pwq on queueing and to pool on dequeueing are done under 1682 * pwq->pool->lock. This in turn guarantees that, if work->data 1683 * points to pwq which is associated with a locked pool, the work 1684 * item is currently queued on that pool. 1685 */ 1686 pwq = get_work_pwq(work); 1687 if (pwq && pwq->pool == pool) { 1688 debug_work_deactivate(work); 1689 1690 /* 1691 * A cancelable inactive work item must be in the 1692 * pwq->inactive_works since a queued barrier can't be 1693 * canceled (see the comments in insert_wq_barrier()). 1694 * 1695 * An inactive work item cannot be grabbed directly because 1696 * it might have linked barrier work items which, if left 1697 * on the inactive_works list, will confuse pwq->nr_active 1698 * management later on and cause stall. Make sure the work 1699 * item is activated before grabbing. 
1700 */ 1701 pwq_activate_work(pwq, work); 1702 1703 list_del_init(&work->entry); 1704 pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); 1705 1706 /* work->data points to pwq iff queued, point to pool */ 1707 set_work_pool_and_keep_pending(work, pool->id); 1708 1709 raw_spin_unlock(&pool->lock); 1710 rcu_read_unlock(); 1711 return 1; 1712 } 1713 raw_spin_unlock(&pool->lock); 1714 fail: 1715 rcu_read_unlock(); 1716 local_irq_restore(*flags); 1717 if (work_is_canceling(work)) 1718 return -ENOENT; 1719 cpu_relax(); 1720 return -EAGAIN; 1721 } 1722 1723 /** 1724 * insert_work - insert a work into a pool 1725 * @pwq: pwq @work belongs to 1726 * @work: work to insert 1727 * @head: insertion point 1728 * @extra_flags: extra WORK_STRUCT_* flags to set 1729 * 1730 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to 1731 * work_struct flags. 1732 * 1733 * CONTEXT: 1734 * raw_spin_lock_irq(pool->lock). 1735 */ 1736 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, 1737 struct list_head *head, unsigned int extra_flags) 1738 { 1739 debug_work_activate(work); 1740 1741 /* record the work call stack in order to print it in KASAN reports */ 1742 kasan_record_aux_stack_noalloc(work); 1743 1744 /* we own @work, set data and link */ 1745 set_work_pwq(work, pwq, extra_flags); 1746 list_add_tail(&work->entry, head); 1747 get_pwq(pwq); 1748 } 1749 1750 /* 1751 * Test whether @work is being queued from another work executing on the 1752 * same workqueue. 1753 */ 1754 static bool is_chained_work(struct workqueue_struct *wq) 1755 { 1756 struct worker *worker; 1757 1758 worker = current_wq_worker(); 1759 /* 1760 * Return %true iff I'm a worker executing a work item on @wq. If 1761 * I'm @worker, it's safe to dereference it without locking. 1762 */ 1763 return worker && worker->current_pwq->wq == wq; 1764 } 1765 1766 /* 1767 * When queueing an unbound work item to a wq, prefer local CPU if allowed 1768 * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to 1769 * avoid perturbing sensitive tasks. 1770 */ 1771 static int wq_select_unbound_cpu(int cpu) 1772 { 1773 int new_cpu; 1774 1775 if (likely(!wq_debug_force_rr_cpu)) { 1776 if (cpumask_test_cpu(cpu, wq_unbound_cpumask)) 1777 return cpu; 1778 } else { 1779 pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n"); 1780 } 1781 1782 new_cpu = __this_cpu_read(wq_rr_cpu_last); 1783 new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask); 1784 if (unlikely(new_cpu >= nr_cpu_ids)) { 1785 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask); 1786 if (unlikely(new_cpu >= nr_cpu_ids)) 1787 return cpu; 1788 } 1789 __this_cpu_write(wq_rr_cpu_last, new_cpu); 1790 1791 return new_cpu; 1792 } 1793 1794 static void __queue_work(int cpu, struct workqueue_struct *wq, 1795 struct work_struct *work) 1796 { 1797 struct pool_workqueue *pwq; 1798 struct worker_pool *last_pool, *pool; 1799 unsigned int work_flags; 1800 unsigned int req_cpu = cpu; 1801 1802 /* 1803 * While a work item is PENDING && off queue, a task trying to 1804 * steal the PENDING will busy-loop waiting for it to either get 1805 * queued or lose PENDING. Grabbing PENDING and queueing should 1806 * happen with IRQ disabled. 1807 */ 1808 lockdep_assert_irqs_disabled(); 1809 1810 1811 /* 1812 * For a draining wq, only works from the same workqueue are 1813 * allowed. The __WQ_DESTROYING helps to spot the issue that 1814 * queues a new work item to a wq after destroy_workqueue(wq). 
1815 */ 1816 if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) && 1817 WARN_ON_ONCE(!is_chained_work(wq)))) 1818 return; 1819 rcu_read_lock(); 1820 retry: 1821 /* pwq which will be used unless @work is executing elsewhere */ 1822 if (req_cpu == WORK_CPU_UNBOUND) { 1823 if (wq->flags & WQ_UNBOUND) 1824 cpu = wq_select_unbound_cpu(raw_smp_processor_id()); 1825 else 1826 cpu = raw_smp_processor_id(); 1827 } 1828 1829 pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu)); 1830 pool = pwq->pool; 1831 1832 /* 1833 * If @work was previously on a different pool, it might still be 1834 * running there, in which case the work needs to be queued on that 1835 * pool to guarantee non-reentrancy. 1836 */ 1837 last_pool = get_work_pool(work); 1838 if (last_pool && last_pool != pool) { 1839 struct worker *worker; 1840 1841 raw_spin_lock(&last_pool->lock); 1842 1843 worker = find_worker_executing_work(last_pool, work); 1844 1845 if (worker && worker->current_pwq->wq == wq) { 1846 pwq = worker->current_pwq; 1847 pool = pwq->pool; 1848 WARN_ON_ONCE(pool != last_pool); 1849 } else { 1850 /* meh... not running there, queue here */ 1851 raw_spin_unlock(&last_pool->lock); 1852 raw_spin_lock(&pool->lock); 1853 } 1854 } else { 1855 raw_spin_lock(&pool->lock); 1856 } 1857 1858 /* 1859 * pwq is determined and locked. For unbound pools, we could have raced 1860 * with pwq release and it could already be dead. If its refcnt is zero, 1861 * repeat pwq selection. Note that unbound pwqs never die without 1862 * another pwq replacing it in cpu_pwq or while work items are executing 1863 * on it, so the retrying is guaranteed to make forward-progress. 1864 */ 1865 if (unlikely(!pwq->refcnt)) { 1866 if (wq->flags & WQ_UNBOUND) { 1867 raw_spin_unlock(&pool->lock); 1868 cpu_relax(); 1869 goto retry; 1870 } 1871 /* oops */ 1872 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt", 1873 wq->name, cpu); 1874 } 1875 1876 /* pwq determined, queue */ 1877 trace_workqueue_queue_work(req_cpu, pwq, work); 1878 1879 if (WARN_ON(!list_empty(&work->entry))) 1880 goto out; 1881 1882 pwq->nr_in_flight[pwq->work_color]++; 1883 work_flags = work_color_to_flags(pwq->work_color); 1884 1885 /* 1886 * Limit the number of concurrently active work items to max_active. 1887 * @work must also queue behind existing inactive work items to maintain 1888 * ordering when max_active changes. See wq_adjust_max_active(). 1889 */ 1890 if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq)) { 1891 if (list_empty(&pool->worklist)) 1892 pool->watchdog_ts = jiffies; 1893 1894 trace_workqueue_activate_work(work); 1895 insert_work(pwq, work, &pool->worklist, work_flags); 1896 kick_pool(pool); 1897 } else { 1898 work_flags |= WORK_STRUCT_INACTIVE; 1899 insert_work(pwq, work, &pwq->inactive_works, work_flags); 1900 } 1901 1902 out: 1903 raw_spin_unlock(&pool->lock); 1904 rcu_read_unlock(); 1905 } 1906 1907 /** 1908 * queue_work_on - queue work on specific cpu 1909 * @cpu: CPU number to execute work on 1910 * @wq: workqueue to use 1911 * @work: work to queue 1912 * 1913 * We queue the work to a specific CPU, the caller must ensure it 1914 * can't go away. Callers that fail to ensure that the specified 1915 * CPU cannot go away will execute on a randomly chosen CPU. 1916 * But note well that callers specifying a CPU that never has been 1917 * online will get a splat. 1918 * 1919 * Return: %false if @work was already on a queue, %true otherwise. 
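 *
 * Minimal usage sketch (illustrative only - my_work_fn and my_work are
 * hypothetical and not part of this file), assuming the caller guarantees
 * that CPU 2 stays online:
 *
 *	static void my_work_fn(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work_on(2, system_wq, &my_work);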
1920 */
1921 bool queue_work_on(int cpu, struct workqueue_struct *wq,
1922 struct work_struct *work)
1923 {
1924 bool ret = false;
1925 unsigned long flags;
1926
1927 local_irq_save(flags);
1928
1929 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1930 __queue_work(cpu, wq, work);
1931 ret = true;
1932 }
1933
1934 local_irq_restore(flags);
1935 return ret;
1936 }
1937 EXPORT_SYMBOL(queue_work_on);
1938
1939 /**
1940 * select_numa_node_cpu - Select a CPU based on NUMA node
1941 * @node: NUMA node ID that we want to select a CPU from
1942 *
1943 * This function will attempt to find a "random" cpu available on a given
1944 * node. If there are no CPUs available on the given node it will return
1945 * WORK_CPU_UNBOUND indicating that we should just schedule to any
1946 * available CPU if we need to schedule this work.
1947 */
1948 static int select_numa_node_cpu(int node)
1949 {
1950 int cpu;
1951
1952 /* Delay binding to CPU if node is not valid or online */
1953 if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
1954 return WORK_CPU_UNBOUND;
1955
1956 /* Use local node/cpu if we are already there */
1957 cpu = raw_smp_processor_id();
1958 if (node == cpu_to_node(cpu))
1959 return cpu;
1960
1961 /* Use "random" otherwise known as "first" online CPU of node */
1962 cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
1963
1964 /* If CPU is valid return that, otherwise just defer */
1965 return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
1966 }
1967
1968 /**
1969 * queue_work_node - queue work on a "random" cpu for a given NUMA node
1970 * @node: NUMA node that we are targeting the work for
1971 * @wq: workqueue to use
1972 * @work: work to queue
1973 *
1974 * We queue the work to a "random" CPU within a given NUMA node. The basic
1975 * idea here is to provide a way to somehow associate work with a given
1976 * NUMA node.
1977 *
1978 * This function will only make a best effort attempt at getting this onto
1979 * the right NUMA node. If no node is requested or the requested node is
1980 * offline then we just fall back to standard queue_work behavior.
1981 *
1982 * Currently the "random" CPU ends up being the first available CPU in the
1983 * intersection of cpu_online_mask and the cpumask of the node, unless we
1984 * are running on the node. In that case we just use the current CPU.
1985 *
1986 * Return: %false if @work was already on a queue, %true otherwise.
1987 */
1988 bool queue_work_node(int node, struct workqueue_struct *wq,
1989 struct work_struct *work)
1990 {
1991 unsigned long flags;
1992 bool ret = false;
1993
1994 /*
1995 * This current implementation is specific to unbound workqueues.
1996 * Specifically we only return the first available CPU for a given
1997 * node instead of cycling through individual CPUs within the node.
1998 *
1999 * If this is used with a per-cpu workqueue then the logic in
2000 * select_numa_node_cpu() would need to be updated to allow for
2001 * some round robin type logic.
2002 */ 2003 WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)); 2004 2005 local_irq_save(flags); 2006 2007 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 2008 int cpu = select_numa_node_cpu(node); 2009 2010 __queue_work(cpu, wq, work); 2011 ret = true; 2012 } 2013 2014 local_irq_restore(flags); 2015 return ret; 2016 } 2017 EXPORT_SYMBOL_GPL(queue_work_node); 2018 2019 void delayed_work_timer_fn(struct timer_list *t) 2020 { 2021 struct delayed_work *dwork = from_timer(dwork, t, timer); 2022 2023 /* should have been called from irqsafe timer with irq already off */ 2024 __queue_work(dwork->cpu, dwork->wq, &dwork->work); 2025 } 2026 EXPORT_SYMBOL(delayed_work_timer_fn); 2027 2028 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, 2029 struct delayed_work *dwork, unsigned long delay) 2030 { 2031 struct timer_list *timer = &dwork->timer; 2032 struct work_struct *work = &dwork->work; 2033 2034 WARN_ON_ONCE(!wq); 2035 WARN_ON_ONCE(timer->function != delayed_work_timer_fn); 2036 WARN_ON_ONCE(timer_pending(timer)); 2037 WARN_ON_ONCE(!list_empty(&work->entry)); 2038 2039 /* 2040 * If @delay is 0, queue @dwork->work immediately. This is for 2041 * both optimization and correctness. The earliest @timer can 2042 * expire is on the closest next tick and delayed_work users depend 2043 * on that there's no such delay when @delay is 0. 2044 */ 2045 if (!delay) { 2046 __queue_work(cpu, wq, &dwork->work); 2047 return; 2048 } 2049 2050 dwork->wq = wq; 2051 dwork->cpu = cpu; 2052 timer->expires = jiffies + delay; 2053 2054 if (unlikely(cpu != WORK_CPU_UNBOUND)) 2055 add_timer_on(timer, cpu); 2056 else 2057 add_timer(timer); 2058 } 2059 2060 /** 2061 * queue_delayed_work_on - queue work on specific CPU after delay 2062 * @cpu: CPU number to execute work on 2063 * @wq: workqueue to use 2064 * @dwork: work to queue 2065 * @delay: number of jiffies to wait before queueing 2066 * 2067 * Return: %false if @work was already on a queue, %true otherwise. If 2068 * @delay is zero and @dwork is idle, it will be scheduled for immediate 2069 * execution. 2070 */ 2071 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 2072 struct delayed_work *dwork, unsigned long delay) 2073 { 2074 struct work_struct *work = &dwork->work; 2075 bool ret = false; 2076 unsigned long flags; 2077 2078 /* read the comment in __queue_work() */ 2079 local_irq_save(flags); 2080 2081 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 2082 __queue_delayed_work(cpu, wq, dwork, delay); 2083 ret = true; 2084 } 2085 2086 local_irq_restore(flags); 2087 return ret; 2088 } 2089 EXPORT_SYMBOL(queue_delayed_work_on); 2090 2091 /** 2092 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU 2093 * @cpu: CPU number to execute work on 2094 * @wq: workqueue to use 2095 * @dwork: work to queue 2096 * @delay: number of jiffies to wait before queueing 2097 * 2098 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise, 2099 * modify @dwork's timer so that it expires after @delay. If @delay is 2100 * zero, @work is guaranteed to be scheduled immediately regardless of its 2101 * current state. 2102 * 2103 * Return: %false if @dwork was idle and queued, %true if @dwork was 2104 * pending and its timer was modified. 2105 * 2106 * This function is safe to call from any context including IRQ handler. 2107 * See try_to_grab_pending() for details. 
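 *
 * Illustrative debounce sketch (my_dwork is a hypothetical delayed_work
 * initialized elsewhere); every call pushes the expiry back so the callback
 * runs roughly 100ms after the last event:
 *
 *	mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &my_dwork,
 *			    msecs_to_jiffies(100));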
2108 */ 2109 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 2110 struct delayed_work *dwork, unsigned long delay) 2111 { 2112 unsigned long flags; 2113 int ret; 2114 2115 do { 2116 ret = try_to_grab_pending(&dwork->work, true, &flags); 2117 } while (unlikely(ret == -EAGAIN)); 2118 2119 if (likely(ret >= 0)) { 2120 __queue_delayed_work(cpu, wq, dwork, delay); 2121 local_irq_restore(flags); 2122 } 2123 2124 /* -ENOENT from try_to_grab_pending() becomes %true */ 2125 return ret; 2126 } 2127 EXPORT_SYMBOL_GPL(mod_delayed_work_on); 2128 2129 static void rcu_work_rcufn(struct rcu_head *rcu) 2130 { 2131 struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu); 2132 2133 /* read the comment in __queue_work() */ 2134 local_irq_disable(); 2135 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); 2136 local_irq_enable(); 2137 } 2138 2139 /** 2140 * queue_rcu_work - queue work after a RCU grace period 2141 * @wq: workqueue to use 2142 * @rwork: work to queue 2143 * 2144 * Return: %false if @rwork was already pending, %true otherwise. Note 2145 * that a full RCU grace period is guaranteed only after a %true return. 2146 * While @rwork is guaranteed to be executed after a %false return, the 2147 * execution may happen before a full RCU grace period has passed. 2148 */ 2149 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork) 2150 { 2151 struct work_struct *work = &rwork->work; 2152 2153 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 2154 rwork->wq = wq; 2155 call_rcu_hurry(&rwork->rcu, rcu_work_rcufn); 2156 return true; 2157 } 2158 2159 return false; 2160 } 2161 EXPORT_SYMBOL(queue_rcu_work); 2162 2163 static struct worker *alloc_worker(int node) 2164 { 2165 struct worker *worker; 2166 2167 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node); 2168 if (worker) { 2169 INIT_LIST_HEAD(&worker->entry); 2170 INIT_LIST_HEAD(&worker->scheduled); 2171 INIT_LIST_HEAD(&worker->node); 2172 /* on creation a worker is in !idle && prep state */ 2173 worker->flags = WORKER_PREP; 2174 } 2175 return worker; 2176 } 2177 2178 static cpumask_t *pool_allowed_cpus(struct worker_pool *pool) 2179 { 2180 if (pool->cpu < 0 && pool->attrs->affn_strict) 2181 return pool->attrs->__pod_cpumask; 2182 else 2183 return pool->attrs->cpumask; 2184 } 2185 2186 /** 2187 * worker_attach_to_pool() - attach a worker to a pool 2188 * @worker: worker to be attached 2189 * @pool: the target pool 2190 * 2191 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and 2192 * cpu-binding of @worker are kept coordinated with the pool across 2193 * cpu-[un]hotplugs. 2194 */ 2195 static void worker_attach_to_pool(struct worker *worker, 2196 struct worker_pool *pool) 2197 { 2198 mutex_lock(&wq_pool_attach_mutex); 2199 2200 /* 2201 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains 2202 * stable across this function. See the comments above the flag 2203 * definition for details. 
2204 */
2205 if (pool->flags & POOL_DISASSOCIATED)
2206 worker->flags |= WORKER_UNBOUND;
2207 else
2208 kthread_set_per_cpu(worker->task, pool->cpu);
2209
2210 if (worker->rescue_wq)
2211 set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool));
2212
2213 list_add_tail(&worker->node, &pool->workers);
2214 worker->pool = pool;
2215
2216 mutex_unlock(&wq_pool_attach_mutex);
2217 }
2218
2219 /**
2220 * worker_detach_from_pool() - detach a worker from its pool
2221 * @worker: worker which is attached to its pool
2222 *
2223 * Undo the attaching which had been done in worker_attach_to_pool(). The
2224 * calling worker shouldn't access the pool after detaching unless it holds
2225 * another reference to the pool.
2226 */
2227 static void worker_detach_from_pool(struct worker *worker)
2228 {
2229 struct worker_pool *pool = worker->pool;
2230 struct completion *detach_completion = NULL;
2231
2232 mutex_lock(&wq_pool_attach_mutex);
2233
2234 kthread_set_per_cpu(worker->task, -1);
2235 list_del(&worker->node);
2236 worker->pool = NULL;
2237
2238 if (list_empty(&pool->workers) && list_empty(&pool->dying_workers))
2239 detach_completion = pool->detach_completion;
2240 mutex_unlock(&wq_pool_attach_mutex);
2241
2242 /* clear leftover flags without pool->lock after it is detached */
2243 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
2244
2245 if (detach_completion)
2246 complete(detach_completion);
2247 }
2248
2249 /**
2250 * create_worker - create a new workqueue worker
2251 * @pool: pool the new worker will belong to
2252 *
2253 * Create and start a new worker which is attached to @pool.
2254 *
2255 * CONTEXT:
2256 * Might sleep. Does GFP_KERNEL allocations.
2257 *
2258 * Return:
2259 * Pointer to the newly created worker.
2260 */
2261 static struct worker *create_worker(struct worker_pool *pool)
2262 {
2263 struct worker *worker;
2264 int id;
2265 char id_buf[23];
2266
2267 /* ID is needed to determine kthread name */
2268 id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
2269 if (id < 0) {
2270 pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n",
2271 ERR_PTR(id));
2272 return NULL;
2273 }
2274
2275 worker = alloc_worker(pool->node);
2276 if (!worker) {
2277 pr_err_once("workqueue: Failed to allocate a worker\n");
2278 goto fail;
2279 }
2280
2281 worker->id = id;
2282
2283 if (pool->cpu >= 0)
2284 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
2285 pool->attrs->nice < 0 ? "H" : "");
2286 else
2287 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
2288
2289 worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
2290 "kworker/%s", id_buf);
2291 if (IS_ERR(worker->task)) {
2292 if (PTR_ERR(worker->task) == -EINTR) {
2293 pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n",
2294 id_buf);
2295 } else {
2296 pr_err_once("workqueue: Failed to create a worker thread: %pe",
2297 worker->task);
2298 }
2299 goto fail;
2300 }
2301
2302 set_user_nice(worker->task, pool->attrs->nice);
2303 kthread_bind_mask(worker->task, pool_allowed_cpus(pool));
2304
2305 /* successful, attach the worker to the pool */
2306 worker_attach_to_pool(worker, pool);
2307
2308 /* start the newly created worker */
2309 raw_spin_lock_irq(&pool->lock);
2310
2311 worker->pool->nr_workers++;
2312 worker_enter_idle(worker);
2313 kick_pool(pool);
2314
2315 /*
2316 * @worker is waiting on a completion in kthread() and will trigger hung
2317 * check if not woken up soon. As kick_pool() might not have woken it
2318 * up, wake it up explicitly once more.
2319 */ 2320 wake_up_process(worker->task); 2321 2322 raw_spin_unlock_irq(&pool->lock); 2323 2324 return worker; 2325 2326 fail: 2327 ida_free(&pool->worker_ida, id); 2328 kfree(worker); 2329 return NULL; 2330 } 2331 2332 static void unbind_worker(struct worker *worker) 2333 { 2334 lockdep_assert_held(&wq_pool_attach_mutex); 2335 2336 kthread_set_per_cpu(worker->task, -1); 2337 if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask)) 2338 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0); 2339 else 2340 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0); 2341 } 2342 2343 static void wake_dying_workers(struct list_head *cull_list) 2344 { 2345 struct worker *worker, *tmp; 2346 2347 list_for_each_entry_safe(worker, tmp, cull_list, entry) { 2348 list_del_init(&worker->entry); 2349 unbind_worker(worker); 2350 /* 2351 * If the worker was somehow already running, then it had to be 2352 * in pool->idle_list when set_worker_dying() happened or we 2353 * wouldn't have gotten here. 2354 * 2355 * Thus, the worker must either have observed the WORKER_DIE 2356 * flag, or have set its state to TASK_IDLE. Either way, the 2357 * below will be observed by the worker and is safe to do 2358 * outside of pool->lock. 2359 */ 2360 wake_up_process(worker->task); 2361 } 2362 } 2363 2364 /** 2365 * set_worker_dying - Tag a worker for destruction 2366 * @worker: worker to be destroyed 2367 * @list: transfer worker away from its pool->idle_list and into list 2368 * 2369 * Tag @worker for destruction and adjust @pool stats accordingly. The worker 2370 * should be idle. 2371 * 2372 * CONTEXT: 2373 * raw_spin_lock_irq(pool->lock). 2374 */ 2375 static void set_worker_dying(struct worker *worker, struct list_head *list) 2376 { 2377 struct worker_pool *pool = worker->pool; 2378 2379 lockdep_assert_held(&pool->lock); 2380 lockdep_assert_held(&wq_pool_attach_mutex); 2381 2382 /* sanity check frenzy */ 2383 if (WARN_ON(worker->current_work) || 2384 WARN_ON(!list_empty(&worker->scheduled)) || 2385 WARN_ON(!(worker->flags & WORKER_IDLE))) 2386 return; 2387 2388 pool->nr_workers--; 2389 pool->nr_idle--; 2390 2391 worker->flags |= WORKER_DIE; 2392 2393 list_move(&worker->entry, list); 2394 list_move(&worker->node, &pool->dying_workers); 2395 } 2396 2397 /** 2398 * idle_worker_timeout - check if some idle workers can now be deleted. 2399 * @t: The pool's idle_timer that just expired 2400 * 2401 * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in 2402 * worker_leave_idle(), as a worker flicking between idle and active while its 2403 * pool is at the too_many_workers() tipping point would cause too much timer 2404 * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let 2405 * it expire and re-evaluate things from there. 
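 *
 * Also note that this callback only detects that culling is due; the actual
 * culling is done by idle_cull_fn() via pool->idle_cull_work (queued on
 * system_unbound_wq below) so that worker affinity can be adjusted from a
 * sleepable context.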
2406 */
2407 static void idle_worker_timeout(struct timer_list *t)
2408 {
2409 struct worker_pool *pool = from_timer(pool, t, idle_timer);
2410 bool do_cull = false;
2411
2412 if (work_pending(&pool->idle_cull_work))
2413 return;
2414
2415 raw_spin_lock_irq(&pool->lock);
2416
2417 if (too_many_workers(pool)) {
2418 struct worker *worker;
2419 unsigned long expires;
2420
2421 /* idle_list is kept in LIFO order, check the last one */
2422 worker = list_entry(pool->idle_list.prev, struct worker, entry);
2423 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2424 do_cull = !time_before(jiffies, expires);
2425
2426 if (!do_cull)
2427 mod_timer(&pool->idle_timer, expires);
2428 }
2429 raw_spin_unlock_irq(&pool->lock);
2430
2431 if (do_cull)
2432 queue_work(system_unbound_wq, &pool->idle_cull_work);
2433 }
2434
2435 /**
2436 * idle_cull_fn - cull workers that have been idle for too long.
2437 * @work: the pool's work for handling these idle workers
2438 *
2439 * This goes through a pool's idle workers and gets rid of those that have been
2440 * idle for at least IDLE_WORKER_TIMEOUT (5 minutes).
2441 *
2442 * We don't want to disturb isolated CPUs because of a pcpu kworker being
2443 * culled, so this also resets worker affinity. This requires a sleepable
2444 * context, hence the split between timer callback and work item.
2445 */
2446 static void idle_cull_fn(struct work_struct *work)
2447 {
2448 struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
2449 LIST_HEAD(cull_list);
2450
2451 /*
2452 * Grabbing wq_pool_attach_mutex here ensures an already-running worker
2453 * cannot proceed beyond worker_detach_from_pool() in its self-destruct
2454 * path. This is required as a previously-preempted worker could run after
2455 * set_worker_dying() has happened but before wake_dying_workers() did.
2456 */
2457 mutex_lock(&wq_pool_attach_mutex);
2458 raw_spin_lock_irq(&pool->lock);
2459
2460 while (too_many_workers(pool)) {
2461 struct worker *worker;
2462 unsigned long expires;
2463
2464 worker = list_entry(pool->idle_list.prev, struct worker, entry);
2465 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2466
2467 if (time_before(jiffies, expires)) {
2468 mod_timer(&pool->idle_timer, expires);
2469 break;
2470 }
2471
2472 set_worker_dying(worker, &cull_list);
2473 }
2474
2475 raw_spin_unlock_irq(&pool->lock);
2476 wake_dying_workers(&cull_list);
2477 mutex_unlock(&wq_pool_attach_mutex);
2478 }
2479
2480 static void send_mayday(struct work_struct *work)
2481 {
2482 struct pool_workqueue *pwq = get_work_pwq(work);
2483 struct workqueue_struct *wq = pwq->wq;
2484
2485 lockdep_assert_held(&wq_mayday_lock);
2486
2487 if (!wq->rescuer)
2488 return;
2489
2490 /* mayday mayday mayday */
2491 if (list_empty(&pwq->mayday_node)) {
2492 /*
2493 * If @pwq is for an unbound wq, its base ref may be put at
2494 * any time due to an attribute change. Pin @pwq until the
2495 * rescuer is done with it.
2496 */
2497 get_pwq(pwq);
2498 list_add_tail(&pwq->mayday_node, &wq->maydays);
2499 wake_up_process(wq->rescuer->task);
2500 pwq->stats[PWQ_STAT_MAYDAY]++;
2501 }
2502 }
2503
2504 static void pool_mayday_timeout(struct timer_list *t)
2505 {
2506 struct worker_pool *pool = from_timer(pool, t, mayday_timer);
2507 struct work_struct *work;
2508
2509 raw_spin_lock_irq(&pool->lock);
2510 raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */
2511
2512 if (need_to_create_worker(pool)) {
2513 /*
2514 * We've been trying to create a new worker but
2515 * haven't been successful.
We might be hitting an 2516 * allocation deadlock. Send distress signals to 2517 * rescuers. 2518 */ 2519 list_for_each_entry(work, &pool->worklist, entry) 2520 send_mayday(work); 2521 } 2522 2523 raw_spin_unlock(&wq_mayday_lock); 2524 raw_spin_unlock_irq(&pool->lock); 2525 2526 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 2527 } 2528 2529 /** 2530 * maybe_create_worker - create a new worker if necessary 2531 * @pool: pool to create a new worker for 2532 * 2533 * Create a new worker for @pool if necessary. @pool is guaranteed to 2534 * have at least one idle worker on return from this function. If 2535 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is 2536 * sent to all rescuers with works scheduled on @pool to resolve 2537 * possible allocation deadlock. 2538 * 2539 * On return, need_to_create_worker() is guaranteed to be %false and 2540 * may_start_working() %true. 2541 * 2542 * LOCKING: 2543 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2544 * multiple times. Does GFP_KERNEL allocations. Called only from 2545 * manager. 2546 */ 2547 static void maybe_create_worker(struct worker_pool *pool) 2548 __releases(&pool->lock) 2549 __acquires(&pool->lock) 2550 { 2551 restart: 2552 raw_spin_unlock_irq(&pool->lock); 2553 2554 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ 2555 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); 2556 2557 while (true) { 2558 if (create_worker(pool) || !need_to_create_worker(pool)) 2559 break; 2560 2561 schedule_timeout_interruptible(CREATE_COOLDOWN); 2562 2563 if (!need_to_create_worker(pool)) 2564 break; 2565 } 2566 2567 del_timer_sync(&pool->mayday_timer); 2568 raw_spin_lock_irq(&pool->lock); 2569 /* 2570 * This is necessary even after a new worker was just successfully 2571 * created as @pool->lock was dropped and the new worker might have 2572 * already become busy. 2573 */ 2574 if (need_to_create_worker(pool)) 2575 goto restart; 2576 } 2577 2578 /** 2579 * manage_workers - manage worker pool 2580 * @worker: self 2581 * 2582 * Assume the manager role and manage the worker pool @worker belongs 2583 * to. At any given time, there can be only zero or one manager per 2584 * pool. The exclusion is handled automatically by this function. 2585 * 2586 * The caller can safely start processing works on false return. On 2587 * true return, it's guaranteed that need_to_create_worker() is false 2588 * and may_start_working() is true. 2589 * 2590 * CONTEXT: 2591 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2592 * multiple times. Does GFP_KERNEL allocations. 2593 * 2594 * Return: 2595 * %false if the pool doesn't need management and the caller can safely 2596 * start processing works, %true if management function was performed and 2597 * the conditions that the caller verified before calling the function may 2598 * no longer be true. 2599 */ 2600 static bool manage_workers(struct worker *worker) 2601 { 2602 struct worker_pool *pool = worker->pool; 2603 2604 if (pool->flags & POOL_MANAGER_ACTIVE) 2605 return false; 2606 2607 pool->flags |= POOL_MANAGER_ACTIVE; 2608 pool->manager = worker; 2609 2610 maybe_create_worker(pool); 2611 2612 pool->manager = NULL; 2613 pool->flags &= ~POOL_MANAGER_ACTIVE; 2614 rcuwait_wake_up(&manager_wait); 2615 return true; 2616 } 2617 2618 /** 2619 * process_one_work - process single work 2620 * @worker: self 2621 * @work: work to process 2622 * 2623 * Process @work. 
This function contains all the logics necessary to 2624 * process a single work including synchronization against and 2625 * interaction with other workers on the same cpu, queueing and 2626 * flushing. As long as context requirement is met, any worker can 2627 * call this function to process a work. 2628 * 2629 * CONTEXT: 2630 * raw_spin_lock_irq(pool->lock) which is released and regrabbed. 2631 */ 2632 static void process_one_work(struct worker *worker, struct work_struct *work) 2633 __releases(&pool->lock) 2634 __acquires(&pool->lock) 2635 { 2636 struct pool_workqueue *pwq = get_work_pwq(work); 2637 struct worker_pool *pool = worker->pool; 2638 unsigned long work_data; 2639 #ifdef CONFIG_LOCKDEP 2640 /* 2641 * It is permissible to free the struct work_struct from 2642 * inside the function that is called from it, this we need to 2643 * take into account for lockdep too. To avoid bogus "held 2644 * lock freed" warnings as well as problems when looking into 2645 * work->lockdep_map, make a copy and use that here. 2646 */ 2647 struct lockdep_map lockdep_map; 2648 2649 lockdep_copy_map(&lockdep_map, &work->lockdep_map); 2650 #endif 2651 /* ensure we're on the correct CPU */ 2652 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && 2653 raw_smp_processor_id() != pool->cpu); 2654 2655 /* claim and dequeue */ 2656 debug_work_deactivate(work); 2657 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); 2658 worker->current_work = work; 2659 worker->current_func = work->func; 2660 worker->current_pwq = pwq; 2661 worker->current_at = worker->task->se.sum_exec_runtime; 2662 work_data = *work_data_bits(work); 2663 worker->current_color = get_work_color(work_data); 2664 2665 /* 2666 * Record wq name for cmdline and debug reporting, may get 2667 * overridden through set_worker_desc(). 2668 */ 2669 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); 2670 2671 list_del_init(&work->entry); 2672 2673 /* 2674 * CPU intensive works don't participate in concurrency management. 2675 * They're the scheduler's responsibility. This takes @worker out 2676 * of concurrency management and the next code block will chain 2677 * execution of the pending work items. 2678 */ 2679 if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE)) 2680 worker_set_flags(worker, WORKER_CPU_INTENSIVE); 2681 2682 /* 2683 * Kick @pool if necessary. It's always noop for per-cpu worker pools 2684 * since nr_running would always be >= 1 at this point. This is used to 2685 * chain execution of the pending work items for WORKER_NOT_RUNNING 2686 * workers such as the UNBOUND and CPU_INTENSIVE ones. 2687 */ 2688 kick_pool(pool); 2689 2690 /* 2691 * Record the last pool and clear PENDING which should be the last 2692 * update to @work. Also, do this inside @pool->lock so that 2693 * PENDING and queued state changes happen together while IRQ is 2694 * disabled. 2695 */ 2696 set_work_pool_and_clear_pending(work, pool->id); 2697 2698 pwq->stats[PWQ_STAT_STARTED]++; 2699 raw_spin_unlock_irq(&pool->lock); 2700 2701 lock_map_acquire(&pwq->wq->lockdep_map); 2702 lock_map_acquire(&lockdep_map); 2703 /* 2704 * Strictly speaking we should mark the invariant state without holding 2705 * any locks, that is, before these two lock_map_acquire()'s. 2706 * 2707 * However, that would result in: 2708 * 2709 * A(W1) 2710 * WFC(C) 2711 * A(W1) 2712 * C(C) 2713 * 2714 * Which would create W1->C->W1 dependencies, even though there is no 2715 * actual deadlock possible. 
There are two solutions, using a 2716 * read-recursive acquire on the work(queue) 'locks', but this will then 2717 * hit the lockdep limitation on recursive locks, or simply discard 2718 * these locks. 2719 * 2720 * AFAICT there is no possible deadlock scenario between the 2721 * flush_work() and complete() primitives (except for single-threaded 2722 * workqueues), so hiding them isn't a problem. 2723 */ 2724 lockdep_invariant_state(true); 2725 trace_workqueue_execute_start(work); 2726 worker->current_func(work); 2727 /* 2728 * While we must be careful to not use "work" after this, the trace 2729 * point will only record its address. 2730 */ 2731 trace_workqueue_execute_end(work, worker->current_func); 2732 pwq->stats[PWQ_STAT_COMPLETED]++; 2733 lock_map_release(&lockdep_map); 2734 lock_map_release(&pwq->wq->lockdep_map); 2735 2736 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 2737 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" 2738 " last function: %ps\n", 2739 current->comm, preempt_count(), task_pid_nr(current), 2740 worker->current_func); 2741 debug_show_held_locks(current); 2742 dump_stack(); 2743 } 2744 2745 /* 2746 * The following prevents a kworker from hogging CPU on !PREEMPTION 2747 * kernels, where a requeueing work item waiting for something to 2748 * happen could deadlock with stop_machine as such work item could 2749 * indefinitely requeue itself while all other CPUs are trapped in 2750 * stop_machine. At the same time, report a quiescent RCU state so 2751 * the same condition doesn't freeze RCU. 2752 */ 2753 cond_resched(); 2754 2755 raw_spin_lock_irq(&pool->lock); 2756 2757 /* 2758 * In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked 2759 * CPU intensive by wq_worker_tick() if @work hogged CPU longer than 2760 * wq_cpu_intensive_thresh_us. Clear it. 2761 */ 2762 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 2763 2764 /* tag the worker for identification in schedule() */ 2765 worker->last_func = worker->current_func; 2766 2767 /* we're done with it, release */ 2768 hash_del(&worker->hentry); 2769 worker->current_work = NULL; 2770 worker->current_func = NULL; 2771 worker->current_pwq = NULL; 2772 worker->current_color = INT_MAX; 2773 pwq_dec_nr_in_flight(pwq, work_data); 2774 } 2775 2776 /** 2777 * process_scheduled_works - process scheduled works 2778 * @worker: self 2779 * 2780 * Process all scheduled works. Please note that the scheduled list 2781 * may change while processing a work, so this function repeatedly 2782 * fetches a work from the top and executes it. 2783 * 2784 * CONTEXT: 2785 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2786 * multiple times. 2787 */ 2788 static void process_scheduled_works(struct worker *worker) 2789 { 2790 struct work_struct *work; 2791 bool first = true; 2792 2793 while ((work = list_first_entry_or_null(&worker->scheduled, 2794 struct work_struct, entry))) { 2795 if (first) { 2796 worker->pool->watchdog_ts = jiffies; 2797 first = false; 2798 } 2799 process_one_work(worker, work); 2800 } 2801 } 2802 2803 static void set_pf_worker(bool val) 2804 { 2805 mutex_lock(&wq_pool_attach_mutex); 2806 if (val) 2807 current->flags |= PF_WQ_WORKER; 2808 else 2809 current->flags &= ~PF_WQ_WORKER; 2810 mutex_unlock(&wq_pool_attach_mutex); 2811 } 2812 2813 /** 2814 * worker_thread - the worker thread function 2815 * @__worker: self 2816 * 2817 * The worker thread function. All workers belong to a worker_pool - 2818 * either a per-cpu one or dynamic unbound one. 
These workers process all 2819 * work items regardless of their specific target workqueue. The only 2820 * exception is work items which belong to workqueues with a rescuer which 2821 * will be explained in rescuer_thread(). 2822 * 2823 * Return: 0 2824 */ 2825 static int worker_thread(void *__worker) 2826 { 2827 struct worker *worker = __worker; 2828 struct worker_pool *pool = worker->pool; 2829 2830 /* tell the scheduler that this is a workqueue worker */ 2831 set_pf_worker(true); 2832 woke_up: 2833 raw_spin_lock_irq(&pool->lock); 2834 2835 /* am I supposed to die? */ 2836 if (unlikely(worker->flags & WORKER_DIE)) { 2837 raw_spin_unlock_irq(&pool->lock); 2838 set_pf_worker(false); 2839 2840 set_task_comm(worker->task, "kworker/dying"); 2841 ida_free(&pool->worker_ida, worker->id); 2842 worker_detach_from_pool(worker); 2843 WARN_ON_ONCE(!list_empty(&worker->entry)); 2844 kfree(worker); 2845 return 0; 2846 } 2847 2848 worker_leave_idle(worker); 2849 recheck: 2850 /* no more worker necessary? */ 2851 if (!need_more_worker(pool)) 2852 goto sleep; 2853 2854 /* do we need to manage? */ 2855 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) 2856 goto recheck; 2857 2858 /* 2859 * ->scheduled list can only be filled while a worker is 2860 * preparing to process a work or actually processing it. 2861 * Make sure nobody diddled with it while I was sleeping. 2862 */ 2863 WARN_ON_ONCE(!list_empty(&worker->scheduled)); 2864 2865 /* 2866 * Finish PREP stage. We're guaranteed to have at least one idle 2867 * worker or that someone else has already assumed the manager 2868 * role. This is where @worker starts participating in concurrency 2869 * management if applicable and concurrency management is restored 2870 * after being rebound. See rebind_workers() for details. 2871 */ 2872 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); 2873 2874 do { 2875 struct work_struct *work = 2876 list_first_entry(&pool->worklist, 2877 struct work_struct, entry); 2878 2879 if (assign_work(work, worker, NULL)) 2880 process_scheduled_works(worker); 2881 } while (keep_working(pool)); 2882 2883 worker_set_flags(worker, WORKER_PREP); 2884 sleep: 2885 /* 2886 * pool->lock is held and there's no work to process and no need to 2887 * manage, sleep. Workers are woken up only while holding 2888 * pool->lock or from local cpu, so setting the current state 2889 * before releasing pool->lock is enough to prevent losing any 2890 * event. 2891 */ 2892 worker_enter_idle(worker); 2893 __set_current_state(TASK_IDLE); 2894 raw_spin_unlock_irq(&pool->lock); 2895 schedule(); 2896 goto woke_up; 2897 } 2898 2899 /** 2900 * rescuer_thread - the rescuer thread function 2901 * @__rescuer: self 2902 * 2903 * Workqueue rescuer thread function. There's one rescuer for each 2904 * workqueue which has WQ_MEM_RECLAIM set. 2905 * 2906 * Regular work processing on a pool may block trying to create a new 2907 * worker which uses GFP_KERNEL allocation which has slight chance of 2908 * developing into deadlock if some works currently on the same queue 2909 * need to be processed to satisfy the GFP_KERNEL allocation. This is 2910 * the problem rescuer solves. 2911 * 2912 * When such condition is possible, the pool summons rescuers of all 2913 * workqueues which have works queued on the pool and let them process 2914 * those works so that forward progress can be guaranteed. 2915 * 2916 * This should happen rarely. 
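 *
 * For reference, only workqueues created with %WQ_MEM_RECLAIM have a
 * rescuer at all, e.g. (illustrative sketch, "my_reclaim_wq" is a made-up
 * name):
 *
 *	wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 0);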
2917 * 2918 * Return: 0 2919 */ 2920 static int rescuer_thread(void *__rescuer) 2921 { 2922 struct worker *rescuer = __rescuer; 2923 struct workqueue_struct *wq = rescuer->rescue_wq; 2924 bool should_stop; 2925 2926 set_user_nice(current, RESCUER_NICE_LEVEL); 2927 2928 /* 2929 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it 2930 * doesn't participate in concurrency management. 2931 */ 2932 set_pf_worker(true); 2933 repeat: 2934 set_current_state(TASK_IDLE); 2935 2936 /* 2937 * By the time the rescuer is requested to stop, the workqueue 2938 * shouldn't have any work pending, but @wq->maydays may still have 2939 * pwq(s) queued. This can happen by non-rescuer workers consuming 2940 * all the work items before the rescuer got to them. Go through 2941 * @wq->maydays processing before acting on should_stop so that the 2942 * list is always empty on exit. 2943 */ 2944 should_stop = kthread_should_stop(); 2945 2946 /* see whether any pwq is asking for help */ 2947 raw_spin_lock_irq(&wq_mayday_lock); 2948 2949 while (!list_empty(&wq->maydays)) { 2950 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, 2951 struct pool_workqueue, mayday_node); 2952 struct worker_pool *pool = pwq->pool; 2953 struct work_struct *work, *n; 2954 2955 __set_current_state(TASK_RUNNING); 2956 list_del_init(&pwq->mayday_node); 2957 2958 raw_spin_unlock_irq(&wq_mayday_lock); 2959 2960 worker_attach_to_pool(rescuer, pool); 2961 2962 raw_spin_lock_irq(&pool->lock); 2963 2964 /* 2965 * Slurp in all works issued via this workqueue and 2966 * process'em. 2967 */ 2968 WARN_ON_ONCE(!list_empty(&rescuer->scheduled)); 2969 list_for_each_entry_safe(work, n, &pool->worklist, entry) { 2970 if (get_work_pwq(work) == pwq && 2971 assign_work(work, rescuer, &n)) 2972 pwq->stats[PWQ_STAT_RESCUED]++; 2973 } 2974 2975 if (!list_empty(&rescuer->scheduled)) { 2976 process_scheduled_works(rescuer); 2977 2978 /* 2979 * The above execution of rescued work items could 2980 * have created more to rescue through 2981 * pwq_activate_first_inactive() or chained 2982 * queueing. Let's put @pwq back on mayday list so 2983 * that such back-to-back work items, which may be 2984 * being used to relieve memory pressure, don't 2985 * incur MAYDAY_INTERVAL delay inbetween. 2986 */ 2987 if (pwq->nr_active && need_to_create_worker(pool)) { 2988 raw_spin_lock(&wq_mayday_lock); 2989 /* 2990 * Queue iff we aren't racing destruction 2991 * and somebody else hasn't queued it already. 2992 */ 2993 if (wq->rescuer && list_empty(&pwq->mayday_node)) { 2994 get_pwq(pwq); 2995 list_add_tail(&pwq->mayday_node, &wq->maydays); 2996 } 2997 raw_spin_unlock(&wq_mayday_lock); 2998 } 2999 } 3000 3001 /* 3002 * Put the reference grabbed by send_mayday(). @pool won't 3003 * go away while we're still attached to it. 3004 */ 3005 put_pwq(pwq); 3006 3007 /* 3008 * Leave this pool. Notify regular workers; otherwise, we end up 3009 * with 0 concurrency and stalling the execution. 
3010 */ 3011 kick_pool(pool); 3012 3013 raw_spin_unlock_irq(&pool->lock); 3014 3015 worker_detach_from_pool(rescuer); 3016 3017 raw_spin_lock_irq(&wq_mayday_lock); 3018 } 3019 3020 raw_spin_unlock_irq(&wq_mayday_lock); 3021 3022 if (should_stop) { 3023 __set_current_state(TASK_RUNNING); 3024 set_pf_worker(false); 3025 return 0; 3026 } 3027 3028 /* rescuers should never participate in concurrency management */ 3029 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); 3030 schedule(); 3031 goto repeat; 3032 } 3033 3034 /** 3035 * check_flush_dependency - check for flush dependency sanity 3036 * @target_wq: workqueue being flushed 3037 * @target_work: work item being flushed (NULL for workqueue flushes) 3038 * 3039 * %current is trying to flush the whole @target_wq or @target_work on it. 3040 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not 3041 * reclaiming memory or running on a workqueue which doesn't have 3042 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to 3043 * a deadlock. 3044 */ 3045 static void check_flush_dependency(struct workqueue_struct *target_wq, 3046 struct work_struct *target_work) 3047 { 3048 work_func_t target_func = target_work ? target_work->func : NULL; 3049 struct worker *worker; 3050 3051 if (target_wq->flags & WQ_MEM_RECLAIM) 3052 return; 3053 3054 worker = current_wq_worker(); 3055 3056 WARN_ONCE(current->flags & PF_MEMALLOC, 3057 "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps", 3058 current->pid, current->comm, target_wq->name, target_func); 3059 WARN_ONCE(worker && ((worker->current_pwq->wq->flags & 3060 (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM), 3061 "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps", 3062 worker->current_pwq->wq->name, worker->current_func, 3063 target_wq->name, target_func); 3064 } 3065 3066 struct wq_barrier { 3067 struct work_struct work; 3068 struct completion done; 3069 struct task_struct *task; /* purely informational */ 3070 }; 3071 3072 static void wq_barrier_func(struct work_struct *work) 3073 { 3074 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); 3075 complete(&barr->done); 3076 } 3077 3078 /** 3079 * insert_wq_barrier - insert a barrier work 3080 * @pwq: pwq to insert barrier into 3081 * @barr: wq_barrier to insert 3082 * @target: target work to attach @barr to 3083 * @worker: worker currently executing @target, NULL if @target is not executing 3084 * 3085 * @barr is linked to @target such that @barr is completed only after 3086 * @target finishes execution. Please note that the ordering 3087 * guarantee is observed only with respect to @target and on the local 3088 * cpu. 3089 * 3090 * Currently, a queued barrier can't be canceled. This is because 3091 * try_to_grab_pending() can't determine whether the work to be 3092 * grabbed is at the head of the queue and thus can't clear LINKED 3093 * flag of the previous work while there must be a valid next work 3094 * after a work with LINKED flag set. 3095 * 3096 * Note that when @worker is non-NULL, @target may be modified 3097 * underneath us, so we can't reliably determine pwq from @target. 3098 * 3099 * CONTEXT: 3100 * raw_spin_lock_irq(pool->lock). 
3101 */ 3102 static void insert_wq_barrier(struct pool_workqueue *pwq, 3103 struct wq_barrier *barr, 3104 struct work_struct *target, struct worker *worker) 3105 { 3106 unsigned int work_flags = 0; 3107 unsigned int work_color; 3108 struct list_head *head; 3109 3110 /* 3111 * debugobject calls are safe here even with pool->lock locked 3112 * as we know for sure that this will not trigger any of the 3113 * checks and call back into the fixup functions where we 3114 * might deadlock. 3115 */ 3116 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); 3117 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); 3118 3119 init_completion_map(&barr->done, &target->lockdep_map); 3120 3121 barr->task = current; 3122 3123 /* The barrier work item does not participate in pwq->nr_active. */ 3124 work_flags |= WORK_STRUCT_INACTIVE; 3125 3126 /* 3127 * If @target is currently being executed, schedule the 3128 * barrier to the worker; otherwise, put it after @target. 3129 */ 3130 if (worker) { 3131 head = worker->scheduled.next; 3132 work_color = worker->current_color; 3133 } else { 3134 unsigned long *bits = work_data_bits(target); 3135 3136 head = target->entry.next; 3137 /* there can already be other linked works, inherit and set */ 3138 work_flags |= *bits & WORK_STRUCT_LINKED; 3139 work_color = get_work_color(*bits); 3140 __set_bit(WORK_STRUCT_LINKED_BIT, bits); 3141 } 3142 3143 pwq->nr_in_flight[work_color]++; 3144 work_flags |= work_color_to_flags(work_color); 3145 3146 insert_work(pwq, &barr->work, head, work_flags); 3147 } 3148 3149 /** 3150 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing 3151 * @wq: workqueue being flushed 3152 * @flush_color: new flush color, < 0 for no-op 3153 * @work_color: new work color, < 0 for no-op 3154 * 3155 * Prepare pwqs for workqueue flushing. 3156 * 3157 * If @flush_color is non-negative, flush_color on all pwqs should be 3158 * -1. If no pwq has in-flight commands at the specified color, all 3159 * pwq->flush_color's stay at -1 and %false is returned. If any pwq 3160 * has in flight commands, its pwq->flush_color is set to 3161 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq 3162 * wakeup logic is armed and %true is returned. 3163 * 3164 * The caller should have initialized @wq->first_flusher prior to 3165 * calling this function with non-negative @flush_color. If 3166 * @flush_color is negative, no flush color update is done and %false 3167 * is returned. 3168 * 3169 * If @work_color is non-negative, all pwqs should have the same 3170 * work_color which is previous to @work_color and all will be 3171 * advanced to @work_color. 3172 * 3173 * CONTEXT: 3174 * mutex_lock(wq->mutex). 3175 * 3176 * Return: 3177 * %true if @flush_color >= 0 and there's something to flush. %false 3178 * otherwise. 
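 *
 * Illustrative flow: for the first flusher, __flush_workqueue() passes the
 * pre-advance work color as @flush_color and the just-advanced color as
 * @work_color. Every pwq that still has nr_in_flight[@flush_color] work
 * items bumps wq->nr_pwqs_to_flush; the last such pwq to drain completes
 * wq->first_flusher->done from pwq_dec_nr_in_flight().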
3179 */ 3180 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, 3181 int flush_color, int work_color) 3182 { 3183 bool wait = false; 3184 struct pool_workqueue *pwq; 3185 3186 if (flush_color >= 0) { 3187 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); 3188 atomic_set(&wq->nr_pwqs_to_flush, 1); 3189 } 3190 3191 for_each_pwq(pwq, wq) { 3192 struct worker_pool *pool = pwq->pool; 3193 3194 raw_spin_lock_irq(&pool->lock); 3195 3196 if (flush_color >= 0) { 3197 WARN_ON_ONCE(pwq->flush_color != -1); 3198 3199 if (pwq->nr_in_flight[flush_color]) { 3200 pwq->flush_color = flush_color; 3201 atomic_inc(&wq->nr_pwqs_to_flush); 3202 wait = true; 3203 } 3204 } 3205 3206 if (work_color >= 0) { 3207 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); 3208 pwq->work_color = work_color; 3209 } 3210 3211 raw_spin_unlock_irq(&pool->lock); 3212 } 3213 3214 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) 3215 complete(&wq->first_flusher->done); 3216 3217 return wait; 3218 } 3219 3220 /** 3221 * __flush_workqueue - ensure that any scheduled work has run to completion. 3222 * @wq: workqueue to flush 3223 * 3224 * This function sleeps until all work items which were queued on entry 3225 * have finished execution, but it is not livelocked by new incoming ones. 3226 */ 3227 void __flush_workqueue(struct workqueue_struct *wq) 3228 { 3229 struct wq_flusher this_flusher = { 3230 .list = LIST_HEAD_INIT(this_flusher.list), 3231 .flush_color = -1, 3232 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map), 3233 }; 3234 int next_color; 3235 3236 if (WARN_ON(!wq_online)) 3237 return; 3238 3239 lock_map_acquire(&wq->lockdep_map); 3240 lock_map_release(&wq->lockdep_map); 3241 3242 mutex_lock(&wq->mutex); 3243 3244 /* 3245 * Start-to-wait phase 3246 */ 3247 next_color = work_next_color(wq->work_color); 3248 3249 if (next_color != wq->flush_color) { 3250 /* 3251 * Color space is not full. The current work_color 3252 * becomes our flush_color and work_color is advanced 3253 * by one. 3254 */ 3255 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); 3256 this_flusher.flush_color = wq->work_color; 3257 wq->work_color = next_color; 3258 3259 if (!wq->first_flusher) { 3260 /* no flush in progress, become the first flusher */ 3261 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 3262 3263 wq->first_flusher = &this_flusher; 3264 3265 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, 3266 wq->work_color)) { 3267 /* nothing to flush, done */ 3268 wq->flush_color = next_color; 3269 wq->first_flusher = NULL; 3270 goto out_unlock; 3271 } 3272 } else { 3273 /* wait in queue */ 3274 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); 3275 list_add_tail(&this_flusher.list, &wq->flusher_queue); 3276 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 3277 } 3278 } else { 3279 /* 3280 * Oops, color space is full, wait on overflow queue. 3281 * The next flush completion will assign us 3282 * flush_color and transfer to flusher_queue. 3283 */ 3284 list_add_tail(&this_flusher.list, &wq->flusher_overflow); 3285 } 3286 3287 check_flush_dependency(wq, NULL); 3288 3289 mutex_unlock(&wq->mutex); 3290 3291 wait_for_completion(&this_flusher.done); 3292 3293 /* 3294 * Wake-up-and-cascade phase 3295 * 3296 * First flushers are responsible for cascading flushes and 3297 * handling overflow. Non-first flushers can simply return. 
3298 */ 3299 if (READ_ONCE(wq->first_flusher) != &this_flusher) 3300 return; 3301 3302 mutex_lock(&wq->mutex); 3303 3304 /* we might have raced, check again with mutex held */ 3305 if (wq->first_flusher != &this_flusher) 3306 goto out_unlock; 3307 3308 WRITE_ONCE(wq->first_flusher, NULL); 3309 3310 WARN_ON_ONCE(!list_empty(&this_flusher.list)); 3311 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 3312 3313 while (true) { 3314 struct wq_flusher *next, *tmp; 3315 3316 /* complete all the flushers sharing the current flush color */ 3317 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 3318 if (next->flush_color != wq->flush_color) 3319 break; 3320 list_del_init(&next->list); 3321 complete(&next->done); 3322 } 3323 3324 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && 3325 wq->flush_color != work_next_color(wq->work_color)); 3326 3327 /* this flush_color is finished, advance by one */ 3328 wq->flush_color = work_next_color(wq->flush_color); 3329 3330 /* one color has been freed, handle overflow queue */ 3331 if (!list_empty(&wq->flusher_overflow)) { 3332 /* 3333 * Assign the same color to all overflowed 3334 * flushers, advance work_color and append to 3335 * flusher_queue. This is the start-to-wait 3336 * phase for these overflowed flushers. 3337 */ 3338 list_for_each_entry(tmp, &wq->flusher_overflow, list) 3339 tmp->flush_color = wq->work_color; 3340 3341 wq->work_color = work_next_color(wq->work_color); 3342 3343 list_splice_tail_init(&wq->flusher_overflow, 3344 &wq->flusher_queue); 3345 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 3346 } 3347 3348 if (list_empty(&wq->flusher_queue)) { 3349 WARN_ON_ONCE(wq->flush_color != wq->work_color); 3350 break; 3351 } 3352 3353 /* 3354 * Need to flush more colors. Make the next flusher 3355 * the new first flusher and arm pwqs. 3356 */ 3357 WARN_ON_ONCE(wq->flush_color == wq->work_color); 3358 WARN_ON_ONCE(wq->flush_color != next->flush_color); 3359 3360 list_del_init(&next->list); 3361 wq->first_flusher = next; 3362 3363 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) 3364 break; 3365 3366 /* 3367 * Meh... this color is already done, clear first 3368 * flusher and repeat cascading. 3369 */ 3370 wq->first_flusher = NULL; 3371 } 3372 3373 out_unlock: 3374 mutex_unlock(&wq->mutex); 3375 } 3376 EXPORT_SYMBOL(__flush_workqueue); 3377 3378 /** 3379 * drain_workqueue - drain a workqueue 3380 * @wq: workqueue to drain 3381 * 3382 * Wait until the workqueue becomes empty. While draining is in progress, 3383 * only chain queueing is allowed. IOW, only currently pending or running 3384 * work items on @wq can queue further work items on it. @wq is flushed 3385 * repeatedly until it becomes empty. The number of flushing is determined 3386 * by the depth of chaining and should be relatively short. Whine if it 3387 * takes too long. 3388 */ 3389 void drain_workqueue(struct workqueue_struct *wq) 3390 { 3391 unsigned int flush_cnt = 0; 3392 struct pool_workqueue *pwq; 3393 3394 /* 3395 * __queue_work() needs to test whether there are drainers, is much 3396 * hotter than drain_workqueue() and already looks at @wq->flags. 3397 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers. 
3398 */ 3399 mutex_lock(&wq->mutex); 3400 if (!wq->nr_drainers++) 3401 wq->flags |= __WQ_DRAINING; 3402 mutex_unlock(&wq->mutex); 3403 reflush: 3404 __flush_workqueue(wq); 3405 3406 mutex_lock(&wq->mutex); 3407 3408 for_each_pwq(pwq, wq) { 3409 bool drained; 3410 3411 raw_spin_lock_irq(&pwq->pool->lock); 3412 drained = pwq_is_empty(pwq); 3413 raw_spin_unlock_irq(&pwq->pool->lock); 3414 3415 if (drained) 3416 continue; 3417 3418 if (++flush_cnt == 10 || 3419 (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 3420 pr_warn("workqueue %s: %s() isn't complete after %u tries\n", 3421 wq->name, __func__, flush_cnt); 3422 3423 mutex_unlock(&wq->mutex); 3424 goto reflush; 3425 } 3426 3427 if (!--wq->nr_drainers) 3428 wq->flags &= ~__WQ_DRAINING; 3429 mutex_unlock(&wq->mutex); 3430 } 3431 EXPORT_SYMBOL_GPL(drain_workqueue); 3432 3433 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, 3434 bool from_cancel) 3435 { 3436 struct worker *worker = NULL; 3437 struct worker_pool *pool; 3438 struct pool_workqueue *pwq; 3439 3440 might_sleep(); 3441 3442 rcu_read_lock(); 3443 pool = get_work_pool(work); 3444 if (!pool) { 3445 rcu_read_unlock(); 3446 return false; 3447 } 3448 3449 raw_spin_lock_irq(&pool->lock); 3450 /* see the comment in try_to_grab_pending() with the same code */ 3451 pwq = get_work_pwq(work); 3452 if (pwq) { 3453 if (unlikely(pwq->pool != pool)) 3454 goto already_gone; 3455 } else { 3456 worker = find_worker_executing_work(pool, work); 3457 if (!worker) 3458 goto already_gone; 3459 pwq = worker->current_pwq; 3460 } 3461 3462 check_flush_dependency(pwq->wq, work); 3463 3464 insert_wq_barrier(pwq, barr, work, worker); 3465 raw_spin_unlock_irq(&pool->lock); 3466 3467 /* 3468 * Force a lock recursion deadlock when using flush_work() inside a 3469 * single-threaded or rescuer equipped workqueue. 3470 * 3471 * For single threaded workqueues the deadlock happens when the work 3472 * is after the work issuing the flush_work(). For rescuer equipped 3473 * workqueues the deadlock happens when the rescuer stalls, blocking 3474 * forward progress. 3475 */ 3476 if (!from_cancel && 3477 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) { 3478 lock_map_acquire(&pwq->wq->lockdep_map); 3479 lock_map_release(&pwq->wq->lockdep_map); 3480 } 3481 rcu_read_unlock(); 3482 return true; 3483 already_gone: 3484 raw_spin_unlock_irq(&pool->lock); 3485 rcu_read_unlock(); 3486 return false; 3487 } 3488 3489 static bool __flush_work(struct work_struct *work, bool from_cancel) 3490 { 3491 struct wq_barrier barr; 3492 3493 if (WARN_ON(!wq_online)) 3494 return false; 3495 3496 if (WARN_ON(!work->func)) 3497 return false; 3498 3499 lock_map_acquire(&work->lockdep_map); 3500 lock_map_release(&work->lockdep_map); 3501 3502 if (start_flush_work(work, &barr, from_cancel)) { 3503 wait_for_completion(&barr.done); 3504 destroy_work_on_stack(&barr.work); 3505 return true; 3506 } else { 3507 return false; 3508 } 3509 } 3510 3511 /** 3512 * flush_work - wait for a work to finish executing the last queueing instance 3513 * @work: the work to flush 3514 * 3515 * Wait until @work has finished execution. @work is guaranteed to be idle 3516 * on return if it hasn't been requeued since flush started. 3517 * 3518 * Return: 3519 * %true if flush_work() waited for the work to finish execution, 3520 * %false if it was already idle. 
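 *
 * Typical usage sketch (illustrative; my_work is a hypothetical work item
 * queued earlier in the same driver):
 *
 *	queue_work(system_wq, &my_work);
 *	...
 *	flush_work(&my_work);
 *
 * Once flush_work() returns, that queueing instance of my_work has finished
 * executing.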
3521 */ 3522 bool flush_work(struct work_struct *work) 3523 { 3524 return __flush_work(work, false); 3525 } 3526 EXPORT_SYMBOL_GPL(flush_work); 3527 3528 struct cwt_wait { 3529 wait_queue_entry_t wait; 3530 struct work_struct *work; 3531 }; 3532 3533 static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 3534 { 3535 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); 3536 3537 if (cwait->work != key) 3538 return 0; 3539 return autoremove_wake_function(wait, mode, sync, key); 3540 } 3541 3542 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) 3543 { 3544 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); 3545 unsigned long flags; 3546 int ret; 3547 3548 do { 3549 ret = try_to_grab_pending(work, is_dwork, &flags); 3550 /* 3551 * If someone else is already canceling, wait for it to 3552 * finish. flush_work() doesn't work for PREEMPT_NONE 3553 * because we may get scheduled between @work's completion 3554 * and the other canceling task resuming and clearing 3555 * CANCELING - flush_work() will return false immediately 3556 * as @work is no longer busy, try_to_grab_pending() will 3557 * return -ENOENT as @work is still being canceled and the 3558 * other canceling task won't be able to clear CANCELING as 3559 * we're hogging the CPU. 3560 * 3561 * Let's wait for completion using a waitqueue. As this 3562 * may lead to the thundering herd problem, use a custom 3563 * wake function which matches @work along with exclusive 3564 * wait and wakeup. 3565 */ 3566 if (unlikely(ret == -ENOENT)) { 3567 struct cwt_wait cwait; 3568 3569 init_wait(&cwait.wait); 3570 cwait.wait.func = cwt_wakefn; 3571 cwait.work = work; 3572 3573 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, 3574 TASK_UNINTERRUPTIBLE); 3575 if (work_is_canceling(work)) 3576 schedule(); 3577 finish_wait(&cancel_waitq, &cwait.wait); 3578 } 3579 } while (unlikely(ret < 0)); 3580 3581 /* tell other tasks trying to grab @work to back off */ 3582 mark_work_canceling(work); 3583 local_irq_restore(flags); 3584 3585 /* 3586 * This allows canceling during early boot. We know that @work 3587 * isn't executing. 3588 */ 3589 if (wq_online) 3590 __flush_work(work, true); 3591 3592 clear_work_data(work); 3593 3594 /* 3595 * Paired with prepare_to_wait() above so that either 3596 * waitqueue_active() is visible here or !work_is_canceling() is 3597 * visible there. 3598 */ 3599 smp_mb(); 3600 if (waitqueue_active(&cancel_waitq)) 3601 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); 3602 3603 return ret; 3604 } 3605 3606 /** 3607 * cancel_work_sync - cancel a work and wait for it to finish 3608 * @work: the work to cancel 3609 * 3610 * Cancel @work and wait for its execution to finish. This function 3611 * can be used even if the work re-queues itself or migrates to 3612 * another workqueue. On return from this function, @work is 3613 * guaranteed to be not pending or executing on any CPU. 3614 * 3615 * cancel_work_sync(&delayed_work->work) must not be used for 3616 * delayed_work's. Use cancel_delayed_work_sync() instead. 3617 * 3618 * The caller must ensure that the workqueue on which @work was last 3619 * queued can't be destroyed before this function returns. 3620 * 3621 * Return: 3622 * %true if @work was pending, %false otherwise. 
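 *
 * A minimal teardown sketch (struct my_dev and its event_work member are
 * hypothetical caller code):
 *
 *	static void my_dev_remove(struct my_dev *md)
 *	{
 *		cancel_work_sync(&md->event_work);
 *		kfree(md);
 *	}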
3623 */ 3624 bool cancel_work_sync(struct work_struct *work) 3625 { 3626 return __cancel_work_timer(work, false); 3627 } 3628 EXPORT_SYMBOL_GPL(cancel_work_sync); 3629 3630 /** 3631 * flush_delayed_work - wait for a dwork to finish executing the last queueing 3632 * @dwork: the delayed work to flush 3633 * 3634 * Delayed timer is cancelled and the pending work is queued for 3635 * immediate execution. Like flush_work(), this function only 3636 * considers the last queueing instance of @dwork. 3637 * 3638 * Return: 3639 * %true if flush_work() waited for the work to finish execution, 3640 * %false if it was already idle. 3641 */ 3642 bool flush_delayed_work(struct delayed_work *dwork) 3643 { 3644 local_irq_disable(); 3645 if (del_timer_sync(&dwork->timer)) 3646 __queue_work(dwork->cpu, dwork->wq, &dwork->work); 3647 local_irq_enable(); 3648 return flush_work(&dwork->work); 3649 } 3650 EXPORT_SYMBOL(flush_delayed_work); 3651 3652 /** 3653 * flush_rcu_work - wait for a rwork to finish executing the last queueing 3654 * @rwork: the rcu work to flush 3655 * 3656 * Return: 3657 * %true if flush_rcu_work() waited for the work to finish execution, 3658 * %false if it was already idle. 3659 */ 3660 bool flush_rcu_work(struct rcu_work *rwork) 3661 { 3662 if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) { 3663 rcu_barrier(); 3664 flush_work(&rwork->work); 3665 return true; 3666 } else { 3667 return flush_work(&rwork->work); 3668 } 3669 } 3670 EXPORT_SYMBOL(flush_rcu_work); 3671 3672 static bool __cancel_work(struct work_struct *work, bool is_dwork) 3673 { 3674 unsigned long flags; 3675 int ret; 3676 3677 do { 3678 ret = try_to_grab_pending(work, is_dwork, &flags); 3679 } while (unlikely(ret == -EAGAIN)); 3680 3681 if (unlikely(ret < 0)) 3682 return false; 3683 3684 set_work_pool_and_clear_pending(work, get_work_pool_id(work)); 3685 local_irq_restore(flags); 3686 return ret; 3687 } 3688 3689 /* 3690 * See cancel_delayed_work() 3691 */ 3692 bool cancel_work(struct work_struct *work) 3693 { 3694 return __cancel_work(work, false); 3695 } 3696 EXPORT_SYMBOL(cancel_work); 3697 3698 /** 3699 * cancel_delayed_work - cancel a delayed work 3700 * @dwork: delayed_work to cancel 3701 * 3702 * Kill off a pending delayed_work. 3703 * 3704 * Return: %true if @dwork was pending and canceled; %false if it wasn't 3705 * pending. 3706 * 3707 * Note: 3708 * The work callback function may still be running on return, unless 3709 * it returns %true and the work doesn't re-arm itself. Explicitly flush or 3710 * use cancel_delayed_work_sync() to wait on it. 3711 * 3712 * This function is safe to call from any context including IRQ handler. 3713 */ 3714 bool cancel_delayed_work(struct delayed_work *dwork) 3715 { 3716 return __cancel_work(&dwork->work, true); 3717 } 3718 EXPORT_SYMBOL(cancel_delayed_work); 3719 3720 /** 3721 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish 3722 * @dwork: the delayed work cancel 3723 * 3724 * This is cancel_work_sync() for delayed works. 3725 * 3726 * Return: 3727 * %true if @dwork was pending, %false otherwise. 
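 *
 * A minimal sketch (struct my_dev and its poll_dwork member are hypothetical
 * caller code):
 *
 *	static void my_dev_stop_polling(struct my_dev *md)
 *	{
 *		cancel_delayed_work_sync(&md->poll_dwork);
 *	}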
3728 */ 3729 bool cancel_delayed_work_sync(struct delayed_work *dwork) 3730 { 3731 return __cancel_work_timer(&dwork->work, true); 3732 } 3733 EXPORT_SYMBOL(cancel_delayed_work_sync); 3734 3735 /** 3736 * schedule_on_each_cpu - execute a function synchronously on each online CPU 3737 * @func: the function to call 3738 * 3739 * schedule_on_each_cpu() executes @func on each online CPU using the 3740 * system workqueue and blocks until all CPUs have completed. 3741 * schedule_on_each_cpu() is very slow. 3742 * 3743 * Return: 3744 * 0 on success, -errno on failure. 3745 */ 3746 int schedule_on_each_cpu(work_func_t func) 3747 { 3748 int cpu; 3749 struct work_struct __percpu *works; 3750 3751 works = alloc_percpu(struct work_struct); 3752 if (!works) 3753 return -ENOMEM; 3754 3755 cpus_read_lock(); 3756 3757 for_each_online_cpu(cpu) { 3758 struct work_struct *work = per_cpu_ptr(works, cpu); 3759 3760 INIT_WORK(work, func); 3761 schedule_work_on(cpu, work); 3762 } 3763 3764 for_each_online_cpu(cpu) 3765 flush_work(per_cpu_ptr(works, cpu)); 3766 3767 cpus_read_unlock(); 3768 free_percpu(works); 3769 return 0; 3770 } 3771 3772 /** 3773 * execute_in_process_context - reliably execute the routine with user context 3774 * @fn: the function to execute 3775 * @ew: guaranteed storage for the execute work structure (must 3776 * be available when the work executes) 3777 * 3778 * Executes the function immediately if process context is available, 3779 * otherwise schedules the function for delayed execution. 3780 * 3781 * Return: 0 - function was executed 3782 * 1 - function was scheduled for execution 3783 */ 3784 int execute_in_process_context(work_func_t fn, struct execute_work *ew) 3785 { 3786 if (!in_interrupt()) { 3787 fn(&ew->work); 3788 return 0; 3789 } 3790 3791 INIT_WORK(&ew->work, fn); 3792 schedule_work(&ew->work); 3793 3794 return 1; 3795 } 3796 EXPORT_SYMBOL_GPL(execute_in_process_context); 3797 3798 /** 3799 * free_workqueue_attrs - free a workqueue_attrs 3800 * @attrs: workqueue_attrs to free 3801 * 3802 * Undo alloc_workqueue_attrs(). 3803 */ 3804 void free_workqueue_attrs(struct workqueue_attrs *attrs) 3805 { 3806 if (attrs) { 3807 free_cpumask_var(attrs->cpumask); 3808 free_cpumask_var(attrs->__pod_cpumask); 3809 kfree(attrs); 3810 } 3811 } 3812 3813 /** 3814 * alloc_workqueue_attrs - allocate a workqueue_attrs 3815 * 3816 * Allocate a new workqueue_attrs, initialize with default settings and 3817 * return it. 3818 * 3819 * Return: The allocated new workqueue_attr on success. %NULL on failure. 3820 */ 3821 struct workqueue_attrs *alloc_workqueue_attrs(void) 3822 { 3823 struct workqueue_attrs *attrs; 3824 3825 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); 3826 if (!attrs) 3827 goto fail; 3828 if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL)) 3829 goto fail; 3830 if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL)) 3831 goto fail; 3832 3833 cpumask_copy(attrs->cpumask, cpu_possible_mask); 3834 attrs->affn_scope = WQ_AFFN_DFL; 3835 return attrs; 3836 fail: 3837 free_workqueue_attrs(attrs); 3838 return NULL; 3839 } 3840 3841 static void copy_workqueue_attrs(struct workqueue_attrs *to, 3842 const struct workqueue_attrs *from) 3843 { 3844 to->nice = from->nice; 3845 cpumask_copy(to->cpumask, from->cpumask); 3846 cpumask_copy(to->__pod_cpumask, from->__pod_cpumask); 3847 to->affn_strict = from->affn_strict; 3848 3849 /* 3850 * Unlike hash and equality test, copying shouldn't ignore wq-only 3851 * fields as copying is used for both pool and wq attrs. 
Instead, 3852 * get_unbound_pool() explicitly clears the fields. 3853 */ 3854 to->affn_scope = from->affn_scope; 3855 to->ordered = from->ordered; 3856 } 3857 3858 /* 3859 * Some attrs fields are workqueue-only. Clear them for worker_pool's. See the 3860 * comments in 'struct workqueue_attrs' definition. 3861 */ 3862 static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs) 3863 { 3864 attrs->affn_scope = WQ_AFFN_NR_TYPES; 3865 attrs->ordered = false; 3866 } 3867 3868 /* hash value of the content of @attr */ 3869 static u32 wqattrs_hash(const struct workqueue_attrs *attrs) 3870 { 3871 u32 hash = 0; 3872 3873 hash = jhash_1word(attrs->nice, hash); 3874 hash = jhash(cpumask_bits(attrs->cpumask), 3875 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 3876 hash = jhash(cpumask_bits(attrs->__pod_cpumask), 3877 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 3878 hash = jhash_1word(attrs->affn_strict, hash); 3879 return hash; 3880 } 3881 3882 /* content equality test */ 3883 static bool wqattrs_equal(const struct workqueue_attrs *a, 3884 const struct workqueue_attrs *b) 3885 { 3886 if (a->nice != b->nice) 3887 return false; 3888 if (!cpumask_equal(a->cpumask, b->cpumask)) 3889 return false; 3890 if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask)) 3891 return false; 3892 if (a->affn_strict != b->affn_strict) 3893 return false; 3894 return true; 3895 } 3896 3897 /* Update @attrs with actually available CPUs */ 3898 static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs, 3899 const cpumask_t *unbound_cpumask) 3900 { 3901 /* 3902 * Calculate the effective CPU mask of @attrs given @unbound_cpumask. If 3903 * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fallback to 3904 * @unbound_cpumask. 3905 */ 3906 cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask); 3907 if (unlikely(cpumask_empty(attrs->cpumask))) 3908 cpumask_copy(attrs->cpumask, unbound_cpumask); 3909 } 3910 3911 /* find wq_pod_type to use for @attrs */ 3912 static const struct wq_pod_type * 3913 wqattrs_pod_type(const struct workqueue_attrs *attrs) 3914 { 3915 enum wq_affn_scope scope; 3916 struct wq_pod_type *pt; 3917 3918 /* to synchronize access to wq_affn_dfl */ 3919 lockdep_assert_held(&wq_pool_mutex); 3920 3921 if (attrs->affn_scope == WQ_AFFN_DFL) 3922 scope = wq_affn_dfl; 3923 else 3924 scope = attrs->affn_scope; 3925 3926 pt = &wq_pod_types[scope]; 3927 3928 if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) && 3929 likely(pt->nr_pods)) 3930 return pt; 3931 3932 /* 3933 * Before workqueue_init_topology(), only SYSTEM is available which is 3934 * initialized in workqueue_init_early(). 3935 */ 3936 pt = &wq_pod_types[WQ_AFFN_SYSTEM]; 3937 BUG_ON(!pt->nr_pods); 3938 return pt; 3939 } 3940 3941 /** 3942 * init_worker_pool - initialize a newly zalloc'd worker_pool 3943 * @pool: worker_pool to initialize 3944 * 3945 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs. 3946 * 3947 * Return: 0 on success, -errno on failure. Even on failure, all fields 3948 * inside @pool proper are initialized and put_unbound_pool() can be called 3949 * on @pool safely to release it. 
3950 */ 3951 static int init_worker_pool(struct worker_pool *pool) 3952 { 3953 raw_spin_lock_init(&pool->lock); 3954 pool->id = -1; 3955 pool->cpu = -1; 3956 pool->node = NUMA_NO_NODE; 3957 pool->flags |= POOL_DISASSOCIATED; 3958 pool->watchdog_ts = jiffies; 3959 INIT_LIST_HEAD(&pool->worklist); 3960 INIT_LIST_HEAD(&pool->idle_list); 3961 hash_init(pool->busy_hash); 3962 3963 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE); 3964 INIT_WORK(&pool->idle_cull_work, idle_cull_fn); 3965 3966 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0); 3967 3968 INIT_LIST_HEAD(&pool->workers); 3969 INIT_LIST_HEAD(&pool->dying_workers); 3970 3971 ida_init(&pool->worker_ida); 3972 INIT_HLIST_NODE(&pool->hash_node); 3973 pool->refcnt = 1; 3974 3975 /* shouldn't fail above this point */ 3976 pool->attrs = alloc_workqueue_attrs(); 3977 if (!pool->attrs) 3978 return -ENOMEM; 3979 3980 wqattrs_clear_for_pool(pool->attrs); 3981 3982 return 0; 3983 } 3984 3985 #ifdef CONFIG_LOCKDEP 3986 static void wq_init_lockdep(struct workqueue_struct *wq) 3987 { 3988 char *lock_name; 3989 3990 lockdep_register_key(&wq->key); 3991 lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name); 3992 if (!lock_name) 3993 lock_name = wq->name; 3994 3995 wq->lock_name = lock_name; 3996 lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0); 3997 } 3998 3999 static void wq_unregister_lockdep(struct workqueue_struct *wq) 4000 { 4001 lockdep_unregister_key(&wq->key); 4002 } 4003 4004 static void wq_free_lockdep(struct workqueue_struct *wq) 4005 { 4006 if (wq->lock_name != wq->name) 4007 kfree(wq->lock_name); 4008 } 4009 #else 4010 static void wq_init_lockdep(struct workqueue_struct *wq) 4011 { 4012 } 4013 4014 static void wq_unregister_lockdep(struct workqueue_struct *wq) 4015 { 4016 } 4017 4018 static void wq_free_lockdep(struct workqueue_struct *wq) 4019 { 4020 } 4021 #endif 4022 4023 static void rcu_free_wq(struct rcu_head *rcu) 4024 { 4025 struct workqueue_struct *wq = 4026 container_of(rcu, struct workqueue_struct, rcu); 4027 4028 wq_free_lockdep(wq); 4029 free_percpu(wq->cpu_pwq); 4030 free_workqueue_attrs(wq->unbound_attrs); 4031 kfree(wq); 4032 } 4033 4034 static void rcu_free_pool(struct rcu_head *rcu) 4035 { 4036 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); 4037 4038 ida_destroy(&pool->worker_ida); 4039 free_workqueue_attrs(pool->attrs); 4040 kfree(pool); 4041 } 4042 4043 /** 4044 * put_unbound_pool - put a worker_pool 4045 * @pool: worker_pool to put 4046 * 4047 * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU 4048 * safe manner. get_unbound_pool() calls this function on its failure path 4049 * and this function should be able to release pools which went through, 4050 * successfully or not, init_worker_pool(). 4051 * 4052 * Should be called with wq_pool_mutex held. 4053 */ 4054 static void put_unbound_pool(struct worker_pool *pool) 4055 { 4056 DECLARE_COMPLETION_ONSTACK(detach_completion); 4057 struct worker *worker; 4058 LIST_HEAD(cull_list); 4059 4060 lockdep_assert_held(&wq_pool_mutex); 4061 4062 if (--pool->refcnt) 4063 return; 4064 4065 /* sanity checks */ 4066 if (WARN_ON(!(pool->cpu < 0)) || 4067 WARN_ON(!list_empty(&pool->worklist))) 4068 return; 4069 4070 /* release id and unhash */ 4071 if (pool->id >= 0) 4072 idr_remove(&worker_pool_idr, pool->id); 4073 hash_del(&pool->hash_node); 4074 4075 /* 4076 * Become the manager and destroy all workers. This prevents 4077 * @pool's workers from blocking on attach_mutex. 
We're the last 4078 * manager and @pool gets freed with the flag set. 4079 * 4080 * Having a concurrent manager is quite unlikely to happen as we can 4081 * only get here with 4082 * pwq->refcnt == pool->refcnt == 0 4083 * which implies no work queued to the pool, which implies no worker can 4084 * become the manager. However a worker could have taken the role of 4085 * manager before the refcnts dropped to 0, since maybe_create_worker() 4086 * drops pool->lock 4087 */ 4088 while (true) { 4089 rcuwait_wait_event(&manager_wait, 4090 !(pool->flags & POOL_MANAGER_ACTIVE), 4091 TASK_UNINTERRUPTIBLE); 4092 4093 mutex_lock(&wq_pool_attach_mutex); 4094 raw_spin_lock_irq(&pool->lock); 4095 if (!(pool->flags & POOL_MANAGER_ACTIVE)) { 4096 pool->flags |= POOL_MANAGER_ACTIVE; 4097 break; 4098 } 4099 raw_spin_unlock_irq(&pool->lock); 4100 mutex_unlock(&wq_pool_attach_mutex); 4101 } 4102 4103 while ((worker = first_idle_worker(pool))) 4104 set_worker_dying(worker, &cull_list); 4105 WARN_ON(pool->nr_workers || pool->nr_idle); 4106 raw_spin_unlock_irq(&pool->lock); 4107 4108 wake_dying_workers(&cull_list); 4109 4110 if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers)) 4111 pool->detach_completion = &detach_completion; 4112 mutex_unlock(&wq_pool_attach_mutex); 4113 4114 if (pool->detach_completion) 4115 wait_for_completion(pool->detach_completion); 4116 4117 /* shut down the timers */ 4118 del_timer_sync(&pool->idle_timer); 4119 cancel_work_sync(&pool->idle_cull_work); 4120 del_timer_sync(&pool->mayday_timer); 4121 4122 /* RCU protected to allow dereferences from get_work_pool() */ 4123 call_rcu(&pool->rcu, rcu_free_pool); 4124 } 4125 4126 /** 4127 * get_unbound_pool - get a worker_pool with the specified attributes 4128 * @attrs: the attributes of the worker_pool to get 4129 * 4130 * Obtain a worker_pool which has the same attributes as @attrs, bump the 4131 * reference count and return it. If there already is a matching 4132 * worker_pool, it will be used; otherwise, this function attempts to 4133 * create a new one. 4134 * 4135 * Should be called with wq_pool_mutex held. 4136 * 4137 * Return: On success, a worker_pool with the same attributes as @attrs. 4138 * On failure, %NULL. 4139 */ 4140 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) 4141 { 4142 struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA]; 4143 u32 hash = wqattrs_hash(attrs); 4144 struct worker_pool *pool; 4145 int pod, node = NUMA_NO_NODE; 4146 4147 lockdep_assert_held(&wq_pool_mutex); 4148 4149 /* do we already have a matching pool? 
*/ 4150 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { 4151 if (wqattrs_equal(pool->attrs, attrs)) { 4152 pool->refcnt++; 4153 return pool; 4154 } 4155 } 4156 4157 /* If __pod_cpumask is contained inside a NUMA pod, that's our node */ 4158 for (pod = 0; pod < pt->nr_pods; pod++) { 4159 if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) { 4160 node = pt->pod_node[pod]; 4161 break; 4162 } 4163 } 4164 4165 /* nope, create a new one */ 4166 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node); 4167 if (!pool || init_worker_pool(pool) < 0) 4168 goto fail; 4169 4170 pool->node = node; 4171 copy_workqueue_attrs(pool->attrs, attrs); 4172 wqattrs_clear_for_pool(pool->attrs); 4173 4174 if (worker_pool_assign_id(pool) < 0) 4175 goto fail; 4176 4177 /* create and start the initial worker */ 4178 if (wq_online && !create_worker(pool)) 4179 goto fail; 4180 4181 /* install */ 4182 hash_add(unbound_pool_hash, &pool->hash_node, hash); 4183 4184 return pool; 4185 fail: 4186 if (pool) 4187 put_unbound_pool(pool); 4188 return NULL; 4189 } 4190 4191 static void rcu_free_pwq(struct rcu_head *rcu) 4192 { 4193 kmem_cache_free(pwq_cache, 4194 container_of(rcu, struct pool_workqueue, rcu)); 4195 } 4196 4197 /* 4198 * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero 4199 * refcnt and needs to be destroyed. 4200 */ 4201 static void pwq_release_workfn(struct kthread_work *work) 4202 { 4203 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, 4204 release_work); 4205 struct workqueue_struct *wq = pwq->wq; 4206 struct worker_pool *pool = pwq->pool; 4207 bool is_last = false; 4208 4209 /* 4210 * When @pwq is not linked, it doesn't hold any reference to the 4211 * @wq, and @wq is invalid to access. 4212 */ 4213 if (!list_empty(&pwq->pwqs_node)) { 4214 mutex_lock(&wq->mutex); 4215 list_del_rcu(&pwq->pwqs_node); 4216 is_last = list_empty(&wq->pwqs); 4217 mutex_unlock(&wq->mutex); 4218 } 4219 4220 if (wq->flags & WQ_UNBOUND) { 4221 mutex_lock(&wq_pool_mutex); 4222 put_unbound_pool(pool); 4223 mutex_unlock(&wq_pool_mutex); 4224 } 4225 4226 call_rcu(&pwq->rcu, rcu_free_pwq); 4227 4228 /* 4229 * If we're the last pwq going away, @wq is already dead and no one 4230 * is gonna access it anymore. Schedule RCU free. 
4231 */ 4232 if (is_last) { 4233 wq_unregister_lockdep(wq); 4234 call_rcu(&wq->rcu, rcu_free_wq); 4235 } 4236 } 4237 4238 /* initialize newly allocated @pwq which is associated with @wq and @pool */ 4239 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, 4240 struct worker_pool *pool) 4241 { 4242 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); 4243 4244 memset(pwq, 0, sizeof(*pwq)); 4245 4246 pwq->pool = pool; 4247 pwq->wq = wq; 4248 pwq->flush_color = -1; 4249 pwq->refcnt = 1; 4250 INIT_LIST_HEAD(&pwq->inactive_works); 4251 INIT_LIST_HEAD(&pwq->pwqs_node); 4252 INIT_LIST_HEAD(&pwq->mayday_node); 4253 kthread_init_work(&pwq->release_work, pwq_release_workfn); 4254 } 4255 4256 /* sync @pwq with the current state of its associated wq and link it */ 4257 static void link_pwq(struct pool_workqueue *pwq) 4258 { 4259 struct workqueue_struct *wq = pwq->wq; 4260 4261 lockdep_assert_held(&wq->mutex); 4262 4263 /* may be called multiple times, ignore if already linked */ 4264 if (!list_empty(&pwq->pwqs_node)) 4265 return; 4266 4267 /* set the matching work_color */ 4268 pwq->work_color = wq->work_color; 4269 4270 /* link in @pwq */ 4271 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); 4272 } 4273 4274 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */ 4275 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, 4276 const struct workqueue_attrs *attrs) 4277 { 4278 struct worker_pool *pool; 4279 struct pool_workqueue *pwq; 4280 4281 lockdep_assert_held(&wq_pool_mutex); 4282 4283 pool = get_unbound_pool(attrs); 4284 if (!pool) 4285 return NULL; 4286 4287 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); 4288 if (!pwq) { 4289 put_unbound_pool(pool); 4290 return NULL; 4291 } 4292 4293 init_pwq(pwq, wq, pool); 4294 return pwq; 4295 } 4296 4297 /** 4298 * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod 4299 * @attrs: the wq_attrs of the default pwq of the target workqueue 4300 * @cpu: the target CPU 4301 * @cpu_going_down: if >= 0, the CPU to consider as offline 4302 * 4303 * Calculate the cpumask a workqueue with @attrs should use on @pod. If 4304 * @cpu_going_down is >= 0, that cpu is considered offline during calculation. 4305 * The result is stored in @attrs->__pod_cpumask. 4306 * 4307 * If pod affinity is not enabled, @attrs->cpumask is always used. If enabled 4308 * and @pod has online CPUs requested by @attrs, the returned cpumask is the 4309 * intersection of the possible CPUs of @pod and @attrs->cpumask. 4310 * 4311 * The caller is responsible for ensuring that the cpumask of @pod stays stable. 4312 */ 4313 static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu, 4314 int cpu_going_down) 4315 { 4316 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 4317 int pod = pt->cpu_pod[cpu]; 4318 4319 /* does @pod have any online CPUs @attrs wants? 
*/ 4320 cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask); 4321 cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask); 4322 if (cpu_going_down >= 0) 4323 cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask); 4324 4325 if (cpumask_empty(attrs->__pod_cpumask)) { 4326 cpumask_copy(attrs->__pod_cpumask, attrs->cpumask); 4327 return; 4328 } 4329 4330 /* yeap, return possible CPUs in @pod that @attrs wants */ 4331 cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]); 4332 4333 if (cpumask_empty(attrs->__pod_cpumask)) 4334 pr_warn_once("WARNING: workqueue cpumask: online intersect > " 4335 "possible intersect\n"); 4336 } 4337 4338 /* install @pwq into @wq and return the old pwq, @cpu < 0 for dfl_pwq */ 4339 static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq, 4340 int cpu, struct pool_workqueue *pwq) 4341 { 4342 struct pool_workqueue __rcu **slot = unbound_pwq_slot(wq, cpu); 4343 struct pool_workqueue *old_pwq; 4344 4345 lockdep_assert_held(&wq_pool_mutex); 4346 lockdep_assert_held(&wq->mutex); 4347 4348 /* link_pwq() can handle duplicate calls */ 4349 link_pwq(pwq); 4350 4351 old_pwq = rcu_access_pointer(*slot); 4352 rcu_assign_pointer(*slot, pwq); 4353 return old_pwq; 4354 } 4355 4356 /* context to store the prepared attrs & pwqs before applying */ 4357 struct apply_wqattrs_ctx { 4358 struct workqueue_struct *wq; /* target workqueue */ 4359 struct workqueue_attrs *attrs; /* attrs to apply */ 4360 struct list_head list; /* queued for batching commit */ 4361 struct pool_workqueue *dfl_pwq; 4362 struct pool_workqueue *pwq_tbl[]; 4363 }; 4364 4365 /* free the resources after success or abort */ 4366 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx) 4367 { 4368 if (ctx) { 4369 int cpu; 4370 4371 for_each_possible_cpu(cpu) 4372 put_pwq_unlocked(ctx->pwq_tbl[cpu]); 4373 put_pwq_unlocked(ctx->dfl_pwq); 4374 4375 free_workqueue_attrs(ctx->attrs); 4376 4377 kfree(ctx); 4378 } 4379 } 4380 4381 /* allocate the attrs and pwqs for later installation */ 4382 static struct apply_wqattrs_ctx * 4383 apply_wqattrs_prepare(struct workqueue_struct *wq, 4384 const struct workqueue_attrs *attrs, 4385 const cpumask_var_t unbound_cpumask) 4386 { 4387 struct apply_wqattrs_ctx *ctx; 4388 struct workqueue_attrs *new_attrs; 4389 int cpu; 4390 4391 lockdep_assert_held(&wq_pool_mutex); 4392 4393 if (WARN_ON(attrs->affn_scope < 0 || 4394 attrs->affn_scope >= WQ_AFFN_NR_TYPES)) 4395 return ERR_PTR(-EINVAL); 4396 4397 ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL); 4398 4399 new_attrs = alloc_workqueue_attrs(); 4400 if (!ctx || !new_attrs) 4401 goto out_free; 4402 4403 /* 4404 * If something goes wrong during CPU up/down, we'll fall back to 4405 * the default pwq covering whole @attrs->cpumask. Always create 4406 * it even if we don't use it immediately. 4407 */ 4408 copy_workqueue_attrs(new_attrs, attrs); 4409 wqattrs_actualize_cpumask(new_attrs, unbound_cpumask); 4410 cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask); 4411 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); 4412 if (!ctx->dfl_pwq) 4413 goto out_free; 4414 4415 for_each_possible_cpu(cpu) { 4416 if (new_attrs->ordered) { 4417 ctx->dfl_pwq->refcnt++; 4418 ctx->pwq_tbl[cpu] = ctx->dfl_pwq; 4419 } else { 4420 wq_calc_pod_cpumask(new_attrs, cpu, -1); 4421 ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs); 4422 if (!ctx->pwq_tbl[cpu]) 4423 goto out_free; 4424 } 4425 } 4426 4427 /* save the user configured attrs and sanitize it. 
*/ 
4428 copy_workqueue_attrs(new_attrs, attrs); 
4429 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask); 
4430 cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask); 
4431 ctx->attrs = new_attrs; 
4432 
4433 ctx->wq = wq; 
4434 return ctx; 
4435 
4436 out_free: 
4437 free_workqueue_attrs(new_attrs); 
4438 apply_wqattrs_cleanup(ctx); 
4439 return ERR_PTR(-ENOMEM); 
4440 } 
4441 
4442 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */ 
4443 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx) 
4444 { 
4445 int cpu; 
4446 
4447 /* all pwqs have been created successfully, let's install'em */ 
4448 mutex_lock(&ctx->wq->mutex); 
4449 
4450 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); 
4451 
4452 /* save the previous pwqs and install the new ones */ 
4453 for_each_possible_cpu(cpu) 
4454 ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu, 
4455 ctx->pwq_tbl[cpu]); 
4456 ctx->dfl_pwq = install_unbound_pwq(ctx->wq, -1, ctx->dfl_pwq); 
4457 
4458 mutex_unlock(&ctx->wq->mutex); 
4459 } 
4460 
4461 static void apply_wqattrs_lock(void) 
4462 { 
4463 /* CPUs should stay stable across pwq creations and installations */ 
4464 cpus_read_lock(); 
4465 mutex_lock(&wq_pool_mutex); 
4466 } 
4467 
4468 static void apply_wqattrs_unlock(void) 
4469 { 
4470 mutex_unlock(&wq_pool_mutex); 
4471 cpus_read_unlock(); 
4472 } 
4473 
4474 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, 
4475 const struct workqueue_attrs *attrs) 
4476 { 
4477 struct apply_wqattrs_ctx *ctx; 
4478 
4479 /* only unbound workqueues can change attributes */ 
4480 if (WARN_ON(!(wq->flags & WQ_UNBOUND))) 
4481 return -EINVAL; 
4482 
4483 /* creating multiple pwqs breaks ordering guarantee */ 
4484 if (!list_empty(&wq->pwqs)) { 
4485 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 
4486 return -EINVAL; 
4487 
4488 wq->flags &= ~__WQ_ORDERED; 
4489 } 
4490 
4491 ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask); 
4492 if (IS_ERR(ctx)) 
4493 return PTR_ERR(ctx); 
4494 
4495 /* the ctx has been prepared successfully, let's commit it */ 
4496 apply_wqattrs_commit(ctx); 
4497 apply_wqattrs_cleanup(ctx); 
4498 
4499 return 0; 
4500 } 
4501 
4502 /** 
4503 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue 
4504 * @wq: the target workqueue 
4505 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs() 
4506 * 
4507 * Apply @attrs to an unbound workqueue @wq. Unless disabled, this function maps 
4508 * a separate pwq to each CPU pod with possible CPUs in @attrs->cpumask so that 
4509 * work items are affine to the pod they were issued on. Older pwqs are released as 
4510 * in-flight work items finish. Note that a work item which repeatedly requeues 
4511 * itself back-to-back will stay on its current pwq. 
4512 * 
4513 * Performs GFP_KERNEL allocations. 
4514 * 
4515 * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock(). 
4516 * 
4517 * Return: 0 on success and -errno on failure.
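 *
 * A minimal sketch of the calling convention (my_unbound_wq and the chosen
 * nice value are hypothetical caller code):
 *
 *	attrs = alloc_workqueue_attrs();
 *	if (!attrs)
 *		return -ENOMEM;
 *	attrs->nice = -10;
 *	cpus_read_lock();
 *	ret = apply_workqueue_attrs(my_unbound_wq, attrs);
 *	cpus_read_unlock();
 *	free_workqueue_attrs(attrs);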
4518 */ 4519 int apply_workqueue_attrs(struct workqueue_struct *wq, 4520 const struct workqueue_attrs *attrs) 4521 { 4522 int ret; 4523 4524 lockdep_assert_cpus_held(); 4525 4526 mutex_lock(&wq_pool_mutex); 4527 ret = apply_workqueue_attrs_locked(wq, attrs); 4528 mutex_unlock(&wq_pool_mutex); 4529 4530 return ret; 4531 } 4532 4533 /** 4534 * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug 4535 * @wq: the target workqueue 4536 * @cpu: the CPU to update pool association for 4537 * @hotplug_cpu: the CPU coming up or going down 4538 * @online: whether @cpu is coming up or going down 4539 * 4540 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and 4541 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update pod affinity of 4542 * @wq accordingly. 4543 * 4544 * 4545 * If pod affinity can't be adjusted due to memory allocation failure, it falls 4546 * back to @wq->dfl_pwq which may not be optimal but is always correct. 4547 * 4548 * Note that when the last allowed CPU of a pod goes offline for a workqueue 4549 * with a cpumask spanning multiple pods, the workers which were already 4550 * executing the work items for the workqueue will lose their CPU affinity and 4551 * may execute on any CPU. This is similar to how per-cpu workqueues behave on 4552 * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's 4553 * responsibility to flush the work item from CPU_DOWN_PREPARE. 4554 */ 4555 static void wq_update_pod(struct workqueue_struct *wq, int cpu, 4556 int hotplug_cpu, bool online) 4557 { 4558 int off_cpu = online ? -1 : hotplug_cpu; 4559 struct pool_workqueue *old_pwq = NULL, *pwq; 4560 struct workqueue_attrs *target_attrs; 4561 4562 lockdep_assert_held(&wq_pool_mutex); 4563 4564 if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered) 4565 return; 4566 4567 /* 4568 * We don't wanna alloc/free wq_attrs for each wq for each CPU. 4569 * Let's use a preallocated one. The following buf is protected by 4570 * CPU hotplug exclusion. 4571 */ 4572 target_attrs = wq_update_pod_attrs_buf; 4573 4574 copy_workqueue_attrs(target_attrs, wq->unbound_attrs); 4575 wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask); 4576 4577 /* nothing to do if the target cpumask matches the current pwq */ 4578 wq_calc_pod_cpumask(target_attrs, cpu, off_cpu); 4579 if (wqattrs_equal(target_attrs, unbound_pwq(wq, cpu)->pool->attrs)) 4580 return; 4581 4582 /* create a new pwq */ 4583 pwq = alloc_unbound_pwq(wq, target_attrs); 4584 if (!pwq) { 4585 pr_warn("workqueue: allocation failed while updating CPU pod affinity of \"%s\"\n", 4586 wq->name); 4587 goto use_dfl_pwq; 4588 } 4589 4590 /* Install the new pwq. 
*/ 
4591 mutex_lock(&wq->mutex); 
4592 old_pwq = install_unbound_pwq(wq, cpu, pwq); 
4593 goto out_unlock; 
4594 
4595 use_dfl_pwq: 
4596 mutex_lock(&wq->mutex); 
4597 pwq = unbound_pwq(wq, -1); 
4598 raw_spin_lock_irq(&pwq->pool->lock); 
4599 get_pwq(pwq); 
4600 raw_spin_unlock_irq(&pwq->pool->lock); 
4601 old_pwq = install_unbound_pwq(wq, cpu, pwq); 
4602 out_unlock: 
4603 mutex_unlock(&wq->mutex); 
4604 put_pwq_unlocked(old_pwq); 
4605 } 
4606 
4607 static int alloc_and_link_pwqs(struct workqueue_struct *wq) 
4608 { 
4609 bool highpri = wq->flags & WQ_HIGHPRI; 
4610 int cpu, ret; 
4611 
4612 wq->cpu_pwq = alloc_percpu(struct pool_workqueue *); 
4613 if (!wq->cpu_pwq) 
4614 goto enomem; 
4615 
4616 if (!(wq->flags & WQ_UNBOUND)) { 
4617 for_each_possible_cpu(cpu) { 
4618 struct pool_workqueue **pwq_p = 
4619 per_cpu_ptr(wq->cpu_pwq, cpu); 
4620 struct worker_pool *pool = 
4621 &(per_cpu_ptr(cpu_worker_pools, cpu)[highpri]); 
4622 
4623 *pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, 
4624 pool->node); 
4625 if (!*pwq_p) 
4626 goto enomem; 
4627 
4628 init_pwq(*pwq_p, wq, pool); 
4629 
4630 mutex_lock(&wq->mutex); 
4631 link_pwq(*pwq_p); 
4632 mutex_unlock(&wq->mutex); 
4633 } 
4634 return 0; 
4635 } 
4636 
4637 cpus_read_lock(); 
4638 if (wq->flags & __WQ_ORDERED) { 
4639 struct pool_workqueue *dfl_pwq; 
4640 
4641 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); 
4642 /* there should only be a single pwq for the ordering guarantee */ 
4643 dfl_pwq = rcu_access_pointer(wq->dfl_pwq); 
4644 WARN(!ret && (wq->pwqs.next != &dfl_pwq->pwqs_node || 
4645 wq->pwqs.prev != &dfl_pwq->pwqs_node), 
4646 "ordering guarantee broken for workqueue %s\n", wq->name); 
4647 } else { 
4648 ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); 
4649 } 
4650 cpus_read_unlock(); 
4651 
4652 /* for unbound pwq, flushing the pwq_release_worker ensures that the 
4653 * pwq_release_workfn() completes before calling kfree(wq). 
4654 */ 
4655 if (ret) 
4656 kthread_flush_worker(pwq_release_worker); 
4657 
4658 return ret; 
4659 
4660 enomem: 
4661 if (wq->cpu_pwq) { 
4662 for_each_possible_cpu(cpu) { 
4663 struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); 
4664 
4665 if (pwq) 
4666 kmem_cache_free(pwq_cache, pwq); 
4667 } 
4668 free_percpu(wq->cpu_pwq); 
4669 wq->cpu_pwq = NULL; 
4670 } 
4671 return -ENOMEM; 
4672 } 
4673 
4674 static int wq_clamp_max_active(int max_active, unsigned int flags, 
4675 const char *name) 
4676 { 
4677 if (max_active < 1 || max_active > WQ_MAX_ACTIVE) 
4678 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n", 
4679 max_active, name, 1, WQ_MAX_ACTIVE); 
4680 
4681 return clamp_val(max_active, 1, WQ_MAX_ACTIVE); 
4682 } 
4683 
4684 /* 
4685 * Workqueues which may be used during memory reclaim should have a rescuer 
4686 * to guarantee forward progress.
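 *
 * For example, a driver whose completion work may be needed to make reclaim
 * progress would typically create its workqueue as (name is illustrative):
 *
 *	wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 0);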
4687 */ 4688 static int init_rescuer(struct workqueue_struct *wq) 4689 { 4690 struct worker *rescuer; 4691 int ret; 4692 4693 if (!(wq->flags & WQ_MEM_RECLAIM)) 4694 return 0; 4695 4696 rescuer = alloc_worker(NUMA_NO_NODE); 4697 if (!rescuer) { 4698 pr_err("workqueue: Failed to allocate a rescuer for wq \"%s\"\n", 4699 wq->name); 4700 return -ENOMEM; 4701 } 4702 4703 rescuer->rescue_wq = wq; 4704 rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name); 4705 if (IS_ERR(rescuer->task)) { 4706 ret = PTR_ERR(rescuer->task); 4707 pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe", 4708 wq->name, ERR_PTR(ret)); 4709 kfree(rescuer); 4710 return ret; 4711 } 4712 4713 wq->rescuer = rescuer; 4714 kthread_bind_mask(rescuer->task, cpu_possible_mask); 4715 wake_up_process(rescuer->task); 4716 4717 return 0; 4718 } 4719 4720 /** 4721 * wq_adjust_max_active - update a wq's max_active to the current setting 4722 * @wq: target workqueue 4723 * 4724 * If @wq isn't freezing, set @wq->max_active to the saved_max_active and 4725 * activate inactive work items accordingly. If @wq is freezing, clear 4726 * @wq->max_active to zero. 4727 */ 4728 static void wq_adjust_max_active(struct workqueue_struct *wq) 4729 { 4730 bool activated; 4731 4732 lockdep_assert_held(&wq->mutex); 4733 4734 if ((wq->flags & WQ_FREEZABLE) && workqueue_freezing) { 4735 WRITE_ONCE(wq->max_active, 0); 4736 return; 4737 } 4738 4739 if (wq->max_active == wq->saved_max_active) 4740 return; 4741 4742 /* 4743 * Update @wq->max_active and then kick inactive work items if more 4744 * active work items are allowed. This doesn't break work item ordering 4745 * because new work items are always queued behind existing inactive 4746 * work items if there are any. 4747 */ 4748 WRITE_ONCE(wq->max_active, wq->saved_max_active); 4749 4750 /* 4751 * Round-robin through pwq's activating the first inactive work item 4752 * until max_active is filled. 4753 */ 4754 do { 4755 struct pool_workqueue *pwq; 4756 4757 activated = false; 4758 for_each_pwq(pwq, wq) { 4759 unsigned long flags; 4760 4761 /* can be called during early boot w/ irq disabled */ 4762 raw_spin_lock_irqsave(&pwq->pool->lock, flags); 4763 if (pwq_activate_first_inactive(pwq)) { 4764 activated = true; 4765 kick_pool(pwq->pool); 4766 } 4767 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); 4768 } 4769 } while (activated); 4770 } 4771 4772 __printf(1, 4) 4773 struct workqueue_struct *alloc_workqueue(const char *fmt, 4774 unsigned int flags, 4775 int max_active, ...) 4776 { 4777 va_list args; 4778 struct workqueue_struct *wq; 4779 int len; 4780 4781 /* 4782 * Unbound && max_active == 1 used to imply ordered, which is no longer 4783 * the case on many machines due to per-pod pools. While 4784 * alloc_ordered_workqueue() is the right way to create an ordered 4785 * workqueue, keep the previous behavior to avoid subtle breakages. 
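	 *
	 * IOW, callers that actually need strict ordering should use something
	 * like (name is illustrative):
	 *
	 *	wq = alloc_ordered_workqueue("my_ordered_wq", 0);
	 *
	 * instead of relying on WQ_UNBOUND with max_active == 1.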
4786 */ 4787 if ((flags & WQ_UNBOUND) && max_active == 1) 4788 flags |= __WQ_ORDERED; 4789 4790 /* see the comment above the definition of WQ_POWER_EFFICIENT */ 4791 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) 4792 flags |= WQ_UNBOUND; 4793 4794 /* allocate wq and format name */ 4795 wq = kzalloc(sizeof(*wq), GFP_KERNEL); 4796 if (!wq) 4797 return NULL; 4798 4799 if (flags & WQ_UNBOUND) { 4800 wq->unbound_attrs = alloc_workqueue_attrs(); 4801 if (!wq->unbound_attrs) 4802 goto err_free_wq; 4803 } 4804 4805 va_start(args, max_active); 4806 len = vsnprintf(wq->name, sizeof(wq->name), fmt, args); 4807 va_end(args); 4808 4809 if (len >= WQ_NAME_LEN) 4810 pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n", wq->name); 4811 4812 max_active = max_active ?: WQ_DFL_ACTIVE; 4813 max_active = wq_clamp_max_active(max_active, flags, wq->name); 4814 4815 /* init wq */ 4816 wq->flags = flags; 4817 wq->max_active = max_active; 4818 wq->saved_max_active = max_active; 4819 mutex_init(&wq->mutex); 4820 atomic_set(&wq->nr_pwqs_to_flush, 0); 4821 INIT_LIST_HEAD(&wq->pwqs); 4822 INIT_LIST_HEAD(&wq->flusher_queue); 4823 INIT_LIST_HEAD(&wq->flusher_overflow); 4824 INIT_LIST_HEAD(&wq->maydays); 4825 4826 wq_init_lockdep(wq); 4827 INIT_LIST_HEAD(&wq->list); 4828 4829 if (alloc_and_link_pwqs(wq) < 0) 4830 goto err_unreg_lockdep; 4831 4832 if (wq_online && init_rescuer(wq) < 0) 4833 goto err_destroy; 4834 4835 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) 4836 goto err_destroy; 4837 4838 /* 4839 * wq_pool_mutex protects global freeze state and workqueues list. 4840 * Grab it, adjust max_active and add the new @wq to workqueues 4841 * list. 4842 */ 4843 mutex_lock(&wq_pool_mutex); 4844 4845 mutex_lock(&wq->mutex); 4846 wq_adjust_max_active(wq); 4847 mutex_unlock(&wq->mutex); 4848 4849 list_add_tail_rcu(&wq->list, &workqueues); 4850 4851 mutex_unlock(&wq_pool_mutex); 4852 4853 return wq; 4854 4855 err_unreg_lockdep: 4856 wq_unregister_lockdep(wq); 4857 wq_free_lockdep(wq); 4858 err_free_wq: 4859 free_workqueue_attrs(wq->unbound_attrs); 4860 kfree(wq); 4861 return NULL; 4862 err_destroy: 4863 destroy_workqueue(wq); 4864 return NULL; 4865 } 4866 EXPORT_SYMBOL_GPL(alloc_workqueue); 4867 4868 static bool pwq_busy(struct pool_workqueue *pwq) 4869 { 4870 int i; 4871 4872 for (i = 0; i < WORK_NR_COLORS; i++) 4873 if (pwq->nr_in_flight[i]) 4874 return true; 4875 4876 if ((pwq != rcu_access_pointer(pwq->wq->dfl_pwq)) && (pwq->refcnt > 1)) 4877 return true; 4878 if (!pwq_is_empty(pwq)) 4879 return true; 4880 4881 return false; 4882 } 4883 4884 /** 4885 * destroy_workqueue - safely terminate a workqueue 4886 * @wq: target workqueue 4887 * 4888 * Safely destroy a workqueue. All work currently pending will be done first. 4889 */ 4890 void destroy_workqueue(struct workqueue_struct *wq) 4891 { 4892 struct pool_workqueue *pwq; 4893 int cpu; 4894 4895 /* 4896 * Remove it from sysfs first so that sanity check failure doesn't 4897 * lead to sysfs name conflicts. 
4898 */ 4899 workqueue_sysfs_unregister(wq); 4900 4901 /* mark the workqueue destruction is in progress */ 4902 mutex_lock(&wq->mutex); 4903 wq->flags |= __WQ_DESTROYING; 4904 mutex_unlock(&wq->mutex); 4905 4906 /* drain it before proceeding with destruction */ 4907 drain_workqueue(wq); 4908 4909 /* kill rescuer, if sanity checks fail, leave it w/o rescuer */ 4910 if (wq->rescuer) { 4911 struct worker *rescuer = wq->rescuer; 4912 4913 /* this prevents new queueing */ 4914 raw_spin_lock_irq(&wq_mayday_lock); 4915 wq->rescuer = NULL; 4916 raw_spin_unlock_irq(&wq_mayday_lock); 4917 4918 /* rescuer will empty maydays list before exiting */ 4919 kthread_stop(rescuer->task); 4920 kfree(rescuer); 4921 } 4922 4923 /* 4924 * Sanity checks - grab all the locks so that we wait for all 4925 * in-flight operations which may do put_pwq(). 4926 */ 4927 mutex_lock(&wq_pool_mutex); 4928 mutex_lock(&wq->mutex); 4929 for_each_pwq(pwq, wq) { 4930 raw_spin_lock_irq(&pwq->pool->lock); 4931 if (WARN_ON(pwq_busy(pwq))) { 4932 pr_warn("%s: %s has the following busy pwq\n", 4933 __func__, wq->name); 4934 show_pwq(pwq); 4935 raw_spin_unlock_irq(&pwq->pool->lock); 4936 mutex_unlock(&wq->mutex); 4937 mutex_unlock(&wq_pool_mutex); 4938 show_one_workqueue(wq); 4939 return; 4940 } 4941 raw_spin_unlock_irq(&pwq->pool->lock); 4942 } 4943 mutex_unlock(&wq->mutex); 4944 4945 /* 4946 * wq list is used to freeze wq, remove from list after 4947 * flushing is complete in case freeze races us. 4948 */ 4949 list_del_rcu(&wq->list); 4950 mutex_unlock(&wq_pool_mutex); 4951 4952 /* 4953 * We're the sole accessor of @wq. Directly access cpu_pwq and dfl_pwq 4954 * to put the base refs. @wq will be auto-destroyed from the last 4955 * pwq_put. RCU read lock prevents @wq from going away from under us. 4956 */ 4957 rcu_read_lock(); 4958 4959 for_each_possible_cpu(cpu) { 4960 put_pwq_unlocked(unbound_pwq(wq, cpu)); 4961 RCU_INIT_POINTER(*unbound_pwq_slot(wq, cpu), NULL); 4962 } 4963 4964 put_pwq_unlocked(unbound_pwq(wq, -1)); 4965 RCU_INIT_POINTER(*unbound_pwq_slot(wq, -1), NULL); 4966 4967 rcu_read_unlock(); 4968 } 4969 EXPORT_SYMBOL_GPL(destroy_workqueue); 4970 4971 /** 4972 * workqueue_set_max_active - adjust max_active of a workqueue 4973 * @wq: target workqueue 4974 * @max_active: new max_active value. 4975 * 4976 * Set max_active of @wq to @max_active. 4977 * 4978 * CONTEXT: 4979 * Don't call from IRQ context. 4980 */ 4981 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) 4982 { 4983 /* disallow meddling with max_active for ordered workqueues */ 4984 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 4985 return; 4986 4987 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 4988 4989 mutex_lock(&wq->mutex); 4990 4991 wq->flags &= ~__WQ_ORDERED; 4992 wq->saved_max_active = max_active; 4993 wq_adjust_max_active(wq); 4994 4995 mutex_unlock(&wq->mutex); 4996 } 4997 EXPORT_SYMBOL_GPL(workqueue_set_max_active); 4998 4999 /** 5000 * current_work - retrieve %current task's work struct 5001 * 5002 * Determine if %current task is a workqueue worker and what it's working on. 5003 * Useful to find out the context that the %current task is running in. 5004 * 5005 * Return: work struct if %current task is a workqueue worker, %NULL otherwise. 5006 */ 5007 struct work_struct *current_work(void) 5008 { 5009 struct worker *worker = current_wq_worker(); 5010 5011 return worker ? 
worker->current_work : NULL; 
5012 } 
5013 EXPORT_SYMBOL(current_work); 
5014 
5015 /** 
5016 * current_is_workqueue_rescuer - is %current workqueue rescuer? 
5017 * 
5018 * Determine whether %current is a workqueue rescuer. Can be used from 
5019 * work functions to determine whether it's being run off the rescuer task. 
5020 * 
5021 * Return: %true if %current is a workqueue rescuer. %false otherwise. 
5022 */ 
5023 bool current_is_workqueue_rescuer(void) 
5024 { 
5025 struct worker *worker = current_wq_worker(); 
5026 
5027 return worker && worker->rescue_wq; 
5028 } 
5029 
5030 /** 
5031 * workqueue_congested - test whether a workqueue is congested 
5032 * @cpu: CPU in question 
5033 * @wq: target workqueue 
5034 * 
5035 * Test whether @wq's cpu workqueue for @cpu is congested. There is 
5036 * no synchronization around this function and the test result is 
5037 * unreliable and only useful as advisory hints or for debugging. 
5038 * 
5039 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU. 
5040 * 
5041 * With the exception of ordered workqueues, all workqueues have per-cpu 
5042 * pool_workqueues, each with its own congested state. A workqueue being 
5043 * congested on one CPU doesn't mean that the workqueue is congested on any 
5044 * other CPUs. 
5045 * 
5046 * Return: 
5047 * %true if congested, %false otherwise. 
5048 */ 
5049 bool workqueue_congested(int cpu, struct workqueue_struct *wq) 
5050 { 
5051 struct pool_workqueue *pwq; 
5052 bool ret; 
5053 
5054 rcu_read_lock(); 
5055 preempt_disable(); 
5056 
5057 if (cpu == WORK_CPU_UNBOUND) 
5058 cpu = smp_processor_id(); 
5059 
5060 pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); 
5061 ret = !list_empty(&pwq->inactive_works); 
5062 
5063 preempt_enable(); 
5064 rcu_read_unlock(); 
5065 
5066 return ret; 
5067 } 
5068 EXPORT_SYMBOL_GPL(workqueue_congested); 
5069 
5070 /** 
5071 * work_busy - test whether a work is currently pending or running 
5072 * @work: the work to be tested 
5073 * 
5074 * Test whether @work is currently pending or running. There is no 
5075 * synchronization around this function and the test result is 
5076 * unreliable and only useful as advisory hints or for debugging. 
5077 * 
5078 * Return: 
5079 * OR'd bitmask of WORK_BUSY_* bits. 
5080 */ 
5081 unsigned int work_busy(struct work_struct *work) 
5082 { 
5083 struct worker_pool *pool; 
5084 unsigned long flags; 
5085 unsigned int ret = 0; 
5086 
5087 if (work_pending(work)) 
5088 ret |= WORK_BUSY_PENDING; 
5089 
5090 rcu_read_lock(); 
5091 pool = get_work_pool(work); 
5092 if (pool) { 
5093 raw_spin_lock_irqsave(&pool->lock, flags); 
5094 if (find_worker_executing_work(pool, work)) 
5095 ret |= WORK_BUSY_RUNNING; 
5096 raw_spin_unlock_irqrestore(&pool->lock, flags); 
5097 } 
5098 rcu_read_unlock(); 
5099 
5100 return ret; 
5101 } 
5102 EXPORT_SYMBOL_GPL(work_busy); 
5103 
5104 /** 
5105 * set_worker_desc - set description for the current work item 
5106 * @fmt: printf-style format string 
5107 * @...: arguments for the format string 
5108 * 
5109 * This function can be called by a running work function to describe what 
5110 * the work item is about. If the worker task gets dumped, this 
5111 * information will be printed out together to help debugging. The 
5112 * description can be at most WORKER_DESC_LEN including the trailing '\0'. 
5113 */ 
5114 void set_worker_desc(const char *fmt, ...)
5115 { 5116 struct worker *worker = current_wq_worker(); 5117 va_list args; 5118 5119 if (worker) { 5120 va_start(args, fmt); 5121 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); 5122 va_end(args); 5123 } 5124 } 5125 EXPORT_SYMBOL_GPL(set_worker_desc); 5126 5127 /** 5128 * print_worker_info - print out worker information and description 5129 * @log_lvl: the log level to use when printing 5130 * @task: target task 5131 * 5132 * If @task is a worker and currently executing a work item, print out the 5133 * name of the workqueue being serviced and worker description set with 5134 * set_worker_desc() by the currently executing work item. 5135 * 5136 * This function can be safely called on any task as long as the 5137 * task_struct itself is accessible. While safe, this function isn't 5138 * synchronized and may print out mixups or garbages of limited length. 5139 */ 5140 void print_worker_info(const char *log_lvl, struct task_struct *task) 5141 { 5142 work_func_t *fn = NULL; 5143 char name[WQ_NAME_LEN] = { }; 5144 char desc[WORKER_DESC_LEN] = { }; 5145 struct pool_workqueue *pwq = NULL; 5146 struct workqueue_struct *wq = NULL; 5147 struct worker *worker; 5148 5149 if (!(task->flags & PF_WQ_WORKER)) 5150 return; 5151 5152 /* 5153 * This function is called without any synchronization and @task 5154 * could be in any state. Be careful with dereferences. 5155 */ 5156 worker = kthread_probe_data(task); 5157 5158 /* 5159 * Carefully copy the associated workqueue's workfn, name and desc. 5160 * Keep the original last '\0' in case the original is garbage. 5161 */ 5162 copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn)); 5163 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); 5164 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq)); 5165 copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1); 5166 copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1); 5167 5168 if (fn || name[0] || desc[0]) { 5169 printk("%sWorkqueue: %s %ps", log_lvl, name, fn); 5170 if (strcmp(name, desc)) 5171 pr_cont(" (%s)", desc); 5172 pr_cont("\n"); 5173 } 5174 } 5175 5176 static void pr_cont_pool_info(struct worker_pool *pool) 5177 { 5178 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); 5179 if (pool->node != NUMA_NO_NODE) 5180 pr_cont(" node=%d", pool->node); 5181 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); 5182 } 5183 5184 struct pr_cont_work_struct { 5185 bool comma; 5186 work_func_t func; 5187 long ctr; 5188 }; 5189 5190 static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp) 5191 { 5192 if (!pcwsp->ctr) 5193 goto out_record; 5194 if (func == pcwsp->func) { 5195 pcwsp->ctr++; 5196 return; 5197 } 5198 if (pcwsp->ctr == 1) 5199 pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func); 5200 else 5201 pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func); 5202 pcwsp->ctr = 0; 5203 out_record: 5204 if ((long)func == -1L) 5205 return; 5206 pcwsp->comma = comma; 5207 pcwsp->func = func; 5208 pcwsp->ctr = 1; 5209 } 5210 5211 static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp) 5212 { 5213 if (work->func == wq_barrier_func) { 5214 struct wq_barrier *barr; 5215 5216 barr = container_of(work, struct wq_barrier, work); 5217 5218 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp); 5219 pr_cont("%s BAR(%d)", comma ? 
"," : "", 5220 task_pid_nr(barr->task)); 5221 } else { 5222 if (!comma) 5223 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp); 5224 pr_cont_work_flush(comma, work->func, pcwsp); 5225 } 5226 } 5227 5228 static void show_pwq(struct pool_workqueue *pwq) 5229 { 5230 struct pr_cont_work_struct pcws = { .ctr = 0, }; 5231 struct worker_pool *pool = pwq->pool; 5232 struct work_struct *work; 5233 struct worker *worker; 5234 bool has_in_flight = false, has_pending = false; 5235 int bkt; 5236 5237 pr_info(" pwq %d:", pool->id); 5238 pr_cont_pool_info(pool); 5239 5240 pr_cont(" active=%d refcnt=%d%s\n", 5241 pwq->nr_active, pwq->refcnt, 5242 !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); 5243 5244 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 5245 if (worker->current_pwq == pwq) { 5246 has_in_flight = true; 5247 break; 5248 } 5249 } 5250 if (has_in_flight) { 5251 bool comma = false; 5252 5253 pr_info(" in-flight:"); 5254 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 5255 if (worker->current_pwq != pwq) 5256 continue; 5257 5258 pr_cont("%s %d%s:%ps", comma ? "," : "", 5259 task_pid_nr(worker->task), 5260 worker->rescue_wq ? "(RESCUER)" : "", 5261 worker->current_func); 5262 list_for_each_entry(work, &worker->scheduled, entry) 5263 pr_cont_work(false, work, &pcws); 5264 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 5265 comma = true; 5266 } 5267 pr_cont("\n"); 5268 } 5269 5270 list_for_each_entry(work, &pool->worklist, entry) { 5271 if (get_work_pwq(work) == pwq) { 5272 has_pending = true; 5273 break; 5274 } 5275 } 5276 if (has_pending) { 5277 bool comma = false; 5278 5279 pr_info(" pending:"); 5280 list_for_each_entry(work, &pool->worklist, entry) { 5281 if (get_work_pwq(work) != pwq) 5282 continue; 5283 5284 pr_cont_work(comma, work, &pcws); 5285 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 5286 } 5287 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 5288 pr_cont("\n"); 5289 } 5290 5291 if (!list_empty(&pwq->inactive_works)) { 5292 bool comma = false; 5293 5294 pr_info(" inactive:"); 5295 list_for_each_entry(work, &pwq->inactive_works, entry) { 5296 pr_cont_work(comma, work, &pcws); 5297 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 5298 } 5299 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 5300 pr_cont("\n"); 5301 } 5302 } 5303 5304 /** 5305 * show_one_workqueue - dump state of specified workqueue 5306 * @wq: workqueue whose state will be printed 5307 */ 5308 void show_one_workqueue(struct workqueue_struct *wq) 5309 { 5310 struct pool_workqueue *pwq; 5311 bool idle = true; 5312 unsigned long flags; 5313 5314 for_each_pwq(pwq, wq) { 5315 if (!pwq_is_empty(pwq)) { 5316 idle = false; 5317 break; 5318 } 5319 } 5320 if (idle) /* Nothing to print for idle workqueue */ 5321 return; 5322 5323 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); 5324 5325 for_each_pwq(pwq, wq) { 5326 raw_spin_lock_irqsave(&pwq->pool->lock, flags); 5327 if (!pwq_is_empty(pwq)) { 5328 /* 5329 * Defer printing to avoid deadlocks in console 5330 * drivers that queue work while holding locks 5331 * also taken in their write paths. 5332 */ 5333 printk_deferred_enter(); 5334 show_pwq(pwq); 5335 printk_deferred_exit(); 5336 } 5337 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); 5338 /* 5339 * We could be printing a lot from atomic context, e.g. 5340 * sysrq-t -> show_all_workqueues(). Avoid triggering 5341 * hard lockup. 
5342 */ 5343 touch_nmi_watchdog(); 5344 } 5345 5346 } 5347 5348 /** 5349 * show_one_worker_pool - dump state of specified worker pool 5350 * @pool: worker pool whose state will be printed 5351 */ 5352 static void show_one_worker_pool(struct worker_pool *pool) 5353 { 5354 struct worker *worker; 5355 bool first = true; 5356 unsigned long flags; 5357 unsigned long hung = 0; 5358 5359 raw_spin_lock_irqsave(&pool->lock, flags); 5360 if (pool->nr_workers == pool->nr_idle) 5361 goto next_pool; 5362 5363 /* How long the first pending work is waiting for a worker. */ 5364 if (!list_empty(&pool->worklist)) 5365 hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000; 5366 5367 /* 5368 * Defer printing to avoid deadlocks in console drivers that 5369 * queue work while holding locks also taken in their write 5370 * paths. 5371 */ 5372 printk_deferred_enter(); 5373 pr_info("pool %d:", pool->id); 5374 pr_cont_pool_info(pool); 5375 pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers); 5376 if (pool->manager) 5377 pr_cont(" manager: %d", 5378 task_pid_nr(pool->manager->task)); 5379 list_for_each_entry(worker, &pool->idle_list, entry) { 5380 pr_cont(" %s%d", first ? "idle: " : "", 5381 task_pid_nr(worker->task)); 5382 first = false; 5383 } 5384 pr_cont("\n"); 5385 printk_deferred_exit(); 5386 next_pool: 5387 raw_spin_unlock_irqrestore(&pool->lock, flags); 5388 /* 5389 * We could be printing a lot from atomic context, e.g. 5390 * sysrq-t -> show_all_workqueues(). Avoid triggering 5391 * hard lockup. 5392 */ 5393 touch_nmi_watchdog(); 5394 5395 } 5396 5397 /** 5398 * show_all_workqueues - dump workqueue state 5399 * 5400 * Called from a sysrq handler and prints out all busy workqueues and pools. 5401 */ 5402 void show_all_workqueues(void) 5403 { 5404 struct workqueue_struct *wq; 5405 struct worker_pool *pool; 5406 int pi; 5407 5408 rcu_read_lock(); 5409 5410 pr_info("Showing busy workqueues and worker pools:\n"); 5411 5412 list_for_each_entry_rcu(wq, &workqueues, list) 5413 show_one_workqueue(wq); 5414 5415 for_each_pool(pool, pi) 5416 show_one_worker_pool(pool); 5417 5418 rcu_read_unlock(); 5419 } 5420 5421 /** 5422 * show_freezable_workqueues - dump freezable workqueue state 5423 * 5424 * Called from try_to_freeze_tasks() and prints out all freezable workqueues 5425 * still busy. 5426 */ 5427 void show_freezable_workqueues(void) 5428 { 5429 struct workqueue_struct *wq; 5430 5431 rcu_read_lock(); 5432 5433 pr_info("Showing freezable workqueues that are still busy:\n"); 5434 5435 list_for_each_entry_rcu(wq, &workqueues, list) { 5436 if (!(wq->flags & WQ_FREEZABLE)) 5437 continue; 5438 show_one_workqueue(wq); 5439 } 5440 5441 rcu_read_unlock(); 5442 } 5443 5444 /* used to show worker information through /proc/PID/{comm,stat,status} */ 5445 void wq_worker_comm(char *buf, size_t size, struct task_struct *task) 5446 { 5447 int off; 5448 5449 /* always show the actual comm */ 5450 off = strscpy(buf, task->comm, size); 5451 if (off < 0) 5452 return; 5453 5454 /* stabilize PF_WQ_WORKER and worker pool association */ 5455 mutex_lock(&wq_pool_attach_mutex); 5456 5457 if (task->flags & PF_WQ_WORKER) { 5458 struct worker *worker = kthread_data(task); 5459 struct worker_pool *pool = worker->pool; 5460 5461 if (pool) { 5462 raw_spin_lock_irq(&pool->lock); 5463 /* 5464 * ->desc tracks information (wq name or 5465 * set_worker_desc()) for the latest execution. If 5466 * current, prepend '+', otherwise '-'. 
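			 *
			 * For example, a worker on CPU 1 that is currently
			 * executing an item from the "events" workqueue may
			 * read "kworker/1:2+events" here, while the same
			 * worker reads "kworker/1:2-events" once it is back
			 * to waiting for work.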
5467 */ 5468 if (worker->desc[0] != '\0') { 5469 if (worker->current_work) 5470 scnprintf(buf + off, size - off, "+%s", 5471 worker->desc); 5472 else 5473 scnprintf(buf + off, size - off, "-%s", 5474 worker->desc); 5475 } 5476 raw_spin_unlock_irq(&pool->lock); 5477 } 5478 } 5479 5480 mutex_unlock(&wq_pool_attach_mutex); 5481 } 5482 5483 #ifdef CONFIG_SMP 5484 5485 /* 5486 * CPU hotplug. 5487 * 5488 * There are two challenges in supporting CPU hotplug. Firstly, there 5489 * are a lot of assumptions on strong associations among work, pwq and 5490 * pool which make migrating pending and scheduled works very 5491 * difficult to implement without impacting hot paths. Secondly, 5492 * worker pools serve mix of short, long and very long running works making 5493 * blocked draining impractical. 5494 * 5495 * This is solved by allowing the pools to be disassociated from the CPU 5496 * running as an unbound one and allowing it to be reattached later if the 5497 * cpu comes back online. 5498 */ 5499 5500 static void unbind_workers(int cpu) 5501 { 5502 struct worker_pool *pool; 5503 struct worker *worker; 5504 5505 for_each_cpu_worker_pool(pool, cpu) { 5506 mutex_lock(&wq_pool_attach_mutex); 5507 raw_spin_lock_irq(&pool->lock); 5508 5509 /* 5510 * We've blocked all attach/detach operations. Make all workers 5511 * unbound and set DISASSOCIATED. Before this, all workers 5512 * must be on the cpu. After this, they may become diasporas. 5513 * And the preemption disabled section in their sched callbacks 5514 * are guaranteed to see WORKER_UNBOUND since the code here 5515 * is on the same cpu. 5516 */ 5517 for_each_pool_worker(worker, pool) 5518 worker->flags |= WORKER_UNBOUND; 5519 5520 pool->flags |= POOL_DISASSOCIATED; 5521 5522 /* 5523 * The handling of nr_running in sched callbacks are disabled 5524 * now. Zap nr_running. After this, nr_running stays zero and 5525 * need_more_worker() and keep_working() are always true as 5526 * long as the worklist is not empty. This pool now behaves as 5527 * an unbound (in terms of concurrency management) pool which 5528 * are served by workers tied to the pool. 5529 */ 5530 pool->nr_running = 0; 5531 5532 /* 5533 * With concurrency management just turned off, a busy 5534 * worker blocking could lead to lengthy stalls. Kick off 5535 * unbound chain execution of currently pending work items. 5536 */ 5537 kick_pool(pool); 5538 5539 raw_spin_unlock_irq(&pool->lock); 5540 5541 for_each_pool_worker(worker, pool) 5542 unbind_worker(worker); 5543 5544 mutex_unlock(&wq_pool_attach_mutex); 5545 } 5546 } 5547 5548 /** 5549 * rebind_workers - rebind all workers of a pool to the associated CPU 5550 * @pool: pool of interest 5551 * 5552 * @pool->cpu is coming online. Rebind all workers to the CPU. 5553 */ 5554 static void rebind_workers(struct worker_pool *pool) 5555 { 5556 struct worker *worker; 5557 5558 lockdep_assert_held(&wq_pool_attach_mutex); 5559 5560 /* 5561 * Restore CPU affinity of all workers. As all idle workers should 5562 * be on the run-queue of the associated CPU before any local 5563 * wake-ups for concurrency management happen, restore CPU affinity 5564 * of all workers first and then clear UNBOUND. As we're called 5565 * from CPU_ONLINE, the following shouldn't fail. 
5566 */ 5567 for_each_pool_worker(worker, pool) { 5568 kthread_set_per_cpu(worker->task, pool->cpu); 5569 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, 5570 pool_allowed_cpus(pool)) < 0); 5571 } 5572 5573 raw_spin_lock_irq(&pool->lock); 5574 5575 pool->flags &= ~POOL_DISASSOCIATED; 5576 5577 for_each_pool_worker(worker, pool) { 5578 unsigned int worker_flags = worker->flags; 5579 5580 /* 5581 * We want to clear UNBOUND but can't directly call 5582 * worker_clr_flags() or adjust nr_running. Atomically 5583 * replace UNBOUND with another NOT_RUNNING flag REBOUND. 5584 * @worker will clear REBOUND using worker_clr_flags() when 5585 * it initiates the next execution cycle thus restoring 5586 * concurrency management. Note that when or whether 5587 * @worker clears REBOUND doesn't affect correctness. 5588 * 5589 * WRITE_ONCE() is necessary because @worker->flags may be 5590 * tested without holding any lock in 5591 * wq_worker_running(). Without it, NOT_RUNNING test may 5592 * fail incorrectly leading to premature concurrency 5593 * management operations. 5594 */ 5595 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); 5596 worker_flags |= WORKER_REBOUND; 5597 worker_flags &= ~WORKER_UNBOUND; 5598 WRITE_ONCE(worker->flags, worker_flags); 5599 } 5600 5601 raw_spin_unlock_irq(&pool->lock); 5602 } 5603 5604 /** 5605 * restore_unbound_workers_cpumask - restore cpumask of unbound workers 5606 * @pool: unbound pool of interest 5607 * @cpu: the CPU which is coming up 5608 * 5609 * An unbound pool may end up with a cpumask which doesn't have any online 5610 * CPUs. When a worker of such pool get scheduled, the scheduler resets 5611 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any 5612 * online CPU before, cpus_allowed of all its workers should be restored. 5613 */ 5614 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) 5615 { 5616 static cpumask_t cpumask; 5617 struct worker *worker; 5618 5619 lockdep_assert_held(&wq_pool_attach_mutex); 5620 5621 /* is @cpu allowed for @pool? 
*/ 5622 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) 5623 return; 5624 5625 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); 5626 5627 /* as we're called from CPU_ONLINE, the following shouldn't fail */ 5628 for_each_pool_worker(worker, pool) 5629 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); 5630 } 5631 5632 int workqueue_prepare_cpu(unsigned int cpu) 5633 { 5634 struct worker_pool *pool; 5635 5636 for_each_cpu_worker_pool(pool, cpu) { 5637 if (pool->nr_workers) 5638 continue; 5639 if (!create_worker(pool)) 5640 return -ENOMEM; 5641 } 5642 return 0; 5643 } 5644 5645 int workqueue_online_cpu(unsigned int cpu) 5646 { 5647 struct worker_pool *pool; 5648 struct workqueue_struct *wq; 5649 int pi; 5650 5651 mutex_lock(&wq_pool_mutex); 5652 5653 for_each_pool(pool, pi) { 5654 mutex_lock(&wq_pool_attach_mutex); 5655 5656 if (pool->cpu == cpu) 5657 rebind_workers(pool); 5658 else if (pool->cpu < 0) 5659 restore_unbound_workers_cpumask(pool, cpu); 5660 5661 mutex_unlock(&wq_pool_attach_mutex); 5662 } 5663 5664 /* update pod affinity of unbound workqueues */ 5665 list_for_each_entry(wq, &workqueues, list) { 5666 struct workqueue_attrs *attrs = wq->unbound_attrs; 5667 5668 if (attrs) { 5669 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 5670 int tcpu; 5671 5672 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) 5673 wq_update_pod(wq, tcpu, cpu, true); 5674 } 5675 } 5676 5677 mutex_unlock(&wq_pool_mutex); 5678 return 0; 5679 } 5680 5681 int workqueue_offline_cpu(unsigned int cpu) 5682 { 5683 struct workqueue_struct *wq; 5684 5685 /* unbinding per-cpu workers should happen on the local CPU */ 5686 if (WARN_ON(cpu != smp_processor_id())) 5687 return -1; 5688 5689 unbind_workers(cpu); 5690 5691 /* update pod affinity of unbound workqueues */ 5692 mutex_lock(&wq_pool_mutex); 5693 list_for_each_entry(wq, &workqueues, list) { 5694 struct workqueue_attrs *attrs = wq->unbound_attrs; 5695 5696 if (attrs) { 5697 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 5698 int tcpu; 5699 5700 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) 5701 wq_update_pod(wq, tcpu, cpu, false); 5702 } 5703 } 5704 mutex_unlock(&wq_pool_mutex); 5705 5706 return 0; 5707 } 5708 5709 struct work_for_cpu { 5710 struct work_struct work; 5711 long (*fn)(void *); 5712 void *arg; 5713 long ret; 5714 }; 5715 5716 static void work_for_cpu_fn(struct work_struct *work) 5717 { 5718 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); 5719 5720 wfc->ret = wfc->fn(wfc->arg); 5721 } 5722 5723 /** 5724 * work_on_cpu_key - run a function in thread context on a particular cpu 5725 * @cpu: the cpu to run on 5726 * @fn: the function to run 5727 * @arg: the function arg 5728 * @key: The lock class key for lock debugging purposes 5729 * 5730 * It is up to the caller to ensure that the cpu doesn't go offline. 5731 * The caller must not hold any locks which would prevent @fn from completing. 5732 * 5733 * Return: The value @fn returns. 
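 *
 * Illustrative sketch only (the callback and variable names below are
 * hypothetical, not part of this file):
 *
 *	static long read_local_cpu(void *arg)
 *	{
 *		return raw_smp_processor_id();
 *	}
 *
 *	long cpu = work_on_cpu(3, read_local_cpu, NULL);
 *
 * queues work_for_cpu_fn() on CPU 3 via schedule_work_on() and sleeps in
 * flush_work() until the callback has run there.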
5734 */ 5735 long work_on_cpu_key(int cpu, long (*fn)(void *), 5736 void *arg, struct lock_class_key *key) 5737 { 5738 struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 5739 5740 INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key); 5741 schedule_work_on(cpu, &wfc.work); 5742 flush_work(&wfc.work); 5743 destroy_work_on_stack(&wfc.work); 5744 return wfc.ret; 5745 } 5746 EXPORT_SYMBOL_GPL(work_on_cpu_key); 5747 5748 /** 5749 * work_on_cpu_safe_key - run a function in thread context on a particular cpu 5750 * @cpu: the cpu to run on 5751 * @fn: the function to run 5752 * @arg: the function argument 5753 * @key: The lock class key for lock debugging purposes 5754 * 5755 * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold 5756 * any locks which would prevent @fn from completing. 5757 * 5758 * Return: The value @fn returns. 5759 */ 5760 long work_on_cpu_safe_key(int cpu, long (*fn)(void *), 5761 void *arg, struct lock_class_key *key) 5762 { 5763 long ret = -ENODEV; 5764 5765 cpus_read_lock(); 5766 if (cpu_online(cpu)) 5767 ret = work_on_cpu_key(cpu, fn, arg, key); 5768 cpus_read_unlock(); 5769 return ret; 5770 } 5771 EXPORT_SYMBOL_GPL(work_on_cpu_safe_key); 5772 #endif /* CONFIG_SMP */ 5773 5774 #ifdef CONFIG_FREEZER 5775 5776 /** 5777 * freeze_workqueues_begin - begin freezing workqueues 5778 * 5779 * Start freezing workqueues. After this function returns, all freezable 5780 * workqueues will queue new works to their inactive_works list instead of 5781 * pool->worklist. 5782 * 5783 * CONTEXT: 5784 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 5785 */ 5786 void freeze_workqueues_begin(void) 5787 { 5788 struct workqueue_struct *wq; 5789 5790 mutex_lock(&wq_pool_mutex); 5791 5792 WARN_ON_ONCE(workqueue_freezing); 5793 workqueue_freezing = true; 5794 5795 list_for_each_entry(wq, &workqueues, list) { 5796 mutex_lock(&wq->mutex); 5797 wq_adjust_max_active(wq); 5798 mutex_unlock(&wq->mutex); 5799 } 5800 5801 mutex_unlock(&wq_pool_mutex); 5802 } 5803 5804 /** 5805 * freeze_workqueues_busy - are freezable workqueues still busy? 5806 * 5807 * Check whether freezing is complete. This function must be called 5808 * between freeze_workqueues_begin() and thaw_workqueues(). 5809 * 5810 * CONTEXT: 5811 * Grabs and releases wq_pool_mutex. 5812 * 5813 * Return: 5814 * %true if some freezable workqueues are still busy. %false if freezing 5815 * is complete. 5816 */ 5817 bool freeze_workqueues_busy(void) 5818 { 5819 bool busy = false; 5820 struct workqueue_struct *wq; 5821 struct pool_workqueue *pwq; 5822 5823 mutex_lock(&wq_pool_mutex); 5824 5825 WARN_ON_ONCE(!workqueue_freezing); 5826 5827 list_for_each_entry(wq, &workqueues, list) { 5828 if (!(wq->flags & WQ_FREEZABLE)) 5829 continue; 5830 /* 5831 * nr_active is monotonically decreasing. It's safe 5832 * to peek without lock. 5833 */ 5834 rcu_read_lock(); 5835 for_each_pwq(pwq, wq) { 5836 WARN_ON_ONCE(pwq->nr_active < 0); 5837 if (pwq->nr_active) { 5838 busy = true; 5839 rcu_read_unlock(); 5840 goto out_unlock; 5841 } 5842 } 5843 rcu_read_unlock(); 5844 } 5845 out_unlock: 5846 mutex_unlock(&wq_pool_mutex); 5847 return busy; 5848 } 5849 5850 /** 5851 * thaw_workqueues - thaw workqueues 5852 * 5853 * Thaw workqueues. Normal queueing is restored and all collected 5854 * frozen works are transferred to their respective pool worklists. 5855 * 5856 * CONTEXT: 5857 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 
5858 */ 5859 void thaw_workqueues(void) 5860 { 5861 struct workqueue_struct *wq; 5862 5863 mutex_lock(&wq_pool_mutex); 5864 5865 if (!workqueue_freezing) 5866 goto out_unlock; 5867 5868 workqueue_freezing = false; 5869 5870 /* restore max_active and repopulate worklist */ 5871 list_for_each_entry(wq, &workqueues, list) { 5872 mutex_lock(&wq->mutex); 5873 wq_adjust_max_active(wq); 5874 mutex_unlock(&wq->mutex); 5875 } 5876 5877 out_unlock: 5878 mutex_unlock(&wq_pool_mutex); 5879 } 5880 #endif /* CONFIG_FREEZER */ 5881 5882 static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask) 5883 { 5884 LIST_HEAD(ctxs); 5885 int ret = 0; 5886 struct workqueue_struct *wq; 5887 struct apply_wqattrs_ctx *ctx, *n; 5888 5889 lockdep_assert_held(&wq_pool_mutex); 5890 5891 list_for_each_entry(wq, &workqueues, list) { 5892 if (!(wq->flags & WQ_UNBOUND)) 5893 continue; 5894 /* creating multiple pwqs breaks ordering guarantee */ 5895 if (wq->flags & __WQ_ORDERED) 5896 continue; 5897 5898 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask); 5899 if (IS_ERR(ctx)) { 5900 ret = PTR_ERR(ctx); 5901 break; 5902 } 5903 5904 list_add_tail(&ctx->list, &ctxs); 5905 } 5906 5907 list_for_each_entry_safe(ctx, n, &ctxs, list) { 5908 if (!ret) 5909 apply_wqattrs_commit(ctx); 5910 apply_wqattrs_cleanup(ctx); 5911 } 5912 5913 if (!ret) { 5914 mutex_lock(&wq_pool_attach_mutex); 5915 cpumask_copy(wq_unbound_cpumask, unbound_cpumask); 5916 mutex_unlock(&wq_pool_attach_mutex); 5917 } 5918 return ret; 5919 } 5920 5921 /** 5922 * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask 5923 * @cpumask: the cpumask to set 5924 * 5925 * The low-level workqueues cpumask is a global cpumask that limits 5926 * the affinity of all unbound workqueues. This function check the @cpumask 5927 * and apply it to all unbound workqueues and updates all pwqs of them. 5928 * 5929 * Return: 0 - Success 5930 * -EINVAL - Invalid @cpumask 5931 * -ENOMEM - Failed to allocate memory for attrs or pwqs. 5932 */ 5933 int workqueue_set_unbound_cpumask(cpumask_var_t cpumask) 5934 { 5935 int ret = -EINVAL; 5936 5937 /* 5938 * Not excluding isolated cpus on purpose. 5939 * If the user wishes to include them, we allow that. 
5940 */ 5941 cpumask_and(cpumask, cpumask, cpu_possible_mask); 5942 if (!cpumask_empty(cpumask)) { 5943 apply_wqattrs_lock(); 5944 if (cpumask_equal(cpumask, wq_unbound_cpumask)) { 5945 ret = 0; 5946 goto out_unlock; 5947 } 5948 5949 ret = workqueue_apply_unbound_cpumask(cpumask); 5950 5951 out_unlock: 5952 apply_wqattrs_unlock(); 5953 } 5954 5955 return ret; 5956 } 5957 5958 static int parse_affn_scope(const char *val) 5959 { 5960 int i; 5961 5962 for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) { 5963 if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i]))) 5964 return i; 5965 } 5966 return -EINVAL; 5967 } 5968 5969 static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp) 5970 { 5971 struct workqueue_struct *wq; 5972 int affn, cpu; 5973 5974 affn = parse_affn_scope(val); 5975 if (affn < 0) 5976 return affn; 5977 if (affn == WQ_AFFN_DFL) 5978 return -EINVAL; 5979 5980 cpus_read_lock(); 5981 mutex_lock(&wq_pool_mutex); 5982 5983 wq_affn_dfl = affn; 5984 5985 list_for_each_entry(wq, &workqueues, list) { 5986 for_each_online_cpu(cpu) { 5987 wq_update_pod(wq, cpu, cpu, true); 5988 } 5989 } 5990 5991 mutex_unlock(&wq_pool_mutex); 5992 cpus_read_unlock(); 5993 5994 return 0; 5995 } 5996 5997 static int wq_affn_dfl_get(char *buffer, const struct kernel_param *kp) 5998 { 5999 return scnprintf(buffer, PAGE_SIZE, "%s\n", wq_affn_names[wq_affn_dfl]); 6000 } 6001 6002 static const struct kernel_param_ops wq_affn_dfl_ops = { 6003 .set = wq_affn_dfl_set, 6004 .get = wq_affn_dfl_get, 6005 }; 6006 6007 module_param_cb(default_affinity_scope, &wq_affn_dfl_ops, NULL, 0644); 6008 6009 #ifdef CONFIG_SYSFS 6010 /* 6011 * Workqueues with WQ_SYSFS flag set is visible to userland via 6012 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the 6013 * following attributes. 6014 * 6015 * per_cpu RO bool : whether the workqueue is per-cpu or unbound 6016 * max_active RW int : maximum number of in-flight work items 6017 * 6018 * Unbound workqueues have the following extra attributes. 
6019 * 6020 * nice RW int : nice value of the workers 6021 * cpumask RW mask : bitmask of allowed CPUs for the workers 6022 * affinity_scope RW str : worker CPU affinity scope (cache, numa, none) 6023 * affinity_strict RW bool : worker CPU affinity is strict 6024 */ 6025 struct wq_device { 6026 struct workqueue_struct *wq; 6027 struct device dev; 6028 }; 6029 6030 static struct workqueue_struct *dev_to_wq(struct device *dev) 6031 { 6032 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 6033 6034 return wq_dev->wq; 6035 } 6036 6037 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr, 6038 char *buf) 6039 { 6040 struct workqueue_struct *wq = dev_to_wq(dev); 6041 6042 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); 6043 } 6044 static DEVICE_ATTR_RO(per_cpu); 6045 6046 static ssize_t max_active_show(struct device *dev, 6047 struct device_attribute *attr, char *buf) 6048 { 6049 struct workqueue_struct *wq = dev_to_wq(dev); 6050 6051 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); 6052 } 6053 6054 static ssize_t max_active_store(struct device *dev, 6055 struct device_attribute *attr, const char *buf, 6056 size_t count) 6057 { 6058 struct workqueue_struct *wq = dev_to_wq(dev); 6059 int val; 6060 6061 if (sscanf(buf, "%d", &val) != 1 || val <= 0) 6062 return -EINVAL; 6063 6064 workqueue_set_max_active(wq, val); 6065 return count; 6066 } 6067 static DEVICE_ATTR_RW(max_active); 6068 6069 static struct attribute *wq_sysfs_attrs[] = { 6070 &dev_attr_per_cpu.attr, 6071 &dev_attr_max_active.attr, 6072 NULL, 6073 }; 6074 ATTRIBUTE_GROUPS(wq_sysfs); 6075 6076 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr, 6077 char *buf) 6078 { 6079 struct workqueue_struct *wq = dev_to_wq(dev); 6080 int written; 6081 6082 mutex_lock(&wq->mutex); 6083 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); 6084 mutex_unlock(&wq->mutex); 6085 6086 return written; 6087 } 6088 6089 /* prepare workqueue_attrs for sysfs store operations */ 6090 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) 6091 { 6092 struct workqueue_attrs *attrs; 6093 6094 lockdep_assert_held(&wq_pool_mutex); 6095 6096 attrs = alloc_workqueue_attrs(); 6097 if (!attrs) 6098 return NULL; 6099 6100 copy_workqueue_attrs(attrs, wq->unbound_attrs); 6101 return attrs; 6102 } 6103 6104 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr, 6105 const char *buf, size_t count) 6106 { 6107 struct workqueue_struct *wq = dev_to_wq(dev); 6108 struct workqueue_attrs *attrs; 6109 int ret = -ENOMEM; 6110 6111 apply_wqattrs_lock(); 6112 6113 attrs = wq_sysfs_prep_attrs(wq); 6114 if (!attrs) 6115 goto out_unlock; 6116 6117 if (sscanf(buf, "%d", &attrs->nice) == 1 && 6118 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE) 6119 ret = apply_workqueue_attrs_locked(wq, attrs); 6120 else 6121 ret = -EINVAL; 6122 6123 out_unlock: 6124 apply_wqattrs_unlock(); 6125 free_workqueue_attrs(attrs); 6126 return ret ?: count; 6127 } 6128 6129 static ssize_t wq_cpumask_show(struct device *dev, 6130 struct device_attribute *attr, char *buf) 6131 { 6132 struct workqueue_struct *wq = dev_to_wq(dev); 6133 int written; 6134 6135 mutex_lock(&wq->mutex); 6136 written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 6137 cpumask_pr_args(wq->unbound_attrs->cpumask)); 6138 mutex_unlock(&wq->mutex); 6139 return written; 6140 } 6141 6142 static ssize_t wq_cpumask_store(struct device *dev, 6143 struct device_attribute *attr, 
6144 const char *buf, size_t count) 6145 { 6146 struct workqueue_struct *wq = dev_to_wq(dev); 6147 struct workqueue_attrs *attrs; 6148 int ret = -ENOMEM; 6149 6150 apply_wqattrs_lock(); 6151 6152 attrs = wq_sysfs_prep_attrs(wq); 6153 if (!attrs) 6154 goto out_unlock; 6155 6156 ret = cpumask_parse(buf, attrs->cpumask); 6157 if (!ret) 6158 ret = apply_workqueue_attrs_locked(wq, attrs); 6159 6160 out_unlock: 6161 apply_wqattrs_unlock(); 6162 free_workqueue_attrs(attrs); 6163 return ret ?: count; 6164 } 6165 6166 static ssize_t wq_affn_scope_show(struct device *dev, 6167 struct device_attribute *attr, char *buf) 6168 { 6169 struct workqueue_struct *wq = dev_to_wq(dev); 6170 int written; 6171 6172 mutex_lock(&wq->mutex); 6173 if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL) 6174 written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n", 6175 wq_affn_names[WQ_AFFN_DFL], 6176 wq_affn_names[wq_affn_dfl]); 6177 else 6178 written = scnprintf(buf, PAGE_SIZE, "%s\n", 6179 wq_affn_names[wq->unbound_attrs->affn_scope]); 6180 mutex_unlock(&wq->mutex); 6181 6182 return written; 6183 } 6184 6185 static ssize_t wq_affn_scope_store(struct device *dev, 6186 struct device_attribute *attr, 6187 const char *buf, size_t count) 6188 { 6189 struct workqueue_struct *wq = dev_to_wq(dev); 6190 struct workqueue_attrs *attrs; 6191 int affn, ret = -ENOMEM; 6192 6193 affn = parse_affn_scope(buf); 6194 if (affn < 0) 6195 return affn; 6196 6197 apply_wqattrs_lock(); 6198 attrs = wq_sysfs_prep_attrs(wq); 6199 if (attrs) { 6200 attrs->affn_scope = affn; 6201 ret = apply_workqueue_attrs_locked(wq, attrs); 6202 } 6203 apply_wqattrs_unlock(); 6204 free_workqueue_attrs(attrs); 6205 return ret ?: count; 6206 } 6207 6208 static ssize_t wq_affinity_strict_show(struct device *dev, 6209 struct device_attribute *attr, char *buf) 6210 { 6211 struct workqueue_struct *wq = dev_to_wq(dev); 6212 6213 return scnprintf(buf, PAGE_SIZE, "%d\n", 6214 wq->unbound_attrs->affn_strict); 6215 } 6216 6217 static ssize_t wq_affinity_strict_store(struct device *dev, 6218 struct device_attribute *attr, 6219 const char *buf, size_t count) 6220 { 6221 struct workqueue_struct *wq = dev_to_wq(dev); 6222 struct workqueue_attrs *attrs; 6223 int v, ret = -ENOMEM; 6224 6225 if (sscanf(buf, "%d", &v) != 1) 6226 return -EINVAL; 6227 6228 apply_wqattrs_lock(); 6229 attrs = wq_sysfs_prep_attrs(wq); 6230 if (attrs) { 6231 attrs->affn_strict = (bool)v; 6232 ret = apply_workqueue_attrs_locked(wq, attrs); 6233 } 6234 apply_wqattrs_unlock(); 6235 free_workqueue_attrs(attrs); 6236 return ret ?: count; 6237 } 6238 6239 static struct device_attribute wq_sysfs_unbound_attrs[] = { 6240 __ATTR(nice, 0644, wq_nice_show, wq_nice_store), 6241 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store), 6242 __ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store), 6243 __ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store), 6244 __ATTR_NULL, 6245 }; 6246 6247 static struct bus_type wq_subsys = { 6248 .name = "workqueue", 6249 .dev_groups = wq_sysfs_groups, 6250 }; 6251 6252 static ssize_t wq_unbound_cpumask_show(struct device *dev, 6253 struct device_attribute *attr, char *buf) 6254 { 6255 int written; 6256 6257 mutex_lock(&wq_pool_mutex); 6258 written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 6259 cpumask_pr_args(wq_unbound_cpumask)); 6260 mutex_unlock(&wq_pool_mutex); 6261 6262 return written; 6263 } 6264 6265 static ssize_t wq_unbound_cpumask_store(struct device *dev, 6266 struct device_attribute *attr, const char *buf, size_t count) 6267 { 6268 
cpumask_var_t cpumask; 6269 int ret; 6270 6271 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL)) 6272 return -ENOMEM; 6273 6274 ret = cpumask_parse(buf, cpumask); 6275 if (!ret) 6276 ret = workqueue_set_unbound_cpumask(cpumask); 6277 6278 free_cpumask_var(cpumask); 6279 return ret ? ret : count; 6280 } 6281 6282 static struct device_attribute wq_sysfs_cpumask_attr = 6283 __ATTR(cpumask, 0644, wq_unbound_cpumask_show, 6284 wq_unbound_cpumask_store); 6285 6286 static int __init wq_sysfs_init(void) 6287 { 6288 struct device *dev_root; 6289 int err; 6290 6291 err = subsys_virtual_register(&wq_subsys, NULL); 6292 if (err) 6293 return err; 6294 6295 dev_root = bus_get_dev_root(&wq_subsys); 6296 if (dev_root) { 6297 err = device_create_file(dev_root, &wq_sysfs_cpumask_attr); 6298 put_device(dev_root); 6299 } 6300 return err; 6301 } 6302 core_initcall(wq_sysfs_init); 6303 6304 static void wq_device_release(struct device *dev) 6305 { 6306 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 6307 6308 kfree(wq_dev); 6309 } 6310 6311 /** 6312 * workqueue_sysfs_register - make a workqueue visible in sysfs 6313 * @wq: the workqueue to register 6314 * 6315 * Expose @wq in sysfs under /sys/bus/workqueue/devices. 6316 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set 6317 * which is the preferred method. 6318 * 6319 * Workqueue user should use this function directly iff it wants to apply 6320 * workqueue_attrs before making the workqueue visible in sysfs; otherwise, 6321 * apply_workqueue_attrs() may race against userland updating the 6322 * attributes. 6323 * 6324 * Return: 0 on success, -errno on failure. 6325 */ 6326 int workqueue_sysfs_register(struct workqueue_struct *wq) 6327 { 6328 struct wq_device *wq_dev; 6329 int ret; 6330 6331 /* 6332 * Adjusting max_active or creating new pwqs by applying 6333 * attributes breaks ordering guarantee. Disallow exposing ordered 6334 * workqueues. 6335 */ 6336 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 6337 return -EINVAL; 6338 6339 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); 6340 if (!wq_dev) 6341 return -ENOMEM; 6342 6343 wq_dev->wq = wq; 6344 wq_dev->dev.bus = &wq_subsys; 6345 wq_dev->dev.release = wq_device_release; 6346 dev_set_name(&wq_dev->dev, "%s", wq->name); 6347 6348 /* 6349 * unbound_attrs are created separately. Suppress uevent until 6350 * everything is ready. 6351 */ 6352 dev_set_uevent_suppress(&wq_dev->dev, true); 6353 6354 ret = device_register(&wq_dev->dev); 6355 if (ret) { 6356 put_device(&wq_dev->dev); 6357 wq->wq_dev = NULL; 6358 return ret; 6359 } 6360 6361 if (wq->flags & WQ_UNBOUND) { 6362 struct device_attribute *attr; 6363 6364 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) { 6365 ret = device_create_file(&wq_dev->dev, attr); 6366 if (ret) { 6367 device_unregister(&wq_dev->dev); 6368 wq->wq_dev = NULL; 6369 return ret; 6370 } 6371 } 6372 } 6373 6374 dev_set_uevent_suppress(&wq_dev->dev, false); 6375 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); 6376 return 0; 6377 } 6378 6379 /** 6380 * workqueue_sysfs_unregister - undo workqueue_sysfs_register() 6381 * @wq: the workqueue to unregister 6382 * 6383 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister. 
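 *
 * (Illustrative note: a workqueue created with, say,
 * alloc_workqueue("foo_wq", WQ_UNBOUND | WQ_SYSFS, 0) need not call this
 * directly; destroy_workqueue() performs the sysfs teardown as part of
 * destruction.)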
6384 */ 6385 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) 6386 { 6387 struct wq_device *wq_dev = wq->wq_dev; 6388 6389 if (!wq->wq_dev) 6390 return; 6391 6392 wq->wq_dev = NULL; 6393 device_unregister(&wq_dev->dev); 6394 } 6395 #else /* CONFIG_SYSFS */ 6396 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { } 6397 #endif /* CONFIG_SYSFS */ 6398 6399 /* 6400 * Workqueue watchdog. 6401 * 6402 * Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal 6403 * flush dependency, a concurrency managed work item which stays RUNNING 6404 * indefinitely. Workqueue stalls can be very difficult to debug as the 6405 * usual warning mechanisms don't trigger and internal workqueue state is 6406 * largely opaque. 6407 * 6408 * Workqueue watchdog monitors all worker pools periodically and dumps 6409 * state if some pools failed to make forward progress for a while where 6410 * forward progress is defined as the first item on ->worklist changing. 6411 * 6412 * This mechanism is controlled through the kernel parameter 6413 * "workqueue.watchdog_thresh" which can be updated at runtime through the 6414 * corresponding sysfs parameter file. 6415 */ 6416 #ifdef CONFIG_WQ_WATCHDOG 6417 6418 static unsigned long wq_watchdog_thresh = 30; 6419 static struct timer_list wq_watchdog_timer; 6420 6421 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES; 6422 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES; 6423 6424 /* 6425 * Show workers that might prevent the processing of pending work items. 6426 * The only candidates are CPU-bound workers in the running state. 6427 * Pending work items should be handled by another idle worker 6428 * in all other situations. 6429 */ 6430 static void show_cpu_pool_hog(struct worker_pool *pool) 6431 { 6432 struct worker *worker; 6433 unsigned long flags; 6434 int bkt; 6435 6436 raw_spin_lock_irqsave(&pool->lock, flags); 6437 6438 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 6439 if (task_is_running(worker->task)) { 6440 /* 6441 * Defer printing to avoid deadlocks in console 6442 * drivers that queue work while holding locks 6443 * also taken in their write paths. 
6444 */ 6445 printk_deferred_enter(); 6446 6447 pr_info("pool %d:\n", pool->id); 6448 sched_show_task(worker->task); 6449 6450 printk_deferred_exit(); 6451 } 6452 } 6453 6454 raw_spin_unlock_irqrestore(&pool->lock, flags); 6455 } 6456 6457 static void show_cpu_pools_hogs(void) 6458 { 6459 struct worker_pool *pool; 6460 int pi; 6461 6462 pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n"); 6463 6464 rcu_read_lock(); 6465 6466 for_each_pool(pool, pi) { 6467 if (pool->cpu_stall) 6468 show_cpu_pool_hog(pool); 6469 6470 } 6471 6472 rcu_read_unlock(); 6473 } 6474 6475 static void wq_watchdog_reset_touched(void) 6476 { 6477 int cpu; 6478 6479 wq_watchdog_touched = jiffies; 6480 for_each_possible_cpu(cpu) 6481 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 6482 } 6483 6484 static void wq_watchdog_timer_fn(struct timer_list *unused) 6485 { 6486 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ; 6487 bool lockup_detected = false; 6488 bool cpu_pool_stall = false; 6489 unsigned long now = jiffies; 6490 struct worker_pool *pool; 6491 int pi; 6492 6493 if (!thresh) 6494 return; 6495 6496 rcu_read_lock(); 6497 6498 for_each_pool(pool, pi) { 6499 unsigned long pool_ts, touched, ts; 6500 6501 pool->cpu_stall = false; 6502 if (list_empty(&pool->worklist)) 6503 continue; 6504 6505 /* 6506 * If a virtual machine is stopped by the host it can look to 6507 * the watchdog like a stall. 6508 */ 6509 kvm_check_and_clear_guest_paused(); 6510 6511 /* get the latest of pool and touched timestamps */ 6512 if (pool->cpu >= 0) 6513 touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu)); 6514 else 6515 touched = READ_ONCE(wq_watchdog_touched); 6516 pool_ts = READ_ONCE(pool->watchdog_ts); 6517 6518 if (time_after(pool_ts, touched)) 6519 ts = pool_ts; 6520 else 6521 ts = touched; 6522 6523 /* did we stall? 
*/ 6524 if (time_after(now, ts + thresh)) { 6525 lockup_detected = true; 6526 if (pool->cpu >= 0) { 6527 pool->cpu_stall = true; 6528 cpu_pool_stall = true; 6529 } 6530 pr_emerg("BUG: workqueue lockup - pool"); 6531 pr_cont_pool_info(pool); 6532 pr_cont(" stuck for %us!\n", 6533 jiffies_to_msecs(now - pool_ts) / 1000); 6534 } 6535 6536 6537 } 6538 6539 rcu_read_unlock(); 6540 6541 if (lockup_detected) 6542 show_all_workqueues(); 6543 6544 if (cpu_pool_stall) 6545 show_cpu_pools_hogs(); 6546 6547 wq_watchdog_reset_touched(); 6548 mod_timer(&wq_watchdog_timer, jiffies + thresh); 6549 } 6550 6551 notrace void wq_watchdog_touch(int cpu) 6552 { 6553 if (cpu >= 0) 6554 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 6555 6556 wq_watchdog_touched = jiffies; 6557 } 6558 6559 static void wq_watchdog_set_thresh(unsigned long thresh) 6560 { 6561 wq_watchdog_thresh = 0; 6562 del_timer_sync(&wq_watchdog_timer); 6563 6564 if (thresh) { 6565 wq_watchdog_thresh = thresh; 6566 wq_watchdog_reset_touched(); 6567 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ); 6568 } 6569 } 6570 6571 static int wq_watchdog_param_set_thresh(const char *val, 6572 const struct kernel_param *kp) 6573 { 6574 unsigned long thresh; 6575 int ret; 6576 6577 ret = kstrtoul(val, 0, &thresh); 6578 if (ret) 6579 return ret; 6580 6581 if (system_wq) 6582 wq_watchdog_set_thresh(thresh); 6583 else 6584 wq_watchdog_thresh = thresh; 6585 6586 return 0; 6587 } 6588 6589 static const struct kernel_param_ops wq_watchdog_thresh_ops = { 6590 .set = wq_watchdog_param_set_thresh, 6591 .get = param_get_ulong, 6592 }; 6593 6594 module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh, 6595 0644); 6596 6597 static void wq_watchdog_init(void) 6598 { 6599 timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE); 6600 wq_watchdog_set_thresh(wq_watchdog_thresh); 6601 } 6602 6603 #else /* CONFIG_WQ_WATCHDOG */ 6604 6605 static inline void wq_watchdog_init(void) { } 6606 6607 #endif /* CONFIG_WQ_WATCHDOG */ 6608 6609 static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask) 6610 { 6611 if (!cpumask_intersects(wq_unbound_cpumask, mask)) { 6612 pr_warn("workqueue: Restricting unbound_cpumask (%*pb) with %s (%*pb) leaves no CPU, ignoring\n", 6613 cpumask_pr_args(wq_unbound_cpumask), name, cpumask_pr_args(mask)); 6614 return; 6615 } 6616 6617 cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask); 6618 } 6619 6620 /** 6621 * workqueue_init_early - early init for workqueue subsystem 6622 * 6623 * This is the first step of three-staged workqueue subsystem initialization and 6624 * invoked as soon as the bare basics - memory allocation, cpumasks and idr are 6625 * up. It sets up all the data structures and system workqueues and allows early 6626 * boot code to create workqueues and queue/cancel work items. Actual work item 6627 * execution starts only after kthreads can be created and scheduled right 6628 * before early initcalls. 
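 *
 * For example (illustrative only; the names below are made up), boot code
 * running after this point may already do
 *
 *	INIT_WORK(&early_work, early_work_fn);
 *	schedule_work(&early_work);
 *
 * and the item simply sits on a pool worklist until workqueue_init()
 * creates the initial kworkers.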
6629 */ 6630 void __init workqueue_init_early(void) 6631 { 6632 struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM]; 6633 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; 6634 int i, cpu; 6635 6636 BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 6637 6638 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL)); 6639 cpumask_copy(wq_unbound_cpumask, cpu_possible_mask); 6640 restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ)); 6641 restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN)); 6642 if (!cpumask_empty(&wq_cmdline_cpumask)) 6643 restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask); 6644 6645 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 6646 6647 wq_update_pod_attrs_buf = alloc_workqueue_attrs(); 6648 BUG_ON(!wq_update_pod_attrs_buf); 6649 6650 /* initialize WQ_AFFN_SYSTEM pods */ 6651 pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL); 6652 pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL); 6653 pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL); 6654 BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod); 6655 6656 BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE)); 6657 6658 pt->nr_pods = 1; 6659 cpumask_copy(pt->pod_cpus[0], cpu_possible_mask); 6660 pt->pod_node[0] = NUMA_NO_NODE; 6661 pt->cpu_pod[0] = 0; 6662 6663 /* initialize CPU pools */ 6664 for_each_possible_cpu(cpu) { 6665 struct worker_pool *pool; 6666 6667 i = 0; 6668 for_each_cpu_worker_pool(pool, cpu) { 6669 BUG_ON(init_worker_pool(pool)); 6670 pool->cpu = cpu; 6671 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); 6672 cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu)); 6673 pool->attrs->nice = std_nice[i++]; 6674 pool->attrs->affn_strict = true; 6675 pool->node = cpu_to_node(cpu); 6676 6677 /* alloc pool ID */ 6678 mutex_lock(&wq_pool_mutex); 6679 BUG_ON(worker_pool_assign_id(pool)); 6680 mutex_unlock(&wq_pool_mutex); 6681 } 6682 } 6683 6684 /* create default unbound and ordered wq attrs */ 6685 for (i = 0; i < NR_STD_WORKER_POOLS; i++) { 6686 struct workqueue_attrs *attrs; 6687 6688 BUG_ON(!(attrs = alloc_workqueue_attrs())); 6689 attrs->nice = std_nice[i]; 6690 unbound_std_wq_attrs[i] = attrs; 6691 6692 /* 6693 * An ordered wq should have only one pwq as ordering is 6694 * guaranteed by max_active which is enforced by pwqs. 
6695 */ 6696 BUG_ON(!(attrs = alloc_workqueue_attrs())); 6697 attrs->nice = std_nice[i]; 6698 attrs->ordered = true; 6699 ordered_wq_attrs[i] = attrs; 6700 } 6701 6702 system_wq = alloc_workqueue("events", 0, 0); 6703 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0); 6704 system_long_wq = alloc_workqueue("events_long", 0, 0); 6705 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 6706 WQ_MAX_ACTIVE); 6707 system_freezable_wq = alloc_workqueue("events_freezable", 6708 WQ_FREEZABLE, 0); 6709 system_power_efficient_wq = alloc_workqueue("events_power_efficient", 6710 WQ_POWER_EFFICIENT, 0); 6711 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient", 6712 WQ_FREEZABLE | WQ_POWER_EFFICIENT, 6713 0); 6714 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || 6715 !system_unbound_wq || !system_freezable_wq || 6716 !system_power_efficient_wq || 6717 !system_freezable_power_efficient_wq); 6718 } 6719 6720 static void __init wq_cpu_intensive_thresh_init(void) 6721 { 6722 unsigned long thresh; 6723 unsigned long bogo; 6724 6725 pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release"); 6726 BUG_ON(IS_ERR(pwq_release_worker)); 6727 6728 /* if the user set it to a specific value, keep it */ 6729 if (wq_cpu_intensive_thresh_us != ULONG_MAX) 6730 return; 6731 6732 /* 6733 * The default of 10ms is derived from the fact that most modern (as of 6734 * 2023) processors can do a lot in 10ms and that it's just below what 6735 * most consider human-perceivable. However, the kernel also runs on a 6736 * lot slower CPUs including microcontrollers where the threshold is way 6737 * too low. 6738 * 6739 * Let's scale up the threshold upto 1 second if BogoMips is below 4000. 6740 * This is by no means accurate but it doesn't have to be. The mechanism 6741 * is still useful even when the threshold is fully scaled up. Also, as 6742 * the reports would usually be applicable to everyone, some machines 6743 * operating on longer thresholds won't significantly diminish their 6744 * usefulness. 6745 */ 6746 thresh = 10 * USEC_PER_MSEC; 6747 6748 /* see init/calibrate.c for lpj -> BogoMIPS calculation */ 6749 bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1); 6750 if (bogo < 4000) 6751 thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC); 6752 6753 pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n", 6754 loops_per_jiffy, bogo, thresh); 6755 6756 wq_cpu_intensive_thresh_us = thresh; 6757 } 6758 6759 /** 6760 * workqueue_init - bring workqueue subsystem fully online 6761 * 6762 * This is the second step of three-staged workqueue subsystem initialization 6763 * and invoked as soon as kthreads can be created and scheduled. Workqueues have 6764 * been created and work items queued on them, but there are no kworkers 6765 * executing the work items yet. Populate the worker pools with the initial 6766 * workers and enable future kworker creations. 6767 */ 6768 void __init workqueue_init(void) 6769 { 6770 struct workqueue_struct *wq; 6771 struct worker_pool *pool; 6772 int cpu, bkt; 6773 6774 wq_cpu_intensive_thresh_init(); 6775 6776 mutex_lock(&wq_pool_mutex); 6777 6778 /* 6779 * Per-cpu pools created earlier could be missing node hint. Fix them 6780 * up. Also, create a rescuer for workqueues that requested it. 
6781 */ 6782 for_each_possible_cpu(cpu) { 6783 for_each_cpu_worker_pool(pool, cpu) { 6784 pool->node = cpu_to_node(cpu); 6785 } 6786 } 6787 6788 list_for_each_entry(wq, &workqueues, list) { 6789 WARN(init_rescuer(wq), 6790 "workqueue: failed to create early rescuer for %s", 6791 wq->name); 6792 } 6793 6794 mutex_unlock(&wq_pool_mutex); 6795 6796 /* create the initial workers */ 6797 for_each_online_cpu(cpu) { 6798 for_each_cpu_worker_pool(pool, cpu) { 6799 pool->flags &= ~POOL_DISASSOCIATED; 6800 BUG_ON(!create_worker(pool)); 6801 } 6802 } 6803 6804 hash_for_each(unbound_pool_hash, bkt, pool, hash_node) 6805 BUG_ON(!create_worker(pool)); 6806 6807 wq_online = true; 6808 wq_watchdog_init(); 6809 } 6810 6811 /* 6812 * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to 6813 * @cpu_shares_pod(). Each subset of CPUs that share a pod is assigned a unique 6814 * and consecutive pod ID. The rest of @pt is initialized accordingly. 6815 */ 6816 static void __init init_pod_type(struct wq_pod_type *pt, 6817 bool (*cpus_share_pod)(int, int)) 6818 { 6819 int cur, pre, cpu, pod; 6820 6821 pt->nr_pods = 0; 6822 6823 /* init @pt->cpu_pod[] according to @cpus_share_pod() */ 6824 pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL); 6825 BUG_ON(!pt->cpu_pod); 6826 6827 for_each_possible_cpu(cur) { 6828 for_each_possible_cpu(pre) { 6829 if (pre >= cur) { 6830 pt->cpu_pod[cur] = pt->nr_pods++; 6831 break; 6832 } 6833 if (cpus_share_pod(cur, pre)) { 6834 pt->cpu_pod[cur] = pt->cpu_pod[pre]; 6835 break; 6836 } 6837 } 6838 } 6839 6840 /* init the rest to match @pt->cpu_pod[] */ 6841 pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL); 6842 pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL); 6843 BUG_ON(!pt->pod_cpus || !pt->pod_node); 6844 6845 for (pod = 0; pod < pt->nr_pods; pod++) 6846 BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL)); 6847 6848 for_each_possible_cpu(cpu) { 6849 cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]); 6850 pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu); 6851 } 6852 } 6853 6854 static bool __init cpus_dont_share(int cpu0, int cpu1) 6855 { 6856 return false; 6857 } 6858 6859 static bool __init cpus_share_smt(int cpu0, int cpu1) 6860 { 6861 #ifdef CONFIG_SCHED_SMT 6862 return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1)); 6863 #else 6864 return false; 6865 #endif 6866 } 6867 6868 static bool __init cpus_share_numa(int cpu0, int cpu1) 6869 { 6870 return cpu_to_node(cpu0) == cpu_to_node(cpu1); 6871 } 6872 6873 /** 6874 * workqueue_init_topology - initialize CPU pods for unbound workqueues 6875 * 6876 * This is the third step of there-staged workqueue subsystem initialization and 6877 * invoked after SMP and topology information are fully initialized. It 6878 * initializes the unbound CPU pods accordingly. 6879 */ 6880 void __init workqueue_init_topology(void) 6881 { 6882 struct workqueue_struct *wq; 6883 int cpu; 6884 6885 init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share); 6886 init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt); 6887 init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache); 6888 init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa); 6889 6890 mutex_lock(&wq_pool_mutex); 6891 6892 /* 6893 * Workqueues allocated earlier would have all CPUs sharing the default 6894 * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU 6895 * combinations to apply per-pod sharing. 
6896 */ 6897 list_for_each_entry(wq, &workqueues, list) { 6898 for_each_online_cpu(cpu) { 6899 wq_update_pod(wq, cpu, cpu, true); 6900 } 6901 } 6902 6903 mutex_unlock(&wq_pool_mutex); 6904 } 6905 6906 void __warn_flushing_systemwide_wq(void) 6907 { 6908 pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n"); 6909 dump_stack(); 6910 } 6911 EXPORT_SYMBOL(__warn_flushing_systemwide_wq); 6912 6913 static int __init workqueue_unbound_cpus_setup(char *str) 6914 { 6915 if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) { 6916 cpumask_clear(&wq_cmdline_cpumask); 6917 pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n"); 6918 } 6919 6920 return 1; 6921 } 6922 __setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup); 6923