// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 * Derived from the taskqueue/keventd code by:
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism. Work items are
 * executed in process context. The worker pool is shared and
 * automatically managed. There are two worker pools for each CPU (one for
 * normal work items and the other for high priority ones) and some extra
 * pools for workqueues which are not bound to any specific CPU - the
 * number of these backing pools is dynamic.
 *
 * Please read Documentation/core-api/workqueue.rst for details.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/kvm_para.h>
#include <linux/delay.h>

#include "workqueue_internal.h"

enum {
	/*
	 * worker_pool flags
	 *
	 * A bound pool is either associated or disassociated with its CPU.
	 * While associated (!DISASSOCIATED), all workers are bound to the
	 * CPU and none has %WORKER_UNBOUND set and concurrency management
	 * is in effect.
	 *
	 * While DISASSOCIATED, the cpu may be offline and all workers have
	 * %WORKER_UNBOUND set and concurrency management disabled, and may
	 * be executing on any CPU. The pool behaves as an unbound one.
	 *
	 * Note that DISASSOCIATED should be flipped only while holding
	 * wq_pool_attach_mutex to avoid changing binding state while
	 * worker_attach_to_pool() is in progress.
	 */
	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */

	/* worker flags */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
				  WORKER_UNBOUND | WORKER_REBOUND,

	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */

	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */

	/*
	 * Rescue workers are used only in emergencies and shared by
	 * all cpus. Give MIN_NICE.
	 */
	RESCUER_NICE_LEVEL	= MIN_NICE,
	HIGHPRI_NICE_LEVEL	= MIN_NICE,

	WQ_NAME_LEN		= 24,
};

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected. Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected. Access with pool->lock held.
 *
 * K: Only modified by worker while holding pool->lock. Can be safely read by
 *    self, while holding pool->lock or from IRQ context if %current is the
 *    kworker.
 *
 * S: Only modified by worker self.
 *
 * A: wq_pool_attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes. RCU protected for reads.
 *
 * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
 *
 * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
 *      RCU for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes. RCU protected for reads.
 *
 * MD: wq_mayday_lock protected.
 *
 * WD: Used internally by the watchdog.
 */

/* struct worker is defined in workqueue_internal.h */

struct worker_pool {
	raw_spinlock_t		lock;		/* the pool lock */
	int			cpu;		/* I: the associated cpu */
	int			node;		/* I: the associated node ID */
	int			id;		/* I: pool ID */
	unsigned int		flags;		/* L: flags */

	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
	bool			cpu_stall;	/* WD: stalled cpu bound pool */

	/*
	 * The counter is incremented in a process context on the associated CPU
	 * w/ preemption disabled, and decremented or reset in the same context
	 * but w/ pool->lock held. The readers grab pool->lock and are
	 * guaranteed to see if the counter reached zero.
	 */
	int			nr_running;

	struct list_head	worklist;	/* L: list of pending works */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle workers */

	struct list_head	idle_list;	/* L: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct work_struct	idle_cull_work;	/* L: worker idle cleanup */

	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	/* a worker is either on busy_hash or idle_list, or the manager */
	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
						/* L: hash of busy workers */

	struct worker		*manager;	/* L: purely informational */
	struct list_head	workers;	/* A: attached workers */
	struct list_head	dying_workers;	/* A: workers about to die */
	struct completion	*detach_completion; /* all workers detached */

	struct ida		worker_ida;	/* worker IDs for task name */

	struct workqueue_attrs	*attrs;		/* I: worker attributes */
	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
	int			refcnt;		/* PL: refcnt for unbound pools */

	/*
	 * Destruction of pool is RCU protected to allow dereferences
	 * from get_work_pool().
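	 *
	 * For example, a lookup path that only needs the pool briefly can
	 * follow the usual RCU pattern (illustrative sketch, mirroring what
	 * try_to_grab_pending() below does):
	 *
	 *	rcu_read_lock();
	 *	pool = get_work_pool(work);
	 *	if (pool) {
	 *		raw_spin_lock(&pool->lock);
	 *		...
	 *		raw_spin_unlock(&pool->lock);
	 *	}
	 *	rcu_read_unlock();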
	 */
	struct rcu_head		rcu;
};

/*
 * Per-pool_workqueue statistics. These can be monitored using
 * tools/workqueue/wq_monitor.py.
 */
enum pool_workqueue_stats {
	PWQ_STAT_STARTED,	/* work items started execution */
	PWQ_STAT_COMPLETED,	/* work items completed execution */
	PWQ_STAT_CPU_TIME,	/* total CPU time consumed */
	PWQ_STAT_CPU_INTENSIVE,	/* wq_cpu_intensive_thresh_us violations */
	PWQ_STAT_CM_WAKEUP,	/* concurrency-management worker wakeups */
	PWQ_STAT_REPATRIATED,	/* unbound workers brought back into scope */
	PWQ_STAT_MAYDAY,	/* maydays to rescuer */
	PWQ_STAT_RESCUED,	/* linked work items executed by rescuer */

	PWQ_NR_STATS,
};

/*
 * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 * number of flag bits.
 */
struct pool_workqueue {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			refcnt;		/* L: reference count */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */

	/*
	 * nr_active management and WORK_STRUCT_INACTIVE:
	 *
	 * When pwq->nr_active >= max_active, new work item is queued to
	 * pwq->inactive_works instead of pool->worklist and marked with
	 * WORK_STRUCT_INACTIVE.
	 *
	 * All work items marked with WORK_STRUCT_INACTIVE do not participate
	 * in pwq->nr_active and all work items in pwq->inactive_works are
	 * marked with WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE
	 * work items are in pwq->inactive_works. Some of them are ready to
	 * run in pool->worklist or worker->scheduled. Those work items are
	 * only struct wq_barrier which is used for flush_work() and should
	 * not participate in pwq->nr_active. A non-barrier work item is
	 * marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
	 */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	inactive_works;	/* L: inactive works */
	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
	struct list_head	mayday_node;	/* MD: node on wq->maydays */

	u64			stats[PWQ_NR_STATS];

	/*
	 * Release of unbound pwq is punted to a kthread_worker. See put_pwq()
	 * and pwq_release_workfn() for details. pool_workqueue itself is also
	 * RCU protected so that the first pwq can be determined without
	 * grabbing wq->mutex.
	 */
	struct kthread_work	release_work;
	struct rcu_head		rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* WQ: list of flushers */
	int			flush_color;	/* WQ: flush color waiting for */
	struct completion	done;		/* flush completion */
};

struct wq_device;

/*
 * The externally visible workqueue. It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
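 *
 * Users normally never touch this struct directly and only go through the
 * public API, roughly like the sketch below ("my_wq", "my_work" and
 * "my_work_fn" are made-up names used purely for illustration):
 *
 *	static void my_work_fn(struct work_struct *work) { ... }
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	struct workqueue_struct *wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
 *
 *	queue_work(wq, &my_work);
 *	flush_workqueue(wq);
 *	destroy_workqueue(wq);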
284 */ 285 struct workqueue_struct { 286 struct list_head pwqs; /* WR: all pwqs of this wq */ 287 struct list_head list; /* PR: list of all workqueues */ 288 289 struct mutex mutex; /* protects this wq */ 290 int work_color; /* WQ: current work color */ 291 int flush_color; /* WQ: current flush color */ 292 atomic_t nr_pwqs_to_flush; /* flush in progress */ 293 struct wq_flusher *first_flusher; /* WQ: first flusher */ 294 struct list_head flusher_queue; /* WQ: flush waiters */ 295 struct list_head flusher_overflow; /* WQ: flush overflow list */ 296 297 struct list_head maydays; /* MD: pwqs requesting rescue */ 298 struct worker *rescuer; /* MD: rescue worker */ 299 300 int nr_drainers; /* WQ: drain in progress */ 301 int saved_max_active; /* WQ: saved pwq max_active */ 302 303 struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */ 304 struct pool_workqueue *dfl_pwq; /* PW: only for unbound wqs */ 305 306 #ifdef CONFIG_SYSFS 307 struct wq_device *wq_dev; /* I: for sysfs interface */ 308 #endif 309 #ifdef CONFIG_LOCKDEP 310 char *lock_name; 311 struct lock_class_key key; 312 struct lockdep_map lockdep_map; 313 #endif 314 char name[WQ_NAME_LEN]; /* I: workqueue name */ 315 316 /* 317 * Destruction of workqueue_struct is RCU protected to allow walking 318 * the workqueues list without grabbing wq_pool_mutex. 319 * This is used to dump all workqueues from sysrq. 320 */ 321 struct rcu_head rcu; 322 323 /* hot fields used during command issue, aligned to cacheline */ 324 unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */ 325 struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */ 326 }; 327 328 static struct kmem_cache *pwq_cache; 329 330 /* 331 * Each pod type describes how CPUs should be grouped for unbound workqueues. 332 * See the comment above workqueue_attrs->affn_scope. 333 */ 334 struct wq_pod_type { 335 int nr_pods; /* number of pods */ 336 cpumask_var_t *pod_cpus; /* pod -> cpus */ 337 int *pod_node; /* pod -> node */ 338 int *cpu_pod; /* cpu -> pod */ 339 }; 340 341 static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES]; 342 static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE; 343 344 static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = { 345 [WQ_AFFN_DFL] = "default", 346 [WQ_AFFN_CPU] = "cpu", 347 [WQ_AFFN_SMT] = "smt", 348 [WQ_AFFN_CACHE] = "cache", 349 [WQ_AFFN_NUMA] = "numa", 350 [WQ_AFFN_SYSTEM] = "system", 351 }; 352 353 /* 354 * Per-cpu work items which run for longer than the following threshold are 355 * automatically considered CPU intensive and excluded from concurrency 356 * management to prevent them from noticeably delaying other per-cpu work items. 357 * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter. 358 * The actual value is initialized in wq_cpu_intensive_thresh_init(). 359 */ 360 static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX; 361 module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644); 362 363 /* see the comment above the definition of WQ_POWER_EFFICIENT */ 364 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT); 365 module_param_named(power_efficient, wq_power_efficient, bool, 0444); 366 367 static bool wq_online; /* can kworkers be created yet? 
 */

/* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *wq_update_pod_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
static DEFINE_RAW_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
/* wait for manager to go away */
static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);

static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
static bool workqueue_freezing;		/* PL: have wqs started freezing? */

/* PL&A: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;

/* to further constrain wq_unbound_cpumask by the cmdline parameter */
static struct cpumask wq_cmdline_cpumask __initdata;

/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);

/*
 * Local execution of unbound work items is no longer guaranteed. The
 * following always forces round-robin CPU selection on unbound work items
 * to uncover usages which depend on it.
 */
#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
static bool wq_debug_force_rr_cpu = true;
#else
static bool wq_debug_force_rr_cpu = false;
#endif
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);

static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */

/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];

/*
 * I: kthread_worker to release pwq's. pwq release needs process context and
 * may be initiated while holding a pool lock, so it is bounced to a dedicated
 * kthread worker to avoid A-A deadlocks.
420 */ 421 static struct kthread_worker *pwq_release_worker; 422 423 struct workqueue_struct *system_wq __read_mostly; 424 EXPORT_SYMBOL(system_wq); 425 struct workqueue_struct *system_highpri_wq __read_mostly; 426 EXPORT_SYMBOL_GPL(system_highpri_wq); 427 struct workqueue_struct *system_long_wq __read_mostly; 428 EXPORT_SYMBOL_GPL(system_long_wq); 429 struct workqueue_struct *system_unbound_wq __read_mostly; 430 EXPORT_SYMBOL_GPL(system_unbound_wq); 431 struct workqueue_struct *system_freezable_wq __read_mostly; 432 EXPORT_SYMBOL_GPL(system_freezable_wq); 433 struct workqueue_struct *system_power_efficient_wq __read_mostly; 434 EXPORT_SYMBOL_GPL(system_power_efficient_wq); 435 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly; 436 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); 437 438 static int worker_thread(void *__worker); 439 static void workqueue_sysfs_unregister(struct workqueue_struct *wq); 440 static void show_pwq(struct pool_workqueue *pwq); 441 static void show_one_worker_pool(struct worker_pool *pool); 442 443 #define CREATE_TRACE_POINTS 444 #include <trace/events/workqueue.h> 445 446 #define assert_rcu_or_pool_mutex() \ 447 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ 448 !lockdep_is_held(&wq_pool_mutex), \ 449 "RCU or wq_pool_mutex should be held") 450 451 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ 452 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ 453 !lockdep_is_held(&wq->mutex) && \ 454 !lockdep_is_held(&wq_pool_mutex), \ 455 "RCU, wq->mutex or wq_pool_mutex should be held") 456 457 #define for_each_cpu_worker_pool(pool, cpu) \ 458 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \ 459 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \ 460 (pool)++) 461 462 /** 463 * for_each_pool - iterate through all worker_pools in the system 464 * @pool: iteration cursor 465 * @pi: integer used for iteration 466 * 467 * This must be called either with wq_pool_mutex held or RCU read 468 * locked. If the pool needs to be used beyond the locking in effect, the 469 * caller is responsible for guaranteeing that the pool stays online. 470 * 471 * The if/else clause exists only for the lockdep assertion and can be 472 * ignored. 473 */ 474 #define for_each_pool(pool, pi) \ 475 idr_for_each_entry(&worker_pool_idr, pool, pi) \ 476 if (({ assert_rcu_or_pool_mutex(); false; })) { } \ 477 else 478 479 /** 480 * for_each_pool_worker - iterate through all workers of a worker_pool 481 * @worker: iteration cursor 482 * @pool: worker_pool to iterate workers of 483 * 484 * This must be called with wq_pool_attach_mutex. 485 * 486 * The if/else clause exists only for the lockdep assertion and can be 487 * ignored. 488 */ 489 #define for_each_pool_worker(worker, pool) \ 490 list_for_each_entry((worker), &(pool)->workers, node) \ 491 if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \ 492 else 493 494 /** 495 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue 496 * @pwq: iteration cursor 497 * @wq: the target workqueue 498 * 499 * This must be called either with wq->mutex held or RCU read locked. 500 * If the pwq needs to be used beyond the locking in effect, the caller is 501 * responsible for guaranteeing that the pwq stays online. 502 * 503 * The if/else clause exists only for the lockdep assertion and can be 504 * ignored. 
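 *
 * For example, a read-only walk could look like this (illustrative sketch,
 * with nr_pwqs being a local counter):
 *
 *	rcu_read_lock();
 *	for_each_pwq(pwq, wq)
 *		nr_pwqs++;
 *	rcu_read_unlock();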
505 */ 506 #define for_each_pwq(pwq, wq) \ 507 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \ 508 lockdep_is_held(&(wq->mutex))) 509 510 #ifdef CONFIG_DEBUG_OBJECTS_WORK 511 512 static const struct debug_obj_descr work_debug_descr; 513 514 static void *work_debug_hint(void *addr) 515 { 516 return ((struct work_struct *) addr)->func; 517 } 518 519 static bool work_is_static_object(void *addr) 520 { 521 struct work_struct *work = addr; 522 523 return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work)); 524 } 525 526 /* 527 * fixup_init is called when: 528 * - an active object is initialized 529 */ 530 static bool work_fixup_init(void *addr, enum debug_obj_state state) 531 { 532 struct work_struct *work = addr; 533 534 switch (state) { 535 case ODEBUG_STATE_ACTIVE: 536 cancel_work_sync(work); 537 debug_object_init(work, &work_debug_descr); 538 return true; 539 default: 540 return false; 541 } 542 } 543 544 /* 545 * fixup_free is called when: 546 * - an active object is freed 547 */ 548 static bool work_fixup_free(void *addr, enum debug_obj_state state) 549 { 550 struct work_struct *work = addr; 551 552 switch (state) { 553 case ODEBUG_STATE_ACTIVE: 554 cancel_work_sync(work); 555 debug_object_free(work, &work_debug_descr); 556 return true; 557 default: 558 return false; 559 } 560 } 561 562 static const struct debug_obj_descr work_debug_descr = { 563 .name = "work_struct", 564 .debug_hint = work_debug_hint, 565 .is_static_object = work_is_static_object, 566 .fixup_init = work_fixup_init, 567 .fixup_free = work_fixup_free, 568 }; 569 570 static inline void debug_work_activate(struct work_struct *work) 571 { 572 debug_object_activate(work, &work_debug_descr); 573 } 574 575 static inline void debug_work_deactivate(struct work_struct *work) 576 { 577 debug_object_deactivate(work, &work_debug_descr); 578 } 579 580 void __init_work(struct work_struct *work, int onstack) 581 { 582 if (onstack) 583 debug_object_init_on_stack(work, &work_debug_descr); 584 else 585 debug_object_init(work, &work_debug_descr); 586 } 587 EXPORT_SYMBOL_GPL(__init_work); 588 589 void destroy_work_on_stack(struct work_struct *work) 590 { 591 debug_object_free(work, &work_debug_descr); 592 } 593 EXPORT_SYMBOL_GPL(destroy_work_on_stack); 594 595 void destroy_delayed_work_on_stack(struct delayed_work *work) 596 { 597 destroy_timer_on_stack(&work->timer); 598 debug_object_free(&work->work, &work_debug_descr); 599 } 600 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack); 601 602 #else 603 static inline void debug_work_activate(struct work_struct *work) { } 604 static inline void debug_work_deactivate(struct work_struct *work) { } 605 #endif 606 607 /** 608 * worker_pool_assign_id - allocate ID and assign it to @pool 609 * @pool: the pool pointer of interest 610 * 611 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned 612 * successfully, -errno on failure. 
613 */ 614 static int worker_pool_assign_id(struct worker_pool *pool) 615 { 616 int ret; 617 618 lockdep_assert_held(&wq_pool_mutex); 619 620 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE, 621 GFP_KERNEL); 622 if (ret >= 0) { 623 pool->id = ret; 624 return 0; 625 } 626 return ret; 627 } 628 629 static unsigned int work_color_to_flags(int color) 630 { 631 return color << WORK_STRUCT_COLOR_SHIFT; 632 } 633 634 static int get_work_color(unsigned long work_data) 635 { 636 return (work_data >> WORK_STRUCT_COLOR_SHIFT) & 637 ((1 << WORK_STRUCT_COLOR_BITS) - 1); 638 } 639 640 static int work_next_color(int color) 641 { 642 return (color + 1) % WORK_NR_COLORS; 643 } 644 645 /* 646 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data 647 * contain the pointer to the queued pwq. Once execution starts, the flag 648 * is cleared and the high bits contain OFFQ flags and pool ID. 649 * 650 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling() 651 * and clear_work_data() can be used to set the pwq, pool or clear 652 * work->data. These functions should only be called while the work is 653 * owned - ie. while the PENDING bit is set. 654 * 655 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq 656 * corresponding to a work. Pool is available once the work has been 657 * queued anywhere after initialization until it is sync canceled. pwq is 658 * available only while the work item is queued. 659 * 660 * %WORK_OFFQ_CANCELING is used to mark a work item which is being 661 * canceled. While being canceled, a work item may have its PENDING set 662 * but stay off timer and worklist for arbitrarily long and nobody should 663 * try to steal the PENDING bit. 664 */ 665 static inline void set_work_data(struct work_struct *work, unsigned long data, 666 unsigned long flags) 667 { 668 WARN_ON_ONCE(!work_pending(work)); 669 atomic_long_set(&work->data, data | flags | work_static(work)); 670 } 671 672 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, 673 unsigned long extra_flags) 674 { 675 set_work_data(work, (unsigned long)pwq, 676 WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags); 677 } 678 679 static void set_work_pool_and_keep_pending(struct work_struct *work, 680 int pool_id) 681 { 682 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 683 WORK_STRUCT_PENDING); 684 } 685 686 static void set_work_pool_and_clear_pending(struct work_struct *work, 687 int pool_id) 688 { 689 /* 690 * The following wmb is paired with the implied mb in 691 * test_and_set_bit(PENDING) and ensures all updates to @work made 692 * here are visible to and precede any updates by the next PENDING 693 * owner. 694 */ 695 smp_wmb(); 696 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); 697 /* 698 * The following mb guarantees that previous clear of a PENDING bit 699 * will not be reordered with any speculative LOADS or STORES from 700 * work->current_func, which is executed afterwards. This possible 701 * reordering can lead to a missed execution on attempt to queue 702 * the same @work. E.g. 
consider this case: 703 * 704 * CPU#0 CPU#1 705 * ---------------------------- -------------------------------- 706 * 707 * 1 STORE event_indicated 708 * 2 queue_work_on() { 709 * 3 test_and_set_bit(PENDING) 710 * 4 } set_..._and_clear_pending() { 711 * 5 set_work_data() # clear bit 712 * 6 smp_mb() 713 * 7 work->current_func() { 714 * 8 LOAD event_indicated 715 * } 716 * 717 * Without an explicit full barrier speculative LOAD on line 8 can 718 * be executed before CPU#0 does STORE on line 1. If that happens, 719 * CPU#0 observes the PENDING bit is still set and new execution of 720 * a @work is not queued in a hope, that CPU#1 will eventually 721 * finish the queued @work. Meanwhile CPU#1 does not see 722 * event_indicated is set, because speculative LOAD was executed 723 * before actual STORE. 724 */ 725 smp_mb(); 726 } 727 728 static void clear_work_data(struct work_struct *work) 729 { 730 smp_wmb(); /* see set_work_pool_and_clear_pending() */ 731 set_work_data(work, WORK_STRUCT_NO_POOL, 0); 732 } 733 734 static inline struct pool_workqueue *work_struct_pwq(unsigned long data) 735 { 736 return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK); 737 } 738 739 static struct pool_workqueue *get_work_pwq(struct work_struct *work) 740 { 741 unsigned long data = atomic_long_read(&work->data); 742 743 if (data & WORK_STRUCT_PWQ) 744 return work_struct_pwq(data); 745 else 746 return NULL; 747 } 748 749 /** 750 * get_work_pool - return the worker_pool a given work was associated with 751 * @work: the work item of interest 752 * 753 * Pools are created and destroyed under wq_pool_mutex, and allows read 754 * access under RCU read lock. As such, this function should be 755 * called under wq_pool_mutex or inside of a rcu_read_lock() region. 756 * 757 * All fields of the returned pool are accessible as long as the above 758 * mentioned locking is in effect. If the returned pool needs to be used 759 * beyond the critical section, the caller is responsible for ensuring the 760 * returned pool is and stays online. 761 * 762 * Return: The worker_pool @work was last associated with. %NULL if none. 763 */ 764 static struct worker_pool *get_work_pool(struct work_struct *work) 765 { 766 unsigned long data = atomic_long_read(&work->data); 767 int pool_id; 768 769 assert_rcu_or_pool_mutex(); 770 771 if (data & WORK_STRUCT_PWQ) 772 return work_struct_pwq(data)->pool; 773 774 pool_id = data >> WORK_OFFQ_POOL_SHIFT; 775 if (pool_id == WORK_OFFQ_POOL_NONE) 776 return NULL; 777 778 return idr_find(&worker_pool_idr, pool_id); 779 } 780 781 /** 782 * get_work_pool_id - return the worker pool ID a given work is associated with 783 * @work: the work item of interest 784 * 785 * Return: The worker_pool ID @work was last associated with. 786 * %WORK_OFFQ_POOL_NONE if none. 787 */ 788 static int get_work_pool_id(struct work_struct *work) 789 { 790 unsigned long data = atomic_long_read(&work->data); 791 792 if (data & WORK_STRUCT_PWQ) 793 return work_struct_pwq(data)->pool->id; 794 795 return data >> WORK_OFFQ_POOL_SHIFT; 796 } 797 798 static void mark_work_canceling(struct work_struct *work) 799 { 800 unsigned long pool_id = get_work_pool_id(work); 801 802 pool_id <<= WORK_OFFQ_POOL_SHIFT; 803 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING); 804 } 805 806 static bool work_is_canceling(struct work_struct *work) 807 { 808 unsigned long data = atomic_long_read(&work->data); 809 810 return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING); 811 } 812 813 /* 814 * Policy functions. 
These define the policies on how the global worker 815 * pools are managed. Unless noted otherwise, these functions assume that 816 * they're being called with pool->lock held. 817 */ 818 819 /* 820 * Need to wake up a worker? Called from anything but currently 821 * running workers. 822 * 823 * Note that, because unbound workers never contribute to nr_running, this 824 * function will always return %true for unbound pools as long as the 825 * worklist isn't empty. 826 */ 827 static bool need_more_worker(struct worker_pool *pool) 828 { 829 return !list_empty(&pool->worklist) && !pool->nr_running; 830 } 831 832 /* Can I start working? Called from busy but !running workers. */ 833 static bool may_start_working(struct worker_pool *pool) 834 { 835 return pool->nr_idle; 836 } 837 838 /* Do I need to keep working? Called from currently running workers. */ 839 static bool keep_working(struct worker_pool *pool) 840 { 841 return !list_empty(&pool->worklist) && (pool->nr_running <= 1); 842 } 843 844 /* Do we need a new worker? Called from manager. */ 845 static bool need_to_create_worker(struct worker_pool *pool) 846 { 847 return need_more_worker(pool) && !may_start_working(pool); 848 } 849 850 /* Do we have too many workers and should some go away? */ 851 static bool too_many_workers(struct worker_pool *pool) 852 { 853 bool managing = pool->flags & POOL_MANAGER_ACTIVE; 854 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ 855 int nr_busy = pool->nr_workers - nr_idle; 856 857 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; 858 } 859 860 /** 861 * worker_set_flags - set worker flags and adjust nr_running accordingly 862 * @worker: self 863 * @flags: flags to set 864 * 865 * Set @flags in @worker->flags and adjust nr_running accordingly. 866 */ 867 static inline void worker_set_flags(struct worker *worker, unsigned int flags) 868 { 869 struct worker_pool *pool = worker->pool; 870 871 lockdep_assert_held(&pool->lock); 872 873 /* If transitioning into NOT_RUNNING, adjust nr_running. */ 874 if ((flags & WORKER_NOT_RUNNING) && 875 !(worker->flags & WORKER_NOT_RUNNING)) { 876 pool->nr_running--; 877 } 878 879 worker->flags |= flags; 880 } 881 882 /** 883 * worker_clr_flags - clear worker flags and adjust nr_running accordingly 884 * @worker: self 885 * @flags: flags to clear 886 * 887 * Clear @flags in @worker->flags and adjust nr_running accordingly. 888 */ 889 static inline void worker_clr_flags(struct worker *worker, unsigned int flags) 890 { 891 struct worker_pool *pool = worker->pool; 892 unsigned int oflags = worker->flags; 893 894 lockdep_assert_held(&pool->lock); 895 896 worker->flags &= ~flags; 897 898 /* 899 * If transitioning out of NOT_RUNNING, increment nr_running. Note 900 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask 901 * of multiple flags, not a single flag. 902 */ 903 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) 904 if (!(worker->flags & WORKER_NOT_RUNNING)) 905 pool->nr_running++; 906 } 907 908 /* Return the first idle worker. Called with pool->lock held. */ 909 static struct worker *first_idle_worker(struct worker_pool *pool) 910 { 911 if (unlikely(list_empty(&pool->idle_list))) 912 return NULL; 913 914 return list_first_entry(&pool->idle_list, struct worker, entry); 915 } 916 917 /** 918 * worker_enter_idle - enter idle state 919 * @worker: worker which is entering idle state 920 * 921 * @worker is entering idle state. Update stats and idle timer if 922 * necessary. 
923 * 924 * LOCKING: 925 * raw_spin_lock_irq(pool->lock). 926 */ 927 static void worker_enter_idle(struct worker *worker) 928 { 929 struct worker_pool *pool = worker->pool; 930 931 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) || 932 WARN_ON_ONCE(!list_empty(&worker->entry) && 933 (worker->hentry.next || worker->hentry.pprev))) 934 return; 935 936 /* can't use worker_set_flags(), also called from create_worker() */ 937 worker->flags |= WORKER_IDLE; 938 pool->nr_idle++; 939 worker->last_active = jiffies; 940 941 /* idle_list is LIFO */ 942 list_add(&worker->entry, &pool->idle_list); 943 944 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) 945 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); 946 947 /* Sanity check nr_running. */ 948 WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running); 949 } 950 951 /** 952 * worker_leave_idle - leave idle state 953 * @worker: worker which is leaving idle state 954 * 955 * @worker is leaving idle state. Update stats. 956 * 957 * LOCKING: 958 * raw_spin_lock_irq(pool->lock). 959 */ 960 static void worker_leave_idle(struct worker *worker) 961 { 962 struct worker_pool *pool = worker->pool; 963 964 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE))) 965 return; 966 worker_clr_flags(worker, WORKER_IDLE); 967 pool->nr_idle--; 968 list_del_init(&worker->entry); 969 } 970 971 /** 972 * find_worker_executing_work - find worker which is executing a work 973 * @pool: pool of interest 974 * @work: work to find worker for 975 * 976 * Find a worker which is executing @work on @pool by searching 977 * @pool->busy_hash which is keyed by the address of @work. For a worker 978 * to match, its current execution should match the address of @work and 979 * its work function. This is to avoid unwanted dependency between 980 * unrelated work executions through a work item being recycled while still 981 * being executed. 982 * 983 * This is a bit tricky. A work item may be freed once its execution 984 * starts and nothing prevents the freed area from being recycled for 985 * another work item. If the same work item address ends up being reused 986 * before the original execution finishes, workqueue will identify the 987 * recycled work item as currently executing and make it wait until the 988 * current execution finishes, introducing an unwanted dependency. 989 * 990 * This function checks the work item address and work function to avoid 991 * false positives. Note that this isn't complete as one may construct a 992 * work function which can introduce dependency onto itself through a 993 * recycled work item. Well, if somebody wants to shoot oneself in the 994 * foot that badly, there's only so much we can do, and if such deadlock 995 * actually occurs, it should be easy to locate the culprit work function. 996 * 997 * CONTEXT: 998 * raw_spin_lock_irq(pool->lock). 999 * 1000 * Return: 1001 * Pointer to worker which is executing @work if found, %NULL 1002 * otherwise. 
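 *
 * For example, the collision handling in assign_work() below boils down to
 * the following (illustrative sketch):
 *
 *	collision = find_worker_executing_work(pool, work);
 *	if (collision)
 *		move_linked_works(work, &collision->scheduled, nextp);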
1003 */ 1004 static struct worker *find_worker_executing_work(struct worker_pool *pool, 1005 struct work_struct *work) 1006 { 1007 struct worker *worker; 1008 1009 hash_for_each_possible(pool->busy_hash, worker, hentry, 1010 (unsigned long)work) 1011 if (worker->current_work == work && 1012 worker->current_func == work->func) 1013 return worker; 1014 1015 return NULL; 1016 } 1017 1018 /** 1019 * move_linked_works - move linked works to a list 1020 * @work: start of series of works to be scheduled 1021 * @head: target list to append @work to 1022 * @nextp: out parameter for nested worklist walking 1023 * 1024 * Schedule linked works starting from @work to @head. Work series to be 1025 * scheduled starts at @work and includes any consecutive work with 1026 * WORK_STRUCT_LINKED set in its predecessor. See assign_work() for details on 1027 * @nextp. 1028 * 1029 * CONTEXT: 1030 * raw_spin_lock_irq(pool->lock). 1031 */ 1032 static void move_linked_works(struct work_struct *work, struct list_head *head, 1033 struct work_struct **nextp) 1034 { 1035 struct work_struct *n; 1036 1037 /* 1038 * Linked worklist will always end before the end of the list, 1039 * use NULL for list head. 1040 */ 1041 list_for_each_entry_safe_from(work, n, NULL, entry) { 1042 list_move_tail(&work->entry, head); 1043 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 1044 break; 1045 } 1046 1047 /* 1048 * If we're already inside safe list traversal and have moved 1049 * multiple works to the scheduled queue, the next position 1050 * needs to be updated. 1051 */ 1052 if (nextp) 1053 *nextp = n; 1054 } 1055 1056 /** 1057 * assign_work - assign a work item and its linked work items to a worker 1058 * @work: work to assign 1059 * @worker: worker to assign to 1060 * @nextp: out parameter for nested worklist walking 1061 * 1062 * Assign @work and its linked work items to @worker. If @work is already being 1063 * executed by another worker in the same pool, it'll be punted there. 1064 * 1065 * If @nextp is not NULL, it's updated to point to the next work of the last 1066 * scheduled work. This allows assign_work() to be nested inside 1067 * list_for_each_entry_safe(). 1068 * 1069 * Returns %true if @work was successfully assigned to @worker. %false if @work 1070 * was punted to another worker already executing it. 1071 */ 1072 static bool assign_work(struct work_struct *work, struct worker *worker, 1073 struct work_struct **nextp) 1074 { 1075 struct worker_pool *pool = worker->pool; 1076 struct worker *collision; 1077 1078 lockdep_assert_held(&pool->lock); 1079 1080 /* 1081 * A single work shouldn't be executed concurrently by multiple workers. 1082 * __queue_work() ensures that @work doesn't jump to a different pool 1083 * while still running in the previous pool. Here, we should ensure that 1084 * @work is not executed concurrently by multiple workers from the same 1085 * pool. Check whether anyone is already processing the work. If so, 1086 * defer the work to the currently executing one. 1087 */ 1088 collision = find_worker_executing_work(pool, work); 1089 if (unlikely(collision)) { 1090 move_linked_works(work, &collision->scheduled, nextp); 1091 return false; 1092 } 1093 1094 move_linked_works(work, &worker->scheduled, nextp); 1095 return true; 1096 } 1097 1098 /** 1099 * kick_pool - wake up an idle worker if necessary 1100 * @pool: pool to kick 1101 * 1102 * @pool may have pending work items. Wake up worker if necessary. Returns 1103 * whether a worker was woken up. 
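 *
 * A typical call site puts the work on the worklist first and then kicks the
 * pool, as __queue_work() below does (sketch):
 *
 *	insert_work(pwq, work, &pool->worklist, work_flags);
 *	kick_pool(pool);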
1104 */ 1105 static bool kick_pool(struct worker_pool *pool) 1106 { 1107 struct worker *worker = first_idle_worker(pool); 1108 struct task_struct *p; 1109 1110 lockdep_assert_held(&pool->lock); 1111 1112 if (!need_more_worker(pool) || !worker) 1113 return false; 1114 1115 p = worker->task; 1116 1117 #ifdef CONFIG_SMP 1118 /* 1119 * Idle @worker is about to execute @work and waking up provides an 1120 * opportunity to migrate @worker at a lower cost by setting the task's 1121 * wake_cpu field. Let's see if we want to move @worker to improve 1122 * execution locality. 1123 * 1124 * We're waking the worker that went idle the latest and there's some 1125 * chance that @worker is marked idle but hasn't gone off CPU yet. If 1126 * so, setting the wake_cpu won't do anything. As this is a best-effort 1127 * optimization and the race window is narrow, let's leave as-is for 1128 * now. If this becomes pronounced, we can skip over workers which are 1129 * still on cpu when picking an idle worker. 1130 * 1131 * If @pool has non-strict affinity, @worker might have ended up outside 1132 * its affinity scope. Repatriate. 1133 */ 1134 if (!pool->attrs->affn_strict && 1135 !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) { 1136 struct work_struct *work = list_first_entry(&pool->worklist, 1137 struct work_struct, entry); 1138 p->wake_cpu = cpumask_any_distribute(pool->attrs->__pod_cpumask); 1139 get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++; 1140 } 1141 #endif 1142 wake_up_process(p); 1143 return true; 1144 } 1145 1146 #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT 1147 1148 /* 1149 * Concurrency-managed per-cpu work items that hog CPU for longer than 1150 * wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism, 1151 * which prevents them from stalling other concurrency-managed work items. If a 1152 * work function keeps triggering this mechanism, it's likely that the work item 1153 * should be using an unbound workqueue instead. 1154 * 1155 * wq_cpu_intensive_report() tracks work functions which trigger such conditions 1156 * and report them so that they can be examined and converted to use unbound 1157 * workqueues as appropriate. To avoid flooding the console, each violating work 1158 * function is tracked and reported with exponential backoff. 1159 */ 1160 #define WCI_MAX_ENTS 128 1161 1162 struct wci_ent { 1163 work_func_t func; 1164 atomic64_t cnt; 1165 struct hlist_node hash_node; 1166 }; 1167 1168 static struct wci_ent wci_ents[WCI_MAX_ENTS]; 1169 static int wci_nr_ents; 1170 static DEFINE_RAW_SPINLOCK(wci_lock); 1171 static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS)); 1172 1173 static struct wci_ent *wci_find_ent(work_func_t func) 1174 { 1175 struct wci_ent *ent; 1176 1177 hash_for_each_possible_rcu(wci_hash, ent, hash_node, 1178 (unsigned long)func) { 1179 if (ent->func == func) 1180 return ent; 1181 } 1182 return NULL; 1183 } 1184 1185 static void wq_cpu_intensive_report(work_func_t func) 1186 { 1187 struct wci_ent *ent; 1188 1189 restart: 1190 ent = wci_find_ent(func); 1191 if (ent) { 1192 u64 cnt; 1193 1194 /* 1195 * Start reporting from the fourth time and back off 1196 * exponentially. 1197 */ 1198 cnt = atomic64_inc_return_relaxed(&ent->cnt); 1199 if (cnt >= 4 && is_power_of_2(cnt)) 1200 printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n", 1201 ent->func, wq_cpu_intensive_thresh_us, 1202 atomic64_read(&ent->cnt)); 1203 return; 1204 } 1205 1206 /* 1207 * @func is a new violation. 
Allocate a new entry for it. If wcn_ents[] 1208 * is exhausted, something went really wrong and we probably made enough 1209 * noise already. 1210 */ 1211 if (wci_nr_ents >= WCI_MAX_ENTS) 1212 return; 1213 1214 raw_spin_lock(&wci_lock); 1215 1216 if (wci_nr_ents >= WCI_MAX_ENTS) { 1217 raw_spin_unlock(&wci_lock); 1218 return; 1219 } 1220 1221 if (wci_find_ent(func)) { 1222 raw_spin_unlock(&wci_lock); 1223 goto restart; 1224 } 1225 1226 ent = &wci_ents[wci_nr_ents++]; 1227 ent->func = func; 1228 atomic64_set(&ent->cnt, 1); 1229 hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func); 1230 1231 raw_spin_unlock(&wci_lock); 1232 } 1233 1234 #else /* CONFIG_WQ_CPU_INTENSIVE_REPORT */ 1235 static void wq_cpu_intensive_report(work_func_t func) {} 1236 #endif /* CONFIG_WQ_CPU_INTENSIVE_REPORT */ 1237 1238 /** 1239 * wq_worker_running - a worker is running again 1240 * @task: task waking up 1241 * 1242 * This function is called when a worker returns from schedule() 1243 */ 1244 void wq_worker_running(struct task_struct *task) 1245 { 1246 struct worker *worker = kthread_data(task); 1247 1248 if (!READ_ONCE(worker->sleeping)) 1249 return; 1250 1251 /* 1252 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check 1253 * and the nr_running increment below, we may ruin the nr_running reset 1254 * and leave with an unexpected pool->nr_running == 1 on the newly unbound 1255 * pool. Protect against such race. 1256 */ 1257 preempt_disable(); 1258 if (!(worker->flags & WORKER_NOT_RUNNING)) 1259 worker->pool->nr_running++; 1260 preempt_enable(); 1261 1262 /* 1263 * CPU intensive auto-detection cares about how long a work item hogged 1264 * CPU without sleeping. Reset the starting timestamp on wakeup. 1265 */ 1266 worker->current_at = worker->task->se.sum_exec_runtime; 1267 1268 WRITE_ONCE(worker->sleeping, 0); 1269 } 1270 1271 /** 1272 * wq_worker_sleeping - a worker is going to sleep 1273 * @task: task going to sleep 1274 * 1275 * This function is called from schedule() when a busy worker is 1276 * going to sleep. 1277 */ 1278 void wq_worker_sleeping(struct task_struct *task) 1279 { 1280 struct worker *worker = kthread_data(task); 1281 struct worker_pool *pool; 1282 1283 /* 1284 * Rescuers, which may not have all the fields set up like normal 1285 * workers, also reach here, let's not access anything before 1286 * checking NOT_RUNNING. 1287 */ 1288 if (worker->flags & WORKER_NOT_RUNNING) 1289 return; 1290 1291 pool = worker->pool; 1292 1293 /* Return if preempted before wq_worker_running() was reached */ 1294 if (READ_ONCE(worker->sleeping)) 1295 return; 1296 1297 WRITE_ONCE(worker->sleeping, 1); 1298 raw_spin_lock_irq(&pool->lock); 1299 1300 /* 1301 * Recheck in case unbind_workers() preempted us. We don't 1302 * want to decrement nr_running after the worker is unbound 1303 * and nr_running has been reset. 1304 */ 1305 if (worker->flags & WORKER_NOT_RUNNING) { 1306 raw_spin_unlock_irq(&pool->lock); 1307 return; 1308 } 1309 1310 pool->nr_running--; 1311 if (kick_pool(pool)) 1312 worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++; 1313 1314 raw_spin_unlock_irq(&pool->lock); 1315 } 1316 1317 /** 1318 * wq_worker_tick - a scheduler tick occurred while a kworker is running 1319 * @task: task currently running 1320 * 1321 * Called from scheduler_tick(). We're in the IRQ context and the current 1322 * worker's fields which follow the 'K' locking rule can be accessed safely. 
1323 */ 1324 void wq_worker_tick(struct task_struct *task) 1325 { 1326 struct worker *worker = kthread_data(task); 1327 struct pool_workqueue *pwq = worker->current_pwq; 1328 struct worker_pool *pool = worker->pool; 1329 1330 if (!pwq) 1331 return; 1332 1333 pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC; 1334 1335 if (!wq_cpu_intensive_thresh_us) 1336 return; 1337 1338 /* 1339 * If the current worker is concurrency managed and hogged the CPU for 1340 * longer than wq_cpu_intensive_thresh_us, it's automatically marked 1341 * CPU_INTENSIVE to avoid stalling other concurrency-managed work items. 1342 * 1343 * Set @worker->sleeping means that @worker is in the process of 1344 * switching out voluntarily and won't be contributing to 1345 * @pool->nr_running until it wakes up. As wq_worker_sleeping() also 1346 * decrements ->nr_running, setting CPU_INTENSIVE here can lead to 1347 * double decrements. The task is releasing the CPU anyway. Let's skip. 1348 * We probably want to make this prettier in the future. 1349 */ 1350 if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) || 1351 worker->task->se.sum_exec_runtime - worker->current_at < 1352 wq_cpu_intensive_thresh_us * NSEC_PER_USEC) 1353 return; 1354 1355 raw_spin_lock(&pool->lock); 1356 1357 worker_set_flags(worker, WORKER_CPU_INTENSIVE); 1358 wq_cpu_intensive_report(worker->current_func); 1359 pwq->stats[PWQ_STAT_CPU_INTENSIVE]++; 1360 1361 if (kick_pool(pool)) 1362 pwq->stats[PWQ_STAT_CM_WAKEUP]++; 1363 1364 raw_spin_unlock(&pool->lock); 1365 } 1366 1367 /** 1368 * wq_worker_last_func - retrieve worker's last work function 1369 * @task: Task to retrieve last work function of. 1370 * 1371 * Determine the last function a worker executed. This is called from 1372 * the scheduler to get a worker's last known identity. 1373 * 1374 * CONTEXT: 1375 * raw_spin_lock_irq(rq->lock) 1376 * 1377 * This function is called during schedule() when a kworker is going 1378 * to sleep. It's used by psi to identify aggregation workers during 1379 * dequeuing, to allow periodic aggregation to shut-off when that 1380 * worker is the last task in the system or cgroup to go to sleep. 1381 * 1382 * As this function doesn't involve any workqueue-related locking, it 1383 * only returns stable values when called from inside the scheduler's 1384 * queuing and dequeuing paths, when @task, which must be a kworker, 1385 * is guaranteed to not be processing any works. 1386 * 1387 * Return: 1388 * The last work function %current executed as a worker, NULL if it 1389 * hasn't executed any work yet. 1390 */ 1391 work_func_t wq_worker_last_func(struct task_struct *task) 1392 { 1393 struct worker *worker = kthread_data(task); 1394 1395 return worker->last_func; 1396 } 1397 1398 /** 1399 * get_pwq - get an extra reference on the specified pool_workqueue 1400 * @pwq: pool_workqueue to get 1401 * 1402 * Obtain an extra reference on @pwq. The caller should guarantee that 1403 * @pwq has positive refcnt and be holding the matching pool->lock. 1404 */ 1405 static void get_pwq(struct pool_workqueue *pwq) 1406 { 1407 lockdep_assert_held(&pwq->pool->lock); 1408 WARN_ON_ONCE(pwq->refcnt <= 0); 1409 pwq->refcnt++; 1410 } 1411 1412 /** 1413 * put_pwq - put a pool_workqueue reference 1414 * @pwq: pool_workqueue to put 1415 * 1416 * Drop a reference of @pwq. If its refcnt reaches zero, schedule its 1417 * destruction. The caller should be holding the matching pool->lock. 
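 *
 * The reference being dropped here is usually the one insert_work() took
 * with get_pwq() when the work item was added; pwq_dec_nr_in_flight() below
 * pairs the two once the item has left the pwq.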
1418 */ 1419 static void put_pwq(struct pool_workqueue *pwq) 1420 { 1421 lockdep_assert_held(&pwq->pool->lock); 1422 if (likely(--pwq->refcnt)) 1423 return; 1424 /* 1425 * @pwq can't be released under pool->lock, bounce to a dedicated 1426 * kthread_worker to avoid A-A deadlocks. 1427 */ 1428 kthread_queue_work(pwq_release_worker, &pwq->release_work); 1429 } 1430 1431 /** 1432 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock 1433 * @pwq: pool_workqueue to put (can be %NULL) 1434 * 1435 * put_pwq() with locking. This function also allows %NULL @pwq. 1436 */ 1437 static void put_pwq_unlocked(struct pool_workqueue *pwq) 1438 { 1439 if (pwq) { 1440 /* 1441 * As both pwqs and pools are RCU protected, the 1442 * following lock operations are safe. 1443 */ 1444 raw_spin_lock_irq(&pwq->pool->lock); 1445 put_pwq(pwq); 1446 raw_spin_unlock_irq(&pwq->pool->lock); 1447 } 1448 } 1449 1450 static void pwq_activate_inactive_work(struct work_struct *work) 1451 { 1452 struct pool_workqueue *pwq = get_work_pwq(work); 1453 1454 trace_workqueue_activate_work(work); 1455 if (list_empty(&pwq->pool->worklist)) 1456 pwq->pool->watchdog_ts = jiffies; 1457 move_linked_works(work, &pwq->pool->worklist, NULL); 1458 __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work)); 1459 pwq->nr_active++; 1460 } 1461 1462 static void pwq_activate_first_inactive(struct pool_workqueue *pwq) 1463 { 1464 struct work_struct *work = list_first_entry(&pwq->inactive_works, 1465 struct work_struct, entry); 1466 1467 pwq_activate_inactive_work(work); 1468 } 1469 1470 /** 1471 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight 1472 * @pwq: pwq of interest 1473 * @work_data: work_data of work which left the queue 1474 * 1475 * A work either has completed or is removed from pending queue, 1476 * decrement nr_in_flight of its pwq and handle workqueue flushing. 1477 * 1478 * CONTEXT: 1479 * raw_spin_lock_irq(pool->lock). 1480 */ 1481 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data) 1482 { 1483 int color = get_work_color(work_data); 1484 1485 if (!(work_data & WORK_STRUCT_INACTIVE)) { 1486 pwq->nr_active--; 1487 if (!list_empty(&pwq->inactive_works)) { 1488 /* one down, submit an inactive one */ 1489 if (pwq->nr_active < pwq->max_active) 1490 pwq_activate_first_inactive(pwq); 1491 } 1492 } 1493 1494 pwq->nr_in_flight[color]--; 1495 1496 /* is flush in progress and are we at the flushing tip? */ 1497 if (likely(pwq->flush_color != color)) 1498 goto out_put; 1499 1500 /* are there still in-flight works? */ 1501 if (pwq->nr_in_flight[color]) 1502 goto out_put; 1503 1504 /* this pwq is done, clear flush_color */ 1505 pwq->flush_color = -1; 1506 1507 /* 1508 * If this was the last pwq, wake up the first flusher. It 1509 * will handle the rest. 1510 */ 1511 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) 1512 complete(&pwq->wq->first_flusher->done); 1513 out_put: 1514 put_pwq(pwq); 1515 } 1516 1517 /** 1518 * try_to_grab_pending - steal work item from worklist and disable irq 1519 * @work: work item to steal 1520 * @is_dwork: @work is a delayed_work 1521 * @flags: place to store irq state 1522 * 1523 * Try to grab PENDING bit of @work. This function can handle @work in any 1524 * stable state - idle, on timer or on worklist. 
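 *
 * Callers are expected to busy-retry on -EAGAIN, typically with a loop of
 * roughly the following shape (illustrative):
 *
 *	do {
 *		ret = try_to_grab_pending(work, is_dwork, &flags);
 *	} while (unlikely(ret == -EAGAIN));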
1525 * 1526 * Return: 1527 * 1528 * ======== ================================================================ 1529 * 1 if @work was pending and we successfully stole PENDING 1530 * 0 if @work was idle and we claimed PENDING 1531 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry 1532 * -ENOENT if someone else is canceling @work, this state may persist 1533 * for arbitrarily long 1534 * ======== ================================================================ 1535 * 1536 * Note: 1537 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting 1538 * interrupted while holding PENDING and @work off queue, irq must be 1539 * disabled on entry. This, combined with delayed_work->timer being 1540 * irqsafe, ensures that we return -EAGAIN for finite short period of time. 1541 * 1542 * On successful return, >= 0, irq is disabled and the caller is 1543 * responsible for releasing it using local_irq_restore(*@flags). 1544 * 1545 * This function is safe to call from any context including IRQ handler. 1546 */ 1547 static int try_to_grab_pending(struct work_struct *work, bool is_dwork, 1548 unsigned long *flags) 1549 { 1550 struct worker_pool *pool; 1551 struct pool_workqueue *pwq; 1552 1553 local_irq_save(*flags); 1554 1555 /* try to steal the timer if it exists */ 1556 if (is_dwork) { 1557 struct delayed_work *dwork = to_delayed_work(work); 1558 1559 /* 1560 * dwork->timer is irqsafe. If del_timer() fails, it's 1561 * guaranteed that the timer is not queued anywhere and not 1562 * running on the local CPU. 1563 */ 1564 if (likely(del_timer(&dwork->timer))) 1565 return 1; 1566 } 1567 1568 /* try to claim PENDING the normal way */ 1569 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) 1570 return 0; 1571 1572 rcu_read_lock(); 1573 /* 1574 * The queueing is in progress, or it is already queued. Try to 1575 * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 1576 */ 1577 pool = get_work_pool(work); 1578 if (!pool) 1579 goto fail; 1580 1581 raw_spin_lock(&pool->lock); 1582 /* 1583 * work->data is guaranteed to point to pwq only while the work 1584 * item is queued on pwq->wq, and both updating work->data to point 1585 * to pwq on queueing and to pool on dequeueing are done under 1586 * pwq->pool->lock. This in turn guarantees that, if work->data 1587 * points to pwq which is associated with a locked pool, the work 1588 * item is currently queued on that pool. 1589 */ 1590 pwq = get_work_pwq(work); 1591 if (pwq && pwq->pool == pool) { 1592 debug_work_deactivate(work); 1593 1594 /* 1595 * A cancelable inactive work item must be in the 1596 * pwq->inactive_works since a queued barrier can't be 1597 * canceled (see the comments in insert_wq_barrier()). 1598 * 1599 * An inactive work item cannot be grabbed directly because 1600 * it might have linked barrier work items which, if left 1601 * on the inactive_works list, will confuse pwq->nr_active 1602 * management later on and cause stall. Make sure the work 1603 * item is activated before grabbing. 
1604 */ 1605 if (*work_data_bits(work) & WORK_STRUCT_INACTIVE) 1606 pwq_activate_inactive_work(work); 1607 1608 list_del_init(&work->entry); 1609 pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); 1610 1611 /* work->data points to pwq iff queued, point to pool */ 1612 set_work_pool_and_keep_pending(work, pool->id); 1613 1614 raw_spin_unlock(&pool->lock); 1615 rcu_read_unlock(); 1616 return 1; 1617 } 1618 raw_spin_unlock(&pool->lock); 1619 fail: 1620 rcu_read_unlock(); 1621 local_irq_restore(*flags); 1622 if (work_is_canceling(work)) 1623 return -ENOENT; 1624 cpu_relax(); 1625 return -EAGAIN; 1626 } 1627 1628 /** 1629 * insert_work - insert a work into a pool 1630 * @pwq: pwq @work belongs to 1631 * @work: work to insert 1632 * @head: insertion point 1633 * @extra_flags: extra WORK_STRUCT_* flags to set 1634 * 1635 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to 1636 * work_struct flags. 1637 * 1638 * CONTEXT: 1639 * raw_spin_lock_irq(pool->lock). 1640 */ 1641 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, 1642 struct list_head *head, unsigned int extra_flags) 1643 { 1644 debug_work_activate(work); 1645 1646 /* record the work call stack in order to print it in KASAN reports */ 1647 kasan_record_aux_stack_noalloc(work); 1648 1649 /* we own @work, set data and link */ 1650 set_work_pwq(work, pwq, extra_flags); 1651 list_add_tail(&work->entry, head); 1652 get_pwq(pwq); 1653 } 1654 1655 /* 1656 * Test whether @work is being queued from another work executing on the 1657 * same workqueue. 1658 */ 1659 static bool is_chained_work(struct workqueue_struct *wq) 1660 { 1661 struct worker *worker; 1662 1663 worker = current_wq_worker(); 1664 /* 1665 * Return %true iff I'm a worker executing a work item on @wq. If 1666 * I'm @worker, it's safe to dereference it without locking. 1667 */ 1668 return worker && worker->current_pwq->wq == wq; 1669 } 1670 1671 /* 1672 * When queueing an unbound work item to a wq, prefer local CPU if allowed 1673 * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to 1674 * avoid perturbing sensitive tasks. 1675 */ 1676 static int wq_select_unbound_cpu(int cpu) 1677 { 1678 int new_cpu; 1679 1680 if (likely(!wq_debug_force_rr_cpu)) { 1681 if (cpumask_test_cpu(cpu, wq_unbound_cpumask)) 1682 return cpu; 1683 } else { 1684 pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n"); 1685 } 1686 1687 if (cpumask_empty(wq_unbound_cpumask)) 1688 return cpu; 1689 1690 new_cpu = __this_cpu_read(wq_rr_cpu_last); 1691 new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask); 1692 if (unlikely(new_cpu >= nr_cpu_ids)) { 1693 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask); 1694 if (unlikely(new_cpu >= nr_cpu_ids)) 1695 return cpu; 1696 } 1697 __this_cpu_write(wq_rr_cpu_last, new_cpu); 1698 1699 return new_cpu; 1700 } 1701 1702 static void __queue_work(int cpu, struct workqueue_struct *wq, 1703 struct work_struct *work) 1704 { 1705 struct pool_workqueue *pwq; 1706 struct worker_pool *last_pool, *pool; 1707 unsigned int work_flags; 1708 unsigned int req_cpu = cpu; 1709 1710 /* 1711 * While a work item is PENDING && off queue, a task trying to 1712 * steal the PENDING will busy-loop waiting for it to either get 1713 * queued or lose PENDING. Grabbing PENDING and queueing should 1714 * happen with IRQ disabled. 1715 */ 1716 lockdep_assert_irqs_disabled(); 1717 1718 1719 /* 1720 * For a draining wq, only works from the same workqueue are 1721 * allowed. 
The __WQ_DESTROYING helps to spot the issue that 1722 * queues a new work item to a wq after destroy_workqueue(wq). 1723 */ 1724 if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) && 1725 WARN_ON_ONCE(!is_chained_work(wq)))) 1726 return; 1727 rcu_read_lock(); 1728 retry: 1729 /* pwq which will be used unless @work is executing elsewhere */ 1730 if (req_cpu == WORK_CPU_UNBOUND) { 1731 if (wq->flags & WQ_UNBOUND) 1732 cpu = wq_select_unbound_cpu(raw_smp_processor_id()); 1733 else 1734 cpu = raw_smp_processor_id(); 1735 } 1736 1737 pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu)); 1738 pool = pwq->pool; 1739 1740 /* 1741 * If @work was previously on a different pool, it might still be 1742 * running there, in which case the work needs to be queued on that 1743 * pool to guarantee non-reentrancy. 1744 */ 1745 last_pool = get_work_pool(work); 1746 if (last_pool && last_pool != pool) { 1747 struct worker *worker; 1748 1749 raw_spin_lock(&last_pool->lock); 1750 1751 worker = find_worker_executing_work(last_pool, work); 1752 1753 if (worker && worker->current_pwq->wq == wq) { 1754 pwq = worker->current_pwq; 1755 pool = pwq->pool; 1756 WARN_ON_ONCE(pool != last_pool); 1757 } else { 1758 /* meh... not running there, queue here */ 1759 raw_spin_unlock(&last_pool->lock); 1760 raw_spin_lock(&pool->lock); 1761 } 1762 } else { 1763 raw_spin_lock(&pool->lock); 1764 } 1765 1766 /* 1767 * pwq is determined and locked. For unbound pools, we could have raced 1768 * with pwq release and it could already be dead. If its refcnt is zero, 1769 * repeat pwq selection. Note that unbound pwqs never die without 1770 * another pwq replacing it in cpu_pwq or while work items are executing 1771 * on it, so the retrying is guaranteed to make forward-progress. 1772 */ 1773 if (unlikely(!pwq->refcnt)) { 1774 if (wq->flags & WQ_UNBOUND) { 1775 raw_spin_unlock(&pool->lock); 1776 cpu_relax(); 1777 goto retry; 1778 } 1779 /* oops */ 1780 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt", 1781 wq->name, cpu); 1782 } 1783 1784 /* pwq determined, queue */ 1785 trace_workqueue_queue_work(req_cpu, pwq, work); 1786 1787 if (WARN_ON(!list_empty(&work->entry))) 1788 goto out; 1789 1790 pwq->nr_in_flight[pwq->work_color]++; 1791 work_flags = work_color_to_flags(pwq->work_color); 1792 1793 if (likely(pwq->nr_active < pwq->max_active)) { 1794 if (list_empty(&pool->worklist)) 1795 pool->watchdog_ts = jiffies; 1796 1797 trace_workqueue_activate_work(work); 1798 pwq->nr_active++; 1799 insert_work(pwq, work, &pool->worklist, work_flags); 1800 kick_pool(pool); 1801 } else { 1802 work_flags |= WORK_STRUCT_INACTIVE; 1803 insert_work(pwq, work, &pwq->inactive_works, work_flags); 1804 } 1805 1806 out: 1807 raw_spin_unlock(&pool->lock); 1808 rcu_read_unlock(); 1809 } 1810 1811 /** 1812 * queue_work_on - queue work on specific cpu 1813 * @cpu: CPU number to execute work on 1814 * @wq: workqueue to use 1815 * @work: work to queue 1816 * 1817 * We queue the work to a specific CPU, the caller must ensure it 1818 * can't go away. Callers that fail to ensure that the specified 1819 * CPU cannot go away will execute on a randomly chosen CPU. 1820 * But note well that callers specifying a CPU that never has been 1821 * online will get a splat. 1822 * 1823 * Return: %false if @work was already on a queue, %true otherwise. 
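 *
 * Illustrative usage sketch (an editorial addition, not part of the
 * original source; my_wq and my_work_fn are hypothetical):
 *
 *	static void my_work_fn(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	if (!queue_work_on(1, my_wq, &my_work))
 *		pr_debug("my_work was already pending\n");
 *
 * The work item is executed by a worker of the pool serving CPU 1,
 * provided the caller keeps that CPU from going away as described above.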
 */
bool queue_work_on(int cpu, struct workqueue_struct *wq,
		   struct work_struct *work)
{
	bool ret = false;
	unsigned long flags;

	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = true;
	}

	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL(queue_work_on);

/**
 * select_numa_node_cpu - Select a CPU based on NUMA node
 * @node: NUMA node ID that we want to select a CPU from
 *
 * This function will attempt to find a "random" cpu available on a given
 * node. If there are no CPUs available on the given node it will return
 * WORK_CPU_UNBOUND indicating that we should just schedule to any
 * available CPU if we need to schedule this work.
 */
static int select_numa_node_cpu(int node)
{
	int cpu;

	/* Delay binding to CPU if node is not valid or online */
	if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
		return WORK_CPU_UNBOUND;

	/* Use local node/cpu if we are already there */
	cpu = raw_smp_processor_id();
	if (node == cpu_to_node(cpu))
		return cpu;

	/* Use "random", otherwise known as "first", online CPU of the node */
	cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);

	/* If CPU is valid return that, otherwise just defer */
	return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
}

/**
 * queue_work_node - queue work on a "random" cpu for a given NUMA node
 * @node: NUMA node that we are targeting the work for
 * @wq: workqueue to use
 * @work: work to queue
 *
 * We queue the work to a "random" CPU within a given NUMA node. The basic
 * idea here is to provide a way to somehow associate work with a given
 * NUMA node.
 *
 * This function will only make a best effort attempt at getting this onto
 * the right NUMA node. If no node is requested or the requested node is
 * offline then we just fall back to standard queue_work behavior.
 *
 * Currently the "random" CPU ends up being the first available CPU in the
 * intersection of cpu_online_mask and the cpumask of the node, unless we
 * are running on the node. In that case we just use the current CPU.
 *
 * Return: %false if @work was already on a queue, %true otherwise.
 */
bool queue_work_node(int node, struct workqueue_struct *wq,
		     struct work_struct *work)
{
	unsigned long flags;
	bool ret = false;

	/*
	 * This current implementation is specific to unbound workqueues.
	 * Specifically we only return the first available CPU for a given
	 * node instead of cycling through individual CPUs within the node.
	 *
	 * If this is used with a per-cpu workqueue then the logic in
	 * select_numa_node_cpu() would need to be updated to allow for
	 * some round robin type logic.
1906 */ 1907 WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)); 1908 1909 local_irq_save(flags); 1910 1911 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 1912 int cpu = select_numa_node_cpu(node); 1913 1914 __queue_work(cpu, wq, work); 1915 ret = true; 1916 } 1917 1918 local_irq_restore(flags); 1919 return ret; 1920 } 1921 EXPORT_SYMBOL_GPL(queue_work_node); 1922 1923 void delayed_work_timer_fn(struct timer_list *t) 1924 { 1925 struct delayed_work *dwork = from_timer(dwork, t, timer); 1926 1927 /* should have been called from irqsafe timer with irq already off */ 1928 __queue_work(dwork->cpu, dwork->wq, &dwork->work); 1929 } 1930 EXPORT_SYMBOL(delayed_work_timer_fn); 1931 1932 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, 1933 struct delayed_work *dwork, unsigned long delay) 1934 { 1935 struct timer_list *timer = &dwork->timer; 1936 struct work_struct *work = &dwork->work; 1937 1938 WARN_ON_ONCE(!wq); 1939 WARN_ON_ONCE(timer->function != delayed_work_timer_fn); 1940 WARN_ON_ONCE(timer_pending(timer)); 1941 WARN_ON_ONCE(!list_empty(&work->entry)); 1942 1943 /* 1944 * If @delay is 0, queue @dwork->work immediately. This is for 1945 * both optimization and correctness. The earliest @timer can 1946 * expire is on the closest next tick and delayed_work users depend 1947 * on that there's no such delay when @delay is 0. 1948 */ 1949 if (!delay) { 1950 __queue_work(cpu, wq, &dwork->work); 1951 return; 1952 } 1953 1954 dwork->wq = wq; 1955 dwork->cpu = cpu; 1956 timer->expires = jiffies + delay; 1957 1958 if (unlikely(cpu != WORK_CPU_UNBOUND)) 1959 add_timer_on(timer, cpu); 1960 else 1961 add_timer(timer); 1962 } 1963 1964 /** 1965 * queue_delayed_work_on - queue work on specific CPU after delay 1966 * @cpu: CPU number to execute work on 1967 * @wq: workqueue to use 1968 * @dwork: work to queue 1969 * @delay: number of jiffies to wait before queueing 1970 * 1971 * Return: %false if @work was already on a queue, %true otherwise. If 1972 * @delay is zero and @dwork is idle, it will be scheduled for immediate 1973 * execution. 1974 */ 1975 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 1976 struct delayed_work *dwork, unsigned long delay) 1977 { 1978 struct work_struct *work = &dwork->work; 1979 bool ret = false; 1980 unsigned long flags; 1981 1982 /* read the comment in __queue_work() */ 1983 local_irq_save(flags); 1984 1985 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 1986 __queue_delayed_work(cpu, wq, dwork, delay); 1987 ret = true; 1988 } 1989 1990 local_irq_restore(flags); 1991 return ret; 1992 } 1993 EXPORT_SYMBOL(queue_delayed_work_on); 1994 1995 /** 1996 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU 1997 * @cpu: CPU number to execute work on 1998 * @wq: workqueue to use 1999 * @dwork: work to queue 2000 * @delay: number of jiffies to wait before queueing 2001 * 2002 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise, 2003 * modify @dwork's timer so that it expires after @delay. If @delay is 2004 * zero, @work is guaranteed to be scheduled immediately regardless of its 2005 * current state. 2006 * 2007 * Return: %false if @dwork was idle and queued, %true if @dwork was 2008 * pending and its timer was modified. 2009 * 2010 * This function is safe to call from any context including IRQ handler. 2011 * See try_to_grab_pending() for details. 
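 *
 * Illustrative usage sketch (an editorial addition; my_wq and my_dwork
 * are hypothetical):
 *
 *	static void my_timeout_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);
 *
 *	mod_delayed_work_on(WORK_CPU_UNBOUND, my_wq, &my_dwork, HZ);
 *
 * Each call pushes the expiry out to one second from now whether or not
 * my_dwork was already pending, which makes it a convenient primitive for
 * resettable timeouts.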
2012 */ 2013 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 2014 struct delayed_work *dwork, unsigned long delay) 2015 { 2016 unsigned long flags; 2017 int ret; 2018 2019 do { 2020 ret = try_to_grab_pending(&dwork->work, true, &flags); 2021 } while (unlikely(ret == -EAGAIN)); 2022 2023 if (likely(ret >= 0)) { 2024 __queue_delayed_work(cpu, wq, dwork, delay); 2025 local_irq_restore(flags); 2026 } 2027 2028 /* -ENOENT from try_to_grab_pending() becomes %true */ 2029 return ret; 2030 } 2031 EXPORT_SYMBOL_GPL(mod_delayed_work_on); 2032 2033 static void rcu_work_rcufn(struct rcu_head *rcu) 2034 { 2035 struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu); 2036 2037 /* read the comment in __queue_work() */ 2038 local_irq_disable(); 2039 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); 2040 local_irq_enable(); 2041 } 2042 2043 /** 2044 * queue_rcu_work - queue work after a RCU grace period 2045 * @wq: workqueue to use 2046 * @rwork: work to queue 2047 * 2048 * Return: %false if @rwork was already pending, %true otherwise. Note 2049 * that a full RCU grace period is guaranteed only after a %true return. 2050 * While @rwork is guaranteed to be executed after a %false return, the 2051 * execution may happen before a full RCU grace period has passed. 2052 */ 2053 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork) 2054 { 2055 struct work_struct *work = &rwork->work; 2056 2057 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 2058 rwork->wq = wq; 2059 call_rcu_hurry(&rwork->rcu, rcu_work_rcufn); 2060 return true; 2061 } 2062 2063 return false; 2064 } 2065 EXPORT_SYMBOL(queue_rcu_work); 2066 2067 static struct worker *alloc_worker(int node) 2068 { 2069 struct worker *worker; 2070 2071 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node); 2072 if (worker) { 2073 INIT_LIST_HEAD(&worker->entry); 2074 INIT_LIST_HEAD(&worker->scheduled); 2075 INIT_LIST_HEAD(&worker->node); 2076 /* on creation a worker is in !idle && prep state */ 2077 worker->flags = WORKER_PREP; 2078 } 2079 return worker; 2080 } 2081 2082 static cpumask_t *pool_allowed_cpus(struct worker_pool *pool) 2083 { 2084 if (pool->cpu < 0 && pool->attrs->affn_strict) 2085 return pool->attrs->__pod_cpumask; 2086 else 2087 return pool->attrs->cpumask; 2088 } 2089 2090 /** 2091 * worker_attach_to_pool() - attach a worker to a pool 2092 * @worker: worker to be attached 2093 * @pool: the target pool 2094 * 2095 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and 2096 * cpu-binding of @worker are kept coordinated with the pool across 2097 * cpu-[un]hotplugs. 2098 */ 2099 static void worker_attach_to_pool(struct worker *worker, 2100 struct worker_pool *pool) 2101 { 2102 mutex_lock(&wq_pool_attach_mutex); 2103 2104 /* 2105 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains 2106 * stable across this function. See the comments above the flag 2107 * definition for details. 
 */
	if (pool->flags & POOL_DISASSOCIATED)
		worker->flags |= WORKER_UNBOUND;
	else
		kthread_set_per_cpu(worker->task, pool->cpu);

	if (worker->rescue_wq)
		set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool));

	list_add_tail(&worker->node, &pool->workers);
	worker->pool = pool;

	mutex_unlock(&wq_pool_attach_mutex);
}

/**
 * worker_detach_from_pool() - detach a worker from its pool
 * @worker: worker which is attached to its pool
 *
 * Undo the attaching which had been done in worker_attach_to_pool(). The
 * caller worker shouldn't access the pool after detaching unless it holds
 * another reference to the pool.
 */
static void worker_detach_from_pool(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;
	struct completion *detach_completion = NULL;

	mutex_lock(&wq_pool_attach_mutex);

	kthread_set_per_cpu(worker->task, -1);
	list_del(&worker->node);
	worker->pool = NULL;

	if (list_empty(&pool->workers) && list_empty(&pool->dying_workers))
		detach_completion = pool->detach_completion;
	mutex_unlock(&wq_pool_attach_mutex);

	/* clear leftover flags without pool->lock after it is detached */
	worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);

	if (detach_completion)
		complete(detach_completion);
}

/**
 * create_worker - create a new workqueue worker
 * @pool: pool the new worker will belong to
 *
 * Create and start a new worker which is attached to @pool.
 *
 * CONTEXT:
 * Might sleep. Does GFP_KERNEL allocations.
 *
 * Return:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct worker_pool *pool)
{
	struct worker *worker;
	int id;
	char id_buf[16];

	/* ID is needed to determine kthread name */
	id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
	if (id < 0) {
		pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n",
			    ERR_PTR(id));
		return NULL;
	}

	worker = alloc_worker(pool->node);
	if (!worker) {
		pr_err_once("workqueue: Failed to allocate a worker\n");
		goto fail;
	}

	worker->id = id;

	if (pool->cpu >= 0)
		snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
			 pool->attrs->nice < 0 ? "H" : "");
	else
		snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);

	worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
					      "kworker/%s", id_buf);
	if (IS_ERR(worker->task)) {
		if (PTR_ERR(worker->task) == -EINTR) {
			pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n",
			       id_buf);
		} else {
			pr_err_once("workqueue: Failed to create a worker thread: %pe",
				    worker->task);
		}
		goto fail;
	}

	set_user_nice(worker->task, pool->attrs->nice);
	kthread_bind_mask(worker->task, pool_allowed_cpus(pool));

	/* successful, attach the worker to the pool */
	worker_attach_to_pool(worker, pool);

	/* start the newly created worker */
	raw_spin_lock_irq(&pool->lock);

	worker->pool->nr_workers++;
	worker_enter_idle(worker);
	kick_pool(pool);

	/*
	 * @worker is waiting on a completion in kthread() and will trigger hung
	 * check if not woken up soon. As kick_pool() might not have woken it
	 * up, wake it up explicitly once more.
2223 */ 2224 wake_up_process(worker->task); 2225 2226 raw_spin_unlock_irq(&pool->lock); 2227 2228 return worker; 2229 2230 fail: 2231 ida_free(&pool->worker_ida, id); 2232 kfree(worker); 2233 return NULL; 2234 } 2235 2236 static void unbind_worker(struct worker *worker) 2237 { 2238 lockdep_assert_held(&wq_pool_attach_mutex); 2239 2240 kthread_set_per_cpu(worker->task, -1); 2241 if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask)) 2242 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0); 2243 else 2244 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0); 2245 } 2246 2247 static void wake_dying_workers(struct list_head *cull_list) 2248 { 2249 struct worker *worker, *tmp; 2250 2251 list_for_each_entry_safe(worker, tmp, cull_list, entry) { 2252 list_del_init(&worker->entry); 2253 unbind_worker(worker); 2254 /* 2255 * If the worker was somehow already running, then it had to be 2256 * in pool->idle_list when set_worker_dying() happened or we 2257 * wouldn't have gotten here. 2258 * 2259 * Thus, the worker must either have observed the WORKER_DIE 2260 * flag, or have set its state to TASK_IDLE. Either way, the 2261 * below will be observed by the worker and is safe to do 2262 * outside of pool->lock. 2263 */ 2264 wake_up_process(worker->task); 2265 } 2266 } 2267 2268 /** 2269 * set_worker_dying - Tag a worker for destruction 2270 * @worker: worker to be destroyed 2271 * @list: transfer worker away from its pool->idle_list and into list 2272 * 2273 * Tag @worker for destruction and adjust @pool stats accordingly. The worker 2274 * should be idle. 2275 * 2276 * CONTEXT: 2277 * raw_spin_lock_irq(pool->lock). 2278 */ 2279 static void set_worker_dying(struct worker *worker, struct list_head *list) 2280 { 2281 struct worker_pool *pool = worker->pool; 2282 2283 lockdep_assert_held(&pool->lock); 2284 lockdep_assert_held(&wq_pool_attach_mutex); 2285 2286 /* sanity check frenzy */ 2287 if (WARN_ON(worker->current_work) || 2288 WARN_ON(!list_empty(&worker->scheduled)) || 2289 WARN_ON(!(worker->flags & WORKER_IDLE))) 2290 return; 2291 2292 pool->nr_workers--; 2293 pool->nr_idle--; 2294 2295 worker->flags |= WORKER_DIE; 2296 2297 list_move(&worker->entry, list); 2298 list_move(&worker->node, &pool->dying_workers); 2299 } 2300 2301 /** 2302 * idle_worker_timeout - check if some idle workers can now be deleted. 2303 * @t: The pool's idle_timer that just expired 2304 * 2305 * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in 2306 * worker_leave_idle(), as a worker flicking between idle and active while its 2307 * pool is at the too_many_workers() tipping point would cause too much timer 2308 * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let 2309 * it expire and re-evaluate things from there. 
 */
static void idle_worker_timeout(struct timer_list *t)
{
	struct worker_pool *pool = from_timer(pool, t, idle_timer);
	bool do_cull = false;

	if (work_pending(&pool->idle_cull_work))
		return;

	raw_spin_lock_irq(&pool->lock);

	if (too_many_workers(pool)) {
		struct worker *worker;
		unsigned long expires;

		/* idle_list is kept in LIFO order, check the last one */
		worker = list_entry(pool->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
		do_cull = !time_before(jiffies, expires);

		if (!do_cull)
			mod_timer(&pool->idle_timer, expires);
	}
	raw_spin_unlock_irq(&pool->lock);

	if (do_cull)
		queue_work(system_unbound_wq, &pool->idle_cull_work);
}

/**
 * idle_cull_fn - cull workers that have been idle for too long.
 * @work: the pool's work for handling these idle workers
 *
 * This goes through a pool's idle workers and gets rid of those that have been
 * idle for at least IDLE_WORKER_TIMEOUT (5 minutes).
 *
 * We don't want to disturb isolated CPUs because of a pcpu kworker being
 * culled, so this also resets worker affinity. This requires a sleepable
 * context, hence the split between timer callback and work item.
 */
static void idle_cull_fn(struct work_struct *work)
{
	struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
	LIST_HEAD(cull_list);

	/*
	 * Grabbing wq_pool_attach_mutex here ensures an already-running worker
	 * cannot proceed beyond worker_detach_from_pool() in its self-destruct
	 * path. This is required as a previously-preempted worker could run after
	 * set_worker_dying() has happened but before wake_dying_workers() did.
	 */
	mutex_lock(&wq_pool_attach_mutex);
	raw_spin_lock_irq(&pool->lock);

	while (too_many_workers(pool)) {
		struct worker *worker;
		unsigned long expires;

		worker = list_entry(pool->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires)) {
			mod_timer(&pool->idle_timer, expires);
			break;
		}

		set_worker_dying(worker, &cull_list);
	}

	raw_spin_unlock_irq(&pool->lock);
	wake_dying_workers(&cull_list);
	mutex_unlock(&wq_pool_attach_mutex);
}

static void send_mayday(struct work_struct *work)
{
	struct pool_workqueue *pwq = get_work_pwq(work);
	struct workqueue_struct *wq = pwq->wq;

	lockdep_assert_held(&wq_mayday_lock);

	if (!wq->rescuer)
		return;

	/* mayday mayday mayday */
	if (list_empty(&pwq->mayday_node)) {
		/*
		 * If @pwq is for an unbound wq, its base ref may be put at
		 * any time due to an attribute change. Pin @pwq until the
		 * rescuer is done with it.
		 */
		get_pwq(pwq);
		list_add_tail(&pwq->mayday_node, &wq->maydays);
		wake_up_process(wq->rescuer->task);
		pwq->stats[PWQ_STAT_MAYDAY]++;
	}
}

static void pool_mayday_timeout(struct timer_list *t)
{
	struct worker_pool *pool = from_timer(pool, t, mayday_timer);
	struct work_struct *work;

	raw_spin_lock_irq(&pool->lock);
	raw_spin_lock(&wq_mayday_lock);	/* for wq->maydays */

	if (need_to_create_worker(pool)) {
		/*
		 * We've been trying to create a new worker but
		 * haven't been successful.
We might be hitting an 2420 * allocation deadlock. Send distress signals to 2421 * rescuers. 2422 */ 2423 list_for_each_entry(work, &pool->worklist, entry) 2424 send_mayday(work); 2425 } 2426 2427 raw_spin_unlock(&wq_mayday_lock); 2428 raw_spin_unlock_irq(&pool->lock); 2429 2430 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 2431 } 2432 2433 /** 2434 * maybe_create_worker - create a new worker if necessary 2435 * @pool: pool to create a new worker for 2436 * 2437 * Create a new worker for @pool if necessary. @pool is guaranteed to 2438 * have at least one idle worker on return from this function. If 2439 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is 2440 * sent to all rescuers with works scheduled on @pool to resolve 2441 * possible allocation deadlock. 2442 * 2443 * On return, need_to_create_worker() is guaranteed to be %false and 2444 * may_start_working() %true. 2445 * 2446 * LOCKING: 2447 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2448 * multiple times. Does GFP_KERNEL allocations. Called only from 2449 * manager. 2450 */ 2451 static void maybe_create_worker(struct worker_pool *pool) 2452 __releases(&pool->lock) 2453 __acquires(&pool->lock) 2454 { 2455 restart: 2456 raw_spin_unlock_irq(&pool->lock); 2457 2458 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ 2459 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); 2460 2461 while (true) { 2462 if (create_worker(pool) || !need_to_create_worker(pool)) 2463 break; 2464 2465 schedule_timeout_interruptible(CREATE_COOLDOWN); 2466 2467 if (!need_to_create_worker(pool)) 2468 break; 2469 } 2470 2471 del_timer_sync(&pool->mayday_timer); 2472 raw_spin_lock_irq(&pool->lock); 2473 /* 2474 * This is necessary even after a new worker was just successfully 2475 * created as @pool->lock was dropped and the new worker might have 2476 * already become busy. 2477 */ 2478 if (need_to_create_worker(pool)) 2479 goto restart; 2480 } 2481 2482 /** 2483 * manage_workers - manage worker pool 2484 * @worker: self 2485 * 2486 * Assume the manager role and manage the worker pool @worker belongs 2487 * to. At any given time, there can be only zero or one manager per 2488 * pool. The exclusion is handled automatically by this function. 2489 * 2490 * The caller can safely start processing works on false return. On 2491 * true return, it's guaranteed that need_to_create_worker() is false 2492 * and may_start_working() is true. 2493 * 2494 * CONTEXT: 2495 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2496 * multiple times. Does GFP_KERNEL allocations. 2497 * 2498 * Return: 2499 * %false if the pool doesn't need management and the caller can safely 2500 * start processing works, %true if management function was performed and 2501 * the conditions that the caller verified before calling the function may 2502 * no longer be true. 2503 */ 2504 static bool manage_workers(struct worker *worker) 2505 { 2506 struct worker_pool *pool = worker->pool; 2507 2508 if (pool->flags & POOL_MANAGER_ACTIVE) 2509 return false; 2510 2511 pool->flags |= POOL_MANAGER_ACTIVE; 2512 pool->manager = worker; 2513 2514 maybe_create_worker(pool); 2515 2516 pool->manager = NULL; 2517 pool->flags &= ~POOL_MANAGER_ACTIVE; 2518 rcuwait_wake_up(&manager_wait); 2519 return true; 2520 } 2521 2522 /** 2523 * process_one_work - process single work 2524 * @worker: self 2525 * @work: work to process 2526 * 2527 * Process @work. 
This function contains all the logic necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing. As long as the context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&pool->lock)
__acquires(&pool->lock)
{
	struct pool_workqueue *pwq = get_work_pwq(work);
	struct worker_pool *pool = worker->pool;
	unsigned long work_data;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it; we need to take
	 * this into account for lockdep too. To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &work->lockdep_map);
#endif
	/* ensure we're on the correct CPU */
	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
		     raw_smp_processor_id() != pool->cpu);

	/* claim and dequeue */
	debug_work_deactivate(work);
	hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
	worker->current_work = work;
	worker->current_func = work->func;
	worker->current_pwq = pwq;
	worker->current_at = worker->task->se.sum_exec_runtime;
	work_data = *work_data_bits(work);
	worker->current_color = get_work_color(work_data);

	/*
	 * Record wq name for cmdline and debug reporting, may get
	 * overridden through set_worker_desc().
	 */
	strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);

	list_del_init(&work->entry);

	/*
	 * CPU intensive works don't participate in concurrency management.
	 * They're the scheduler's responsibility. This takes @worker out
	 * of concurrency management and the next code block will chain
	 * execution of the pending work items.
	 */
	if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE))
		worker_set_flags(worker, WORKER_CPU_INTENSIVE);

	/*
	 * Kick @pool if necessary. It's always noop for per-cpu worker pools
	 * since nr_running would always be >= 1 at this point. This is used to
	 * chain execution of the pending work items for WORKER_NOT_RUNNING
	 * workers such as the UNBOUND and CPU_INTENSIVE ones.
	 */
	kick_pool(pool);

	/*
	 * Record the last pool and clear PENDING which should be the last
	 * update to @work. Also, do this inside @pool->lock so that
	 * PENDING and queued state changes happen together while IRQ is
	 * disabled.
	 */
	set_work_pool_and_clear_pending(work, pool->id);

	pwq->stats[PWQ_STAT_STARTED]++;
	raw_spin_unlock_irq(&pool->lock);

	lock_map_acquire(&pwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	/*
	 * Strictly speaking we should mark the invariant state without holding
	 * any locks, that is, before these two lock_map_acquire()'s.
	 *
	 * However, that would result in:
	 *
	 *	A(W1)
	 *	WFC(C)
	 *		A(W1)
	 *		C(C)
	 *
	 * Which would create W1->C->W1 dependencies, even though there is no
	 * actual deadlock possible.
There are two solutions, using a 2620 * read-recursive acquire on the work(queue) 'locks', but this will then 2621 * hit the lockdep limitation on recursive locks, or simply discard 2622 * these locks. 2623 * 2624 * AFAICT there is no possible deadlock scenario between the 2625 * flush_work() and complete() primitives (except for single-threaded 2626 * workqueues), so hiding them isn't a problem. 2627 */ 2628 lockdep_invariant_state(true); 2629 trace_workqueue_execute_start(work); 2630 worker->current_func(work); 2631 /* 2632 * While we must be careful to not use "work" after this, the trace 2633 * point will only record its address. 2634 */ 2635 trace_workqueue_execute_end(work, worker->current_func); 2636 pwq->stats[PWQ_STAT_COMPLETED]++; 2637 lock_map_release(&lockdep_map); 2638 lock_map_release(&pwq->wq->lockdep_map); 2639 2640 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 2641 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" 2642 " last function: %ps\n", 2643 current->comm, preempt_count(), task_pid_nr(current), 2644 worker->current_func); 2645 debug_show_held_locks(current); 2646 dump_stack(); 2647 } 2648 2649 /* 2650 * The following prevents a kworker from hogging CPU on !PREEMPTION 2651 * kernels, where a requeueing work item waiting for something to 2652 * happen could deadlock with stop_machine as such work item could 2653 * indefinitely requeue itself while all other CPUs are trapped in 2654 * stop_machine. At the same time, report a quiescent RCU state so 2655 * the same condition doesn't freeze RCU. 2656 */ 2657 cond_resched(); 2658 2659 raw_spin_lock_irq(&pool->lock); 2660 2661 /* 2662 * In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked 2663 * CPU intensive by wq_worker_tick() if @work hogged CPU longer than 2664 * wq_cpu_intensive_thresh_us. Clear it. 2665 */ 2666 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 2667 2668 /* tag the worker for identification in schedule() */ 2669 worker->last_func = worker->current_func; 2670 2671 /* we're done with it, release */ 2672 hash_del(&worker->hentry); 2673 worker->current_work = NULL; 2674 worker->current_func = NULL; 2675 worker->current_pwq = NULL; 2676 worker->current_color = INT_MAX; 2677 pwq_dec_nr_in_flight(pwq, work_data); 2678 } 2679 2680 /** 2681 * process_scheduled_works - process scheduled works 2682 * @worker: self 2683 * 2684 * Process all scheduled works. Please note that the scheduled list 2685 * may change while processing a work, so this function repeatedly 2686 * fetches a work from the top and executes it. 2687 * 2688 * CONTEXT: 2689 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2690 * multiple times. 2691 */ 2692 static void process_scheduled_works(struct worker *worker) 2693 { 2694 struct work_struct *work; 2695 bool first = true; 2696 2697 while ((work = list_first_entry_or_null(&worker->scheduled, 2698 struct work_struct, entry))) { 2699 if (first) { 2700 worker->pool->watchdog_ts = jiffies; 2701 first = false; 2702 } 2703 process_one_work(worker, work); 2704 } 2705 } 2706 2707 static void set_pf_worker(bool val) 2708 { 2709 mutex_lock(&wq_pool_attach_mutex); 2710 if (val) 2711 current->flags |= PF_WQ_WORKER; 2712 else 2713 current->flags &= ~PF_WQ_WORKER; 2714 mutex_unlock(&wq_pool_attach_mutex); 2715 } 2716 2717 /** 2718 * worker_thread - the worker thread function 2719 * @__worker: self 2720 * 2721 * The worker thread function. All workers belong to a worker_pool - 2722 * either a per-cpu one or dynamic unbound one. 
These workers process all 2723 * work items regardless of their specific target workqueue. The only 2724 * exception is work items which belong to workqueues with a rescuer which 2725 * will be explained in rescuer_thread(). 2726 * 2727 * Return: 0 2728 */ 2729 static int worker_thread(void *__worker) 2730 { 2731 struct worker *worker = __worker; 2732 struct worker_pool *pool = worker->pool; 2733 2734 /* tell the scheduler that this is a workqueue worker */ 2735 set_pf_worker(true); 2736 woke_up: 2737 raw_spin_lock_irq(&pool->lock); 2738 2739 /* am I supposed to die? */ 2740 if (unlikely(worker->flags & WORKER_DIE)) { 2741 raw_spin_unlock_irq(&pool->lock); 2742 set_pf_worker(false); 2743 2744 set_task_comm(worker->task, "kworker/dying"); 2745 ida_free(&pool->worker_ida, worker->id); 2746 worker_detach_from_pool(worker); 2747 WARN_ON_ONCE(!list_empty(&worker->entry)); 2748 kfree(worker); 2749 return 0; 2750 } 2751 2752 worker_leave_idle(worker); 2753 recheck: 2754 /* no more worker necessary? */ 2755 if (!need_more_worker(pool)) 2756 goto sleep; 2757 2758 /* do we need to manage? */ 2759 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) 2760 goto recheck; 2761 2762 /* 2763 * ->scheduled list can only be filled while a worker is 2764 * preparing to process a work or actually processing it. 2765 * Make sure nobody diddled with it while I was sleeping. 2766 */ 2767 WARN_ON_ONCE(!list_empty(&worker->scheduled)); 2768 2769 /* 2770 * Finish PREP stage. We're guaranteed to have at least one idle 2771 * worker or that someone else has already assumed the manager 2772 * role. This is where @worker starts participating in concurrency 2773 * management if applicable and concurrency management is restored 2774 * after being rebound. See rebind_workers() for details. 2775 */ 2776 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); 2777 2778 do { 2779 struct work_struct *work = 2780 list_first_entry(&pool->worklist, 2781 struct work_struct, entry); 2782 2783 if (assign_work(work, worker, NULL)) 2784 process_scheduled_works(worker); 2785 } while (keep_working(pool)); 2786 2787 worker_set_flags(worker, WORKER_PREP); 2788 sleep: 2789 /* 2790 * pool->lock is held and there's no work to process and no need to 2791 * manage, sleep. Workers are woken up only while holding 2792 * pool->lock or from local cpu, so setting the current state 2793 * before releasing pool->lock is enough to prevent losing any 2794 * event. 2795 */ 2796 worker_enter_idle(worker); 2797 __set_current_state(TASK_IDLE); 2798 raw_spin_unlock_irq(&pool->lock); 2799 schedule(); 2800 goto woke_up; 2801 } 2802 2803 /** 2804 * rescuer_thread - the rescuer thread function 2805 * @__rescuer: self 2806 * 2807 * Workqueue rescuer thread function. There's one rescuer for each 2808 * workqueue which has WQ_MEM_RECLAIM set. 2809 * 2810 * Regular work processing on a pool may block trying to create a new 2811 * worker which uses GFP_KERNEL allocation which has slight chance of 2812 * developing into deadlock if some works currently on the same queue 2813 * need to be processed to satisfy the GFP_KERNEL allocation. This is 2814 * the problem rescuer solves. 2815 * 2816 * When such condition is possible, the pool summons rescuers of all 2817 * workqueues which have works queued on the pool and let them process 2818 * those works so that forward progress can be guaranteed. 2819 * 2820 * This should happen rarely. 
2821 * 2822 * Return: 0 2823 */ 2824 static int rescuer_thread(void *__rescuer) 2825 { 2826 struct worker *rescuer = __rescuer; 2827 struct workqueue_struct *wq = rescuer->rescue_wq; 2828 bool should_stop; 2829 2830 set_user_nice(current, RESCUER_NICE_LEVEL); 2831 2832 /* 2833 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it 2834 * doesn't participate in concurrency management. 2835 */ 2836 set_pf_worker(true); 2837 repeat: 2838 set_current_state(TASK_IDLE); 2839 2840 /* 2841 * By the time the rescuer is requested to stop, the workqueue 2842 * shouldn't have any work pending, but @wq->maydays may still have 2843 * pwq(s) queued. This can happen by non-rescuer workers consuming 2844 * all the work items before the rescuer got to them. Go through 2845 * @wq->maydays processing before acting on should_stop so that the 2846 * list is always empty on exit. 2847 */ 2848 should_stop = kthread_should_stop(); 2849 2850 /* see whether any pwq is asking for help */ 2851 raw_spin_lock_irq(&wq_mayday_lock); 2852 2853 while (!list_empty(&wq->maydays)) { 2854 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, 2855 struct pool_workqueue, mayday_node); 2856 struct worker_pool *pool = pwq->pool; 2857 struct work_struct *work, *n; 2858 2859 __set_current_state(TASK_RUNNING); 2860 list_del_init(&pwq->mayday_node); 2861 2862 raw_spin_unlock_irq(&wq_mayday_lock); 2863 2864 worker_attach_to_pool(rescuer, pool); 2865 2866 raw_spin_lock_irq(&pool->lock); 2867 2868 /* 2869 * Slurp in all works issued via this workqueue and 2870 * process'em. 2871 */ 2872 WARN_ON_ONCE(!list_empty(&rescuer->scheduled)); 2873 list_for_each_entry_safe(work, n, &pool->worklist, entry) { 2874 if (get_work_pwq(work) == pwq && 2875 assign_work(work, rescuer, &n)) 2876 pwq->stats[PWQ_STAT_RESCUED]++; 2877 } 2878 2879 if (!list_empty(&rescuer->scheduled)) { 2880 process_scheduled_works(rescuer); 2881 2882 /* 2883 * The above execution of rescued work items could 2884 * have created more to rescue through 2885 * pwq_activate_first_inactive() or chained 2886 * queueing. Let's put @pwq back on mayday list so 2887 * that such back-to-back work items, which may be 2888 * being used to relieve memory pressure, don't 2889 * incur MAYDAY_INTERVAL delay inbetween. 2890 */ 2891 if (pwq->nr_active && need_to_create_worker(pool)) { 2892 raw_spin_lock(&wq_mayday_lock); 2893 /* 2894 * Queue iff we aren't racing destruction 2895 * and somebody else hasn't queued it already. 2896 */ 2897 if (wq->rescuer && list_empty(&pwq->mayday_node)) { 2898 get_pwq(pwq); 2899 list_add_tail(&pwq->mayday_node, &wq->maydays); 2900 } 2901 raw_spin_unlock(&wq_mayday_lock); 2902 } 2903 } 2904 2905 /* 2906 * Put the reference grabbed by send_mayday(). @pool won't 2907 * go away while we're still attached to it. 2908 */ 2909 put_pwq(pwq); 2910 2911 /* 2912 * Leave this pool. Notify regular workers; otherwise, we end up 2913 * with 0 concurrency and stalling the execution. 
2914 */ 2915 kick_pool(pool); 2916 2917 raw_spin_unlock_irq(&pool->lock); 2918 2919 worker_detach_from_pool(rescuer); 2920 2921 raw_spin_lock_irq(&wq_mayday_lock); 2922 } 2923 2924 raw_spin_unlock_irq(&wq_mayday_lock); 2925 2926 if (should_stop) { 2927 __set_current_state(TASK_RUNNING); 2928 set_pf_worker(false); 2929 return 0; 2930 } 2931 2932 /* rescuers should never participate in concurrency management */ 2933 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); 2934 schedule(); 2935 goto repeat; 2936 } 2937 2938 /** 2939 * check_flush_dependency - check for flush dependency sanity 2940 * @target_wq: workqueue being flushed 2941 * @target_work: work item being flushed (NULL for workqueue flushes) 2942 * 2943 * %current is trying to flush the whole @target_wq or @target_work on it. 2944 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not 2945 * reclaiming memory or running on a workqueue which doesn't have 2946 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to 2947 * a deadlock. 2948 */ 2949 static void check_flush_dependency(struct workqueue_struct *target_wq, 2950 struct work_struct *target_work) 2951 { 2952 work_func_t target_func = target_work ? target_work->func : NULL; 2953 struct worker *worker; 2954 2955 if (target_wq->flags & WQ_MEM_RECLAIM) 2956 return; 2957 2958 worker = current_wq_worker(); 2959 2960 WARN_ONCE(current->flags & PF_MEMALLOC, 2961 "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps", 2962 current->pid, current->comm, target_wq->name, target_func); 2963 WARN_ONCE(worker && ((worker->current_pwq->wq->flags & 2964 (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM), 2965 "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps", 2966 worker->current_pwq->wq->name, worker->current_func, 2967 target_wq->name, target_func); 2968 } 2969 2970 struct wq_barrier { 2971 struct work_struct work; 2972 struct completion done; 2973 struct task_struct *task; /* purely informational */ 2974 }; 2975 2976 static void wq_barrier_func(struct work_struct *work) 2977 { 2978 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); 2979 complete(&barr->done); 2980 } 2981 2982 /** 2983 * insert_wq_barrier - insert a barrier work 2984 * @pwq: pwq to insert barrier into 2985 * @barr: wq_barrier to insert 2986 * @target: target work to attach @barr to 2987 * @worker: worker currently executing @target, NULL if @target is not executing 2988 * 2989 * @barr is linked to @target such that @barr is completed only after 2990 * @target finishes execution. Please note that the ordering 2991 * guarantee is observed only with respect to @target and on the local 2992 * cpu. 2993 * 2994 * Currently, a queued barrier can't be canceled. This is because 2995 * try_to_grab_pending() can't determine whether the work to be 2996 * grabbed is at the head of the queue and thus can't clear LINKED 2997 * flag of the previous work while there must be a valid next work 2998 * after a work with LINKED flag set. 2999 * 3000 * Note that when @worker is non-NULL, @target may be modified 3001 * underneath us, so we can't reliably determine pwq from @target. 3002 * 3003 * CONTEXT: 3004 * raw_spin_lock_irq(pool->lock). 
3005 */ 3006 static void insert_wq_barrier(struct pool_workqueue *pwq, 3007 struct wq_barrier *barr, 3008 struct work_struct *target, struct worker *worker) 3009 { 3010 unsigned int work_flags = 0; 3011 unsigned int work_color; 3012 struct list_head *head; 3013 3014 /* 3015 * debugobject calls are safe here even with pool->lock locked 3016 * as we know for sure that this will not trigger any of the 3017 * checks and call back into the fixup functions where we 3018 * might deadlock. 3019 */ 3020 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); 3021 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); 3022 3023 init_completion_map(&barr->done, &target->lockdep_map); 3024 3025 barr->task = current; 3026 3027 /* The barrier work item does not participate in pwq->nr_active. */ 3028 work_flags |= WORK_STRUCT_INACTIVE; 3029 3030 /* 3031 * If @target is currently being executed, schedule the 3032 * barrier to the worker; otherwise, put it after @target. 3033 */ 3034 if (worker) { 3035 head = worker->scheduled.next; 3036 work_color = worker->current_color; 3037 } else { 3038 unsigned long *bits = work_data_bits(target); 3039 3040 head = target->entry.next; 3041 /* there can already be other linked works, inherit and set */ 3042 work_flags |= *bits & WORK_STRUCT_LINKED; 3043 work_color = get_work_color(*bits); 3044 __set_bit(WORK_STRUCT_LINKED_BIT, bits); 3045 } 3046 3047 pwq->nr_in_flight[work_color]++; 3048 work_flags |= work_color_to_flags(work_color); 3049 3050 insert_work(pwq, &barr->work, head, work_flags); 3051 } 3052 3053 /** 3054 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing 3055 * @wq: workqueue being flushed 3056 * @flush_color: new flush color, < 0 for no-op 3057 * @work_color: new work color, < 0 for no-op 3058 * 3059 * Prepare pwqs for workqueue flushing. 3060 * 3061 * If @flush_color is non-negative, flush_color on all pwqs should be 3062 * -1. If no pwq has in-flight commands at the specified color, all 3063 * pwq->flush_color's stay at -1 and %false is returned. If any pwq 3064 * has in flight commands, its pwq->flush_color is set to 3065 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq 3066 * wakeup logic is armed and %true is returned. 3067 * 3068 * The caller should have initialized @wq->first_flusher prior to 3069 * calling this function with non-negative @flush_color. If 3070 * @flush_color is negative, no flush color update is done and %false 3071 * is returned. 3072 * 3073 * If @work_color is non-negative, all pwqs should have the same 3074 * work_color which is previous to @work_color and all will be 3075 * advanced to @work_color. 3076 * 3077 * CONTEXT: 3078 * mutex_lock(wq->mutex). 3079 * 3080 * Return: 3081 * %true if @flush_color >= 0 and there's something to flush. %false 3082 * otherwise. 
3083 */ 3084 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, 3085 int flush_color, int work_color) 3086 { 3087 bool wait = false; 3088 struct pool_workqueue *pwq; 3089 3090 if (flush_color >= 0) { 3091 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); 3092 atomic_set(&wq->nr_pwqs_to_flush, 1); 3093 } 3094 3095 for_each_pwq(pwq, wq) { 3096 struct worker_pool *pool = pwq->pool; 3097 3098 raw_spin_lock_irq(&pool->lock); 3099 3100 if (flush_color >= 0) { 3101 WARN_ON_ONCE(pwq->flush_color != -1); 3102 3103 if (pwq->nr_in_flight[flush_color]) { 3104 pwq->flush_color = flush_color; 3105 atomic_inc(&wq->nr_pwqs_to_flush); 3106 wait = true; 3107 } 3108 } 3109 3110 if (work_color >= 0) { 3111 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); 3112 pwq->work_color = work_color; 3113 } 3114 3115 raw_spin_unlock_irq(&pool->lock); 3116 } 3117 3118 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) 3119 complete(&wq->first_flusher->done); 3120 3121 return wait; 3122 } 3123 3124 /** 3125 * __flush_workqueue - ensure that any scheduled work has run to completion. 3126 * @wq: workqueue to flush 3127 * 3128 * This function sleeps until all work items which were queued on entry 3129 * have finished execution, but it is not livelocked by new incoming ones. 3130 */ 3131 void __flush_workqueue(struct workqueue_struct *wq) 3132 { 3133 struct wq_flusher this_flusher = { 3134 .list = LIST_HEAD_INIT(this_flusher.list), 3135 .flush_color = -1, 3136 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map), 3137 }; 3138 int next_color; 3139 3140 if (WARN_ON(!wq_online)) 3141 return; 3142 3143 lock_map_acquire(&wq->lockdep_map); 3144 lock_map_release(&wq->lockdep_map); 3145 3146 mutex_lock(&wq->mutex); 3147 3148 /* 3149 * Start-to-wait phase 3150 */ 3151 next_color = work_next_color(wq->work_color); 3152 3153 if (next_color != wq->flush_color) { 3154 /* 3155 * Color space is not full. The current work_color 3156 * becomes our flush_color and work_color is advanced 3157 * by one. 3158 */ 3159 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); 3160 this_flusher.flush_color = wq->work_color; 3161 wq->work_color = next_color; 3162 3163 if (!wq->first_flusher) { 3164 /* no flush in progress, become the first flusher */ 3165 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 3166 3167 wq->first_flusher = &this_flusher; 3168 3169 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, 3170 wq->work_color)) { 3171 /* nothing to flush, done */ 3172 wq->flush_color = next_color; 3173 wq->first_flusher = NULL; 3174 goto out_unlock; 3175 } 3176 } else { 3177 /* wait in queue */ 3178 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); 3179 list_add_tail(&this_flusher.list, &wq->flusher_queue); 3180 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 3181 } 3182 } else { 3183 /* 3184 * Oops, color space is full, wait on overflow queue. 3185 * The next flush completion will assign us 3186 * flush_color and transfer to flusher_queue. 3187 */ 3188 list_add_tail(&this_flusher.list, &wq->flusher_overflow); 3189 } 3190 3191 check_flush_dependency(wq, NULL); 3192 3193 mutex_unlock(&wq->mutex); 3194 3195 wait_for_completion(&this_flusher.done); 3196 3197 /* 3198 * Wake-up-and-cascade phase 3199 * 3200 * First flushers are responsible for cascading flushes and 3201 * handling overflow. Non-first flushers can simply return. 
3202 */ 3203 if (READ_ONCE(wq->first_flusher) != &this_flusher) 3204 return; 3205 3206 mutex_lock(&wq->mutex); 3207 3208 /* we might have raced, check again with mutex held */ 3209 if (wq->first_flusher != &this_flusher) 3210 goto out_unlock; 3211 3212 WRITE_ONCE(wq->first_flusher, NULL); 3213 3214 WARN_ON_ONCE(!list_empty(&this_flusher.list)); 3215 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 3216 3217 while (true) { 3218 struct wq_flusher *next, *tmp; 3219 3220 /* complete all the flushers sharing the current flush color */ 3221 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 3222 if (next->flush_color != wq->flush_color) 3223 break; 3224 list_del_init(&next->list); 3225 complete(&next->done); 3226 } 3227 3228 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && 3229 wq->flush_color != work_next_color(wq->work_color)); 3230 3231 /* this flush_color is finished, advance by one */ 3232 wq->flush_color = work_next_color(wq->flush_color); 3233 3234 /* one color has been freed, handle overflow queue */ 3235 if (!list_empty(&wq->flusher_overflow)) { 3236 /* 3237 * Assign the same color to all overflowed 3238 * flushers, advance work_color and append to 3239 * flusher_queue. This is the start-to-wait 3240 * phase for these overflowed flushers. 3241 */ 3242 list_for_each_entry(tmp, &wq->flusher_overflow, list) 3243 tmp->flush_color = wq->work_color; 3244 3245 wq->work_color = work_next_color(wq->work_color); 3246 3247 list_splice_tail_init(&wq->flusher_overflow, 3248 &wq->flusher_queue); 3249 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 3250 } 3251 3252 if (list_empty(&wq->flusher_queue)) { 3253 WARN_ON_ONCE(wq->flush_color != wq->work_color); 3254 break; 3255 } 3256 3257 /* 3258 * Need to flush more colors. Make the next flusher 3259 * the new first flusher and arm pwqs. 3260 */ 3261 WARN_ON_ONCE(wq->flush_color == wq->work_color); 3262 WARN_ON_ONCE(wq->flush_color != next->flush_color); 3263 3264 list_del_init(&next->list); 3265 wq->first_flusher = next; 3266 3267 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) 3268 break; 3269 3270 /* 3271 * Meh... this color is already done, clear first 3272 * flusher and repeat cascading. 3273 */ 3274 wq->first_flusher = NULL; 3275 } 3276 3277 out_unlock: 3278 mutex_unlock(&wq->mutex); 3279 } 3280 EXPORT_SYMBOL(__flush_workqueue); 3281 3282 /** 3283 * drain_workqueue - drain a workqueue 3284 * @wq: workqueue to drain 3285 * 3286 * Wait until the workqueue becomes empty. While draining is in progress, 3287 * only chain queueing is allowed. IOW, only currently pending or running 3288 * work items on @wq can queue further work items on it. @wq is flushed 3289 * repeatedly until it becomes empty. The number of flushing is determined 3290 * by the depth of chaining and should be relatively short. Whine if it 3291 * takes too long. 3292 */ 3293 void drain_workqueue(struct workqueue_struct *wq) 3294 { 3295 unsigned int flush_cnt = 0; 3296 struct pool_workqueue *pwq; 3297 3298 /* 3299 * __queue_work() needs to test whether there are drainers, is much 3300 * hotter than drain_workqueue() and already looks at @wq->flags. 3301 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers. 
3302 */ 3303 mutex_lock(&wq->mutex); 3304 if (!wq->nr_drainers++) 3305 wq->flags |= __WQ_DRAINING; 3306 mutex_unlock(&wq->mutex); 3307 reflush: 3308 __flush_workqueue(wq); 3309 3310 mutex_lock(&wq->mutex); 3311 3312 for_each_pwq(pwq, wq) { 3313 bool drained; 3314 3315 raw_spin_lock_irq(&pwq->pool->lock); 3316 drained = !pwq->nr_active && list_empty(&pwq->inactive_works); 3317 raw_spin_unlock_irq(&pwq->pool->lock); 3318 3319 if (drained) 3320 continue; 3321 3322 if (++flush_cnt == 10 || 3323 (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 3324 pr_warn("workqueue %s: %s() isn't complete after %u tries\n", 3325 wq->name, __func__, flush_cnt); 3326 3327 mutex_unlock(&wq->mutex); 3328 goto reflush; 3329 } 3330 3331 if (!--wq->nr_drainers) 3332 wq->flags &= ~__WQ_DRAINING; 3333 mutex_unlock(&wq->mutex); 3334 } 3335 EXPORT_SYMBOL_GPL(drain_workqueue); 3336 3337 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, 3338 bool from_cancel) 3339 { 3340 struct worker *worker = NULL; 3341 struct worker_pool *pool; 3342 struct pool_workqueue *pwq; 3343 3344 might_sleep(); 3345 3346 rcu_read_lock(); 3347 pool = get_work_pool(work); 3348 if (!pool) { 3349 rcu_read_unlock(); 3350 return false; 3351 } 3352 3353 raw_spin_lock_irq(&pool->lock); 3354 /* see the comment in try_to_grab_pending() with the same code */ 3355 pwq = get_work_pwq(work); 3356 if (pwq) { 3357 if (unlikely(pwq->pool != pool)) 3358 goto already_gone; 3359 } else { 3360 worker = find_worker_executing_work(pool, work); 3361 if (!worker) 3362 goto already_gone; 3363 pwq = worker->current_pwq; 3364 } 3365 3366 check_flush_dependency(pwq->wq, work); 3367 3368 insert_wq_barrier(pwq, barr, work, worker); 3369 raw_spin_unlock_irq(&pool->lock); 3370 3371 /* 3372 * Force a lock recursion deadlock when using flush_work() inside a 3373 * single-threaded or rescuer equipped workqueue. 3374 * 3375 * For single threaded workqueues the deadlock happens when the work 3376 * is after the work issuing the flush_work(). For rescuer equipped 3377 * workqueues the deadlock happens when the rescuer stalls, blocking 3378 * forward progress. 3379 */ 3380 if (!from_cancel && 3381 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) { 3382 lock_map_acquire(&pwq->wq->lockdep_map); 3383 lock_map_release(&pwq->wq->lockdep_map); 3384 } 3385 rcu_read_unlock(); 3386 return true; 3387 already_gone: 3388 raw_spin_unlock_irq(&pool->lock); 3389 rcu_read_unlock(); 3390 return false; 3391 } 3392 3393 static bool __flush_work(struct work_struct *work, bool from_cancel) 3394 { 3395 struct wq_barrier barr; 3396 3397 if (WARN_ON(!wq_online)) 3398 return false; 3399 3400 if (WARN_ON(!work->func)) 3401 return false; 3402 3403 lock_map_acquire(&work->lockdep_map); 3404 lock_map_release(&work->lockdep_map); 3405 3406 if (start_flush_work(work, &barr, from_cancel)) { 3407 wait_for_completion(&barr.done); 3408 destroy_work_on_stack(&barr.work); 3409 return true; 3410 } else { 3411 return false; 3412 } 3413 } 3414 3415 /** 3416 * flush_work - wait for a work to finish executing the last queueing instance 3417 * @work: the work to flush 3418 * 3419 * Wait until @work has finished execution. @work is guaranteed to be idle 3420 * on return if it hasn't been requeued since flush started. 3421 * 3422 * Return: 3423 * %true if flush_work() waited for the work to finish execution, 3424 * %false if it was already idle. 
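 *
 * Illustrative usage sketch (an editorial addition; my_work is
 * hypothetical):
 *
 *	queue_work(system_wq, &my_work);
 *	...
 *	flush_work(&my_work);
 *
 * After flush_work() returns, the queueing instance above has finished
 * executing; a requeueing that happens afterwards is not waited for.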
3425 */ 3426 bool flush_work(struct work_struct *work) 3427 { 3428 return __flush_work(work, false); 3429 } 3430 EXPORT_SYMBOL_GPL(flush_work); 3431 3432 struct cwt_wait { 3433 wait_queue_entry_t wait; 3434 struct work_struct *work; 3435 }; 3436 3437 static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 3438 { 3439 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); 3440 3441 if (cwait->work != key) 3442 return 0; 3443 return autoremove_wake_function(wait, mode, sync, key); 3444 } 3445 3446 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) 3447 { 3448 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); 3449 unsigned long flags; 3450 int ret; 3451 3452 do { 3453 ret = try_to_grab_pending(work, is_dwork, &flags); 3454 /* 3455 * If someone else is already canceling, wait for it to 3456 * finish. flush_work() doesn't work for PREEMPT_NONE 3457 * because we may get scheduled between @work's completion 3458 * and the other canceling task resuming and clearing 3459 * CANCELING - flush_work() will return false immediately 3460 * as @work is no longer busy, try_to_grab_pending() will 3461 * return -ENOENT as @work is still being canceled and the 3462 * other canceling task won't be able to clear CANCELING as 3463 * we're hogging the CPU. 3464 * 3465 * Let's wait for completion using a waitqueue. As this 3466 * may lead to the thundering herd problem, use a custom 3467 * wake function which matches @work along with exclusive 3468 * wait and wakeup. 3469 */ 3470 if (unlikely(ret == -ENOENT)) { 3471 struct cwt_wait cwait; 3472 3473 init_wait(&cwait.wait); 3474 cwait.wait.func = cwt_wakefn; 3475 cwait.work = work; 3476 3477 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, 3478 TASK_UNINTERRUPTIBLE); 3479 if (work_is_canceling(work)) 3480 schedule(); 3481 finish_wait(&cancel_waitq, &cwait.wait); 3482 } 3483 } while (unlikely(ret < 0)); 3484 3485 /* tell other tasks trying to grab @work to back off */ 3486 mark_work_canceling(work); 3487 local_irq_restore(flags); 3488 3489 /* 3490 * This allows canceling during early boot. We know that @work 3491 * isn't executing. 3492 */ 3493 if (wq_online) 3494 __flush_work(work, true); 3495 3496 clear_work_data(work); 3497 3498 /* 3499 * Paired with prepare_to_wait() above so that either 3500 * waitqueue_active() is visible here or !work_is_canceling() is 3501 * visible there. 3502 */ 3503 smp_mb(); 3504 if (waitqueue_active(&cancel_waitq)) 3505 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); 3506 3507 return ret; 3508 } 3509 3510 /** 3511 * cancel_work_sync - cancel a work and wait for it to finish 3512 * @work: the work to cancel 3513 * 3514 * Cancel @work and wait for its execution to finish. This function 3515 * can be used even if the work re-queues itself or migrates to 3516 * another workqueue. On return from this function, @work is 3517 * guaranteed to be not pending or executing on any CPU. 3518 * 3519 * cancel_work_sync(&delayed_work->work) must not be used for 3520 * delayed_work's. Use cancel_delayed_work_sync() instead. 3521 * 3522 * The caller must ensure that the workqueue on which @work was last 3523 * queued can't be destroyed before this function returns. 3524 * 3525 * Return: 3526 * %true if @work was pending, %false otherwise. 
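 *
 * Illustrative usage sketch (an editorial addition; my_dev and reset_work
 * are hypothetical) for a typical teardown path:
 *
 *	static void my_dev_remove(struct my_dev *dev)
 *	{
 *		cancel_work_sync(&dev->reset_work);
 *	}
 *
 * Once cancel_work_sync() returns, reset_work is neither pending nor
 * running on any CPU, so it is safe to free the memory backing it.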
3527 */ 3528 bool cancel_work_sync(struct work_struct *work) 3529 { 3530 return __cancel_work_timer(work, false); 3531 } 3532 EXPORT_SYMBOL_GPL(cancel_work_sync); 3533 3534 /** 3535 * flush_delayed_work - wait for a dwork to finish executing the last queueing 3536 * @dwork: the delayed work to flush 3537 * 3538 * Delayed timer is cancelled and the pending work is queued for 3539 * immediate execution. Like flush_work(), this function only 3540 * considers the last queueing instance of @dwork. 3541 * 3542 * Return: 3543 * %true if flush_work() waited for the work to finish execution, 3544 * %false if it was already idle. 3545 */ 3546 bool flush_delayed_work(struct delayed_work *dwork) 3547 { 3548 local_irq_disable(); 3549 if (del_timer_sync(&dwork->timer)) 3550 __queue_work(dwork->cpu, dwork->wq, &dwork->work); 3551 local_irq_enable(); 3552 return flush_work(&dwork->work); 3553 } 3554 EXPORT_SYMBOL(flush_delayed_work); 3555 3556 /** 3557 * flush_rcu_work - wait for a rwork to finish executing the last queueing 3558 * @rwork: the rcu work to flush 3559 * 3560 * Return: 3561 * %true if flush_rcu_work() waited for the work to finish execution, 3562 * %false if it was already idle. 3563 */ 3564 bool flush_rcu_work(struct rcu_work *rwork) 3565 { 3566 if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) { 3567 rcu_barrier(); 3568 flush_work(&rwork->work); 3569 return true; 3570 } else { 3571 return flush_work(&rwork->work); 3572 } 3573 } 3574 EXPORT_SYMBOL(flush_rcu_work); 3575 3576 static bool __cancel_work(struct work_struct *work, bool is_dwork) 3577 { 3578 unsigned long flags; 3579 int ret; 3580 3581 do { 3582 ret = try_to_grab_pending(work, is_dwork, &flags); 3583 } while (unlikely(ret == -EAGAIN)); 3584 3585 if (unlikely(ret < 0)) 3586 return false; 3587 3588 set_work_pool_and_clear_pending(work, get_work_pool_id(work)); 3589 local_irq_restore(flags); 3590 return ret; 3591 } 3592 3593 /* 3594 * See cancel_delayed_work() 3595 */ 3596 bool cancel_work(struct work_struct *work) 3597 { 3598 return __cancel_work(work, false); 3599 } 3600 EXPORT_SYMBOL(cancel_work); 3601 3602 /** 3603 * cancel_delayed_work - cancel a delayed work 3604 * @dwork: delayed_work to cancel 3605 * 3606 * Kill off a pending delayed_work. 3607 * 3608 * Return: %true if @dwork was pending and canceled; %false if it wasn't 3609 * pending. 3610 * 3611 * Note: 3612 * The work callback function may still be running on return, unless 3613 * it returns %true and the work doesn't re-arm itself. Explicitly flush or 3614 * use cancel_delayed_work_sync() to wait on it. 3615 * 3616 * This function is safe to call from any context including IRQ handler. 3617 */ 3618 bool cancel_delayed_work(struct delayed_work *dwork) 3619 { 3620 return __cancel_work(&dwork->work, true); 3621 } 3622 EXPORT_SYMBOL(cancel_delayed_work); 3623 3624 /** 3625 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish 3626 * @dwork: the delayed work to cancel 3627 * 3628 * This is cancel_work_sync() for delayed works. 3629 * 3630 * Return: 3631 * %true if @dwork was pending, %false otherwise.
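 *
 * Illustrative sketch ('dev' and its periodic, self-rearming
 * 'heartbeat_dwork' member are hypothetical):
 *
 *	cancel_delayed_work_sync(&dev->heartbeat_dwork);
 *
 * On return the timer is stopped and the work item is neither pending nor
 * running, so the containing object can be torn down.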
3632 */ 3633 bool cancel_delayed_work_sync(struct delayed_work *dwork) 3634 { 3635 return __cancel_work_timer(&dwork->work, true); 3636 } 3637 EXPORT_SYMBOL(cancel_delayed_work_sync); 3638 3639 /** 3640 * schedule_on_each_cpu - execute a function synchronously on each online CPU 3641 * @func: the function to call 3642 * 3643 * schedule_on_each_cpu() executes @func on each online CPU using the 3644 * system workqueue and blocks until all CPUs have completed. 3645 * schedule_on_each_cpu() is very slow. 3646 * 3647 * Return: 3648 * 0 on success, -errno on failure. 3649 */ 3650 int schedule_on_each_cpu(work_func_t func) 3651 { 3652 int cpu; 3653 struct work_struct __percpu *works; 3654 3655 works = alloc_percpu(struct work_struct); 3656 if (!works) 3657 return -ENOMEM; 3658 3659 cpus_read_lock(); 3660 3661 for_each_online_cpu(cpu) { 3662 struct work_struct *work = per_cpu_ptr(works, cpu); 3663 3664 INIT_WORK(work, func); 3665 schedule_work_on(cpu, work); 3666 } 3667 3668 for_each_online_cpu(cpu) 3669 flush_work(per_cpu_ptr(works, cpu)); 3670 3671 cpus_read_unlock(); 3672 free_percpu(works); 3673 return 0; 3674 } 3675 3676 /** 3677 * execute_in_process_context - reliably execute the routine with user context 3678 * @fn: the function to execute 3679 * @ew: guaranteed storage for the execute work structure (must 3680 * be available when the work executes) 3681 * 3682 * Executes the function immediately if process context is available, 3683 * otherwise schedules the function for delayed execution. 3684 * 3685 * Return: 0 - function was executed 3686 * 1 - function was scheduled for execution 3687 */ 3688 int execute_in_process_context(work_func_t fn, struct execute_work *ew) 3689 { 3690 if (!in_interrupt()) { 3691 fn(&ew->work); 3692 return 0; 3693 } 3694 3695 INIT_WORK(&ew->work, fn); 3696 schedule_work(&ew->work); 3697 3698 return 1; 3699 } 3700 EXPORT_SYMBOL_GPL(execute_in_process_context); 3701 3702 /** 3703 * free_workqueue_attrs - free a workqueue_attrs 3704 * @attrs: workqueue_attrs to free 3705 * 3706 * Undo alloc_workqueue_attrs(). 3707 */ 3708 void free_workqueue_attrs(struct workqueue_attrs *attrs) 3709 { 3710 if (attrs) { 3711 free_cpumask_var(attrs->cpumask); 3712 free_cpumask_var(attrs->__pod_cpumask); 3713 kfree(attrs); 3714 } 3715 } 3716 3717 /** 3718 * alloc_workqueue_attrs - allocate a workqueue_attrs 3719 * 3720 * Allocate a new workqueue_attrs, initialize with default settings and 3721 * return it. 3722 * 3723 * Return: The allocated new workqueue_attr on success. %NULL on failure. 3724 */ 3725 struct workqueue_attrs *alloc_workqueue_attrs(void) 3726 { 3727 struct workqueue_attrs *attrs; 3728 3729 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); 3730 if (!attrs) 3731 goto fail; 3732 if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL)) 3733 goto fail; 3734 if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL)) 3735 goto fail; 3736 3737 cpumask_copy(attrs->cpumask, cpu_possible_mask); 3738 attrs->affn_scope = WQ_AFFN_DFL; 3739 return attrs; 3740 fail: 3741 free_workqueue_attrs(attrs); 3742 return NULL; 3743 } 3744 3745 static void copy_workqueue_attrs(struct workqueue_attrs *to, 3746 const struct workqueue_attrs *from) 3747 { 3748 to->nice = from->nice; 3749 cpumask_copy(to->cpumask, from->cpumask); 3750 cpumask_copy(to->__pod_cpumask, from->__pod_cpumask); 3751 to->affn_strict = from->affn_strict; 3752 3753 /* 3754 * Unlike hash and equality test, copying shouldn't ignore wq-only 3755 * fields as copying is used for both pool and wq attrs. 
Instead, 3756 * get_unbound_pool() explicitly clears the fields. 3757 */ 3758 to->affn_scope = from->affn_scope; 3759 to->ordered = from->ordered; 3760 } 3761 3762 /* 3763 * Some attrs fields are workqueue-only. Clear them for worker_pool's. See the 3764 * comments in 'struct workqueue_attrs' definition. 3765 */ 3766 static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs) 3767 { 3768 attrs->affn_scope = WQ_AFFN_NR_TYPES; 3769 attrs->ordered = false; 3770 } 3771 3772 /* hash value of the content of @attr */ 3773 static u32 wqattrs_hash(const struct workqueue_attrs *attrs) 3774 { 3775 u32 hash = 0; 3776 3777 hash = jhash_1word(attrs->nice, hash); 3778 hash = jhash(cpumask_bits(attrs->cpumask), 3779 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 3780 hash = jhash(cpumask_bits(attrs->__pod_cpumask), 3781 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 3782 hash = jhash_1word(attrs->affn_strict, hash); 3783 return hash; 3784 } 3785 3786 /* content equality test */ 3787 static bool wqattrs_equal(const struct workqueue_attrs *a, 3788 const struct workqueue_attrs *b) 3789 { 3790 if (a->nice != b->nice) 3791 return false; 3792 if (!cpumask_equal(a->cpumask, b->cpumask)) 3793 return false; 3794 if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask)) 3795 return false; 3796 if (a->affn_strict != b->affn_strict) 3797 return false; 3798 return true; 3799 } 3800 3801 /* Update @attrs with actually available CPUs */ 3802 static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs, 3803 const cpumask_t *unbound_cpumask) 3804 { 3805 /* 3806 * Calculate the effective CPU mask of @attrs given @unbound_cpumask. If 3807 * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fallback to 3808 * @unbound_cpumask. 3809 */ 3810 cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask); 3811 if (unlikely(cpumask_empty(attrs->cpumask))) 3812 cpumask_copy(attrs->cpumask, unbound_cpumask); 3813 } 3814 3815 /* find wq_pod_type to use for @attrs */ 3816 static const struct wq_pod_type * 3817 wqattrs_pod_type(const struct workqueue_attrs *attrs) 3818 { 3819 enum wq_affn_scope scope; 3820 struct wq_pod_type *pt; 3821 3822 /* to synchronize access to wq_affn_dfl */ 3823 lockdep_assert_held(&wq_pool_mutex); 3824 3825 if (attrs->affn_scope == WQ_AFFN_DFL) 3826 scope = wq_affn_dfl; 3827 else 3828 scope = attrs->affn_scope; 3829 3830 pt = &wq_pod_types[scope]; 3831 3832 if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) && 3833 likely(pt->nr_pods)) 3834 return pt; 3835 3836 /* 3837 * Before workqueue_init_topology(), only SYSTEM is available which is 3838 * initialized in workqueue_init_early(). 3839 */ 3840 pt = &wq_pod_types[WQ_AFFN_SYSTEM]; 3841 BUG_ON(!pt->nr_pods); 3842 return pt; 3843 } 3844 3845 /** 3846 * init_worker_pool - initialize a newly zalloc'd worker_pool 3847 * @pool: worker_pool to initialize 3848 * 3849 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs. 3850 * 3851 * Return: 0 on success, -errno on failure. Even on failure, all fields 3852 * inside @pool proper are initialized and put_unbound_pool() can be called 3853 * on @pool safely to release it. 
3854 */ 3855 static int init_worker_pool(struct worker_pool *pool) 3856 { 3857 raw_spin_lock_init(&pool->lock); 3858 pool->id = -1; 3859 pool->cpu = -1; 3860 pool->node = NUMA_NO_NODE; 3861 pool->flags |= POOL_DISASSOCIATED; 3862 pool->watchdog_ts = jiffies; 3863 INIT_LIST_HEAD(&pool->worklist); 3864 INIT_LIST_HEAD(&pool->idle_list); 3865 hash_init(pool->busy_hash); 3866 3867 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE); 3868 INIT_WORK(&pool->idle_cull_work, idle_cull_fn); 3869 3870 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0); 3871 3872 INIT_LIST_HEAD(&pool->workers); 3873 INIT_LIST_HEAD(&pool->dying_workers); 3874 3875 ida_init(&pool->worker_ida); 3876 INIT_HLIST_NODE(&pool->hash_node); 3877 pool->refcnt = 1; 3878 3879 /* shouldn't fail above this point */ 3880 pool->attrs = alloc_workqueue_attrs(); 3881 if (!pool->attrs) 3882 return -ENOMEM; 3883 3884 wqattrs_clear_for_pool(pool->attrs); 3885 3886 return 0; 3887 } 3888 3889 #ifdef CONFIG_LOCKDEP 3890 static void wq_init_lockdep(struct workqueue_struct *wq) 3891 { 3892 char *lock_name; 3893 3894 lockdep_register_key(&wq->key); 3895 lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name); 3896 if (!lock_name) 3897 lock_name = wq->name; 3898 3899 wq->lock_name = lock_name; 3900 lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0); 3901 } 3902 3903 static void wq_unregister_lockdep(struct workqueue_struct *wq) 3904 { 3905 lockdep_unregister_key(&wq->key); 3906 } 3907 3908 static void wq_free_lockdep(struct workqueue_struct *wq) 3909 { 3910 if (wq->lock_name != wq->name) 3911 kfree(wq->lock_name); 3912 } 3913 #else 3914 static void wq_init_lockdep(struct workqueue_struct *wq) 3915 { 3916 } 3917 3918 static void wq_unregister_lockdep(struct workqueue_struct *wq) 3919 { 3920 } 3921 3922 static void wq_free_lockdep(struct workqueue_struct *wq) 3923 { 3924 } 3925 #endif 3926 3927 static void rcu_free_wq(struct rcu_head *rcu) 3928 { 3929 struct workqueue_struct *wq = 3930 container_of(rcu, struct workqueue_struct, rcu); 3931 3932 wq_free_lockdep(wq); 3933 free_percpu(wq->cpu_pwq); 3934 free_workqueue_attrs(wq->unbound_attrs); 3935 kfree(wq); 3936 } 3937 3938 static void rcu_free_pool(struct rcu_head *rcu) 3939 { 3940 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); 3941 3942 ida_destroy(&pool->worker_ida); 3943 free_workqueue_attrs(pool->attrs); 3944 kfree(pool); 3945 } 3946 3947 /** 3948 * put_unbound_pool - put a worker_pool 3949 * @pool: worker_pool to put 3950 * 3951 * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU 3952 * safe manner. get_unbound_pool() calls this function on its failure path 3953 * and this function should be able to release pools which went through, 3954 * successfully or not, init_worker_pool(). 3955 * 3956 * Should be called with wq_pool_mutex held. 3957 */ 3958 static void put_unbound_pool(struct worker_pool *pool) 3959 { 3960 DECLARE_COMPLETION_ONSTACK(detach_completion); 3961 struct worker *worker; 3962 LIST_HEAD(cull_list); 3963 3964 lockdep_assert_held(&wq_pool_mutex); 3965 3966 if (--pool->refcnt) 3967 return; 3968 3969 /* sanity checks */ 3970 if (WARN_ON(!(pool->cpu < 0)) || 3971 WARN_ON(!list_empty(&pool->worklist))) 3972 return; 3973 3974 /* release id and unhash */ 3975 if (pool->id >= 0) 3976 idr_remove(&worker_pool_idr, pool->id); 3977 hash_del(&pool->hash_node); 3978 3979 /* 3980 * Become the manager and destroy all workers. This prevents 3981 * @pool's workers from blocking on attach_mutex. 
We're the last 3982 * manager and @pool gets freed with the flag set. 3983 * 3984 * Having a concurrent manager is quite unlikely to happen as we can 3985 * only get here with 3986 * pwq->refcnt == pool->refcnt == 0 3987 * which implies no work queued to the pool, which implies no worker can 3988 * become the manager. However a worker could have taken the role of 3989 * manager before the refcnts dropped to 0, since maybe_create_worker() 3990 * drops pool->lock 3991 */ 3992 while (true) { 3993 rcuwait_wait_event(&manager_wait, 3994 !(pool->flags & POOL_MANAGER_ACTIVE), 3995 TASK_UNINTERRUPTIBLE); 3996 3997 mutex_lock(&wq_pool_attach_mutex); 3998 raw_spin_lock_irq(&pool->lock); 3999 if (!(pool->flags & POOL_MANAGER_ACTIVE)) { 4000 pool->flags |= POOL_MANAGER_ACTIVE; 4001 break; 4002 } 4003 raw_spin_unlock_irq(&pool->lock); 4004 mutex_unlock(&wq_pool_attach_mutex); 4005 } 4006 4007 while ((worker = first_idle_worker(pool))) 4008 set_worker_dying(worker, &cull_list); 4009 WARN_ON(pool->nr_workers || pool->nr_idle); 4010 raw_spin_unlock_irq(&pool->lock); 4011 4012 wake_dying_workers(&cull_list); 4013 4014 if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers)) 4015 pool->detach_completion = &detach_completion; 4016 mutex_unlock(&wq_pool_attach_mutex); 4017 4018 if (pool->detach_completion) 4019 wait_for_completion(pool->detach_completion); 4020 4021 /* shut down the timers */ 4022 del_timer_sync(&pool->idle_timer); 4023 cancel_work_sync(&pool->idle_cull_work); 4024 del_timer_sync(&pool->mayday_timer); 4025 4026 /* RCU protected to allow dereferences from get_work_pool() */ 4027 call_rcu(&pool->rcu, rcu_free_pool); 4028 } 4029 4030 /** 4031 * get_unbound_pool - get a worker_pool with the specified attributes 4032 * @attrs: the attributes of the worker_pool to get 4033 * 4034 * Obtain a worker_pool which has the same attributes as @attrs, bump the 4035 * reference count and return it. If there already is a matching 4036 * worker_pool, it will be used; otherwise, this function attempts to 4037 * create a new one. 4038 * 4039 * Should be called with wq_pool_mutex held. 4040 * 4041 * Return: On success, a worker_pool with the same attributes as @attrs. 4042 * On failure, %NULL. 4043 */ 4044 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) 4045 { 4046 struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA]; 4047 u32 hash = wqattrs_hash(attrs); 4048 struct worker_pool *pool; 4049 int pod, node = NUMA_NO_NODE; 4050 4051 lockdep_assert_held(&wq_pool_mutex); 4052 4053 /* do we already have a matching pool? 
*/ 4054 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { 4055 if (wqattrs_equal(pool->attrs, attrs)) { 4056 pool->refcnt++; 4057 return pool; 4058 } 4059 } 4060 4061 /* If __pod_cpumask is contained inside a NUMA pod, that's our node */ 4062 for (pod = 0; pod < pt->nr_pods; pod++) { 4063 if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) { 4064 node = pt->pod_node[pod]; 4065 break; 4066 } 4067 } 4068 4069 /* nope, create a new one */ 4070 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node); 4071 if (!pool || init_worker_pool(pool) < 0) 4072 goto fail; 4073 4074 pool->node = node; 4075 copy_workqueue_attrs(pool->attrs, attrs); 4076 wqattrs_clear_for_pool(pool->attrs); 4077 4078 if (worker_pool_assign_id(pool) < 0) 4079 goto fail; 4080 4081 /* create and start the initial worker */ 4082 if (wq_online && !create_worker(pool)) 4083 goto fail; 4084 4085 /* install */ 4086 hash_add(unbound_pool_hash, &pool->hash_node, hash); 4087 4088 return pool; 4089 fail: 4090 if (pool) 4091 put_unbound_pool(pool); 4092 return NULL; 4093 } 4094 4095 static void rcu_free_pwq(struct rcu_head *rcu) 4096 { 4097 kmem_cache_free(pwq_cache, 4098 container_of(rcu, struct pool_workqueue, rcu)); 4099 } 4100 4101 /* 4102 * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero 4103 * refcnt and needs to be destroyed. 4104 */ 4105 static void pwq_release_workfn(struct kthread_work *work) 4106 { 4107 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, 4108 release_work); 4109 struct workqueue_struct *wq = pwq->wq; 4110 struct worker_pool *pool = pwq->pool; 4111 bool is_last = false; 4112 4113 /* 4114 * When @pwq is not linked, it doesn't hold any reference to the 4115 * @wq, and @wq is invalid to access. 4116 */ 4117 if (!list_empty(&pwq->pwqs_node)) { 4118 mutex_lock(&wq->mutex); 4119 list_del_rcu(&pwq->pwqs_node); 4120 is_last = list_empty(&wq->pwqs); 4121 mutex_unlock(&wq->mutex); 4122 } 4123 4124 if (wq->flags & WQ_UNBOUND) { 4125 mutex_lock(&wq_pool_mutex); 4126 put_unbound_pool(pool); 4127 mutex_unlock(&wq_pool_mutex); 4128 } 4129 4130 call_rcu(&pwq->rcu, rcu_free_pwq); 4131 4132 /* 4133 * If we're the last pwq going away, @wq is already dead and no one 4134 * is gonna access it anymore. Schedule RCU free. 4135 */ 4136 if (is_last) { 4137 wq_unregister_lockdep(wq); 4138 call_rcu(&wq->rcu, rcu_free_wq); 4139 } 4140 } 4141 4142 /** 4143 * pwq_adjust_max_active - update a pwq's max_active to the current setting 4144 * @pwq: target pool_workqueue 4145 * 4146 * If @pwq isn't freezing, set @pwq->max_active to the associated 4147 * workqueue's saved_max_active and activate inactive work items 4148 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero. 4149 */ 4150 static void pwq_adjust_max_active(struct pool_workqueue *pwq) 4151 { 4152 struct workqueue_struct *wq = pwq->wq; 4153 bool freezable = wq->flags & WQ_FREEZABLE; 4154 unsigned long flags; 4155 4156 /* for @wq->saved_max_active */ 4157 lockdep_assert_held(&wq->mutex); 4158 4159 /* fast exit for non-freezable wqs */ 4160 if (!freezable && pwq->max_active == wq->saved_max_active) 4161 return; 4162 4163 /* this function can be called during early boot w/ irq disabled */ 4164 raw_spin_lock_irqsave(&pwq->pool->lock, flags); 4165 4166 /* 4167 * During [un]freezing, the caller is responsible for ensuring that 4168 * this function is called at least once after @workqueue_freezing 4169 * is updated and visible. 
4170 */ 4171 if (!freezable || !workqueue_freezing) { 4172 pwq->max_active = wq->saved_max_active; 4173 4174 while (!list_empty(&pwq->inactive_works) && 4175 pwq->nr_active < pwq->max_active) 4176 pwq_activate_first_inactive(pwq); 4177 4178 kick_pool(pwq->pool); 4179 } else { 4180 pwq->max_active = 0; 4181 } 4182 4183 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); 4184 } 4185 4186 /* initialize newly allocated @pwq which is associated with @wq and @pool */ 4187 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, 4188 struct worker_pool *pool) 4189 { 4190 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); 4191 4192 memset(pwq, 0, sizeof(*pwq)); 4193 4194 pwq->pool = pool; 4195 pwq->wq = wq; 4196 pwq->flush_color = -1; 4197 pwq->refcnt = 1; 4198 INIT_LIST_HEAD(&pwq->inactive_works); 4199 INIT_LIST_HEAD(&pwq->pwqs_node); 4200 INIT_LIST_HEAD(&pwq->mayday_node); 4201 kthread_init_work(&pwq->release_work, pwq_release_workfn); 4202 } 4203 4204 /* sync @pwq with the current state of its associated wq and link it */ 4205 static void link_pwq(struct pool_workqueue *pwq) 4206 { 4207 struct workqueue_struct *wq = pwq->wq; 4208 4209 lockdep_assert_held(&wq->mutex); 4210 4211 /* may be called multiple times, ignore if already linked */ 4212 if (!list_empty(&pwq->pwqs_node)) 4213 return; 4214 4215 /* set the matching work_color */ 4216 pwq->work_color = wq->work_color; 4217 4218 /* sync max_active to the current setting */ 4219 pwq_adjust_max_active(pwq); 4220 4221 /* link in @pwq */ 4222 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); 4223 } 4224 4225 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */ 4226 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, 4227 const struct workqueue_attrs *attrs) 4228 { 4229 struct worker_pool *pool; 4230 struct pool_workqueue *pwq; 4231 4232 lockdep_assert_held(&wq_pool_mutex); 4233 4234 pool = get_unbound_pool(attrs); 4235 if (!pool) 4236 return NULL; 4237 4238 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); 4239 if (!pwq) { 4240 put_unbound_pool(pool); 4241 return NULL; 4242 } 4243 4244 init_pwq(pwq, wq, pool); 4245 return pwq; 4246 } 4247 4248 /** 4249 * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod 4250 * @attrs: the wq_attrs of the default pwq of the target workqueue 4251 * @cpu: the target CPU 4252 * @cpu_going_down: if >= 0, the CPU to consider as offline 4253 * 4254 * Calculate the cpumask a workqueue with @attrs should use on @pod. If 4255 * @cpu_going_down is >= 0, that cpu is considered offline during calculation. 4256 * The result is stored in @attrs->__pod_cpumask. 4257 * 4258 * If pod affinity is not enabled, @attrs->cpumask is always used. If enabled 4259 * and @pod has online CPUs requested by @attrs, the returned cpumask is the 4260 * intersection of the possible CPUs of @pod and @attrs->cpumask. 4261 * 4262 * The caller is responsible for ensuring that the cpumask of @pod stays stable. 4263 */ 4264 static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu, 4265 int cpu_going_down) 4266 { 4267 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 4268 int pod = pt->cpu_pod[cpu]; 4269 4270 /* does @pod have any online CPUs @attrs wants? 
*/ 4271 cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask); 4272 cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask); 4273 if (cpu_going_down >= 0) 4274 cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask); 4275 4276 if (cpumask_empty(attrs->__pod_cpumask)) { 4277 cpumask_copy(attrs->__pod_cpumask, attrs->cpumask); 4278 return; 4279 } 4280 4281 /* yeap, return possible CPUs in @pod that @attrs wants */ 4282 cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]); 4283 4284 if (cpumask_empty(attrs->__pod_cpumask)) 4285 pr_warn_once("WARNING: workqueue cpumask: online intersect > " 4286 "possible intersect\n"); 4287 } 4288 4289 /* install @pwq into @wq's cpu_pwq and return the old pwq */ 4290 static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq, 4291 int cpu, struct pool_workqueue *pwq) 4292 { 4293 struct pool_workqueue *old_pwq; 4294 4295 lockdep_assert_held(&wq_pool_mutex); 4296 lockdep_assert_held(&wq->mutex); 4297 4298 /* link_pwq() can handle duplicate calls */ 4299 link_pwq(pwq); 4300 4301 old_pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu)); 4302 rcu_assign_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu), pwq); 4303 return old_pwq; 4304 } 4305 4306 /* context to store the prepared attrs & pwqs before applying */ 4307 struct apply_wqattrs_ctx { 4308 struct workqueue_struct *wq; /* target workqueue */ 4309 struct workqueue_attrs *attrs; /* attrs to apply */ 4310 struct list_head list; /* queued for batching commit */ 4311 struct pool_workqueue *dfl_pwq; 4312 struct pool_workqueue *pwq_tbl[]; 4313 }; 4314 4315 /* free the resources after success or abort */ 4316 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx) 4317 { 4318 if (ctx) { 4319 int cpu; 4320 4321 for_each_possible_cpu(cpu) 4322 put_pwq_unlocked(ctx->pwq_tbl[cpu]); 4323 put_pwq_unlocked(ctx->dfl_pwq); 4324 4325 free_workqueue_attrs(ctx->attrs); 4326 4327 kfree(ctx); 4328 } 4329 } 4330 4331 /* allocate the attrs and pwqs for later installation */ 4332 static struct apply_wqattrs_ctx * 4333 apply_wqattrs_prepare(struct workqueue_struct *wq, 4334 const struct workqueue_attrs *attrs, 4335 const cpumask_var_t unbound_cpumask) 4336 { 4337 struct apply_wqattrs_ctx *ctx; 4338 struct workqueue_attrs *new_attrs; 4339 int cpu; 4340 4341 lockdep_assert_held(&wq_pool_mutex); 4342 4343 if (WARN_ON(attrs->affn_scope < 0 || 4344 attrs->affn_scope >= WQ_AFFN_NR_TYPES)) 4345 return ERR_PTR(-EINVAL); 4346 4347 ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL); 4348 4349 new_attrs = alloc_workqueue_attrs(); 4350 if (!ctx || !new_attrs) 4351 goto out_free; 4352 4353 /* 4354 * If something goes wrong during CPU up/down, we'll fall back to 4355 * the default pwq covering whole @attrs->cpumask. Always create 4356 * it even if we don't use it immediately. 4357 */ 4358 copy_workqueue_attrs(new_attrs, attrs); 4359 wqattrs_actualize_cpumask(new_attrs, unbound_cpumask); 4360 cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask); 4361 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); 4362 if (!ctx->dfl_pwq) 4363 goto out_free; 4364 4365 for_each_possible_cpu(cpu) { 4366 if (new_attrs->ordered) { 4367 ctx->dfl_pwq->refcnt++; 4368 ctx->pwq_tbl[cpu] = ctx->dfl_pwq; 4369 } else { 4370 wq_calc_pod_cpumask(new_attrs, cpu, -1); 4371 ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs); 4372 if (!ctx->pwq_tbl[cpu]) 4373 goto out_free; 4374 } 4375 } 4376 4377 /* save the user configured attrs and sanitize it. 
*/ 4378 copy_workqueue_attrs(new_attrs, attrs); 4379 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask); 4380 cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask); 4381 ctx->attrs = new_attrs; 4382 4383 ctx->wq = wq; 4384 return ctx; 4385 4386 out_free: 4387 free_workqueue_attrs(new_attrs); 4388 apply_wqattrs_cleanup(ctx); 4389 return ERR_PTR(-ENOMEM); 4390 } 4391 4392 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */ 4393 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx) 4394 { 4395 int cpu; 4396 4397 /* all pwqs have been created successfully, let's install'em */ 4398 mutex_lock(&ctx->wq->mutex); 4399 4400 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); 4401 4402 /* save the previous pwq and install the new one */ 4403 for_each_possible_cpu(cpu) 4404 ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu, 4405 ctx->pwq_tbl[cpu]); 4406 4407 /* @dfl_pwq might not have been used, ensure it's linked */ 4408 link_pwq(ctx->dfl_pwq); 4409 swap(ctx->wq->dfl_pwq, ctx->dfl_pwq); 4410 4411 mutex_unlock(&ctx->wq->mutex); 4412 } 4413 4414 static void apply_wqattrs_lock(void) 4415 { 4416 /* CPUs should stay stable across pwq creations and installations */ 4417 cpus_read_lock(); 4418 mutex_lock(&wq_pool_mutex); 4419 } 4420 4421 static void apply_wqattrs_unlock(void) 4422 { 4423 mutex_unlock(&wq_pool_mutex); 4424 cpus_read_unlock(); 4425 } 4426 4427 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, 4428 const struct workqueue_attrs *attrs) 4429 { 4430 struct apply_wqattrs_ctx *ctx; 4431 4432 /* only unbound workqueues can change attributes */ 4433 if (WARN_ON(!(wq->flags & WQ_UNBOUND))) 4434 return -EINVAL; 4435 4436 /* creating multiple pwqs breaks ordering guarantee */ 4437 if (!list_empty(&wq->pwqs)) { 4438 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 4439 return -EINVAL; 4440 4441 wq->flags &= ~__WQ_ORDERED; 4442 } 4443 4444 ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask); 4445 if (IS_ERR(ctx)) 4446 return PTR_ERR(ctx); 4447 4448 /* the ctx has been prepared successfully, let's commit it */ 4449 apply_wqattrs_commit(ctx); 4450 apply_wqattrs_cleanup(ctx); 4451 4452 return 0; 4453 } 4454 4455 /** 4456 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue 4457 * @wq: the target workqueue 4458 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs() 4459 * 4460 * Apply @attrs to an unbound workqueue @wq. Unless disabled, this function maps 4461 * a separate pwq to each CPU pod with possible CPUs in @attrs->cpumask so that 4462 * work items are affine to the pod they were issued on. Older pwqs are released as 4463 * in-flight work items finish. Note that a work item which repeatedly requeues 4464 * itself back-to-back will stay on its current pwq. 4465 * 4466 * Performs GFP_KERNEL allocations. 4467 * 4468 * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock(). 4469 * 4470 * Return: 0 on success and -errno on failure.
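 *
 * Illustrative sketch (error handling elided; 'my_unbound_wq' is a
 * hypothetical WQ_UNBOUND workqueue and 'desired_mask' a cpumask chosen by
 * the caller):
 *
 *	attrs = alloc_workqueue_attrs();
 *	attrs->nice = -10;
 *	cpumask_copy(attrs->cpumask, desired_mask);
 *	cpus_read_lock();
 *	ret = apply_workqueue_attrs(my_unbound_wq, attrs);
 *	cpus_read_unlock();
 *	free_workqueue_attrs(attrs);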
4471 */ 4472 int apply_workqueue_attrs(struct workqueue_struct *wq, 4473 const struct workqueue_attrs *attrs) 4474 { 4475 int ret; 4476 4477 lockdep_assert_cpus_held(); 4478 4479 mutex_lock(&wq_pool_mutex); 4480 ret = apply_workqueue_attrs_locked(wq, attrs); 4481 mutex_unlock(&wq_pool_mutex); 4482 4483 return ret; 4484 } 4485 4486 /** 4487 * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug 4488 * @wq: the target workqueue 4489 * @cpu: the CPU to update pool association for 4490 * @hotplug_cpu: the CPU coming up or going down 4491 * @online: whether @cpu is coming up or going down 4492 * 4493 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and 4494 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update pod affinity of 4495 * @wq accordingly. 4496 * 4497 * 4498 * If pod affinity can't be adjusted due to memory allocation failure, it falls 4499 * back to @wq->dfl_pwq which may not be optimal but is always correct. 4500 * 4501 * Note that when the last allowed CPU of a pod goes offline for a workqueue 4502 * with a cpumask spanning multiple pods, the workers which were already 4503 * executing the work items for the workqueue will lose their CPU affinity and 4504 * may execute on any CPU. This is similar to how per-cpu workqueues behave on 4505 * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's 4506 * responsibility to flush the work item from CPU_DOWN_PREPARE. 4507 */ 4508 static void wq_update_pod(struct workqueue_struct *wq, int cpu, 4509 int hotplug_cpu, bool online) 4510 { 4511 int off_cpu = online ? -1 : hotplug_cpu; 4512 struct pool_workqueue *old_pwq = NULL, *pwq; 4513 struct workqueue_attrs *target_attrs; 4514 4515 lockdep_assert_held(&wq_pool_mutex); 4516 4517 if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered) 4518 return; 4519 4520 /* 4521 * We don't wanna alloc/free wq_attrs for each wq for each CPU. 4522 * Let's use a preallocated one. The following buf is protected by 4523 * CPU hotplug exclusion. 4524 */ 4525 target_attrs = wq_update_pod_attrs_buf; 4526 4527 copy_workqueue_attrs(target_attrs, wq->unbound_attrs); 4528 wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask); 4529 4530 /* nothing to do if the target cpumask matches the current pwq */ 4531 wq_calc_pod_cpumask(target_attrs, cpu, off_cpu); 4532 pwq = rcu_dereference_protected(*per_cpu_ptr(wq->cpu_pwq, cpu), 4533 lockdep_is_held(&wq_pool_mutex)); 4534 if (wqattrs_equal(target_attrs, pwq->pool->attrs)) 4535 return; 4536 4537 /* create a new pwq */ 4538 pwq = alloc_unbound_pwq(wq, target_attrs); 4539 if (!pwq) { 4540 pr_warn("workqueue: allocation failed while updating CPU pod affinity of \"%s\"\n", 4541 wq->name); 4542 goto use_dfl_pwq; 4543 } 4544 4545 /* Install the new pwq. 
*/ 4546 mutex_lock(&wq->mutex); 4547 old_pwq = install_unbound_pwq(wq, cpu, pwq); 4548 goto out_unlock; 4549 4550 use_dfl_pwq: 4551 mutex_lock(&wq->mutex); 4552 raw_spin_lock_irq(&wq->dfl_pwq->pool->lock); 4553 get_pwq(wq->dfl_pwq); 4554 raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock); 4555 old_pwq = install_unbound_pwq(wq, cpu, wq->dfl_pwq); 4556 out_unlock: 4557 mutex_unlock(&wq->mutex); 4558 put_pwq_unlocked(old_pwq); 4559 } 4560 4561 static int alloc_and_link_pwqs(struct workqueue_struct *wq) 4562 { 4563 bool highpri = wq->flags & WQ_HIGHPRI; 4564 int cpu, ret; 4565 4566 wq->cpu_pwq = alloc_percpu(struct pool_workqueue *); 4567 if (!wq->cpu_pwq) 4568 goto enomem; 4569 4570 if (!(wq->flags & WQ_UNBOUND)) { 4571 for_each_possible_cpu(cpu) { 4572 struct pool_workqueue **pwq_p = 4573 per_cpu_ptr(wq->cpu_pwq, cpu); 4574 struct worker_pool *pool = 4575 &(per_cpu_ptr(cpu_worker_pools, cpu)[highpri]); 4576 4577 *pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, 4578 pool->node); 4579 if (!*pwq_p) 4580 goto enomem; 4581 4582 init_pwq(*pwq_p, wq, pool); 4583 4584 mutex_lock(&wq->mutex); 4585 link_pwq(*pwq_p); 4586 mutex_unlock(&wq->mutex); 4587 } 4588 return 0; 4589 } 4590 4591 cpus_read_lock(); 4592 if (wq->flags & __WQ_ORDERED) { 4593 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); 4594 /* there should only be single pwq for ordering guarantee */ 4595 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node || 4596 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node), 4597 "ordering guarantee broken for workqueue %s\n", wq->name); 4598 } else { 4599 ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); 4600 } 4601 cpus_read_unlock(); 4602 4603 return ret; 4604 4605 enomem: 4606 if (wq->cpu_pwq) { 4607 for_each_possible_cpu(cpu) 4608 kfree(*per_cpu_ptr(wq->cpu_pwq, cpu)); 4609 free_percpu(wq->cpu_pwq); 4610 wq->cpu_pwq = NULL; 4611 } 4612 return -ENOMEM; 4613 } 4614 4615 static int wq_clamp_max_active(int max_active, unsigned int flags, 4616 const char *name) 4617 { 4618 if (max_active < 1 || max_active > WQ_MAX_ACTIVE) 4619 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n", 4620 max_active, name, 1, WQ_MAX_ACTIVE); 4621 4622 return clamp_val(max_active, 1, WQ_MAX_ACTIVE); 4623 } 4624 4625 /* 4626 * Workqueues which may be used during memory reclaim should have a rescuer 4627 * to guarantee forward progress. 4628 */ 4629 static int init_rescuer(struct workqueue_struct *wq) 4630 { 4631 struct worker *rescuer; 4632 int ret; 4633 4634 if (!(wq->flags & WQ_MEM_RECLAIM)) 4635 return 0; 4636 4637 rescuer = alloc_worker(NUMA_NO_NODE); 4638 if (!rescuer) { 4639 pr_err("workqueue: Failed to allocate a rescuer for wq \"%s\"\n", 4640 wq->name); 4641 return -ENOMEM; 4642 } 4643 4644 rescuer->rescue_wq = wq; 4645 rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name); 4646 if (IS_ERR(rescuer->task)) { 4647 ret = PTR_ERR(rescuer->task); 4648 pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe", 4649 wq->name, ERR_PTR(ret)); 4650 kfree(rescuer); 4651 return ret; 4652 } 4653 4654 wq->rescuer = rescuer; 4655 kthread_bind_mask(rescuer->task, cpu_possible_mask); 4656 wake_up_process(rescuer->task); 4657 4658 return 0; 4659 } 4660 4661 __printf(1, 4) 4662 struct workqueue_struct *alloc_workqueue(const char *fmt, 4663 unsigned int flags, 4664 int max_active, ...) 
4665 { 4666 va_list args; 4667 struct workqueue_struct *wq; 4668 struct pool_workqueue *pwq; 4669 4670 /* 4671 * Unbound && max_active == 1 used to imply ordered, which is no longer 4672 * the case on many machines due to per-pod pools. While 4673 * alloc_ordered_workqueue() is the right way to create an ordered 4674 * workqueue, keep the previous behavior to avoid subtle breakages. 4675 */ 4676 if ((flags & WQ_UNBOUND) && max_active == 1) 4677 flags |= __WQ_ORDERED; 4678 4679 /* see the comment above the definition of WQ_POWER_EFFICIENT */ 4680 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) 4681 flags |= WQ_UNBOUND; 4682 4683 /* allocate wq and format name */ 4684 wq = kzalloc(sizeof(*wq), GFP_KERNEL); 4685 if (!wq) 4686 return NULL; 4687 4688 if (flags & WQ_UNBOUND) { 4689 wq->unbound_attrs = alloc_workqueue_attrs(); 4690 if (!wq->unbound_attrs) 4691 goto err_free_wq; 4692 } 4693 4694 va_start(args, max_active); 4695 vsnprintf(wq->name, sizeof(wq->name), fmt, args); 4696 va_end(args); 4697 4698 max_active = max_active ?: WQ_DFL_ACTIVE; 4699 max_active = wq_clamp_max_active(max_active, flags, wq->name); 4700 4701 /* init wq */ 4702 wq->flags = flags; 4703 wq->saved_max_active = max_active; 4704 mutex_init(&wq->mutex); 4705 atomic_set(&wq->nr_pwqs_to_flush, 0); 4706 INIT_LIST_HEAD(&wq->pwqs); 4707 INIT_LIST_HEAD(&wq->flusher_queue); 4708 INIT_LIST_HEAD(&wq->flusher_overflow); 4709 INIT_LIST_HEAD(&wq->maydays); 4710 4711 wq_init_lockdep(wq); 4712 INIT_LIST_HEAD(&wq->list); 4713 4714 if (alloc_and_link_pwqs(wq) < 0) 4715 goto err_unreg_lockdep; 4716 4717 if (wq_online && init_rescuer(wq) < 0) 4718 goto err_destroy; 4719 4720 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) 4721 goto err_destroy; 4722 4723 /* 4724 * wq_pool_mutex protects global freeze state and workqueues list. 4725 * Grab it, adjust max_active and add the new @wq to workqueues 4726 * list. 4727 */ 4728 mutex_lock(&wq_pool_mutex); 4729 4730 mutex_lock(&wq->mutex); 4731 for_each_pwq(pwq, wq) 4732 pwq_adjust_max_active(pwq); 4733 mutex_unlock(&wq->mutex); 4734 4735 list_add_tail_rcu(&wq->list, &workqueues); 4736 4737 mutex_unlock(&wq_pool_mutex); 4738 4739 return wq; 4740 4741 err_unreg_lockdep: 4742 wq_unregister_lockdep(wq); 4743 wq_free_lockdep(wq); 4744 err_free_wq: 4745 free_workqueue_attrs(wq->unbound_attrs); 4746 kfree(wq); 4747 return NULL; 4748 err_destroy: 4749 destroy_workqueue(wq); 4750 return NULL; 4751 } 4752 EXPORT_SYMBOL_GPL(alloc_workqueue); 4753 4754 static bool pwq_busy(struct pool_workqueue *pwq) 4755 { 4756 int i; 4757 4758 for (i = 0; i < WORK_NR_COLORS; i++) 4759 if (pwq->nr_in_flight[i]) 4760 return true; 4761 4762 if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1)) 4763 return true; 4764 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) 4765 return true; 4766 4767 return false; 4768 } 4769 4770 /** 4771 * destroy_workqueue - safely terminate a workqueue 4772 * @wq: target workqueue 4773 * 4774 * Safely destroy a workqueue. All work currently pending will be done first. 4775 */ 4776 void destroy_workqueue(struct workqueue_struct *wq) 4777 { 4778 struct pool_workqueue *pwq; 4779 int cpu; 4780 4781 /* 4782 * Remove it from sysfs first so that sanity check failure doesn't 4783 * lead to sysfs name conflicts. 
4784 */ 4785 workqueue_sysfs_unregister(wq); 4786 4787 /* mark the workqueue destruction is in progress */ 4788 mutex_lock(&wq->mutex); 4789 wq->flags |= __WQ_DESTROYING; 4790 mutex_unlock(&wq->mutex); 4791 4792 /* drain it before proceeding with destruction */ 4793 drain_workqueue(wq); 4794 4795 /* kill rescuer, if sanity checks fail, leave it w/o rescuer */ 4796 if (wq->rescuer) { 4797 struct worker *rescuer = wq->rescuer; 4798 4799 /* this prevents new queueing */ 4800 raw_spin_lock_irq(&wq_mayday_lock); 4801 wq->rescuer = NULL; 4802 raw_spin_unlock_irq(&wq_mayday_lock); 4803 4804 /* rescuer will empty maydays list before exiting */ 4805 kthread_stop(rescuer->task); 4806 kfree(rescuer); 4807 } 4808 4809 /* 4810 * Sanity checks - grab all the locks so that we wait for all 4811 * in-flight operations which may do put_pwq(). 4812 */ 4813 mutex_lock(&wq_pool_mutex); 4814 mutex_lock(&wq->mutex); 4815 for_each_pwq(pwq, wq) { 4816 raw_spin_lock_irq(&pwq->pool->lock); 4817 if (WARN_ON(pwq_busy(pwq))) { 4818 pr_warn("%s: %s has the following busy pwq\n", 4819 __func__, wq->name); 4820 show_pwq(pwq); 4821 raw_spin_unlock_irq(&pwq->pool->lock); 4822 mutex_unlock(&wq->mutex); 4823 mutex_unlock(&wq_pool_mutex); 4824 show_one_workqueue(wq); 4825 return; 4826 } 4827 raw_spin_unlock_irq(&pwq->pool->lock); 4828 } 4829 mutex_unlock(&wq->mutex); 4830 4831 /* 4832 * wq list is used to freeze wq, remove from list after 4833 * flushing is complete in case freeze races us. 4834 */ 4835 list_del_rcu(&wq->list); 4836 mutex_unlock(&wq_pool_mutex); 4837 4838 /* 4839 * We're the sole accessor of @wq. Directly access cpu_pwq and dfl_pwq 4840 * to put the base refs. @wq will be auto-destroyed from the last 4841 * pwq_put. RCU read lock prevents @wq from going away from under us. 4842 */ 4843 rcu_read_lock(); 4844 4845 for_each_possible_cpu(cpu) { 4846 pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu)); 4847 RCU_INIT_POINTER(*per_cpu_ptr(wq->cpu_pwq, cpu), NULL); 4848 put_pwq_unlocked(pwq); 4849 } 4850 4851 put_pwq_unlocked(wq->dfl_pwq); 4852 wq->dfl_pwq = NULL; 4853 4854 rcu_read_unlock(); 4855 } 4856 EXPORT_SYMBOL_GPL(destroy_workqueue); 4857 4858 /** 4859 * workqueue_set_max_active - adjust max_active of a workqueue 4860 * @wq: target workqueue 4861 * @max_active: new max_active value. 4862 * 4863 * Set max_active of @wq to @max_active. 4864 * 4865 * CONTEXT: 4866 * Don't call from IRQ context. 4867 */ 4868 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) 4869 { 4870 struct pool_workqueue *pwq; 4871 4872 /* disallow meddling with max_active for ordered workqueues */ 4873 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 4874 return; 4875 4876 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 4877 4878 mutex_lock(&wq->mutex); 4879 4880 wq->flags &= ~__WQ_ORDERED; 4881 wq->saved_max_active = max_active; 4882 4883 for_each_pwq(pwq, wq) 4884 pwq_adjust_max_active(pwq); 4885 4886 mutex_unlock(&wq->mutex); 4887 } 4888 EXPORT_SYMBOL_GPL(workqueue_set_max_active); 4889 4890 /** 4891 * current_work - retrieve %current task's work struct 4892 * 4893 * Determine if %current task is a workqueue worker and what it's working on. 4894 * Useful to find out the context that the %current task is running in. 4895 * 4896 * Return: work struct if %current task is a workqueue worker, %NULL otherwise. 4897 */ 4898 struct work_struct *current_work(void) 4899 { 4900 struct worker *worker = current_wq_worker(); 4901 4902 return worker ? 
worker->current_work : NULL; 4903 } 4904 EXPORT_SYMBOL(current_work); 4905 4906 /** 4907 * current_is_workqueue_rescuer - is %current workqueue rescuer? 4908 * 4909 * Determine whether %current is a workqueue rescuer. Can be used from 4910 * work functions to determine whether it's being run off the rescuer task. 4911 * 4912 * Return: %true if %current is a workqueue rescuer. %false otherwise. 4913 */ 4914 bool current_is_workqueue_rescuer(void) 4915 { 4916 struct worker *worker = current_wq_worker(); 4917 4918 return worker && worker->rescue_wq; 4919 } 4920 4921 /** 4922 * workqueue_congested - test whether a workqueue is congested 4923 * @cpu: CPU in question 4924 * @wq: target workqueue 4925 * 4926 * Test whether @wq's cpu workqueue for @cpu is congested. There is 4927 * no synchronization around this function and the test result is 4928 * unreliable and only useful as advisory hints or for debugging. 4929 * 4930 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU. 4931 * 4932 * With the exception of ordered workqueues, all workqueues have per-cpu 4933 * pool_workqueues, each with its own congested state. A workqueue being 4934 * congested on one CPU doesn't mean that the workqueue is congested on any 4935 * other CPUs. 4936 * 4937 * Return: 4938 * %true if congested, %false otherwise. 4939 */ 4940 bool workqueue_congested(int cpu, struct workqueue_struct *wq) 4941 { 4942 struct pool_workqueue *pwq; 4943 bool ret; 4944 4945 rcu_read_lock(); 4946 preempt_disable(); 4947 4948 if (cpu == WORK_CPU_UNBOUND) 4949 cpu = smp_processor_id(); 4950 4951 pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); 4952 ret = !list_empty(&pwq->inactive_works); 4953 4954 preempt_enable(); 4955 rcu_read_unlock(); 4956 4957 return ret; 4958 } 4959 EXPORT_SYMBOL_GPL(workqueue_congested); 4960 4961 /** 4962 * work_busy - test whether a work is currently pending or running 4963 * @work: the work to be tested 4964 * 4965 * Test whether @work is currently pending or running. There is no 4966 * synchronization around this function and the test result is 4967 * unreliable and only useful as advisory hints or for debugging. 4968 * 4969 * Return: 4970 * OR'd bitmask of WORK_BUSY_* bits. 4971 */ 4972 unsigned int work_busy(struct work_struct *work) 4973 { 4974 struct worker_pool *pool; 4975 unsigned long flags; 4976 unsigned int ret = 0; 4977 4978 if (work_pending(work)) 4979 ret |= WORK_BUSY_PENDING; 4980 4981 rcu_read_lock(); 4982 pool = get_work_pool(work); 4983 if (pool) { 4984 raw_spin_lock_irqsave(&pool->lock, flags); 4985 if (find_worker_executing_work(pool, work)) 4986 ret |= WORK_BUSY_RUNNING; 4987 raw_spin_unlock_irqrestore(&pool->lock, flags); 4988 } 4989 rcu_read_unlock(); 4990 4991 return ret; 4992 } 4993 EXPORT_SYMBOL_GPL(work_busy); 4994 4995 /** 4996 * set_worker_desc - set description for the current work item 4997 * @fmt: printf-style format string 4998 * @...: arguments for the format string 4999 * 5000 * This function can be called by a running work function to describe what 5001 * the work item is about. If the worker task gets dumped, this 5002 * information will be printed out together to help debugging. The 5003 * description can be at most WORKER_DESC_LEN including the trailing '\0'. 5004 */ 5005 void set_worker_desc(const char *fmt, ...)
5006 { 5007 struct worker *worker = current_wq_worker(); 5008 va_list args; 5009 5010 if (worker) { 5011 va_start(args, fmt); 5012 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); 5013 va_end(args); 5014 } 5015 } 5016 EXPORT_SYMBOL_GPL(set_worker_desc); 5017 5018 /** 5019 * print_worker_info - print out worker information and description 5020 * @log_lvl: the log level to use when printing 5021 * @task: target task 5022 * 5023 * If @task is a worker and currently executing a work item, print out the 5024 * name of the workqueue being serviced and worker description set with 5025 * set_worker_desc() by the currently executing work item. 5026 * 5027 * This function can be safely called on any task as long as the 5028 * task_struct itself is accessible. While safe, this function isn't 5029 * synchronized and may print out mixups or garbages of limited length. 5030 */ 5031 void print_worker_info(const char *log_lvl, struct task_struct *task) 5032 { 5033 work_func_t *fn = NULL; 5034 char name[WQ_NAME_LEN] = { }; 5035 char desc[WORKER_DESC_LEN] = { }; 5036 struct pool_workqueue *pwq = NULL; 5037 struct workqueue_struct *wq = NULL; 5038 struct worker *worker; 5039 5040 if (!(task->flags & PF_WQ_WORKER)) 5041 return; 5042 5043 /* 5044 * This function is called without any synchronization and @task 5045 * could be in any state. Be careful with dereferences. 5046 */ 5047 worker = kthread_probe_data(task); 5048 5049 /* 5050 * Carefully copy the associated workqueue's workfn, name and desc. 5051 * Keep the original last '\0' in case the original is garbage. 5052 */ 5053 copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn)); 5054 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); 5055 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq)); 5056 copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1); 5057 copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1); 5058 5059 if (fn || name[0] || desc[0]) { 5060 printk("%sWorkqueue: %s %ps", log_lvl, name, fn); 5061 if (strcmp(name, desc)) 5062 pr_cont(" (%s)", desc); 5063 pr_cont("\n"); 5064 } 5065 } 5066 5067 static void pr_cont_pool_info(struct worker_pool *pool) 5068 { 5069 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); 5070 if (pool->node != NUMA_NO_NODE) 5071 pr_cont(" node=%d", pool->node); 5072 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); 5073 } 5074 5075 struct pr_cont_work_struct { 5076 bool comma; 5077 work_func_t func; 5078 long ctr; 5079 }; 5080 5081 static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp) 5082 { 5083 if (!pcwsp->ctr) 5084 goto out_record; 5085 if (func == pcwsp->func) { 5086 pcwsp->ctr++; 5087 return; 5088 } 5089 if (pcwsp->ctr == 1) 5090 pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func); 5091 else 5092 pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func); 5093 pcwsp->ctr = 0; 5094 out_record: 5095 if ((long)func == -1L) 5096 return; 5097 pcwsp->comma = comma; 5098 pcwsp->func = func; 5099 pcwsp->ctr = 1; 5100 } 5101 5102 static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp) 5103 { 5104 if (work->func == wq_barrier_func) { 5105 struct wq_barrier *barr; 5106 5107 barr = container_of(work, struct wq_barrier, work); 5108 5109 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp); 5110 pr_cont("%s BAR(%d)", comma ? 
"," : "", 5111 task_pid_nr(barr->task)); 5112 } else { 5113 if (!comma) 5114 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp); 5115 pr_cont_work_flush(comma, work->func, pcwsp); 5116 } 5117 } 5118 5119 static void show_pwq(struct pool_workqueue *pwq) 5120 { 5121 struct pr_cont_work_struct pcws = { .ctr = 0, }; 5122 struct worker_pool *pool = pwq->pool; 5123 struct work_struct *work; 5124 struct worker *worker; 5125 bool has_in_flight = false, has_pending = false; 5126 int bkt; 5127 5128 pr_info(" pwq %d:", pool->id); 5129 pr_cont_pool_info(pool); 5130 5131 pr_cont(" active=%d/%d refcnt=%d%s\n", 5132 pwq->nr_active, pwq->max_active, pwq->refcnt, 5133 !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); 5134 5135 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 5136 if (worker->current_pwq == pwq) { 5137 has_in_flight = true; 5138 break; 5139 } 5140 } 5141 if (has_in_flight) { 5142 bool comma = false; 5143 5144 pr_info(" in-flight:"); 5145 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 5146 if (worker->current_pwq != pwq) 5147 continue; 5148 5149 pr_cont("%s %d%s:%ps", comma ? "," : "", 5150 task_pid_nr(worker->task), 5151 worker->rescue_wq ? "(RESCUER)" : "", 5152 worker->current_func); 5153 list_for_each_entry(work, &worker->scheduled, entry) 5154 pr_cont_work(false, work, &pcws); 5155 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 5156 comma = true; 5157 } 5158 pr_cont("\n"); 5159 } 5160 5161 list_for_each_entry(work, &pool->worklist, entry) { 5162 if (get_work_pwq(work) == pwq) { 5163 has_pending = true; 5164 break; 5165 } 5166 } 5167 if (has_pending) { 5168 bool comma = false; 5169 5170 pr_info(" pending:"); 5171 list_for_each_entry(work, &pool->worklist, entry) { 5172 if (get_work_pwq(work) != pwq) 5173 continue; 5174 5175 pr_cont_work(comma, work, &pcws); 5176 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 5177 } 5178 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 5179 pr_cont("\n"); 5180 } 5181 5182 if (!list_empty(&pwq->inactive_works)) { 5183 bool comma = false; 5184 5185 pr_info(" inactive:"); 5186 list_for_each_entry(work, &pwq->inactive_works, entry) { 5187 pr_cont_work(comma, work, &pcws); 5188 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 5189 } 5190 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 5191 pr_cont("\n"); 5192 } 5193 } 5194 5195 /** 5196 * show_one_workqueue - dump state of specified workqueue 5197 * @wq: workqueue whose state will be printed 5198 */ 5199 void show_one_workqueue(struct workqueue_struct *wq) 5200 { 5201 struct pool_workqueue *pwq; 5202 bool idle = true; 5203 unsigned long flags; 5204 5205 for_each_pwq(pwq, wq) { 5206 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { 5207 idle = false; 5208 break; 5209 } 5210 } 5211 if (idle) /* Nothing to print for idle workqueue */ 5212 return; 5213 5214 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); 5215 5216 for_each_pwq(pwq, wq) { 5217 raw_spin_lock_irqsave(&pwq->pool->lock, flags); 5218 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { 5219 /* 5220 * Defer printing to avoid deadlocks in console 5221 * drivers that queue work while holding locks 5222 * also taken in their write paths. 5223 */ 5224 printk_deferred_enter(); 5225 show_pwq(pwq); 5226 printk_deferred_exit(); 5227 } 5228 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); 5229 /* 5230 * We could be printing a lot from atomic context, e.g. 5231 * sysrq-t -> show_all_workqueues(). Avoid triggering 5232 * hard lockup. 
5233 */ 5234 touch_nmi_watchdog(); 5235 } 5236 5237 } 5238 5239 /** 5240 * show_one_worker_pool - dump state of specified worker pool 5241 * @pool: worker pool whose state will be printed 5242 */ 5243 static void show_one_worker_pool(struct worker_pool *pool) 5244 { 5245 struct worker *worker; 5246 bool first = true; 5247 unsigned long flags; 5248 unsigned long hung = 0; 5249 5250 raw_spin_lock_irqsave(&pool->lock, flags); 5251 if (pool->nr_workers == pool->nr_idle) 5252 goto next_pool; 5253 5254 /* How long the first pending work is waiting for a worker. */ 5255 if (!list_empty(&pool->worklist)) 5256 hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000; 5257 5258 /* 5259 * Defer printing to avoid deadlocks in console drivers that 5260 * queue work while holding locks also taken in their write 5261 * paths. 5262 */ 5263 printk_deferred_enter(); 5264 pr_info("pool %d:", pool->id); 5265 pr_cont_pool_info(pool); 5266 pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers); 5267 if (pool->manager) 5268 pr_cont(" manager: %d", 5269 task_pid_nr(pool->manager->task)); 5270 list_for_each_entry(worker, &pool->idle_list, entry) { 5271 pr_cont(" %s%d", first ? "idle: " : "", 5272 task_pid_nr(worker->task)); 5273 first = false; 5274 } 5275 pr_cont("\n"); 5276 printk_deferred_exit(); 5277 next_pool: 5278 raw_spin_unlock_irqrestore(&pool->lock, flags); 5279 /* 5280 * We could be printing a lot from atomic context, e.g. 5281 * sysrq-t -> show_all_workqueues(). Avoid triggering 5282 * hard lockup. 5283 */ 5284 touch_nmi_watchdog(); 5285 5286 } 5287 5288 /** 5289 * show_all_workqueues - dump workqueue state 5290 * 5291 * Called from a sysrq handler and prints out all busy workqueues and pools. 5292 */ 5293 void show_all_workqueues(void) 5294 { 5295 struct workqueue_struct *wq; 5296 struct worker_pool *pool; 5297 int pi; 5298 5299 rcu_read_lock(); 5300 5301 pr_info("Showing busy workqueues and worker pools:\n"); 5302 5303 list_for_each_entry_rcu(wq, &workqueues, list) 5304 show_one_workqueue(wq); 5305 5306 for_each_pool(pool, pi) 5307 show_one_worker_pool(pool); 5308 5309 rcu_read_unlock(); 5310 } 5311 5312 /** 5313 * show_freezable_workqueues - dump freezable workqueue state 5314 * 5315 * Called from try_to_freeze_tasks() and prints out all freezable workqueues 5316 * still busy. 5317 */ 5318 void show_freezable_workqueues(void) 5319 { 5320 struct workqueue_struct *wq; 5321 5322 rcu_read_lock(); 5323 5324 pr_info("Showing freezable workqueues that are still busy:\n"); 5325 5326 list_for_each_entry_rcu(wq, &workqueues, list) { 5327 if (!(wq->flags & WQ_FREEZABLE)) 5328 continue; 5329 show_one_workqueue(wq); 5330 } 5331 5332 rcu_read_unlock(); 5333 } 5334 5335 /* used to show worker information through /proc/PID/{comm,stat,status} */ 5336 void wq_worker_comm(char *buf, size_t size, struct task_struct *task) 5337 { 5338 int off; 5339 5340 /* always show the actual comm */ 5341 off = strscpy(buf, task->comm, size); 5342 if (off < 0) 5343 return; 5344 5345 /* stabilize PF_WQ_WORKER and worker pool association */ 5346 mutex_lock(&wq_pool_attach_mutex); 5347 5348 if (task->flags & PF_WQ_WORKER) { 5349 struct worker *worker = kthread_data(task); 5350 struct worker_pool *pool = worker->pool; 5351 5352 if (pool) { 5353 raw_spin_lock_irq(&pool->lock); 5354 /* 5355 * ->desc tracks information (wq name or 5356 * set_worker_desc()) for the latest execution. If 5357 * current, prepend '+', otherwise '-'. 
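 *
 * e.g. "kworker/2:1+events" while a work item from the "events"
 * workqueue is executing and "kworker/2:1-events" once it has
 * finished.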
5358 */ 5359 if (worker->desc[0] != '\0') { 5360 if (worker->current_work) 5361 scnprintf(buf + off, size - off, "+%s", 5362 worker->desc); 5363 else 5364 scnprintf(buf + off, size - off, "-%s", 5365 worker->desc); 5366 } 5367 raw_spin_unlock_irq(&pool->lock); 5368 } 5369 } 5370 5371 mutex_unlock(&wq_pool_attach_mutex); 5372 } 5373 5374 #ifdef CONFIG_SMP 5375 5376 /* 5377 * CPU hotplug. 5378 * 5379 * There are two challenges in supporting CPU hotplug. Firstly, there 5380 * are a lot of assumptions on strong associations among work, pwq and 5381 * pool which make migrating pending and scheduled works very 5382 * difficult to implement without impacting hot paths. Secondly, 5383 * worker pools serve mix of short, long and very long running works making 5384 * blocked draining impractical. 5385 * 5386 * This is solved by allowing the pools to be disassociated from the CPU 5387 * running as an unbound one and allowing it to be reattached later if the 5388 * cpu comes back online. 5389 */ 5390 5391 static void unbind_workers(int cpu) 5392 { 5393 struct worker_pool *pool; 5394 struct worker *worker; 5395 5396 for_each_cpu_worker_pool(pool, cpu) { 5397 mutex_lock(&wq_pool_attach_mutex); 5398 raw_spin_lock_irq(&pool->lock); 5399 5400 /* 5401 * We've blocked all attach/detach operations. Make all workers 5402 * unbound and set DISASSOCIATED. Before this, all workers 5403 * must be on the cpu. After this, they may become diasporas. 5404 * And the preemption disabled section in their sched callbacks 5405 * are guaranteed to see WORKER_UNBOUND since the code here 5406 * is on the same cpu. 5407 */ 5408 for_each_pool_worker(worker, pool) 5409 worker->flags |= WORKER_UNBOUND; 5410 5411 pool->flags |= POOL_DISASSOCIATED; 5412 5413 /* 5414 * The handling of nr_running in sched callbacks are disabled 5415 * now. Zap nr_running. After this, nr_running stays zero and 5416 * need_more_worker() and keep_working() are always true as 5417 * long as the worklist is not empty. This pool now behaves as 5418 * an unbound (in terms of concurrency management) pool which 5419 * are served by workers tied to the pool. 5420 */ 5421 pool->nr_running = 0; 5422 5423 /* 5424 * With concurrency management just turned off, a busy 5425 * worker blocking could lead to lengthy stalls. Kick off 5426 * unbound chain execution of currently pending work items. 5427 */ 5428 kick_pool(pool); 5429 5430 raw_spin_unlock_irq(&pool->lock); 5431 5432 for_each_pool_worker(worker, pool) 5433 unbind_worker(worker); 5434 5435 mutex_unlock(&wq_pool_attach_mutex); 5436 } 5437 } 5438 5439 /** 5440 * rebind_workers - rebind all workers of a pool to the associated CPU 5441 * @pool: pool of interest 5442 * 5443 * @pool->cpu is coming online. Rebind all workers to the CPU. 5444 */ 5445 static void rebind_workers(struct worker_pool *pool) 5446 { 5447 struct worker *worker; 5448 5449 lockdep_assert_held(&wq_pool_attach_mutex); 5450 5451 /* 5452 * Restore CPU affinity of all workers. As all idle workers should 5453 * be on the run-queue of the associated CPU before any local 5454 * wake-ups for concurrency management happen, restore CPU affinity 5455 * of all workers first and then clear UNBOUND. As we're called 5456 * from CPU_ONLINE, the following shouldn't fail. 
5457 */
5458 for_each_pool_worker(worker, pool) {
5459 kthread_set_per_cpu(worker->task, pool->cpu);
5460 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
5461 pool_allowed_cpus(pool)) < 0);
5462 }
5463
5464 raw_spin_lock_irq(&pool->lock);
5465
5466 pool->flags &= ~POOL_DISASSOCIATED;
5467
5468 for_each_pool_worker(worker, pool) {
5469 unsigned int worker_flags = worker->flags;
5470
5471 /*
5472 * We want to clear UNBOUND but can't directly call
5473 * worker_clr_flags() or adjust nr_running. Atomically
5474 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
5475 * @worker will clear REBOUND using worker_clr_flags() when
5476 * it initiates the next execution cycle thus restoring
5477 * concurrency management. Note that when or whether
5478 * @worker clears REBOUND doesn't affect correctness.
5479 *
5480 * WRITE_ONCE() is necessary because @worker->flags may be
5481 * tested without holding any lock in
5482 * wq_worker_running(). Without it, the NOT_RUNNING test may
5483 * fail incorrectly, leading to premature concurrency
5484 * management operations.
5485 */
5486 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
5487 worker_flags |= WORKER_REBOUND;
5488 worker_flags &= ~WORKER_UNBOUND;
5489 WRITE_ONCE(worker->flags, worker_flags);
5490 }
5491
5492 raw_spin_unlock_irq(&pool->lock);
5493 }
5494
5495 /**
5496 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
5497 * @pool: unbound pool of interest
5498 * @cpu: the CPU which is coming up
5499 *
5500 * An unbound pool may end up with a cpumask which doesn't have any online
5501 * CPUs. When a worker of such a pool gets scheduled, the scheduler resets
5502 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
5503 * online CPU before, cpus_allowed of all its workers should be restored.
5504 */
5505 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
5506 {
5507 static cpumask_t cpumask;
5508 struct worker *worker;
5509
5510 lockdep_assert_held(&wq_pool_attach_mutex);
5511
5512 /* is @cpu allowed for @pool?
*/ 5513 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) 5514 return; 5515 5516 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); 5517 5518 /* as we're called from CPU_ONLINE, the following shouldn't fail */ 5519 for_each_pool_worker(worker, pool) 5520 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); 5521 } 5522 5523 int workqueue_prepare_cpu(unsigned int cpu) 5524 { 5525 struct worker_pool *pool; 5526 5527 for_each_cpu_worker_pool(pool, cpu) { 5528 if (pool->nr_workers) 5529 continue; 5530 if (!create_worker(pool)) 5531 return -ENOMEM; 5532 } 5533 return 0; 5534 } 5535 5536 int workqueue_online_cpu(unsigned int cpu) 5537 { 5538 struct worker_pool *pool; 5539 struct workqueue_struct *wq; 5540 int pi; 5541 5542 mutex_lock(&wq_pool_mutex); 5543 5544 for_each_pool(pool, pi) { 5545 mutex_lock(&wq_pool_attach_mutex); 5546 5547 if (pool->cpu == cpu) 5548 rebind_workers(pool); 5549 else if (pool->cpu < 0) 5550 restore_unbound_workers_cpumask(pool, cpu); 5551 5552 mutex_unlock(&wq_pool_attach_mutex); 5553 } 5554 5555 /* update pod affinity of unbound workqueues */ 5556 list_for_each_entry(wq, &workqueues, list) { 5557 struct workqueue_attrs *attrs = wq->unbound_attrs; 5558 5559 if (attrs) { 5560 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 5561 int tcpu; 5562 5563 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) 5564 wq_update_pod(wq, tcpu, cpu, true); 5565 } 5566 } 5567 5568 mutex_unlock(&wq_pool_mutex); 5569 return 0; 5570 } 5571 5572 int workqueue_offline_cpu(unsigned int cpu) 5573 { 5574 struct workqueue_struct *wq; 5575 5576 /* unbinding per-cpu workers should happen on the local CPU */ 5577 if (WARN_ON(cpu != smp_processor_id())) 5578 return -1; 5579 5580 unbind_workers(cpu); 5581 5582 /* update pod affinity of unbound workqueues */ 5583 mutex_lock(&wq_pool_mutex); 5584 list_for_each_entry(wq, &workqueues, list) { 5585 struct workqueue_attrs *attrs = wq->unbound_attrs; 5586 5587 if (attrs) { 5588 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 5589 int tcpu; 5590 5591 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) 5592 wq_update_pod(wq, tcpu, cpu, false); 5593 } 5594 } 5595 mutex_unlock(&wq_pool_mutex); 5596 5597 return 0; 5598 } 5599 5600 struct work_for_cpu { 5601 struct work_struct work; 5602 long (*fn)(void *); 5603 void *arg; 5604 long ret; 5605 }; 5606 5607 static void work_for_cpu_fn(struct work_struct *work) 5608 { 5609 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); 5610 5611 wfc->ret = wfc->fn(wfc->arg); 5612 } 5613 5614 /** 5615 * work_on_cpu - run a function in thread context on a particular cpu 5616 * @cpu: the cpu to run on 5617 * @fn: the function to run 5618 * @arg: the function arg 5619 * 5620 * It is up to the caller to ensure that the cpu doesn't go offline. 5621 * The caller must not hold any locks which would prevent @fn from completing. 5622 * 5623 * Return: The value @fn returns. 5624 */ 5625 long work_on_cpu(int cpu, long (*fn)(void *), void *arg) 5626 { 5627 struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 5628 5629 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); 5630 schedule_work_on(cpu, &wfc.work); 5631 flush_work(&wfc.work); 5632 destroy_work_on_stack(&wfc.work); 5633 return wfc.ret; 5634 } 5635 EXPORT_SYMBOL_GPL(work_on_cpu); 5636 5637 /** 5638 * work_on_cpu_safe - run a function in thread context on a particular cpu 5639 * @cpu: the cpu to run on 5640 * @fn: the function to run 5641 * @arg: the function argument 5642 * 5643 * Disables CPU hotplug and calls work_on_cpu(). 
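 A hedged usage sketch
 * (query_cpu_feature() and read_local_feature() are made-up names
 * standing in for whatever must run on the target CPU):
 *
 *	static long query_cpu_feature(void *arg)
 *	{
 *		return read_local_feature();
 *	}
 *
 *	ret = work_on_cpu_safe(cpu, query_cpu_feature, NULL);
 *	if (ret == -ENODEV)
 *		;	// @cpu was offline, @fn was never run
 *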
The caller must not hold 5644 * any locks which would prevent @fn from completing. 5645 * 5646 * Return: The value @fn returns. 5647 */ 5648 long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg) 5649 { 5650 long ret = -ENODEV; 5651 5652 cpus_read_lock(); 5653 if (cpu_online(cpu)) 5654 ret = work_on_cpu(cpu, fn, arg); 5655 cpus_read_unlock(); 5656 return ret; 5657 } 5658 EXPORT_SYMBOL_GPL(work_on_cpu_safe); 5659 #endif /* CONFIG_SMP */ 5660 5661 #ifdef CONFIG_FREEZER 5662 5663 /** 5664 * freeze_workqueues_begin - begin freezing workqueues 5665 * 5666 * Start freezing workqueues. After this function returns, all freezable 5667 * workqueues will queue new works to their inactive_works list instead of 5668 * pool->worklist. 5669 * 5670 * CONTEXT: 5671 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 5672 */ 5673 void freeze_workqueues_begin(void) 5674 { 5675 struct workqueue_struct *wq; 5676 struct pool_workqueue *pwq; 5677 5678 mutex_lock(&wq_pool_mutex); 5679 5680 WARN_ON_ONCE(workqueue_freezing); 5681 workqueue_freezing = true; 5682 5683 list_for_each_entry(wq, &workqueues, list) { 5684 mutex_lock(&wq->mutex); 5685 for_each_pwq(pwq, wq) 5686 pwq_adjust_max_active(pwq); 5687 mutex_unlock(&wq->mutex); 5688 } 5689 5690 mutex_unlock(&wq_pool_mutex); 5691 } 5692 5693 /** 5694 * freeze_workqueues_busy - are freezable workqueues still busy? 5695 * 5696 * Check whether freezing is complete. This function must be called 5697 * between freeze_workqueues_begin() and thaw_workqueues(). 5698 * 5699 * CONTEXT: 5700 * Grabs and releases wq_pool_mutex. 5701 * 5702 * Return: 5703 * %true if some freezable workqueues are still busy. %false if freezing 5704 * is complete. 5705 */ 5706 bool freeze_workqueues_busy(void) 5707 { 5708 bool busy = false; 5709 struct workqueue_struct *wq; 5710 struct pool_workqueue *pwq; 5711 5712 mutex_lock(&wq_pool_mutex); 5713 5714 WARN_ON_ONCE(!workqueue_freezing); 5715 5716 list_for_each_entry(wq, &workqueues, list) { 5717 if (!(wq->flags & WQ_FREEZABLE)) 5718 continue; 5719 /* 5720 * nr_active is monotonically decreasing. It's safe 5721 * to peek without lock. 5722 */ 5723 rcu_read_lock(); 5724 for_each_pwq(pwq, wq) { 5725 WARN_ON_ONCE(pwq->nr_active < 0); 5726 if (pwq->nr_active) { 5727 busy = true; 5728 rcu_read_unlock(); 5729 goto out_unlock; 5730 } 5731 } 5732 rcu_read_unlock(); 5733 } 5734 out_unlock: 5735 mutex_unlock(&wq_pool_mutex); 5736 return busy; 5737 } 5738 5739 /** 5740 * thaw_workqueues - thaw workqueues 5741 * 5742 * Thaw workqueues. Normal queueing is restored and all collected 5743 * frozen works are transferred to their respective pool worklists. 5744 * 5745 * CONTEXT: 5746 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 
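 *
 * Together with freeze_workqueues_begin() and freeze_workqueues_busy(),
 * this is driven by the PM freezer. A rough, hedged sketch of that
 * sequence (the real code in kernel/power/process.c adds timeouts and
 * error handling):
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);
 *	...				create/restore the system image
 *	thaw_workqueues();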
5747 */
5748 void thaw_workqueues(void)
5749 {
5750 struct workqueue_struct *wq;
5751 struct pool_workqueue *pwq;
5752
5753 mutex_lock(&wq_pool_mutex);
5754
5755 if (!workqueue_freezing)
5756 goto out_unlock;
5757
5758 workqueue_freezing = false;
5759
5760 /* restore max_active and repopulate worklist */
5761 list_for_each_entry(wq, &workqueues, list) {
5762 mutex_lock(&wq->mutex);
5763 for_each_pwq(pwq, wq)
5764 pwq_adjust_max_active(pwq);
5765 mutex_unlock(&wq->mutex);
5766 }
5767
5768 out_unlock:
5769 mutex_unlock(&wq_pool_mutex);
5770 }
5771 #endif /* CONFIG_FREEZER */
5772
5773 static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
5774 {
5775 LIST_HEAD(ctxs);
5776 int ret = 0;
5777 struct workqueue_struct *wq;
5778 struct apply_wqattrs_ctx *ctx, *n;
5779
5780 lockdep_assert_held(&wq_pool_mutex);
5781
5782 list_for_each_entry(wq, &workqueues, list) {
5783 if (!(wq->flags & WQ_UNBOUND))
5784 continue;
5785 /* creating multiple pwqs breaks ordering guarantee */
5786 if (wq->flags & __WQ_ORDERED)
5787 continue;
5788
5789 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
5790 if (IS_ERR(ctx)) {
5791 ret = PTR_ERR(ctx);
5792 break;
5793 }
5794
5795 list_add_tail(&ctx->list, &ctxs);
5796 }
5797
5798 list_for_each_entry_safe(ctx, n, &ctxs, list) {
5799 if (!ret)
5800 apply_wqattrs_commit(ctx);
5801 apply_wqattrs_cleanup(ctx);
5802 }
5803
5804 if (!ret) {
5805 mutex_lock(&wq_pool_attach_mutex);
5806 cpumask_copy(wq_unbound_cpumask, unbound_cpumask);
5807 mutex_unlock(&wq_pool_attach_mutex);
5808 }
5809 return ret;
5810 }
5811
5812 /**
5813 * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
5814 * @cpumask: the cpumask to set
5815 *
5816 * The low-level workqueues cpumask is a global cpumask that limits
5817 * the affinity of all unbound workqueues. This function checks @cpumask,
5818 * applies it to all unbound workqueues and updates all their pwqs.
5819 *
5820 * Return: 0 - Success
5821 * -EINVAL - Invalid @cpumask
5822 * -ENOMEM - Failed to allocate memory for attrs or pwqs.
5823 */
5824 int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
5825 {
5826 int ret = -EINVAL;
5827
5828 /*
5829 * Not excluding isolated cpus on purpose.
5830 * If the user wishes to include them, we allow that.
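 *
 * This path is normally exercised through the sysfs knob registered by
 * wq_sysfs_init(); e.g. (illustrative, assuming the usual sysfs layout):
 *
 *	# echo 0-3 > /sys/devices/virtual/workqueue/cpumask
 *
 * restricts every unbound workqueue to CPUs 0-3.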
5831 */
5832 cpumask_and(cpumask, cpumask, cpu_possible_mask);
5833 if (!cpumask_empty(cpumask)) {
5834 apply_wqattrs_lock();
5835 if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
5836 ret = 0;
5837 goto out_unlock;
5838 }
5839
5840 ret = workqueue_apply_unbound_cpumask(cpumask);
5841
5842 out_unlock:
5843 apply_wqattrs_unlock();
5844 }
5845
5846 return ret;
5847 }
5848
5849 static int parse_affn_scope(const char *val)
5850 {
5851 int i;
5852
5853 for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) {
5854 if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i])))
5855 return i;
5856 }
5857 return -EINVAL;
5858 }
5859
5860 static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp)
5861 {
5862 struct workqueue_struct *wq;
5863 int affn, cpu;
5864
5865 affn = parse_affn_scope(val);
5866 if (affn < 0)
5867 return affn;
5868 if (affn == WQ_AFFN_DFL)
5869 return -EINVAL;
5870
5871 cpus_read_lock();
5872 mutex_lock(&wq_pool_mutex);
5873
5874 wq_affn_dfl = affn;
5875
5876 list_for_each_entry(wq, &workqueues, list) {
5877 for_each_online_cpu(cpu) {
5878 wq_update_pod(wq, cpu, cpu, true);
5879 }
5880 }
5881
5882 mutex_unlock(&wq_pool_mutex);
5883 cpus_read_unlock();
5884
5885 return 0;
5886 }
5887
5888 static int wq_affn_dfl_get(char *buffer, const struct kernel_param *kp)
5889 {
5890 return scnprintf(buffer, PAGE_SIZE, "%s\n", wq_affn_names[wq_affn_dfl]);
5891 }
5892
5893 static const struct kernel_param_ops wq_affn_dfl_ops = {
5894 .set = wq_affn_dfl_set,
5895 .get = wq_affn_dfl_get,
5896 };
5897
5898 module_param_cb(default_affinity_scope, &wq_affn_dfl_ops, NULL, 0644);
5899
5900 #ifdef CONFIG_SYSFS
5901 /*
5902 * Workqueues with the WQ_SYSFS flag set are visible to userland via
5903 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the
5904 * following attributes.
5905 *
5906 * per_cpu RO bool : whether the workqueue is per-cpu or unbound
5907 * max_active RW int : maximum number of in-flight work items
5908 *
5909 * Unbound workqueues have the following extra attributes.
5910 * 5911 * nice RW int : nice value of the workers 5912 * cpumask RW mask : bitmask of allowed CPUs for the workers 5913 * affinity_scope RW str : worker CPU affinity scope (cache, numa, none) 5914 * affinity_strict RW bool : worker CPU affinity is strict 5915 */ 5916 struct wq_device { 5917 struct workqueue_struct *wq; 5918 struct device dev; 5919 }; 5920 5921 static struct workqueue_struct *dev_to_wq(struct device *dev) 5922 { 5923 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 5924 5925 return wq_dev->wq; 5926 } 5927 5928 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr, 5929 char *buf) 5930 { 5931 struct workqueue_struct *wq = dev_to_wq(dev); 5932 5933 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); 5934 } 5935 static DEVICE_ATTR_RO(per_cpu); 5936 5937 static ssize_t max_active_show(struct device *dev, 5938 struct device_attribute *attr, char *buf) 5939 { 5940 struct workqueue_struct *wq = dev_to_wq(dev); 5941 5942 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); 5943 } 5944 5945 static ssize_t max_active_store(struct device *dev, 5946 struct device_attribute *attr, const char *buf, 5947 size_t count) 5948 { 5949 struct workqueue_struct *wq = dev_to_wq(dev); 5950 int val; 5951 5952 if (sscanf(buf, "%d", &val) != 1 || val <= 0) 5953 return -EINVAL; 5954 5955 workqueue_set_max_active(wq, val); 5956 return count; 5957 } 5958 static DEVICE_ATTR_RW(max_active); 5959 5960 static struct attribute *wq_sysfs_attrs[] = { 5961 &dev_attr_per_cpu.attr, 5962 &dev_attr_max_active.attr, 5963 NULL, 5964 }; 5965 ATTRIBUTE_GROUPS(wq_sysfs); 5966 5967 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr, 5968 char *buf) 5969 { 5970 struct workqueue_struct *wq = dev_to_wq(dev); 5971 int written; 5972 5973 mutex_lock(&wq->mutex); 5974 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); 5975 mutex_unlock(&wq->mutex); 5976 5977 return written; 5978 } 5979 5980 /* prepare workqueue_attrs for sysfs store operations */ 5981 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) 5982 { 5983 struct workqueue_attrs *attrs; 5984 5985 lockdep_assert_held(&wq_pool_mutex); 5986 5987 attrs = alloc_workqueue_attrs(); 5988 if (!attrs) 5989 return NULL; 5990 5991 copy_workqueue_attrs(attrs, wq->unbound_attrs); 5992 return attrs; 5993 } 5994 5995 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr, 5996 const char *buf, size_t count) 5997 { 5998 struct workqueue_struct *wq = dev_to_wq(dev); 5999 struct workqueue_attrs *attrs; 6000 int ret = -ENOMEM; 6001 6002 apply_wqattrs_lock(); 6003 6004 attrs = wq_sysfs_prep_attrs(wq); 6005 if (!attrs) 6006 goto out_unlock; 6007 6008 if (sscanf(buf, "%d", &attrs->nice) == 1 && 6009 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE) 6010 ret = apply_workqueue_attrs_locked(wq, attrs); 6011 else 6012 ret = -EINVAL; 6013 6014 out_unlock: 6015 apply_wqattrs_unlock(); 6016 free_workqueue_attrs(attrs); 6017 return ret ?: count; 6018 } 6019 6020 static ssize_t wq_cpumask_show(struct device *dev, 6021 struct device_attribute *attr, char *buf) 6022 { 6023 struct workqueue_struct *wq = dev_to_wq(dev); 6024 int written; 6025 6026 mutex_lock(&wq->mutex); 6027 written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 6028 cpumask_pr_args(wq->unbound_attrs->cpumask)); 6029 mutex_unlock(&wq->mutex); 6030 return written; 6031 } 6032 6033 static ssize_t wq_cpumask_store(struct device *dev, 6034 struct device_attribute *attr, 
6035 const char *buf, size_t count) 6036 { 6037 struct workqueue_struct *wq = dev_to_wq(dev); 6038 struct workqueue_attrs *attrs; 6039 int ret = -ENOMEM; 6040 6041 apply_wqattrs_lock(); 6042 6043 attrs = wq_sysfs_prep_attrs(wq); 6044 if (!attrs) 6045 goto out_unlock; 6046 6047 ret = cpumask_parse(buf, attrs->cpumask); 6048 if (!ret) 6049 ret = apply_workqueue_attrs_locked(wq, attrs); 6050 6051 out_unlock: 6052 apply_wqattrs_unlock(); 6053 free_workqueue_attrs(attrs); 6054 return ret ?: count; 6055 } 6056 6057 static ssize_t wq_affn_scope_show(struct device *dev, 6058 struct device_attribute *attr, char *buf) 6059 { 6060 struct workqueue_struct *wq = dev_to_wq(dev); 6061 int written; 6062 6063 mutex_lock(&wq->mutex); 6064 if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL) 6065 written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n", 6066 wq_affn_names[WQ_AFFN_DFL], 6067 wq_affn_names[wq_affn_dfl]); 6068 else 6069 written = scnprintf(buf, PAGE_SIZE, "%s\n", 6070 wq_affn_names[wq->unbound_attrs->affn_scope]); 6071 mutex_unlock(&wq->mutex); 6072 6073 return written; 6074 } 6075 6076 static ssize_t wq_affn_scope_store(struct device *dev, 6077 struct device_attribute *attr, 6078 const char *buf, size_t count) 6079 { 6080 struct workqueue_struct *wq = dev_to_wq(dev); 6081 struct workqueue_attrs *attrs; 6082 int affn, ret = -ENOMEM; 6083 6084 affn = parse_affn_scope(buf); 6085 if (affn < 0) 6086 return affn; 6087 6088 apply_wqattrs_lock(); 6089 attrs = wq_sysfs_prep_attrs(wq); 6090 if (attrs) { 6091 attrs->affn_scope = affn; 6092 ret = apply_workqueue_attrs_locked(wq, attrs); 6093 } 6094 apply_wqattrs_unlock(); 6095 free_workqueue_attrs(attrs); 6096 return ret ?: count; 6097 } 6098 6099 static ssize_t wq_affinity_strict_show(struct device *dev, 6100 struct device_attribute *attr, char *buf) 6101 { 6102 struct workqueue_struct *wq = dev_to_wq(dev); 6103 6104 return scnprintf(buf, PAGE_SIZE, "%d\n", 6105 wq->unbound_attrs->affn_strict); 6106 } 6107 6108 static ssize_t wq_affinity_strict_store(struct device *dev, 6109 struct device_attribute *attr, 6110 const char *buf, size_t count) 6111 { 6112 struct workqueue_struct *wq = dev_to_wq(dev); 6113 struct workqueue_attrs *attrs; 6114 int v, ret = -ENOMEM; 6115 6116 if (sscanf(buf, "%d", &v) != 1) 6117 return -EINVAL; 6118 6119 apply_wqattrs_lock(); 6120 attrs = wq_sysfs_prep_attrs(wq); 6121 if (attrs) { 6122 attrs->affn_strict = (bool)v; 6123 ret = apply_workqueue_attrs_locked(wq, attrs); 6124 } 6125 apply_wqattrs_unlock(); 6126 free_workqueue_attrs(attrs); 6127 return ret ?: count; 6128 } 6129 6130 static struct device_attribute wq_sysfs_unbound_attrs[] = { 6131 __ATTR(nice, 0644, wq_nice_show, wq_nice_store), 6132 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store), 6133 __ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store), 6134 __ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store), 6135 __ATTR_NULL, 6136 }; 6137 6138 static struct bus_type wq_subsys = { 6139 .name = "workqueue", 6140 .dev_groups = wq_sysfs_groups, 6141 }; 6142 6143 static ssize_t wq_unbound_cpumask_show(struct device *dev, 6144 struct device_attribute *attr, char *buf) 6145 { 6146 int written; 6147 6148 mutex_lock(&wq_pool_mutex); 6149 written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 6150 cpumask_pr_args(wq_unbound_cpumask)); 6151 mutex_unlock(&wq_pool_mutex); 6152 6153 return written; 6154 } 6155 6156 static ssize_t wq_unbound_cpumask_store(struct device *dev, 6157 struct device_attribute *attr, const char *buf, size_t count) 6158 { 6159 
cpumask_var_t cpumask; 6160 int ret; 6161 6162 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL)) 6163 return -ENOMEM; 6164 6165 ret = cpumask_parse(buf, cpumask); 6166 if (!ret) 6167 ret = workqueue_set_unbound_cpumask(cpumask); 6168 6169 free_cpumask_var(cpumask); 6170 return ret ? ret : count; 6171 } 6172 6173 static struct device_attribute wq_sysfs_cpumask_attr = 6174 __ATTR(cpumask, 0644, wq_unbound_cpumask_show, 6175 wq_unbound_cpumask_store); 6176 6177 static int __init wq_sysfs_init(void) 6178 { 6179 struct device *dev_root; 6180 int err; 6181 6182 err = subsys_virtual_register(&wq_subsys, NULL); 6183 if (err) 6184 return err; 6185 6186 dev_root = bus_get_dev_root(&wq_subsys); 6187 if (dev_root) { 6188 err = device_create_file(dev_root, &wq_sysfs_cpumask_attr); 6189 put_device(dev_root); 6190 } 6191 return err; 6192 } 6193 core_initcall(wq_sysfs_init); 6194 6195 static void wq_device_release(struct device *dev) 6196 { 6197 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 6198 6199 kfree(wq_dev); 6200 } 6201 6202 /** 6203 * workqueue_sysfs_register - make a workqueue visible in sysfs 6204 * @wq: the workqueue to register 6205 * 6206 * Expose @wq in sysfs under /sys/bus/workqueue/devices. 6207 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set 6208 * which is the preferred method. 6209 * 6210 * Workqueue user should use this function directly iff it wants to apply 6211 * workqueue_attrs before making the workqueue visible in sysfs; otherwise, 6212 * apply_workqueue_attrs() may race against userland updating the 6213 * attributes. 6214 * 6215 * Return: 0 on success, -errno on failure. 6216 */ 6217 int workqueue_sysfs_register(struct workqueue_struct *wq) 6218 { 6219 struct wq_device *wq_dev; 6220 int ret; 6221 6222 /* 6223 * Adjusting max_active or creating new pwqs by applying 6224 * attributes breaks ordering guarantee. Disallow exposing ordered 6225 * workqueues. 6226 */ 6227 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 6228 return -EINVAL; 6229 6230 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); 6231 if (!wq_dev) 6232 return -ENOMEM; 6233 6234 wq_dev->wq = wq; 6235 wq_dev->dev.bus = &wq_subsys; 6236 wq_dev->dev.release = wq_device_release; 6237 dev_set_name(&wq_dev->dev, "%s", wq->name); 6238 6239 /* 6240 * unbound_attrs are created separately. Suppress uevent until 6241 * everything is ready. 6242 */ 6243 dev_set_uevent_suppress(&wq_dev->dev, true); 6244 6245 ret = device_register(&wq_dev->dev); 6246 if (ret) { 6247 put_device(&wq_dev->dev); 6248 wq->wq_dev = NULL; 6249 return ret; 6250 } 6251 6252 if (wq->flags & WQ_UNBOUND) { 6253 struct device_attribute *attr; 6254 6255 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) { 6256 ret = device_create_file(&wq_dev->dev, attr); 6257 if (ret) { 6258 device_unregister(&wq_dev->dev); 6259 wq->wq_dev = NULL; 6260 return ret; 6261 } 6262 } 6263 } 6264 6265 dev_set_uevent_suppress(&wq_dev->dev, false); 6266 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); 6267 return 0; 6268 } 6269 6270 /** 6271 * workqueue_sysfs_unregister - undo workqueue_sysfs_register() 6272 * @wq: the workqueue to unregister 6273 * 6274 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister. 
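 *
 * Most users call neither function directly. A hedged sketch of the usual
 * lifecycle ("mydrv_wq" is a made-up name):
 *
 *	wq = alloc_workqueue("mydrv_wq", WQ_UNBOUND | WQ_SYSFS, 0);
 *	// exposes /sys/bus/workqueue/devices/mydrv_wq/{nice,cpumask,...}
 *	...
 *	destroy_workqueue(wq);	// removes the sysfs device again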
6275 */ 6276 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) 6277 { 6278 struct wq_device *wq_dev = wq->wq_dev; 6279 6280 if (!wq->wq_dev) 6281 return; 6282 6283 wq->wq_dev = NULL; 6284 device_unregister(&wq_dev->dev); 6285 } 6286 #else /* CONFIG_SYSFS */ 6287 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { } 6288 #endif /* CONFIG_SYSFS */ 6289 6290 /* 6291 * Workqueue watchdog. 6292 * 6293 * Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal 6294 * flush dependency, a concurrency managed work item which stays RUNNING 6295 * indefinitely. Workqueue stalls can be very difficult to debug as the 6296 * usual warning mechanisms don't trigger and internal workqueue state is 6297 * largely opaque. 6298 * 6299 * Workqueue watchdog monitors all worker pools periodically and dumps 6300 * state if some pools failed to make forward progress for a while where 6301 * forward progress is defined as the first item on ->worklist changing. 6302 * 6303 * This mechanism is controlled through the kernel parameter 6304 * "workqueue.watchdog_thresh" which can be updated at runtime through the 6305 * corresponding sysfs parameter file. 6306 */ 6307 #ifdef CONFIG_WQ_WATCHDOG 6308 6309 static unsigned long wq_watchdog_thresh = 30; 6310 static struct timer_list wq_watchdog_timer; 6311 6312 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES; 6313 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES; 6314 6315 /* 6316 * Show workers that might prevent the processing of pending work items. 6317 * The only candidates are CPU-bound workers in the running state. 6318 * Pending work items should be handled by another idle worker 6319 * in all other situations. 6320 */ 6321 static void show_cpu_pool_hog(struct worker_pool *pool) 6322 { 6323 struct worker *worker; 6324 unsigned long flags; 6325 int bkt; 6326 6327 raw_spin_lock_irqsave(&pool->lock, flags); 6328 6329 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 6330 if (task_is_running(worker->task)) { 6331 /* 6332 * Defer printing to avoid deadlocks in console 6333 * drivers that queue work while holding locks 6334 * also taken in their write paths. 
6335 */ 6336 printk_deferred_enter(); 6337 6338 pr_info("pool %d:\n", pool->id); 6339 sched_show_task(worker->task); 6340 6341 printk_deferred_exit(); 6342 } 6343 } 6344 6345 raw_spin_unlock_irqrestore(&pool->lock, flags); 6346 } 6347 6348 static void show_cpu_pools_hogs(void) 6349 { 6350 struct worker_pool *pool; 6351 int pi; 6352 6353 pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n"); 6354 6355 rcu_read_lock(); 6356 6357 for_each_pool(pool, pi) { 6358 if (pool->cpu_stall) 6359 show_cpu_pool_hog(pool); 6360 6361 } 6362 6363 rcu_read_unlock(); 6364 } 6365 6366 static void wq_watchdog_reset_touched(void) 6367 { 6368 int cpu; 6369 6370 wq_watchdog_touched = jiffies; 6371 for_each_possible_cpu(cpu) 6372 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 6373 } 6374 6375 static void wq_watchdog_timer_fn(struct timer_list *unused) 6376 { 6377 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ; 6378 bool lockup_detected = false; 6379 bool cpu_pool_stall = false; 6380 unsigned long now = jiffies; 6381 struct worker_pool *pool; 6382 int pi; 6383 6384 if (!thresh) 6385 return; 6386 6387 rcu_read_lock(); 6388 6389 for_each_pool(pool, pi) { 6390 unsigned long pool_ts, touched, ts; 6391 6392 pool->cpu_stall = false; 6393 if (list_empty(&pool->worklist)) 6394 continue; 6395 6396 /* 6397 * If a virtual machine is stopped by the host it can look to 6398 * the watchdog like a stall. 6399 */ 6400 kvm_check_and_clear_guest_paused(); 6401 6402 /* get the latest of pool and touched timestamps */ 6403 if (pool->cpu >= 0) 6404 touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu)); 6405 else 6406 touched = READ_ONCE(wq_watchdog_touched); 6407 pool_ts = READ_ONCE(pool->watchdog_ts); 6408 6409 if (time_after(pool_ts, touched)) 6410 ts = pool_ts; 6411 else 6412 ts = touched; 6413 6414 /* did we stall? 
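 I.e., has more than wq_watchdog_thresh seconds passed since the later
 of the pool's last recorded forward progress (pool_ts) and the last
 relevant watchdog touch (touched)?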
*/ 6415 if (time_after(now, ts + thresh)) { 6416 lockup_detected = true; 6417 if (pool->cpu >= 0) { 6418 pool->cpu_stall = true; 6419 cpu_pool_stall = true; 6420 } 6421 pr_emerg("BUG: workqueue lockup - pool"); 6422 pr_cont_pool_info(pool); 6423 pr_cont(" stuck for %us!\n", 6424 jiffies_to_msecs(now - pool_ts) / 1000); 6425 } 6426 6427 6428 } 6429 6430 rcu_read_unlock(); 6431 6432 if (lockup_detected) 6433 show_all_workqueues(); 6434 6435 if (cpu_pool_stall) 6436 show_cpu_pools_hogs(); 6437 6438 wq_watchdog_reset_touched(); 6439 mod_timer(&wq_watchdog_timer, jiffies + thresh); 6440 } 6441 6442 notrace void wq_watchdog_touch(int cpu) 6443 { 6444 if (cpu >= 0) 6445 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 6446 6447 wq_watchdog_touched = jiffies; 6448 } 6449 6450 static void wq_watchdog_set_thresh(unsigned long thresh) 6451 { 6452 wq_watchdog_thresh = 0; 6453 del_timer_sync(&wq_watchdog_timer); 6454 6455 if (thresh) { 6456 wq_watchdog_thresh = thresh; 6457 wq_watchdog_reset_touched(); 6458 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ); 6459 } 6460 } 6461 6462 static int wq_watchdog_param_set_thresh(const char *val, 6463 const struct kernel_param *kp) 6464 { 6465 unsigned long thresh; 6466 int ret; 6467 6468 ret = kstrtoul(val, 0, &thresh); 6469 if (ret) 6470 return ret; 6471 6472 if (system_wq) 6473 wq_watchdog_set_thresh(thresh); 6474 else 6475 wq_watchdog_thresh = thresh; 6476 6477 return 0; 6478 } 6479 6480 static const struct kernel_param_ops wq_watchdog_thresh_ops = { 6481 .set = wq_watchdog_param_set_thresh, 6482 .get = param_get_ulong, 6483 }; 6484 6485 module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh, 6486 0644); 6487 6488 static void wq_watchdog_init(void) 6489 { 6490 timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE); 6491 wq_watchdog_set_thresh(wq_watchdog_thresh); 6492 } 6493 6494 #else /* CONFIG_WQ_WATCHDOG */ 6495 6496 static inline void wq_watchdog_init(void) { } 6497 6498 #endif /* CONFIG_WQ_WATCHDOG */ 6499 6500 /** 6501 * workqueue_init_early - early init for workqueue subsystem 6502 * 6503 * This is the first step of three-staged workqueue subsystem initialization and 6504 * invoked as soon as the bare basics - memory allocation, cpumasks and idr are 6505 * up. It sets up all the data structures and system workqueues and allows early 6506 * boot code to create workqueues and queue/cancel work items. Actual work item 6507 * execution starts only after kthreads can be created and scheduled right 6508 * before early initcalls. 
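 *
 * A rough sketch of the overall bring-up, in call order (the exact call
 * sites live in init/main.c and may differ between kernel versions):
 *
 *	workqueue_init_early();		// system workqueues usable, nothing executes yet
 *	...
 *	workqueue_init();		// kworkers created, execution begins
 *	...
 *	workqueue_init_topology();	// unbound affinity scopes finalized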
6509 */ 6510 void __init workqueue_init_early(void) 6511 { 6512 struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM]; 6513 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; 6514 int i, cpu; 6515 6516 BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 6517 6518 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL)); 6519 cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_WQ)); 6520 cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN)); 6521 6522 if (!cpumask_empty(&wq_cmdline_cpumask)) 6523 cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, &wq_cmdline_cpumask); 6524 6525 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 6526 6527 wq_update_pod_attrs_buf = alloc_workqueue_attrs(); 6528 BUG_ON(!wq_update_pod_attrs_buf); 6529 6530 /* initialize WQ_AFFN_SYSTEM pods */ 6531 pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL); 6532 pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL); 6533 pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL); 6534 BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod); 6535 6536 BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE)); 6537 6538 wq_update_pod_attrs_buf = alloc_workqueue_attrs(); 6539 BUG_ON(!wq_update_pod_attrs_buf); 6540 6541 pt->nr_pods = 1; 6542 cpumask_copy(pt->pod_cpus[0], cpu_possible_mask); 6543 pt->pod_node[0] = NUMA_NO_NODE; 6544 pt->cpu_pod[0] = 0; 6545 6546 /* initialize CPU pools */ 6547 for_each_possible_cpu(cpu) { 6548 struct worker_pool *pool; 6549 6550 i = 0; 6551 for_each_cpu_worker_pool(pool, cpu) { 6552 BUG_ON(init_worker_pool(pool)); 6553 pool->cpu = cpu; 6554 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); 6555 cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu)); 6556 pool->attrs->nice = std_nice[i++]; 6557 pool->attrs->affn_strict = true; 6558 pool->node = cpu_to_node(cpu); 6559 6560 /* alloc pool ID */ 6561 mutex_lock(&wq_pool_mutex); 6562 BUG_ON(worker_pool_assign_id(pool)); 6563 mutex_unlock(&wq_pool_mutex); 6564 } 6565 } 6566 6567 /* create default unbound and ordered wq attrs */ 6568 for (i = 0; i < NR_STD_WORKER_POOLS; i++) { 6569 struct workqueue_attrs *attrs; 6570 6571 BUG_ON(!(attrs = alloc_workqueue_attrs())); 6572 attrs->nice = std_nice[i]; 6573 unbound_std_wq_attrs[i] = attrs; 6574 6575 /* 6576 * An ordered wq should have only one pwq as ordering is 6577 * guaranteed by max_active which is enforced by pwqs. 
6578 */ 6579 BUG_ON(!(attrs = alloc_workqueue_attrs())); 6580 attrs->nice = std_nice[i]; 6581 attrs->ordered = true; 6582 ordered_wq_attrs[i] = attrs; 6583 } 6584 6585 system_wq = alloc_workqueue("events", 0, 0); 6586 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0); 6587 system_long_wq = alloc_workqueue("events_long", 0, 0); 6588 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 6589 WQ_MAX_ACTIVE); 6590 system_freezable_wq = alloc_workqueue("events_freezable", 6591 WQ_FREEZABLE, 0); 6592 system_power_efficient_wq = alloc_workqueue("events_power_efficient", 6593 WQ_POWER_EFFICIENT, 0); 6594 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient", 6595 WQ_FREEZABLE | WQ_POWER_EFFICIENT, 6596 0); 6597 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || 6598 !system_unbound_wq || !system_freezable_wq || 6599 !system_power_efficient_wq || 6600 !system_freezable_power_efficient_wq); 6601 } 6602 6603 static void __init wq_cpu_intensive_thresh_init(void) 6604 { 6605 unsigned long thresh; 6606 unsigned long bogo; 6607 6608 /* if the user set it to a specific value, keep it */ 6609 if (wq_cpu_intensive_thresh_us != ULONG_MAX) 6610 return; 6611 6612 pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release"); 6613 BUG_ON(IS_ERR(pwq_release_worker)); 6614 6615 /* 6616 * The default of 10ms is derived from the fact that most modern (as of 6617 * 2023) processors can do a lot in 10ms and that it's just below what 6618 * most consider human-perceivable. However, the kernel also runs on a 6619 * lot slower CPUs including microcontrollers where the threshold is way 6620 * too low. 6621 * 6622 * Let's scale up the threshold upto 1 second if BogoMips is below 4000. 6623 * This is by no means accurate but it doesn't have to be. The mechanism 6624 * is still useful even when the threshold is fully scaled up. Also, as 6625 * the reports would usually be applicable to everyone, some machines 6626 * operating on longer thresholds won't significantly diminish their 6627 * usefulness. 6628 */ 6629 thresh = 10 * USEC_PER_MSEC; 6630 6631 /* see init/calibrate.c for lpj -> BogoMIPS calculation */ 6632 bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1); 6633 if (bogo < 4000) 6634 thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC); 6635 6636 pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n", 6637 loops_per_jiffy, bogo, thresh); 6638 6639 wq_cpu_intensive_thresh_us = thresh; 6640 } 6641 6642 /** 6643 * workqueue_init - bring workqueue subsystem fully online 6644 * 6645 * This is the second step of three-staged workqueue subsystem initialization 6646 * and invoked as soon as kthreads can be created and scheduled. Workqueues have 6647 * been created and work items queued on them, but there are no kworkers 6648 * executing the work items yet. Populate the worker pools with the initial 6649 * workers and enable future kworker creations. 6650 */ 6651 void __init workqueue_init(void) 6652 { 6653 struct workqueue_struct *wq; 6654 struct worker_pool *pool; 6655 int cpu, bkt; 6656 6657 wq_cpu_intensive_thresh_init(); 6658 6659 mutex_lock(&wq_pool_mutex); 6660 6661 /* 6662 * Per-cpu pools created earlier could be missing node hint. Fix them 6663 * up. Also, create a rescuer for workqueues that requested it. 
6664 */
6665 for_each_possible_cpu(cpu) {
6666 for_each_cpu_worker_pool(pool, cpu) {
6667 pool->node = cpu_to_node(cpu);
6668 }
6669 }
6670
6671 list_for_each_entry(wq, &workqueues, list) {
6672 WARN(init_rescuer(wq),
6673 "workqueue: failed to create early rescuer for %s",
6674 wq->name);
6675 }
6676
6677 mutex_unlock(&wq_pool_mutex);
6678
6679 /* create the initial workers */
6680 for_each_online_cpu(cpu) {
6681 for_each_cpu_worker_pool(pool, cpu) {
6682 pool->flags &= ~POOL_DISASSOCIATED;
6683 BUG_ON(!create_worker(pool));
6684 }
6685 }
6686
6687 hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
6688 BUG_ON(!create_worker(pool));
6689
6690 wq_online = true;
6691 wq_watchdog_init();
6692 }
6693
6694 /*
6695 * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to
6696 * @cpus_share_pod(). Each subset of CPUs that share a pod is assigned a unique
6697 * and consecutive pod ID. The rest of @pt is initialized accordingly.
6698 */
6699 static void __init init_pod_type(struct wq_pod_type *pt,
6700 bool (*cpus_share_pod)(int, int))
6701 {
6702 int cur, pre, cpu, pod;
6703
6704 pt->nr_pods = 0;
6705
6706 /* init @pt->cpu_pod[] according to @cpus_share_pod() */
6707 pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
6708 BUG_ON(!pt->cpu_pod);
6709
6710 for_each_possible_cpu(cur) {
6711 for_each_possible_cpu(pre) {
6712 if (pre >= cur) {
6713 pt->cpu_pod[cur] = pt->nr_pods++;
6714 break;
6715 }
6716 if (cpus_share_pod(cur, pre)) {
6717 pt->cpu_pod[cur] = pt->cpu_pod[pre];
6718 break;
6719 }
6720 }
6721 }
6722
6723 /* init the rest to match @pt->cpu_pod[] */
6724 pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
6725 pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL);
6726 BUG_ON(!pt->pod_cpus || !pt->pod_node);
6727
6728 for (pod = 0; pod < pt->nr_pods; pod++)
6729 BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL));
6730
6731 for_each_possible_cpu(cpu) {
6732 cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]);
6733 pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu);
6734 }
6735 }
6736
6737 static bool __init cpus_dont_share(int cpu0, int cpu1)
6738 {
6739 return false;
6740 }
6741
6742 static bool __init cpus_share_smt(int cpu0, int cpu1)
6743 {
6744 #ifdef CONFIG_SCHED_SMT
6745 return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1));
6746 #else
6747 return false;
6748 #endif
6749 }
6750
6751 static bool __init cpus_share_numa(int cpu0, int cpu1)
6752 {
6753 return cpu_to_node(cpu0) == cpu_to_node(cpu1);
6754 }
6755
6756 /**
6757 * workqueue_init_topology - initialize CPU pods for unbound workqueues
6758 *
6759 * This is the third step of three-staged workqueue subsystem initialization and
6760 * invoked after SMP and topology information are fully initialized. It
6761 * initializes the unbound CPU pods accordingly.
6762 */
6763 void __init workqueue_init_topology(void)
6764 {
6765 struct workqueue_struct *wq;
6766 int cpu;
6767
6768 init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share);
6769 init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);
6770 init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
6771 init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
6772
6773 mutex_lock(&wq_pool_mutex);
6774
6775 /*
6776 * Workqueues allocated earlier would have all CPUs sharing the default
6777 * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU
6778 * combinations to apply per-pod sharing.
6779 */ 6780 list_for_each_entry(wq, &workqueues, list) { 6781 for_each_online_cpu(cpu) { 6782 wq_update_pod(wq, cpu, cpu, true); 6783 } 6784 } 6785 6786 mutex_unlock(&wq_pool_mutex); 6787 } 6788 6789 void __warn_flushing_systemwide_wq(void) 6790 { 6791 pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n"); 6792 dump_stack(); 6793 } 6794 EXPORT_SYMBOL(__warn_flushing_systemwide_wq); 6795 6796 static int __init workqueue_unbound_cpus_setup(char *str) 6797 { 6798 if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) { 6799 cpumask_clear(&wq_cmdline_cpumask); 6800 pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n"); 6801 } 6802 6803 return 1; 6804 } 6805 __setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup); 6806
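
/*
 * For example, booting with "workqueue.unbound_cpus=0-3" on the kernel
 * command line limits all unbound workqueues to CPUs 0-3 from early boot;
 * workqueue_init_early() combines this mask with the housekeeping
 * cpumasks when computing the initial wq_unbound_cpumask.
 */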